changeset 6060:b3fe59626fdc

Merge
author anoll
date Wed, 26 Feb 2014 02:38:46 -0800
parents 524b54a7f1b5 4e7ee57b57bf
children fae50ee0308d
files agent/src/share/classes/sun/jvm/hotspot/memory/FreeList.java src/share/vm/code/nmethod.cpp src/share/vm/utilities/dtrace_usdt2_disabled.hpp
diffstat 176 files changed, 5042 insertions(+), 6571 deletions(-)
--- a/.hgtags	Wed Feb 26 11:29:47 2014 +0100
+++ b/.hgtags	Wed Feb 26 02:38:46 2014 -0800
@@ -404,3 +404,4 @@
 fca262db9c4309f99d2f5542ab0780e45c2f1578 jdk8-b120
 ce2d7e46f3c7e41241f3b407705a4071323a11ab jdk9-b00
 050a626a88951140df874f7b163e304d07b6c296 jdk9-b01
+b188446de75bda5fc52d102cddf242c3ef5ecbdf jdk9-b02
--- a/THIRD_PARTY_README	Wed Feb 26 11:29:47 2014 +0100
+++ b/THIRD_PARTY_README	Wed Feb 26 02:38:46 2014 -0800
@@ -2,11 +2,12 @@
 -----------------------------
 
 %% This notice is provided with respect to ASM Bytecode Manipulation 
-Framework v3.1, which is included with JRE 7, JDK 7, and OpenJDK 7.
+Framework v5.0, which may be included with JRE 8, JDK 8, and
+OpenJDK 8.
 
 --- begin of LICENSE ---
 
-Copyright (c) 2000-2005 INRIA, France Telecom
+Copyright (c) 2000-2011 France Télécom
 All rights reserved.
 
 Redistribution and use in source and binary forms, with or without
@@ -40,8 +41,41 @@
 
 --------------------------------------------------------------------------------
 
-%% This notice is provided with respect to CodeViewer 1.0, which is included 
-with JDK 7.
+%% This notice is provided with respect to BSDiff v4.3, which may be 
+included with JRE 8, JDK 8, and OpenJDK 8.
+
+--- begin of LICENSE ---
+
+Copyright 2003-2005 Colin Percival
+All rights reserved
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted providing that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to CodeViewer 1.0, which may be
+included with JDK 8.
 
 --- begin of LICENSE ---
 
@@ -81,8 +115,8 @@
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to Cryptix AES 3.2.0, which is
-included with JRE 7, JDK 7, and OpenJDK 7.
+%% This notice is provided with respect to Cryptix AES 3.2.0, which may be
+included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -121,7 +155,7 @@
 -------------------------------------------------------------------------------
 
 %% This notice is provided with respect to CUP Parser Generator for 
-Java 0.10k, which is included with JRE 7, JDK 7, and OpenJDK 7.
+Java 0.10k, which may be included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -148,7 +182,7 @@
 -------------------------------------------------------------------------------
 
 %% This notice is provided with respect to Document Object Model (DOM) Level 2
-& 3, which is included with JRE 7, JDK 7, and OpenJDK 7.
+& 3, which may be included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -212,19 +246,52 @@
 
 -------------------------------------------------------------------------------
 
+%% This notice is provided with respect to Dynalink v0.5, which may be 
+included with JRE 8, JDK 8, and OpenJDK 8.
+
+--- begin of LICENSE ---
+
+Copyright (c) 2009-2013, Attila Szegedi
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice,
+  this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+* Neither the name of Attila Szegedi nor the names of its contributors may
+  be used to endorse or promote products derived from this software without
+  specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
 %% This notice is provided with respect to Elliptic Curve Cryptography, which 
-is included with JRE 7, JDK 7, and OpenJDK 7.
+may be included with JRE 8, JDK 8, and OpenJDK 8.
 
 You are receiving a copy of the Elliptic Curve Cryptography library in source
-form with the JDK 7 source distribution and object code in the JRE 7 & JDK 7
-runtime.
-
-The terms of the Oracle license do NOT apply to the Elliptic Curve
-Cryptography library program; it is licensed under the following license,
-separately from the Oracle programs you receive. If you do not wish to install
-this program, you may delete the library named libsunec.so (on Solaris and
-Linux systems) or sunec.dll (on Windows systems) from the JRE bin directory
-reserved for native libraries.
+form with the JDK 8 and OpenJDK 8 source distributions, and as object code in
+the JRE 8 & JDK 8 runtimes.
+
+In the case of the JRE 8 & JDK 8 runtimes, the terms of the Oracle license do
+NOT apply to the Elliptic Curve Cryptography library; it is licensed under the
+following license, separately from Oracle's JDK & JRE.  If you do not wish to
+install the Elliptic Curve Cryptography library, you may delete the library
+named libsunec.so (on Solaris and Linux systems) or sunec.dll (on Windows
+systems) from the JRE bin directory reserved for native libraries.
+
 
 --- begin of LICENSE ---
 
@@ -735,13 +802,138 @@
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to FontConfig 2.5, which is 
-included with JRE 7, JDK 7, and OpenJDK 7 source distributions on
+%% This notice is provided with respect to ECMAScript Language
+Specification ECMA-262 Edition 5.1, which may be included with
+JRE 8, JDK 8, and OpenJDK 8.
+
+--- begin of LICENSE ---
+
+Copyright notice
+Copyright © 2011 Ecma International
+Ecma International
+Rue du Rhone 114
+CH-1204 Geneva
+Tel: +41 22 849 6000
+Fax: +41 22 849 6001
+Web: http://www.ecma-international.org
+
+This document and possible translations of it may be copied and furnished to
+others, and derivative works that comment on or otherwise explain it or assist
+in its implementation may be prepared, copied, published, and distributed, in
+whole or in part, without restriction of any kind, provided that the above
+copyright notice and this section are included on all such copies and derivative
+works. However, this document itself may not be modified in any way, including
+by removing the copyright notice or references to Ecma International, except as
+needed for the purpose of developing any document or deliverable produced by
+Ecma International (in which case the rules applied to copyrights must be
+followed) or as required to translate it into languages other than English. The
+limited permissions granted above are perpetual and will not be revoked by Ecma
+International or its successors or assigns. This document and the information
+contained herein is provided on an "AS IS" basis and ECMA INTERNATIONAL
+DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY
+WARRANTY THAT THE USE OF THE INFORMATION HEREIN WILL NOT INFRINGE ANY OWNERSHIP
+RIGHTS OR ANY IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR
+PURPOSE." Software License
+
+All Software contained in this document ("Software)" is protected by copyright
+and is being made available under the "BSD License", included below. This
+Software may be subject to third party rights (rights from parties other than
+Ecma International), including patent rights, and no licenses under such third
+party rights are granted under this license even if the third party concerned is
+a member of Ecma International. SEE THE ECMA CODE OF CONDUCT IN PATENT MATTERS
+AVAILABLE AT http://www.ecma-international.org/memento/codeofconduct.htm FOR
+INFORMATION REGARDING THE LICENSING OF PATENT CLAIMS THAT ARE REQUIRED TO
+IMPLEMENT ECMA INTERNATIONAL STANDARDS*. Redistribution and use in source and
+binary forms, with or without modification, are permitted provided that the
+following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation and/or
+other materials provided with the distribution.
+
+3. Neither the name of the authors nor Ecma International may be used to endorse
+or promote products derived from this software without specific prior written
+permission.
+
+THIS SOFTWARE IS PROVIDED BY THE ECMA INTERNATIONAL "AS IS" AND ANY EXPRESS OR
+IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
+SHALL ECMA INTERNATIONAL BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
+OF SUCH DAMAGE.
+--- end of LICENSE ---
+
+%% This notice is provided with respect to the Dynalink library, which is
+included with the Nashorn technology.
+
+--- begin of LICENSE ---
+Copyright (c) 2009-2013, Attila Szegedi
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+* Redistributions of source code must retain the above copyright
+  notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+  notice, this list of conditions and the following disclaimer in the
+  documentation and/or other materials provided with the distribution.
+* Neither the name of the copyright holder nor the names of
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDER
+BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+--- end of LICENSE ---
+
+%% This notice is provided with respect to the Joni library, which is
+included with the Nashorn technology.
+
+--- begin of LICENSE ---
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to FontConfig 2.5, which may be 
+included with JRE 8, JDK 8, and OpenJDK 8 source distributions on
 Linux and Solaris.
 
 --- begin of LICENSE ---
 
-Copyright © 2001,2003 Keith Packard
+Copyright © 2001,2003 Keith Packard
 
 Permission to use, copy, modify, distribute, and sell this software and its
 documentation for any purpose is hereby granted without fee, provided that the
@@ -765,7 +957,7 @@
 -------------------------------------------------------------------------------
 
 %% This notice is provided with respect to IAIK PKCS#11 Wrapper, 
-which is included with JRE 7, JDK 7, and OpenJDK 7.
+which may be included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -816,7 +1008,7 @@
 -------------------------------------------------------------------------------
 
 %% This notice is provided with respect to ICU4C 4.0.1 and ICU4J 4.4, which 
-is included with JRE 7, JDK 7, and OpenJDK 7.
+may be included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -852,8 +1044,8 @@
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to IJG JPEG 6b, which is 
-included with JRE 7, JDK 7, and OpenJDK 7.
+%% This notice is provided with respect to IJG JPEG 6b, which may be 
+included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -891,8 +1083,35 @@
 
 --------------------------------------------------------------------------------
 
-%% This notice is provided with respect to JOpt-Simple v3.0,  which is 
-included with JRE 7, JDK 7, and OpenJDK 7.
+%% This notice is provided with respect to Joni v1.1.9, which may be 
+included with JRE 8, JDK 8, and OpenJDK 8.
+
+--- begin of LICENSE ---
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to JOpt-Simple v3.0,  which may be 
+included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -921,8 +1140,39 @@
 
 --------------------------------------------------------------------------------
 
+%% This notice is provided with respect to JSON, which may be included 
+with JRE 8 & JDK 8.
+
+--- begin of LICENSE ---
+
+Copyright (c) 2002 JSON.org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+The Software shall be used for Good, not Evil.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
 %% This notice is provided with respect to Kerberos functionality,
-which is included with JRE 7, JDK 7, and OpenJDK 7.
+which may be included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -934,7 +1184,7 @@
 -------------------------------------------------------------------------------
 
 %% This notice is provided with respect to Kerberos functionality from 
-FundsXpress, INC., which is included with JRE 7, JDK 7, and OpenJDK 7.
+FundsXpress, INC., which may be included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -967,8 +1217,8 @@
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to Kronos OpenGL headers, which is 
-included with JDK 7 and OpenJDK 7 source distributions.
+%% This notice is provided with respect to Khronos OpenGL headers, which may be
+included with JDK 8 and OpenJDK 8 source distributions.
 
 --- begin of LICENSE ---
 
@@ -1000,8 +1250,8 @@
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to libpng 1.2.18, which is 
-included with JRE 7, JDK 7, and OpenJDK 7.
+%% This notice is provided with respect to libpng 1.5.4, which may be 
+included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -1014,8 +1264,10 @@
 If you modify libpng you may insert additional notices immediately following
 this sentence.
 
-libpng versions 1.2.6, August 15, 2004, through 1.2.18, May 15, 2007, are
-Copyright (c) 2004, 2006-2007 Glenn Randers-Pehrson, and are
+This code is released under the libpng license.
+
+libpng versions 1.2.6, August 15, 2004, through 1.5.4, July 7, 2011, are
+Copyright (c) 2004, 2006-2011 Glenn Randers-Pehrson, and are
 distributed according to the same disclaimer and license as libpng-1.2.5
 with the following individual added to the list of Contributing Authors
 
@@ -1112,14 +1364,14 @@
 
 Glenn Randers-Pehrson
 glennrp at users.sourceforge.net
-May 15, 2007
+July 7, 2011
 
 --- end of LICENSE ---
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to libungif 4.1.3, which is 
-included with JRE 7, JDK 7, and OpenJDK 7.
+%% This notice is provided with respect to libungif 4.1.3, which may be 
+included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -1147,8 +1399,8 @@
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to Little CMS 2.0, which is 
-included with OpenJDK 7.
+%% This notice is provided with respect to Little CMS 2.4, which may be 
+included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -1183,7 +1435,7 @@
 -------------------------------------------------------------------------------
 
 %% This notice is provided with respect to Mesa 3D Graphics Library v4.1,
-which is included with JRE 7, JDK 7, and OpenJDK 7 source distributions.
+which may be included with JRE 8, JDK 8, and OpenJDK 8 source distributions.
 
 --- begin of LICENSE ---
 
@@ -1213,8 +1465,402 @@
 
 -------------------------------------------------------------------------------
 
+%% This notice is provided with respect to Mozilla Network Security
+Services (NSS), which is supplied with the JDK test suite in the OpenJDK
+source code repository. It is licensed under Mozilla Public License (MPL),
+version 2.0.
+
+The NSS libraries are supplied in executable form, built from unmodified
+NSS source code labeled with the "NSS_3.13.1_RTM" release tag.
+
+The NSS source code is available in the OpenJDK source code repository at:
+    jdk/test/sun/security/pkcs11/nss/src
+
+The NSS libraries are available in the OpenJDK source code repository at:
+    jdk/test/sun/security/pkcs11/nss/lib
+
+--- begin of LICENSE ---
+
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+    means each individual or legal entity that creates, contributes to
+    the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+    means the combination of the Contributions of others (if any) used
+    by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+    means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+    means Source Code Form to which the initial Contributor has attached
+    the notice in Exhibit A, the Executable Form of such Source Code
+    Form, and Modifications of such Source Code Form, in each case
+    including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+    means
+
+    (a) that the initial Contributor has attached the notice described
+        in Exhibit B to the Covered Software; or
+
+    (b) that the Covered Software was made available under the terms of
+        version 1.1 or earlier of the License, but not also under the
+        terms of a Secondary License.
+
+1.6. "Executable Form"
+    means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+    means a work that combines Covered Software with other material, in 
+    a separate file or files, that is not Covered Software.
+
+1.8. "License"
+    means this document.
+
+1.9. "Licensable"
+    means having the right to grant, to the maximum extent possible,
+    whether at the time of the initial grant or subsequently, any and
+    all of the rights conveyed by this License.
+
+1.10. "Modifications"
+    means any of the following:
+
+    (a) any file in Source Code Form that results from an addition to,
+        deletion from, or modification of the contents of Covered
+        Software; or
+
+    (b) any new file in Source Code Form that contains any Covered
+        Software.
+
+1.11. "Patent Claims" of a Contributor
+    means any patent claim(s), including without limitation, method,
+    process, and apparatus claims, in any patent Licensable by such
+    Contributor that would be infringed, but for the grant of the
+    License, by the making, using, selling, offering for sale, having
+    made, import, or transfer of either its Contributions or its
+    Contributor Version.
+
+1.12. "Secondary License"
+    means either the GNU General Public License, Version 2.0, the GNU
+    Lesser General Public License, Version 2.1, the GNU Affero General
+    Public License, Version 3.0, or any later versions of those
+    licenses.
+
+1.13. "Source Code Form"
+    means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+    means an individual or a legal entity exercising rights under this
+    License. For legal entities, "You" includes any entity that
+    controls, is controlled by, or is under common control with You. For
+    purposes of this definition, "control" means (a) the power, direct
+    or indirect, to cause the direction or management of such entity,
+    whether by contract or otherwise, or (b) ownership of more than
+    fifty percent (50%) of the outstanding shares or beneficial
+    ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+    Licensable by such Contributor to use, reproduce, make available,
+    modify, display, perform, distribute, and otherwise exploit its
+    Contributions, either on an unmodified basis, with Modifications, or
+    as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+    for sale, have made, import, and otherwise transfer either its
+    Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+    or
+
+(b) for infringements caused by: (i) Your and any other third party's
+    modifications of Covered Software, or (ii) the combination of its
+    Contributions with other software (except as part of its Contributor
+    Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+    its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+    Form, as described in Section 3.1, and You must inform recipients of
+    the Executable Form how they can obtain a copy of such Source Code
+    Form by reasonable means in a timely manner, at a charge no more
+    than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+    License, or sublicense it under different terms, provided that the
+    license for the Executable Form does not attempt to limit or alter
+    the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+*                                                                      *
+*  6. Disclaimer of Warranty                                           *
+*  -------------------------                                           *
+*                                                                      *
+*  Covered Software is provided under this License on an "as is"       *
+*  basis, without warranty of any kind, either expressed, implied, or  *
+*  statutory, including, without limitation, warranties that the       *
+*  Covered Software is free of defects, merchantable, fit for a        *
+*  particular purpose or non-infringing. The entire risk as to the     *
+*  quality and performance of the Covered Software is with You.        *
+*  Should any Covered Software prove defective in any respect, You     *
+*  (not any Contributor) assume the cost of any necessary servicing,   *
+*  repair, or correction. This disclaimer of warranty constitutes an   *
+*  essential part of this License. No use of any Covered Software is   *
+*  authorized under this License except under this disclaimer.         *
+*                                                                      *
+************************************************************************
+
+************************************************************************
+*                                                                      *
+*  7. Limitation of Liability                                          *
+*  --------------------------                                          *
+*                                                                      *
+*  Under no circumstances and under no legal theory, whether tort      *
+*  (including negligence), contract, or otherwise, shall any           *
+*  Contributor, or anyone who distributes Covered Software as          *
+*  permitted above, be liable to You for any direct, indirect,         *
+*  special, incidental, or consequential damages of any character      *
+*  including, without limitation, damages for lost profits, loss of    *
+*  goodwill, work stoppage, computer failure or malfunction, or any    *
+*  and all other commercial damages or losses, even if such party      *
+*  shall have been informed of the possibility of such damages. This   *
+*  limitation of liability shall not apply to liability for death or   *
+*  personal injury resulting from such party's negligence to the       *
+*  extent applicable law prohibits such limitation. Some               *
+*  jurisdictions do not allow the exclusion or limitation of           *
+*  incidental or consequential damages, so this exclusion and          *
+*  limitation may not apply to You.                                    *
+*                                                                      *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+  This Source Code Form is subject to the terms of the Mozilla Public
+  License, v. 2.0. If a copy of the MPL was not distributed with this
+  file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+  This Source Code Form is "Incompatible With Secondary Licenses", as
+  defined by the Mozilla Public License, v. 2.0.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
 %% This notice is provided with respect to PC/SC Lite for Suse Linux v.1.1.1,
-which is included with JRE 7, JDK 7, and OpenJDK 7 on Linux and Solaris.
+which may be included with JRE 8, JDK 8, and OpenJDK 8 on Linux and Solaris.
 
 --- begin of LICENSE ---
 
@@ -1257,8 +1903,30 @@
 
 -------------------------------------------------------------------------------
 
+%% This notice is provided with respect to PorterStemmer v4, which may be 
+included with JRE 8, JDK 8, and OpenJDK 8.
+
+--- begin of LICENSE ---
+
+See: http://tartarus.org/~martin/PorterStemmer
+
+The software is completely free for any purpose, unless notes at the head of
+the program text indicates otherwise (which is rare). In any case, the notes
+about licensing are never more restrictive than the BSD License.
+
+In every case where the software is not written by me (Martin Porter), this
+licensing arrangement has been endorsed by the contributor, and it is
+therefore unnecessary to ask the contributor again to confirm it.
+
+I have not asked any contributors (or their employers, if they have them) for
+proofs that they have the right to distribute their software in this way.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
 %% This notice is provided with respect to Relax NG Object/Parser v.20050510,
-which is included with JRE 7, JDK 7, and OpenJDK 7.
+which may be included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -1285,8 +1953,8 @@
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to RelaxNGCC v1.12, which is 
-included with JRE 7, JDK 7, and OpenJDK 7.
+%% This notice is provided with respect to RelaxNGCC v1.12, which may be 
+included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -1335,487 +2003,8 @@
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to Mozilla Rhino v1.7R3, which 
-is included with JRE 7, JDK 7, and OpenJDK 7
-
---- begin of LICENSE ---
-
-                          MOZILLA PUBLIC LICENSE
-                                Version 1.1
-
-                              ---------------
-
-1. Definitions.
-
-     1.0.1. "Commercial Use" means distribution or otherwise making the
-     Covered Code available to a third party.
-
-     1.1. "Contributor" means each entity that creates or contributes to
-     the creation of Modifications.
-
-     1.2. "Contributor Version" means the combination of the Original
-     Code, prior Modifications used by a Contributor, and the Modifications
-     made by that particular Contributor.
-
-     1.3. "Covered Code" means the Original Code or Modifications or the
-     combination of the Original Code and Modifications, in each case
-     including portions thereof.
-
-     1.4. "Electronic Distribution Mechanism" means a mechanism generally
-     accepted in the software development community for the electronic
-     transfer of data.
-
-     1.5. "Executable" means Covered Code in any form other than Source
-     Code.
-
-     1.6. "Initial Developer" means the individual or entity identified
-     as the Initial Developer in the Source Code notice required by Exhibit
-     A.
-
-     1.7. "Larger Work" means a work which combines Covered Code or
-     portions thereof with code not governed by the terms of this License.
-
-     1.8. "License" means this document.
-
-     1.8.1. "Licensable" means having the right to grant, to the maximum
-     extent possible, whether at the time of the initial grant or
-     subsequently acquired, any and all of the rights conveyed herein.
-
-     1.9. "Modifications" means any addition to or deletion from the
-     substance or structure of either the Original Code or any previous
-     Modifications. When Covered Code is released as a series of files, a
-     Modification is:
-          A. Any addition to or deletion from the contents of a file
-          containing Original Code or previous Modifications.
-
-          B. Any new file that contains any part of the Original Code or
-          previous Modifications.
-
-     1.10. "Original Code" means Source Code of computer software code
-     which is described in the Source Code notice required by Exhibit A as
-     Original Code, and which, at the time of its release under this
-     License is not already Covered Code governed by this License.
-
-     1.10.1. "Patent Claims" means any patent claim(s), now owned or
-     hereafter acquired, including without limitation,  method, process,
-     and apparatus claims, in any patent Licensable by grantor.
-
-     1.11. "Source Code" means the preferred form of the Covered Code for
-     making modifications to it, including all modules it contains, plus
-     any associated interface definition files, scripts used to control
-     compilation and installation of an Executable, or source code
-     differential comparisons against either the Original Code or another
-     well known, available Covered Code of the Contributor's choice. The
-     Source Code can be in a compressed or archival form, provided the
-     appropriate decompression or de-archiving software is widely available
-     for no charge.
-
-     1.12. "You" (or "Your")  means an individual or a legal entity
-     exercising rights under, and complying with all of the terms of, this
-     License or a future version of this License issued under Section 6.1.
-     For legal entities, "You" includes any entity which controls, is
-     controlled by, or is under common control with You. For purposes of
-     this definition, "control" means (a) the power, direct or indirect,
-     to cause the direction or management of such entity, whether by
-     contract or otherwise, or (b) ownership of more than fifty percent
-     (50%) of the outstanding shares or beneficial ownership of such
-     entity.
-
-2. Source Code License.
-
-     2.1. The Initial Developer Grant.
-     The Initial Developer hereby grants You a world-wide, royalty-free,
-     non-exclusive license, subject to third party intellectual property
-     claims:
-          (a)  under intellectual property rights (other than patent or
-          trademark) Licensable by Initial Developer to use, reproduce,
-          modify, display, perform, sublicense and distribute the Original
-          Code (or portions thereof) with or without Modifications, and/or
-          as part of a Larger Work; and
-
-          (b) under Patents Claims infringed by the making, using or
-          selling of Original Code, to make, have made, use, practice,
-          sell, and offer for sale, and/or otherwise dispose of the
-          Original Code (or portions thereof).
-
-          (c) the licenses granted in this Section 2.1(a) and (b) are
-          effective on the date Initial Developer first distributes
-          Original Code under the terms of this License.
-
-          (d) Notwithstanding Section 2.1(b) above, no patent license is
-          granted: 1) for code that You delete from the Original Code; 2)
-          separate from the Original Code;  or 3) for infringements caused
-          by: i) the modification of the Original Code or ii) the
-          combination of the Original Code with other software or devices.
-
-     2.2. Contributor Grant.
-     Subject to third party intellectual property claims, each Contributor
-     hereby grants You a world-wide, royalty-free, non-exclusive license
-
-          (a)  under intellectual property rights (other than patent or
-          trademark) Licensable by Contributor, to use, reproduce, modify,
-          display, perform, sublicense and distribute the Modifications
-          created by such Contributor (or portions thereof) either on an
-          unmodified basis, with other Modifications, as Covered Code
-          and/or as part of a Larger Work; and
-
-          (b) under Patent Claims infringed by the making, using, or
-          selling of  Modifications made by that Contributor either alone
-          and/or in combination with its Contributor Version (or portions
-          of such combination), to make, use, sell, offer for sale, have
-          made, and/or otherwise dispose of: 1) Modifications made by that
-          Contributor (or portions thereof); and 2) the combination of
-          Modifications made by that Contributor with its Contributor
-          Version (or portions of such combination).
-
-          (c) the licenses granted in Sections 2.2(a) and 2.2(b) are
-          effective on the date Contributor first makes Commercial Use of
-          the Covered Code.
-
-          (d)    Notwithstanding Section 2.2(b) above, no patent license is
-          granted: 1) for any code that Contributor has deleted from the
-          Contributor Version; 2)  separate from the Contributor Version;
-          3)  for infringements caused by: i) third party modifications of
-          Contributor Version or ii)  the combination of Modifications made
-          by that Contributor with other software  (except as part of the
-          Contributor Version) or other devices; or 4) under Patent Claims
-          infringed by Covered Code in the absence of Modifications made by
-          that Contributor.
-
-3. Distribution Obligations.
-
-     3.1. Application of License.
-     The Modifications which You create or to which You contribute are
-     governed by the terms of this License, including without limitation
-     Section 2.2. The Source Code version of Covered Code may be
-     distributed only under the terms of this License or a future version
-     of this License released under Section 6.1, and You must include a
-     copy of this License with every copy of the Source Code You
-     distribute. You may not offer or impose any terms on any Source Code
-     version that alters or restricts the applicable version of this
-     License or the recipients' rights hereunder. However, You may include
-     an additional document offering the additional rights described in
-     Section 3.5.
-
-     3.2. Availability of Source Code.
-     Any Modification which You create or to which You contribute must be
-     made available in Source Code form under the terms of this License
-     either on the same media as an Executable version or via an accepted
-     Electronic Distribution Mechanism to anyone to whom you made an
-     Executable version available; and if made available via Electronic
-     Distribution Mechanism, must remain available for at least twelve (12)
-     months after the date it initially became available, or at least six
-     (6) months after a subsequent version of that particular Modification
-     has been made available to such recipients. You are responsible for
-     ensuring that the Source Code version remains available even if the
-     Electronic Distribution Mechanism is maintained by a third party.
-
-     3.3. Description of Modifications.
-     You must cause all Covered Code to which You contribute to contain a
-     file documenting the changes You made to create that Covered Code and
-     the date of any change. You must include a prominent statement that
-     the Modification is derived, directly or indirectly, from Original
-     Code provided by the Initial Developer and including the name of the
-     Initial Developer in (a) the Source Code, and (b) in any notice in an
-     Executable version or related documentation in which You describe the
-     origin or ownership of the Covered Code.
-
-     3.4. Intellectual Property Matters
-          (a) Third Party Claims.
-          If Contributor has knowledge that a license under a third party's
-          intellectual property rights is required to exercise the rights
-          granted by such Contributor under Sections 2.1 or 2.2,
-          Contributor must include a text file with the Source Code
-          distribution titled "LEGAL" which describes the claim and the
-          party making the claim in sufficient detail that a recipient will
-          know whom to contact. If Contributor obtains such knowledge after
-          the Modification is made available as described in Section 3.2,
-          Contributor shall promptly modify the LEGAL file in all copies
-          Contributor makes available thereafter and shall take other steps
-          (such as notifying appropriate mailing lists or newsgroups)
-          reasonably calculated to inform those who received the Covered
-          Code that new knowledge has been obtained.
-
-          (b) Contributor APIs.
-          If Contributor's Modifications include an application programming
-          interface and Contributor has knowledge of patent licenses which
-          are reasonably necessary to implement that API, Contributor must
-          also include this information in the LEGAL file.
-
-               (c)    Representations.
-          Contributor represents that, except as disclosed pursuant to
-          Section 3.4(a) above, Contributor believes that Contributor's
-          Modifications are Contributor's original creation(s) and/or
-          Contributor has sufficient rights to grant the rights conveyed by
-          this License.
-
-     3.5. Required Notices.
-     You must duplicate the notice in Exhibit A in each file of the Source
-     Code.  If it is not possible to put such notice in a particular Source
-     Code file due to its structure, then You must include such notice in a
-     location (such as a relevant directory) where a user would be likely
-     to look for such a notice.  If You created one or more Modification(s)
-     You may add your name as a Contributor to the notice described in
-     Exhibit A.  You must also duplicate this License in any documentation
-     for the Source Code where You describe recipients' rights or ownership
-     rights relating to Covered Code.  You may choose to offer, and to
-     charge a fee for, warranty, support, indemnity or liability
-     obligations to one or more recipients of Covered Code. However, You
-     may do so only on Your own behalf, and not on behalf of the Initial
-     Developer or any Contributor. You must make it absolutely clear than
-     any such warranty, support, indemnity or liability obligation is
-     offered by You alone, and You hereby agree to indemnify the Initial
-     Developer and every Contributor for any liability incurred by the
-     Initial Developer or such Contributor as a result of warranty,
-     support, indemnity or liability terms You offer.
-
-     3.6. Distribution of Executable Versions.
-     You may distribute Covered Code in Executable form only if the
-     requirements of Section 3.1-3.5 have been met for that Covered Code,
-     and if You include a notice stating that the Source Code version of
-     the Covered Code is available under the terms of this License,
-     including a description of how and where You have fulfilled the
-     obligations of Section 3.2. The notice must be conspicuously included
-     in any notice in an Executable version, related documentation or
-     collateral in which You describe recipients' rights relating to the
-     Covered Code. You may distribute the Executable version of Covered
-     Code or ownership rights under a license of Your choice, which may
-     contain terms different from this License, provided that You are in
-     compliance with the terms of this License and that the license for the
-     Executable version does not attempt to limit or alter the recipient's
-     rights in the Source Code version from the rights set forth in this
-     License. If You distribute the Executable version under a different
-     license You must make it absolutely clear that any terms which differ
-     from this License are offered by You alone, not by the Initial
-     Developer or any Contributor. You hereby agree to indemnify the
-     Initial Developer and every Contributor for any liability incurred by
-     the Initial Developer or such Contributor as a result of any such
-     terms You offer.
-
-     3.7. Larger Works.
-     You may create a Larger Work by combining Covered Code with other code
-     not governed by the terms of this License and distribute the Larger
-     Work as a single product. In such a case, You must make sure the
-     requirements of this License are fulfilled for the Covered Code.
-
-4. Inability to Comply Due to Statute or Regulation.
-
-     If it is impossible for You to comply with any of the terms of this
-     License with respect to some or all of the Covered Code due to
-     statute, judicial order, or regulation then You must: (a) comply with
-     the terms of this License to the maximum extent possible; and (b)
-     describe the limitations and the code they affect. Such description
-     must be included in the LEGAL file described in Section 3.4 and must
-     be included with all distributions of the Source Code. Except to the
-     extent prohibited by statute or regulation, such description must be
-     sufficiently detailed for a recipient of ordinary skill to be able to
-     understand it.
-
-5. Application of this License.
-
-     This License applies to code to which the Initial Developer has
-     attached the notice in Exhibit A and to related Covered Code.
-
-6. Versions of the License.
-
-     6.1. New Versions.
-     Netscape Communications Corporation ("Netscape") may publish revised
-     and/or new versions of the License from time to time. Each version
-     will be given a distinguishing version number.
-
-     6.2. Effect of New Versions.
-     Once Covered Code has been published under a particular version of the
-     License, You may always continue to use it under the terms of that
-     version. You may also choose to use such Covered Code under the terms
-     of any subsequent version of the License published by Netscape. No one
-     other than Netscape has the right to modify the terms applicable to
-     Covered Code created under this License.
-
-     6.3. Derivative Works.
-     If You create or use a modified version of this License (which you may
-     only do in order to apply it to code which is not already Covered Code
-     governed by this License), You must (a) rename Your license so that
-     the phrases "Mozilla", "MOZILLAPL", "MOZPL", "Netscape",
-     "MPL", "NPL" or any confusingly similar phrase do not appear in your
-     license (except to note that your license differs from this License)
-     and (b) otherwise make it clear that Your version of the license
-     contains terms which differ from the Mozilla Public License and
-     Netscape Public License. (Filling in the name of the Initial
-     Developer, Original Code or Contributor in the notice described in
-     Exhibit A shall not of themselves be deemed to be modifications of
-     this License.)
-
-7. DISCLAIMER OF WARRANTY.
-
-     COVERED CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS,
-     WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
-     WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF
-     DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING.
-     THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE
-     IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT,
-     YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE
-     COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. THIS DISCLAIMER
-     OF WARRANTY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF
-     ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
-
-8. TERMINATION.
-
-     8.1.  This License and the rights granted hereunder will terminate
-     automatically if You fail to comply with terms herein and fail to cure
-     such breach within 30 days of becoming aware of the breach. All
-     sublicenses to the Covered Code which are properly granted shall
-     survive any termination of this License. Provisions which, by their
-     nature, must remain in effect beyond the termination of this License
-     shall survive.
-
-     8.2.  If You initiate litigation by asserting a patent infringement
-     claim (excluding declatory judgment actions) against Initial Developer
-     or a Contributor (the Initial Developer or Contributor against whom
-     You file such action is referred to as "Participant")  alleging that:
-
-     (a)  such Participant's Contributor Version directly or indirectly
-     infringes any patent, then any and all rights granted by such
-     Participant to You under Sections 2.1 and/or 2.2 of this License
-     shall, upon 60 days notice from Participant terminate prospectively,
-     unless if within 60 days after receipt of notice You either: (i)
-     agree in writing to pay Participant a mutually agreeable reasonable
-     royalty for Your past and future use of Modifications made by such
-     Participant, or (ii) withdraw Your litigation claim with respect to
-     the Contributor Version against such Participant.  If within 60 days
-     of notice, a reasonable royalty and payment arrangement are not
-     mutually agreed upon in writing by the parties or the litigation claim
-     is not withdrawn, the rights granted by Participant to You under
-     Sections 2.1 and/or 2.2 automatically terminate at the expiration of
-     the 60 day notice period specified above.
-
-     (b)  any software, hardware, or device, other than such Participant's
-     Contributor Version, directly or indirectly infringes any patent, then
-     any rights granted to You by such Participant under Sections 2.1(b)
-     and 2.2(b) are revoked effective as of the date You first made, used,
-     sold, distributed, or had made, Modifications made by that
-     Participant.
-
-     8.3.  If You assert a patent infringement claim against Participant
-     alleging that such Participant's Contributor Version directly or
-     indirectly infringes any patent where such claim is resolved (such as
-     by license or settlement) prior to the initiation of patent
-     infringement litigation, then the reasonable value of the licenses
-     granted by such Participant under Sections 2.1 or 2.2 shall be taken
-     into account in determining the amount or value of any payment or
-     license.
-
-     8.4.  In the event of termination under Sections 8.1 or 8.2 above,
-     all end user license agreements (excluding distributors and resellers)
-     which have been validly granted by You or any distributor hereunder
-     prior to termination shall survive termination.
-
-9. LIMITATION OF LIABILITY.
-
-     UNDER NO CIRCUMSTANCES AND UNDER NO LEGAL THEORY, WHETHER TORT
-     (INCLUDING NEGLIGENCE), CONTRACT, OR OTHERWISE, SHALL YOU, THE INITIAL
-     DEVELOPER, ANY OTHER CONTRIBUTOR, OR ANY DISTRIBUTOR OF COVERED CODE,
-     OR ANY SUPPLIER OF ANY OF SUCH PARTIES, BE LIABLE TO ANY PERSON FOR
-     ANY INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES OF ANY
-     CHARACTER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF GOODWILL,
-     WORK STOPPAGE, COMPUTER FAILURE OR MALFUNCTION, OR ANY AND ALL OTHER
-     COMMERCIAL DAMAGES OR LOSSES, EVEN IF SUCH PARTY SHALL HAVE BEEN
-     INFORMED OF THE POSSIBILITY OF SUCH DAMAGES. THIS LIMITATION OF
-     LIABILITY SHALL NOT APPLY TO LIABILITY FOR DEATH OR PERSONAL INJURY
-     RESULTING FROM SUCH PARTY'S NEGLIGENCE TO THE EXTENT APPLICABLE LAW
-     PROHIBITS SUCH LIMITATION. SOME JURISDICTIONS DO NOT ALLOW THE
-     EXCLUSION OR LIMITATION OF INCIDENTAL OR CONSEQUENTIAL DAMAGES, SO
-     THIS EXCLUSION AND LIMITATION MAY NOT APPLY TO YOU.
-
-10. U.S. GOVERNMENT END USERS.
-
-     The Covered Code is a "commercial item," as that term is defined in
-     48 C.F.R. 2.101 (Oct. 1995), consisting of "commercial computer
-     software" and "commercial computer software documentation," as such
-     terms are used in 48 C.F.R. 12.212 (Sept. 1995). Consistent with 48
-     C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4 (June 1995),
-     all U.S. Government End Users acquire Covered Code with only those
-     rights set forth herein.
-
-11. MISCELLANEOUS.
-
-     This License represents the complete agreement concerning subject
-     matter hereof. If any provision of this License is held to be
-     unenforceable, such provision shall be reformed only to the extent
-     necessary to make it enforceable. This License shall be governed by
-     California law provisions (except to the extent applicable law, if
-     any, provides otherwise), excluding its conflict-of-law provisions.
-     With respect to disputes in which at least one party is a citizen of,
-     or an entity chartered or registered to do business in the United
-     States of America, any litigation relating to this License shall be
-     subject to the jurisdiction of the Federal Courts of the Northern
-     District of California, with venue lying in Santa Clara County,
-     California, with the losing party responsible for costs, including
-     without limitation, court costs and reasonable attorneys' fees and
-     expenses. The application of the United Nations Convention on
-     Contracts for the International Sale of Goods is expressly excluded.
-     Any law or regulation which provides that the language of a contract
-     shall be construed against the drafter shall not apply to this
-     License.
-
-12. RESPONSIBILITY FOR CLAIMS.
-
-     As between Initial Developer and the Contributors, each party is
-     responsible for claims and damages arising, directly or indirectly,
-     out of its utilization of rights under this License and You agree to
-     work with Initial Developer and Contributors to distribute such
-     responsibility on an equitable basis. Nothing herein is intended or
-     shall be deemed to constitute any admission of liability.
-
-13. MULTIPLE-LICENSED CODE.
-
-     Initial Developer may designate portions of the Covered Code as
-     "Multiple-Licensed".  "Multiple-Licensed" means that the Initial
-     Developer permits you to utilize portions of the Covered Code under
-     Your choice of the NPL or the alternative licenses, if any, specified
-     by the Initial Developer in the file described in Exhibit A.
-
-EXHIBIT A - Mozilla Public License.
-
-     ``The contents of this file are subject to the Mozilla Public License
-     Version 1.1 (the "License"); you may not use this file except in
-     compliance with the License. You may obtain a copy of the License at
-     http://www.mozilla.org/MPL/
-
-     Software distributed under the License is distributed on an "AS IS"
-     basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
-     License for the specific language governing rights and limitations
-     under the License.
-
-     The Original Code is ______________________________________.
-
-     The Initial Developer of the Original Code is ________________________.
-     Portions created by ______________________ are Copyright (C) ______
-     _______________________. All Rights Reserved.
-
-     Contributor(s): ______________________________________.
-
-     Alternatively, the contents of this file may be used under the terms
-     of the _____ license (the  "[___] License"), in which case the
-     provisions of [______] License are applicable instead of those
-     above.  If you wish to allow use of your version of this file only
-     under the terms of the [____] License and not to allow others to use
-     your version of this file under the MPL, indicate your decision by
-     deleting  the provisions above and replace  them with the notice and
-     other provisions required by the [___] License.  If you do not delete
-     the provisions above, a recipient may use your version of this file
-     under either the MPL or the [___] License."
-
-     [NOTE: The text of this Exhibit A may differ slightly from the text of
-     the notices in the Source Code files of the Original Code. You should
-     use the text of this Exhibit A rather than the text found in the
-     Original Code Source Code for Your Modifications.]
-
---- end of LICENSE ---
-
--------------------------------------------------------------------------------
-
-%% This notice is provided with respect to SAX 2.0.1, which is included 
-with JRE 7, JDK 7, and OpenJDK 7.
+%% This notice is provided with respect to SAX 2.0.1, which may be included 
+with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -1876,8 +2065,8 @@
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to SoftFloat version 2b, which is 
-included with JRE 7, JDK 7, and OpenJDK 7 on Linux/ARM.
+%% This notice is provided with respect to SoftFloat version 2b, which may be 
+included with JRE 8, JDK 8, and OpenJDK 8 on Linux/ARM.
 
 --- begin of LICENSE ---
 
@@ -1909,21 +2098,12 @@
 
 -------------------------------------------------------------------------------
 
-%% Portions licensed from Taligent, Inc.
-
--------------------------------------------------------------------------------
-
-%% This notice is provided with respect to Thai Dictionary, which is 
-included with JRE 7, JDK 7, and OpenJDK 7.
+%% This notice is provided with respect to Sparkle 1.5,
+which may be included with JRE 8 on Mac OS X.
 
 --- begin of LICENSE ---
 
-Copyright (C) 1982 The Royal Institute, Thai Royal Government.
-
-Copyright (C) 1998 National Electronics and Computer Technology Center,
-National Science and Technology Development Agency,
-Ministry of Science Technology and Environment,
-Thai Royal Government.
+Copyright (c) 2012 Sparkle.org and Andy Matuschak
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -1947,8 +2127,46 @@
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to Unicode 6.0.0, CLDR v1.4.1, & CLDR
-v1.9, which is included with JRE 7, JDK 7, and OpenJDK 7.
+%% Portions licensed from Taligent, Inc.
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to Thai Dictionary, which may be 
+included with JRE 8, JDK 8, and OpenJDK 8.
+
+--- begin of LICENSE ---
+
+Copyright (C) 1982 The Royal Institute, Thai Royal Government.
+
+Copyright (C) 1998 National Electronics and Computer Technology Center,
+National Science and Technology Development Agency,
+Ministry of Science Technology and Environment,
+Thai Royal Government.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+--- end of LICENSE ---
+
+-------------------------------------------------------------------------------
+
+%% This notice is provided with respect to Unicode 6.2.0 & CLDR 21.0.1,
+which may be included with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
@@ -1959,7 +2177,7 @@
 Trademark Usage Policy.
 
 A. Unicode Copyright.
-   1. Copyright © 1991-2011 Unicode, Inc. All rights reserved.
+   1. Copyright © 1991-2013 Unicode, Inc. All rights reserved.
 
    2. Certain documents and files on this website contain a legend indicating
       that "Modification is permitted." Any person is hereby authorized,
@@ -2094,7 +2312,7 @@
 
 COPYRIGHT AND PERMISSION NOTICE
 
-Copyright © 1991-2011 Unicode, Inc. All rights reserved. Distributed under the
+Copyright © 1991-2012 Unicode, Inc. All rights reserved. Distributed under the
 Terms of Use in http://www.unicode.org/copyright.html.
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -2134,8 +2352,8 @@
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to UPX v3.01, which is included 
-with JRE 7 on Windows.
+%% This notice is provided with respect to UPX v3.01, which may be included 
+with JRE 8 on Windows.
 
 --- begin of LICENSE ---
 
@@ -2274,7 +2492,7 @@
 -------------------------------------------------------------------------------
 
 %% This notice is provided with respect to Xfree86-VidMode Extension 1.0,
-which is included with JRE 7, JDK 7, and OpenJDK 7 on Linux and Solaris.
+which may be included with JRE 8, JDK 8, and OpenJDK 8 on Linux and Solaris.
 
 --- begin of LICENSE ---
 
@@ -2326,8 +2544,8 @@
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to X Window System 6.8.2, which is 
-included with JRE 7, JDK 7, and OpenJDK 7 on Linux and Solaris.
+%% This notice is provided with respect to X Window System 6.8.2, which may be 
+included with JRE 8, JDK 8, and OpenJDK 8 on Linux and Solaris.
 
 --- begin of LICENSE ---
 
@@ -3131,12 +3349,12 @@
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to zlib v1.2.3, which is included 
-with JRE 7, JDK 7, and OpenJDK 7
+%% This notice is provided with respect to zlib v1.2.5, which may be included 
+with JRE 8, JDK 8, and OpenJDK 8.
 
 --- begin of LICENSE ---
 
-  version 1.2.3, July 18th, 2005
+  version 1.2.5, April 19th, 2010
 
   Copyright (C) 1995-2005 Jean-loup Gailly and Mark Adler
 
@@ -3163,16 +3381,18 @@
 
 -------------------------------------------------------------------------------
 
-%% This notice is provided with respect to the following which is 
-included with JRE 7, JDK 7, and OpenJDK 7, except where noted:
-
-  Apache Derby 10.8.1.2        [included with JDK 7 only]
+%% This notice is provided with respect to the following which may be 
+included with JRE 8, JDK 8, and OpenJDK 8, except where noted:
+
+  Apache Commons Math 2.2
+  Apache Derby 10.10.1.2        [included with JDK 8]
   Apache Jakarta BCEL 5.2 
   Apache Jakarta Regexp 1.4 
-  Apache Santuario XMLSec-Java 1.4.2
+  Apache Santuario XML Security for Java 1.5.4
   Apache Xalan-Java 2.7.1 
-  Apache Xerces2 Java 2.10.0 
+  Apache Xerces Java 2.10.0 
   Apache XML Resolver 1.1 
+  Dynalink 0.5
 
 
 --- begin of LICENSE ---
--- a/agent/src/os/linux/libproc_impl.c	Wed Feb 26 11:29:47 2014 +0100
+++ b/agent/src/os/linux/libproc_impl.c	Wed Feb 26 02:38:46 2014 -0800
@@ -29,54 +29,51 @@
 #include <thread_db.h>
 #include "libproc_impl.h"
 
-static const char* alt_root = NULL;
-static int alt_root_len = -1;
-
 #define SA_ALTROOT "SA_ALTROOT"
 
-static void init_alt_root() {
-   if (alt_root_len == -1) {
-      alt_root = getenv(SA_ALTROOT);
-      if (alt_root) {
-         alt_root_len = strlen(alt_root);
-      } else {
-         alt_root_len = 0;
-      }
-   }
-}
+int pathmap_open(const char* name) {
+  static const char *alt_root = NULL;
+  static int alt_root_initialized = 0;
 
-int pathmap_open(const char* name) {
-   int fd;
-   char alt_path[PATH_MAX + 1];
+  int fd;
+  char alt_path[PATH_MAX + 1], *alt_path_end;
+  const char *s;
 
-   init_alt_root();
+  if (!alt_root_initialized) {
+    alt_root_initialized = -1;
+    alt_root = getenv(SA_ALTROOT);
+  }
 
-   if (alt_root_len > 0) {
-      strcpy(alt_path, alt_root);
-      strcat(alt_path, name);
-      fd = open(alt_path, O_RDONLY);
-      if (fd >= 0) {
-         print_debug("path %s substituted for %s\n", alt_path, name);
-         return fd;
-      }
+  if (alt_root == NULL) {
+    return open(name, O_RDONLY);
+  }
 
-      if (strrchr(name, '/')) {
-         strcpy(alt_path, alt_root);
-         strcat(alt_path, strrchr(name, '/'));
-         fd = open(alt_path, O_RDONLY);
-         if (fd >= 0) {
-            print_debug("path %s substituted for %s\n", alt_path, name);
-            return fd;
-         }
-      }
-   } else {
-      fd = open(name, O_RDONLY);
-      if (fd >= 0) {
-         return fd;
-      }
-   }
+  strcpy(alt_path, alt_root);
+  alt_path_end = alt_path + strlen(alt_path);
 
-   return -1;
+  // Strip path items one by one and try to open file with alt_root prepended
+  s = name;
+  while (1) {
+    strcat(alt_path, s);
+    s += 1;
+
+    fd = open(alt_path, O_RDONLY);
+    if (fd >= 0) {
+      print_debug("path %s substituted for %s\n", alt_path, name);
+      return fd;
+    }
+
+    // The linker always puts the full path of a solib into the process, so we
+    // can rely on the presence of '/'. If no slash is present, the solib does
+    // not physically exist (e.g. linux-gate.so) and opening it fails anyway.
+    if ((s = strchr(s, '/')) == NULL) {
+      break;
+    }
+
+    *alt_path_end = 0;
+  }
+
+  return -1;
 }
 
 static bool _libsaproc_debug;
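
The rewritten pathmap_open() prepends SA_ALTROOT to the full solib path and, on
each failed open, strips one leading path component before retrying. A minimal
Java sketch of the same suffix-stripping lookup (mapPath and its arguments are
hypothetical, for illustration only; the changeset itself implements this in C):

  // Try altRoot + name, then altRoot + name minus one leading component, etc.
  static String mapPath(String altRoot, String name) {
    String s = name;                        // name starts with '/'
    while (true) {
      java.io.File f = new java.io.File(altRoot + s);
      if (f.exists()) {
        return f.getPath();                 // substituted path found
      }
      int slash = s.indexOf('/', 1);        // strip one leading path component
      if (slash < 0) {
        return null;                        // nothing left to strip; give up
      }
      s = s.substring(slash);
    }
  }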
--- a/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxCDebugger.java	Wed Feb 26 11:29:47 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxCDebugger.java	Wed Feb 26 02:38:46 2014 -0800
@@ -55,31 +55,21 @@
     if (pc == null) {
       return null;
     }
+
+    /* Typically there are only about ten loaded objects here, so there is no
+       reason to sort and binary-search; linear search performs acceptably. */
+
     List objs = getLoadObjectList();
-    Object[] arr = objs.toArray();
-    // load objects are sorted by base address, do binary search
-    int mid  = -1;
-    int low  = 0;
-    int high = arr.length - 1;
 
-    while (low <= high) {
-       mid = (low + high) >> 1;
-       LoadObject midVal = (LoadObject) arr[mid];
-       long cmp = pc.minus(midVal.getBase());
-       if (cmp < 0) {
-          high = mid - 1;
-       } else if (cmp > 0) {
-          long size = midVal.getSize();
-          if (cmp >= size) {
-             low = mid + 1;
-          } else {
-             return (LoadObject) arr[mid];
-          }
-       } else { // match found
-          return (LoadObject) arr[mid];
-       }
+    for (int i = 0; i < objs.size(); i++) {
+      LoadObject ob = (LoadObject) objs.get(i);
+      Address base = ob.getBase();
+      long size = ob.getSize();
+      if (pc.greaterThanOrEqual(base) && pc.lessThan(base.addOffsetTo(size))) {
+        return ob;
+      }
     }
-    // no match found.
+
     return null;
   }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/AdaptiveFreeList.java	Wed Feb 26 02:38:46 2014 -0800
@@ -0,0 +1,77 @@
+/*
+ * @(#)AdaptiveFreeList.java
+ *
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.memory;
+
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObject;
+import sun.jvm.hotspot.types.CIntegerField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+
+public class AdaptiveFreeList extends VMObject {
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+      public void update(Observable o, Object data) {
+        initialize(VM.getVM().getTypeDataBase());
+      }
+    });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) {
+    Type type = db.lookupType("AdaptiveFreeList<FreeChunk>");
+    sizeField = type.getCIntegerField("_size");
+    countField = type.getCIntegerField("_count");
+    headerSize = type.getSize();
+  }
+
+  // Fields
+  private static CIntegerField sizeField;
+  private static CIntegerField countField;
+  private static long          headerSize;
+
+  // Constructor
+  public AdaptiveFreeList(Address address) {
+    super(address);
+  }
+
+  // Accessors
+  public long size() {
+    return sizeField.getValue(addr);
+  }
+
+  public long count() {
+    return countField.getValue(addr);
+  }
+
+  public static long sizeOf() {
+    return headerSize;
+  }
+}
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/CompactibleFreeListSpace.java	Wed Feb 26 11:29:47 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/CompactibleFreeListSpace.java	Wed Feb 26 02:38:46 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,25 +24,29 @@
 
 package sun.jvm.hotspot.memory;
 
-import java.io.*;
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.oops.*;
-import sun.jvm.hotspot.runtime.*;
-import sun.jvm.hotspot.types.*;
-import sun.jvm.hotspot.utilities.*;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Observable;
+import java.util.Observer;
+
+import sun.jvm.hotspot.debugger.Address;
+import sun.jvm.hotspot.debugger.Debugger;
+import sun.jvm.hotspot.oops.ObjectHeap;
+import sun.jvm.hotspot.oops.Oop;
+import sun.jvm.hotspot.runtime.VM;
+import sun.jvm.hotspot.runtime.VMObjectFactory;
+import sun.jvm.hotspot.types.AddressField;
+import sun.jvm.hotspot.types.Type;
+import sun.jvm.hotspot.types.TypeDataBase;
+import sun.jvm.hotspot.utilities.Assert;
 
 public class CompactibleFreeListSpace extends CompactibleSpace {
    private static AddressField collectorField;
-
-   // for free size, three fields
-   //       FreeBlockDictionary* _dictionary;        // ptr to dictionary for large size blocks
-   //       FreeList _indexedFreeList[IndexSetSize]; // indexed array for small size blocks
-   //       LinearAllocBlock _smallLinearAllocBlock; // small linear alloc in TLAB
    private static AddressField indexedFreeListField;
    private static AddressField dictionaryField;
    private static long         smallLinearAllocBlockFieldOffset;
-   private static long indexedFreeListSizeOf;
 
    private int    heapWordSize;     // 4 for 32bit, 8 for 64 bits
    private int    IndexSetStart;    // for small indexed list
@@ -109,11 +113,11 @@
       // small chunks
       long size = 0;
       Address cur = addr.addOffsetTo( indexedFreeListField.getOffset() );
-      cur = cur.addOffsetTo(IndexSetStart*FreeList.sizeOf());
+      cur = cur.addOffsetTo(IndexSetStart*AdaptiveFreeList.sizeOf());
       for (int i=IndexSetStart; i<IndexSetSize; i += IndexSetStride) {
-         FreeList freeList = (FreeList) VMObjectFactory.newObject(FreeList.class, cur);
+         AdaptiveFreeList freeList = (AdaptiveFreeList) VMObjectFactory.newObject(AdaptiveFreeList.class, cur);
          size += i*freeList.count();
-         cur= cur.addOffsetTo(IndexSetStride*FreeList.sizeOf());
+         cur= cur.addOffsetTo(IndexSetStride*AdaptiveFreeList.sizeOf());
       }
 
       // large block
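
The loop above walks the indexed free lists: each small-chunk list of index i
contributes i * count(i) heap words to the free size, and
AdaptiveFreeList.sizeOf() supplies the stride between adjacent lists. One
iteration looks roughly like this Java sketch (names as in the hunk above;
'cur' is assumed to point at an AdaptiveFreeList<FreeChunk> in the target VM):

  AdaptiveFreeList freeList =
      (AdaptiveFreeList) VMObjectFactory.newObject(AdaptiveFreeList.class, cur);
  long words = freeList.size() * freeList.count();  // chunk size (words) * chunks
  cur = cur.addOffsetTo(IndexSetStride * AdaptiveFreeList.sizeOf());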
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/FreeList.java	Wed Feb 26 11:29:47 2014 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,72 +0,0 @@
-/*
- * @(#)FreeList.java
- *
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-package sun.jvm.hotspot.memory;
-
-import java.util.*;
-import sun.jvm.hotspot.debugger.*;
-import sun.jvm.hotspot.types.*;
-import sun.jvm.hotspot.runtime.*;
-
-public class FreeList extends VMObject {
-   static {
-      VM.registerVMInitializedObserver(new Observer() {
-         public void update(Observable o, Object data) {
-            initialize(VM.getVM().getTypeDataBase());
-         }
-      });
-   }
-
-   private static synchronized void initialize(TypeDataBase db) {
-      Type type = db.lookupType("FreeList<FreeChunk>");
-      sizeField = type.getCIntegerField("_size");
-      countField = type.getCIntegerField("_count");
-      headerSize = type.getSize();
-   }
-
-   // Fields
-   private static CIntegerField sizeField;
-   private static CIntegerField countField;
-   private static long          headerSize;
-
-   //Constructor
-   public FreeList(Address address) {
-     super(address);
-   }
-
-   // Accessors
-   public long size() {
-      return sizeField.getValue(addr);
-   }
-
-   public long count() {
-      return  countField.getValue(addr);
-   }
-
-   public static long sizeOf() {
-     return headerSize;
-  }
-}
--- a/agent/src/share/classes/sun/jvm/hotspot/opto/Block.java	Wed Feb 26 11:29:47 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/opto/Block.java	Wed Feb 26 02:38:46 2014 -0800
@@ -48,7 +48,7 @@
     preOrderField = new CIntField(type.getCIntegerField("_pre_order"), 0);
     domDepthField = new CIntField(type.getCIntegerField("_dom_depth"), 0);
     idomField = type.getAddressField("_idom");
-    freqField = type.getJFloatField("_freq");
+    freqField = type.getJDoubleField("_freq");
   }
 
   private static AddressField nodesField;
@@ -57,7 +57,7 @@
   private static CIntField preOrderField;
   private static CIntField domDepthField;
   private static AddressField idomField;
-  private static JFloatField freqField;
+  private static JDoubleField freqField;
 
   public Block(Address addr) {
     super(addr);
@@ -67,8 +67,8 @@
     return (int)preOrderField.getValue(getAddress());
   }
 
-  public float freq() {
-    return (float)freqField.getValue(getAddress());
+  public double freq() {
+    return (double)freqField.getValue(getAddress());
   }
 
   public Node_List nodes() {
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/sa.js	Wed Feb 26 11:29:47 2014 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/sa.js	Wed Feb 26 02:38:46 2014 -0800
@@ -371,19 +371,23 @@
    return sa.dbg.lookup(dso, sym);
 }
 
+function loadObjectContainingPC(addr) {
+    if (sa.cdbg == null) {
+      // no CDebugger support, return null
+      return null;
+    }
+
+    return sa.cdbg.loadObjectContainingPC(addr);
+}
+
 // returns the ClosestSymbol or null
 function closestSymbolFor(addr) {
-   if (sa.cdbg == null) {
-      // no CDebugger support, return null
-      return null;
-   } else {
-      var dso = sa.cdbg.loadObjectContainingPC(addr);
-      if (dso != null) {
-         return dso.closestSymbolToPC(addr);
-      } else {
-         return null;
-      }
-   }
+    var dso = loadObjectContainingPC(addr);
+    if (dso != null) {
+      return dso.closestSymbolToPC(addr);
+    }
+
+    return null;
 }
 
 // Address-to-symbol
@@ -804,6 +808,16 @@
 // VM type to SA class map
 var  vmType2Class = new Object();
 
+// C2-only classes
+try {
+  vmType2Class["ExceptionBlob"] = sapkg.code.ExceptionBlob;
+  vmType2Class["UncommonTrapBlob"] = sapkg.code.UncommonTrapBlob;
+} catch (e) {
+  // Ignore the exception; C2-specific objects may not be
+  // available in a client VM.
+}
+
+
 // This is *not* exhaustive. Add more if needed.
 // code blobs
 vmType2Class["BufferBlob"] = sapkg.code.BufferBlob;
@@ -812,10 +826,8 @@
 vmType2Class["SafepointBlob"] = sapkg.code.SafepointBlob;
 vmType2Class["C2IAdapter"] = sapkg.code.C2IAdapter;
 vmType2Class["DeoptimizationBlob"] = sapkg.code.DeoptimizationBlob;
-vmType2Class["ExceptionBlob"] = sapkg.code.ExceptionBlob;
 vmType2Class["I2CAdapter"] = sapkg.code.I2CAdapter;
 vmType2Class["OSRAdapter"] = sapkg.code.OSRAdapter;
-vmType2Class["UncommonTrapBlob"] = sapkg.code.UncommonTrapBlob;
 vmType2Class["PCDesc"] = sapkg.code.PCDesc;
 
 // interpreter
@@ -876,21 +888,29 @@
 
 // returns description of given pointer as a String
 function whatis(addr) {
-   addr = any2addr(addr);
-   var ptrLoc = findPtr(addr);
-   if (ptrLoc.isUnknown()) {
-      var vmType = vmTypeof(addr);
-      if (vmType != null) {
-         return "pointer to " + vmType.name;
-      } else {
-         var sym = closestSymbolFor(addr);
-         if (sym != null) {
-            return sym.name + '+' + sym.offset;
-         } else {
-            return ptrLoc.toString();
-         }
-      }
-   } else {
-      return ptrLoc.toString();
-   }
+  addr = any2addr(addr);
+  var ptrLoc = findPtr(addr);
+  if (!ptrLoc.isUnknown()) {
+    return ptrLoc.toString();
+  }
+
+  var vmType = vmTypeof(addr);
+  if (vmType != null) {
+    return "pointer to " + vmType.name;
+  }
+
+  var dso = loadObjectContainingPC(addr);
+  if (dso == null) {
+    return ptrLoc.toString();
+  }
+
+  var sym = dso.closestSymbolToPC(addr);
+  if (sym != null) {
+    return sym.name + '+' + sym.offset;
+  }
+
+  var s = dso.getName();
+  var p = s.lastIndexOf("/");
+  var base = dso.getBase();
+  return s.substring(p+1, s.length) + '+' + addr.minus(base);
 }
--- a/make/bsd/makefiles/gcc.make	Wed Feb 26 11:29:47 2014 +0100
+++ b/make/bsd/makefiles/gcc.make	Wed Feb 26 02:38:46 2014 -0800
@@ -260,7 +260,7 @@
   WARNINGS_ARE_ERRORS += -Wno-empty-body
 endif
 
-WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wunused-value
+WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wformat=2 -Wno-error=format-nonliteral
 
 ifeq ($(USE_CLANG),)
   # Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit
@@ -289,7 +289,7 @@
 # The flags to use for an Optimized g++ build
 ifeq ($(OS_VENDOR), Darwin)
   # use -Os by default, unless -O3 can be proved to be worth the cost, as per policy
-  # <http://wikis.sun.com/display/OpenJDK/Mac+OS+X+Port+Compilers>
+  # <https://wiki.openjdk.java.net/display/MacOSXPort/Compiler+Errata>
   OPT_CFLAGS_DEFAULT ?= SIZE
 else
   OPT_CFLAGS_DEFAULT ?= SPEED
--- a/make/linux/makefiles/gcc.make	Wed Feb 26 11:29:47 2014 +0100
+++ b/make/linux/makefiles/gcc.make	Wed Feb 26 02:38:46 2014 -0800
@@ -215,7 +215,7 @@
   WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body
 endif
 
-WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wunused-value
+WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wunused-value -Wformat=2 -Wno-error=format-nonliteral
 
 ifeq ($(USE_CLANG),)
   # Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit
--- a/make/solaris/makefiles/gcc.make	Wed Feb 26 11:29:47 2014 +0100
+++ b/make/solaris/makefiles/gcc.make	Wed Feb 26 02:38:46 2014 -0800
@@ -118,7 +118,7 @@
 # Compiler warnings are treated as errors 
 WARNINGS_ARE_ERRORS = -Werror 
 # Enable these warnings. See 'info gcc' about details on these options
-WARNING_FLAGS = -Wpointer-arith -Wconversion -Wsign-compare -Wundef
+WARNING_FLAGS = -Wpointer-arith -Wconversion -Wsign-compare -Wundef -Wformat=2 -Wno-error=format-nonliteral
 CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) $(WARNING_FLAGS)
 # Special cases 
 CFLAGS_WARN/BYFILE = $(CFLAGS_WARN/$@)$(CFLAGS_WARN/DEFAULT$(CFLAGS_WARN/$@))  
--- a/src/cpu/ppc/vm/ppc.ad	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/cpu/ppc/vm/ppc.ad	Wed Feb 26 02:38:46 2014 -0800
@@ -2076,6 +2076,8 @@
     return false;
 
   switch (opcode) {
+  case Op_SqrtD:
+    return VM_Version::has_fsqrt();
   case Op_CountLeadingZerosI:
   case Op_CountLeadingZerosL:
   case Op_CountTrailingZerosI:
@@ -8740,7 +8742,7 @@
   ins_pipe(pipe_class_default);
 %}
 
-// VM_Version::has_sqrt() decides if this node will be used.
+// VM_Version::has_fsqrt() decides if this node will be used.
 // Sqrt float double precision
 instruct sqrtD_reg(regD dst, regD src) %{
   match(Set dst (SqrtD src));
--- a/src/cpu/sparc/vm/sparc.ad	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/cpu/sparc/vm/sparc.ad	Wed Feb 26 02:38:46 2014 -0800
@@ -2037,19 +2037,6 @@
   return L7_REGP_mask();
 }
 
-const RegMask Matcher::mathExactI_result_proj_mask() {
-  return G1_REGI_mask();
-}
-
-const RegMask Matcher::mathExactL_result_proj_mask() {
-  return G1_REGL_mask();
-}
-
-const RegMask Matcher::mathExactI_flags_proj_mask() {
-  return INT_FLAGS_mask();
-}
-
-
 %}
 
 
--- a/src/cpu/x86/vm/macroAssembler_x86.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/cpu/x86/vm/macroAssembler_x86.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -98,217 +98,6 @@
   return Address::make_array(adr);
 }
 
-int MacroAssembler::biased_locking_enter(Register lock_reg,
-                                         Register obj_reg,
-                                         Register swap_reg,
-                                         Register tmp_reg,
-                                         bool swap_reg_contains_mark,
-                                         Label& done,
-                                         Label* slow_case,
-                                         BiasedLockingCounters* counters) {
-  assert(UseBiasedLocking, "why call this otherwise?");
-  assert(swap_reg == rax, "swap_reg must be rax, for cmpxchg");
-  assert_different_registers(lock_reg, obj_reg, swap_reg);
-
-  if (PrintBiasedLockingStatistics && counters == NULL)
-    counters = BiasedLocking::counters();
-
-  bool need_tmp_reg = false;
-  if (tmp_reg == noreg) {
-    need_tmp_reg = true;
-    tmp_reg = lock_reg;
-  } else {
-    assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
-  }
-  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
-  Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
-  Address klass_addr     (obj_reg, oopDesc::klass_offset_in_bytes());
-  Address saved_mark_addr(lock_reg, 0);
-
-  // Biased locking
-  // See whether the lock is currently biased toward our thread and
-  // whether the epoch is still valid
-  // Note that the runtime guarantees sufficient alignment of JavaThread
-  // pointers to allow age to be placed into low bits
-  // First check to see whether biasing is even enabled for this object
-  Label cas_label;
-  int null_check_offset = -1;
-  if (!swap_reg_contains_mark) {
-    null_check_offset = offset();
-    movl(swap_reg, mark_addr);
-  }
-  if (need_tmp_reg) {
-    push(tmp_reg);
-  }
-  movl(tmp_reg, swap_reg);
-  andl(tmp_reg, markOopDesc::biased_lock_mask_in_place);
-  cmpl(tmp_reg, markOopDesc::biased_lock_pattern);
-  if (need_tmp_reg) {
-    pop(tmp_reg);
-  }
-  jcc(Assembler::notEqual, cas_label);
-  // The bias pattern is present in the object's header. Need to check
-  // whether the bias owner and the epoch are both still current.
-  // Note that because there is no current thread register on x86 we
-  // need to store off the mark word we read out of the object to
-  // avoid reloading it and needing to recheck invariants below. This
-  // store is unfortunate but it makes the overall code shorter and
-  // simpler.
-  movl(saved_mark_addr, swap_reg);
-  if (need_tmp_reg) {
-    push(tmp_reg);
-  }
-  get_thread(tmp_reg);
-  xorl(swap_reg, tmp_reg);
-  if (swap_reg_contains_mark) {
-    null_check_offset = offset();
-  }
-  movl(tmp_reg, klass_addr);
-  xorl(swap_reg, Address(tmp_reg, Klass::prototype_header_offset()));
-  andl(swap_reg, ~((int) markOopDesc::age_mask_in_place));
-  if (need_tmp_reg) {
-    pop(tmp_reg);
-  }
-  if (counters != NULL) {
-    cond_inc32(Assembler::zero,
-               ExternalAddress((address)counters->biased_lock_entry_count_addr()));
-  }
-  jcc(Assembler::equal, done);
-
-  Label try_revoke_bias;
-  Label try_rebias;
-
-  // At this point we know that the header has the bias pattern and
-  // that we are not the bias owner in the current epoch. We need to
-  // figure out more details about the state of the header in order to
-  // know what operations can be legally performed on the object's
-  // header.
-
-  // If the low three bits in the xor result aren't clear, that means
-  // the prototype header is no longer biased and we have to revoke
-  // the bias on this object.
-  testl(swap_reg, markOopDesc::biased_lock_mask_in_place);
-  jcc(Assembler::notZero, try_revoke_bias);
-
-  // Biasing is still enabled for this data type. See whether the
-  // epoch of the current bias is still valid, meaning that the epoch
-  // bits of the mark word are equal to the epoch bits of the
-  // prototype header. (Note that the prototype header's epoch bits
-  // only change at a safepoint.) If not, attempt to rebias the object
-  // toward the current thread. Note that we must be absolutely sure
-  // that the current epoch is invalid in order to do this because
-  // otherwise the manipulations it performs on the mark word are
-  // illegal.
-  testl(swap_reg, markOopDesc::epoch_mask_in_place);
-  jcc(Assembler::notZero, try_rebias);
-
-  // The epoch of the current bias is still valid but we know nothing
-  // about the owner; it might be set or it might be clear. Try to
-  // acquire the bias of the object using an atomic operation. If this
-  // fails we will go in to the runtime to revoke the object's bias.
-  // Note that we first construct the presumed unbiased header so we
-  // don't accidentally blow away another thread's valid bias.
-  movl(swap_reg, saved_mark_addr);
-  andl(swap_reg,
-       markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
-  if (need_tmp_reg) {
-    push(tmp_reg);
-  }
-  get_thread(tmp_reg);
-  orl(tmp_reg, swap_reg);
-  if (os::is_MP()) {
-    lock();
-  }
-  cmpxchgptr(tmp_reg, Address(obj_reg, 0));
-  if (need_tmp_reg) {
-    pop(tmp_reg);
-  }
-  // If the biasing toward our thread failed, this means that
-  // another thread succeeded in biasing it toward itself and we
-  // need to revoke that bias. The revocation will occur in the
-  // interpreter runtime in the slow case.
-  if (counters != NULL) {
-    cond_inc32(Assembler::zero,
-               ExternalAddress((address)counters->anonymously_biased_lock_entry_count_addr()));
-  }
-  if (slow_case != NULL) {
-    jcc(Assembler::notZero, *slow_case);
-  }
-  jmp(done);
-
-  bind(try_rebias);
-  // At this point we know the epoch has expired, meaning that the
-  // current "bias owner", if any, is actually invalid. Under these
-  // circumstances _only_, we are allowed to use the current header's
-  // value as the comparison value when doing the cas to acquire the
-  // bias in the current epoch. In other words, we allow transfer of
-  // the bias from one thread to another directly in this situation.
-  //
-  // FIXME: due to a lack of registers we currently blow away the age
-  // bits in this situation. Should attempt to preserve them.
-  if (need_tmp_reg) {
-    push(tmp_reg);
-  }
-  get_thread(tmp_reg);
-  movl(swap_reg, klass_addr);
-  orl(tmp_reg, Address(swap_reg, Klass::prototype_header_offset()));
-  movl(swap_reg, saved_mark_addr);
-  if (os::is_MP()) {
-    lock();
-  }
-  cmpxchgptr(tmp_reg, Address(obj_reg, 0));
-  if (need_tmp_reg) {
-    pop(tmp_reg);
-  }
-  // If the biasing toward our thread failed, then another thread
-  // succeeded in biasing it toward itself and we need to revoke that
-  // bias. The revocation will occur in the runtime in the slow case.
-  if (counters != NULL) {
-    cond_inc32(Assembler::zero,
-               ExternalAddress((address)counters->rebiased_lock_entry_count_addr()));
-  }
-  if (slow_case != NULL) {
-    jcc(Assembler::notZero, *slow_case);
-  }
-  jmp(done);
-
-  bind(try_revoke_bias);
-  // The prototype mark in the klass doesn't have the bias bit set any
-  // more, indicating that objects of this data type are not supposed
-  // to be biased any more. We are going to try to reset the mark of
-  // this object to the prototype value and fall through to the
-  // CAS-based locking scheme. Note that if our CAS fails, it means
-  // that another thread raced us for the privilege of revoking the
-  // bias of this particular object, so it's okay to continue in the
-  // normal locking code.
-  //
-  // FIXME: due to a lack of registers we currently blow away the age
-  // bits in this situation. Should attempt to preserve them.
-  movl(swap_reg, saved_mark_addr);
-  if (need_tmp_reg) {
-    push(tmp_reg);
-  }
-  movl(tmp_reg, klass_addr);
-  movl(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset()));
-  if (os::is_MP()) {
-    lock();
-  }
-  cmpxchgptr(tmp_reg, Address(obj_reg, 0));
-  if (need_tmp_reg) {
-    pop(tmp_reg);
-  }
-  // Fall through to the normal CAS-based lock, because no matter what
-  // the result of the above CAS, some thread must have succeeded in
-  // removing the bias bit from the object's header.
-  if (counters != NULL) {
-    cond_inc32(Assembler::zero,
-               ExternalAddress((address)counters->revoked_lock_entry_count_addr()));
-  }
-
-  bind(cas_label);
-
-  return null_check_offset;
-}
 void MacroAssembler::call_VM_leaf_base(address entry_point,
                                        int number_of_arguments) {
   call(RuntimeAddress(entry_point));
@@ -726,165 +515,6 @@
   return array;
 }
 
-int MacroAssembler::biased_locking_enter(Register lock_reg,
-                                         Register obj_reg,
-                                         Register swap_reg,
-                                         Register tmp_reg,
-                                         bool swap_reg_contains_mark,
-                                         Label& done,
-                                         Label* slow_case,
-                                         BiasedLockingCounters* counters) {
-  assert(UseBiasedLocking, "why call this otherwise?");
-  assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
-  assert(tmp_reg != noreg, "tmp_reg must be supplied");
-  assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
-  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
-  Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
-  Address saved_mark_addr(lock_reg, 0);
-
-  if (PrintBiasedLockingStatistics && counters == NULL)
-    counters = BiasedLocking::counters();
-
-  // Biased locking
-  // See whether the lock is currently biased toward our thread and
-  // whether the epoch is still valid
-  // Note that the runtime guarantees sufficient alignment of JavaThread
-  // pointers to allow age to be placed into low bits
-  // First check to see whether biasing is even enabled for this object
-  Label cas_label;
-  int null_check_offset = -1;
-  if (!swap_reg_contains_mark) {
-    null_check_offset = offset();
-    movq(swap_reg, mark_addr);
-  }
-  movq(tmp_reg, swap_reg);
-  andq(tmp_reg, markOopDesc::biased_lock_mask_in_place);
-  cmpq(tmp_reg, markOopDesc::biased_lock_pattern);
-  jcc(Assembler::notEqual, cas_label);
-  // The bias pattern is present in the object's header. Need to check
-  // whether the bias owner and the epoch are both still current.
-  load_prototype_header(tmp_reg, obj_reg);
-  orq(tmp_reg, r15_thread);
-  xorq(tmp_reg, swap_reg);
-  andq(tmp_reg, ~((int) markOopDesc::age_mask_in_place));
-  if (counters != NULL) {
-    cond_inc32(Assembler::zero,
-               ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
-  }
-  jcc(Assembler::equal, done);
-
-  Label try_revoke_bias;
-  Label try_rebias;
-
-  // At this point we know that the header has the bias pattern and
-  // that we are not the bias owner in the current epoch. We need to
-  // figure out more details about the state of the header in order to
-  // know what operations can be legally performed on the object's
-  // header.
-
-  // If the low three bits in the xor result aren't clear, that means
-  // the prototype header is no longer biased and we have to revoke
-  // the bias on this object.
-  testq(tmp_reg, markOopDesc::biased_lock_mask_in_place);
-  jcc(Assembler::notZero, try_revoke_bias);
-
-  // Biasing is still enabled for this data type. See whether the
-  // epoch of the current bias is still valid, meaning that the epoch
-  // bits of the mark word are equal to the epoch bits of the
-  // prototype header. (Note that the prototype header's epoch bits
-  // only change at a safepoint.) If not, attempt to rebias the object
-  // toward the current thread. Note that we must be absolutely sure
-  // that the current epoch is invalid in order to do this because
-  // otherwise the manipulations it performs on the mark word are
-  // illegal.
-  testq(tmp_reg, markOopDesc::epoch_mask_in_place);
-  jcc(Assembler::notZero, try_rebias);
-
-  // The epoch of the current bias is still valid but we know nothing
-  // about the owner; it might be set or it might be clear. Try to
-  // acquire the bias of the object using an atomic operation. If this
-  // fails we will go in to the runtime to revoke the object's bias.
-  // Note that we first construct the presumed unbiased header so we
-  // don't accidentally blow away another thread's valid bias.
-  andq(swap_reg,
-       markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
-  movq(tmp_reg, swap_reg);
-  orq(tmp_reg, r15_thread);
-  if (os::is_MP()) {
-    lock();
-  }
-  cmpxchgq(tmp_reg, Address(obj_reg, 0));
-  // If the biasing toward our thread failed, this means that
-  // another thread succeeded in biasing it toward itself and we
-  // need to revoke that bias. The revocation will occur in the
-  // interpreter runtime in the slow case.
-  if (counters != NULL) {
-    cond_inc32(Assembler::zero,
-               ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
-  }
-  if (slow_case != NULL) {
-    jcc(Assembler::notZero, *slow_case);
-  }
-  jmp(done);
-
-  bind(try_rebias);
-  // At this point we know the epoch has expired, meaning that the
-  // current "bias owner", if any, is actually invalid. Under these
-  // circumstances _only_, we are allowed to use the current header's
-  // value as the comparison value when doing the cas to acquire the
-  // bias in the current epoch. In other words, we allow transfer of
-  // the bias from one thread to another directly in this situation.
-  //
-  // FIXME: due to a lack of registers we currently blow away the age
-  // bits in this situation. Should attempt to preserve them.
-  load_prototype_header(tmp_reg, obj_reg);
-  orq(tmp_reg, r15_thread);
-  if (os::is_MP()) {
-    lock();
-  }
-  cmpxchgq(tmp_reg, Address(obj_reg, 0));
-  // If the biasing toward our thread failed, then another thread
-  // succeeded in biasing it toward itself and we need to revoke that
-  // bias. The revocation will occur in the runtime in the slow case.
-  if (counters != NULL) {
-    cond_inc32(Assembler::zero,
-               ExternalAddress((address) counters->rebiased_lock_entry_count_addr()));
-  }
-  if (slow_case != NULL) {
-    jcc(Assembler::notZero, *slow_case);
-  }
-  jmp(done);
-
-  bind(try_revoke_bias);
-  // The prototype mark in the klass doesn't have the bias bit set any
-  // more, indicating that objects of this data type are not supposed
-  // to be biased any more. We are going to try to reset the mark of
-  // this object to the prototype value and fall through to the
-  // CAS-based locking scheme. Note that if our CAS fails, it means
-  // that another thread raced us for the privilege of revoking the
-  // bias of this particular object, so it's okay to continue in the
-  // normal locking code.
-  //
-  // FIXME: due to a lack of registers we currently blow away the age
-  // bits in this situation. Should attempt to preserve them.
-  load_prototype_header(tmp_reg, obj_reg);
-  if (os::is_MP()) {
-    lock();
-  }
-  cmpxchgq(tmp_reg, Address(obj_reg, 0));
-  // Fall through to the normal CAS-based lock, because no matter what
-  // the result of the above CAS, some thread must have succeeded in
-  // removing the bias bit from the object's header.
-  if (counters != NULL) {
-    cond_inc32(Assembler::zero,
-               ExternalAddress((address) counters->revoked_lock_entry_count_addr()));
-  }
-
-  bind(cas_label);
-
-  return null_check_offset;
-}
-
 void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
   Label L, E;
 
@@ -1360,9 +990,16 @@
 
 void MacroAssembler::atomic_incl(AddressLiteral counter_addr) {
   pushf();
-  if (os::is_MP())
-    lock();
-  incrementl(counter_addr);
+  if (reachable(counter_addr)) {
+    if (os::is_MP())
+      lock();
+    incrementl(as_Address(counter_addr));
+  } else {
+    lea(rscratch1, counter_addr);
+    if (os::is_MP())
+      lock();
+    incrementl(Address(rscratch1, 0));
+  }
   popf();
 }
 
@@ -1393,6 +1030,234 @@
   }
 }
 
+int MacroAssembler::biased_locking_enter(Register lock_reg,
+                                         Register obj_reg,
+                                         Register swap_reg,
+                                         Register tmp_reg,
+                                         bool swap_reg_contains_mark,
+                                         Label& done,
+                                         Label* slow_case,
+                                         BiasedLockingCounters* counters) {
+  assert(UseBiasedLocking, "why call this otherwise?");
+  assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
+  LP64_ONLY( assert(tmp_reg != noreg, "tmp_reg must be supplied"); )
+  bool need_tmp_reg = false;
+  if (tmp_reg == noreg) {
+    need_tmp_reg = true;
+    tmp_reg = lock_reg;
+    assert_different_registers(lock_reg, obj_reg, swap_reg);
+  } else {
+    assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
+  }
+  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
+  Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
+  Address saved_mark_addr(lock_reg, 0);
+
+  if (PrintBiasedLockingStatistics && counters == NULL) {
+    counters = BiasedLocking::counters();
+  }
+  // Biased locking
+  // See whether the lock is currently biased toward our thread and
+  // whether the epoch is still valid
+  // Note that the runtime guarantees sufficient alignment of JavaThread
+  // pointers to allow age to be placed into low bits
+  // First check to see whether biasing is even enabled for this object
+  Label cas_label;
+  int null_check_offset = -1;
+  if (!swap_reg_contains_mark) {
+    null_check_offset = offset();
+    movptr(swap_reg, mark_addr);
+  }
+  if (need_tmp_reg) {
+    push(tmp_reg);
+  }
+  movptr(tmp_reg, swap_reg);
+  andptr(tmp_reg, markOopDesc::biased_lock_mask_in_place);
+  cmpptr(tmp_reg, markOopDesc::biased_lock_pattern);
+  if (need_tmp_reg) {
+    pop(tmp_reg);
+  }
+  jcc(Assembler::notEqual, cas_label);
+  // The bias pattern is present in the object's header. Need to check
+  // whether the bias owner and the epoch are both still current.
+#ifndef _LP64
+  // Note that because there is no current thread register on x86_32 we
+  // need to store off the mark word we read out of the object to
+  // avoid reloading it and needing to recheck invariants below. This
+  // store is unfortunate but it makes the overall code shorter and
+  // simpler.
+  movptr(saved_mark_addr, swap_reg);
+#endif
+  if (need_tmp_reg) {
+    push(tmp_reg);
+  }
+  if (swap_reg_contains_mark) {
+    null_check_offset = offset();
+  }
+  load_prototype_header(tmp_reg, obj_reg);
+#ifdef _LP64
+  orptr(tmp_reg, r15_thread);
+  xorptr(tmp_reg, swap_reg);
+  Register header_reg = tmp_reg;
+#else
+  xorptr(tmp_reg, swap_reg);
+  get_thread(swap_reg);
+  xorptr(swap_reg, tmp_reg);
+  Register header_reg = swap_reg;
+#endif
+  andptr(header_reg, ~((int) markOopDesc::age_mask_in_place));
+  if (need_tmp_reg) {
+    pop(tmp_reg);
+  }
+  if (counters != NULL) {
+    cond_inc32(Assembler::zero,
+               ExternalAddress((address) counters->biased_lock_entry_count_addr()));
+  }
+  jcc(Assembler::equal, done);
+
+  Label try_revoke_bias;
+  Label try_rebias;
+
+  // At this point we know that the header has the bias pattern and
+  // that we are not the bias owner in the current epoch. We need to
+  // figure out more details about the state of the header in order to
+  // know what operations can be legally performed on the object's
+  // header.
+
+  // If the low three bits in the xor result aren't clear, that means
+  // the prototype header is no longer biased and we have to revoke
+  // the bias on this object.
+  testptr(header_reg, markOopDesc::biased_lock_mask_in_place);
+  jccb(Assembler::notZero, try_revoke_bias);
+
+  // Biasing is still enabled for this data type. See whether the
+  // epoch of the current bias is still valid, meaning that the epoch
+  // bits of the mark word are equal to the epoch bits of the
+  // prototype header. (Note that the prototype header's epoch bits
+  // only change at a safepoint.) If not, attempt to rebias the object
+  // toward the current thread. Note that we must be absolutely sure
+  // that the current epoch is invalid in order to do this because
+  // otherwise the manipulations it performs on the mark word are
+  // illegal.
+  testptr(header_reg, markOopDesc::epoch_mask_in_place);
+  jccb(Assembler::notZero, try_rebias);
+
+  // The epoch of the current bias is still valid but we know nothing
+  // about the owner; it might be set or it might be clear. Try to
+  // acquire the bias of the object using an atomic operation. If this
+  // fails we will go in to the runtime to revoke the object's bias.
+  // Note that we first construct the presumed unbiased header so we
+  // don't accidentally blow away another thread's valid bias.
+  NOT_LP64( movptr(swap_reg, saved_mark_addr); )
+  andptr(swap_reg,
+         markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
+  if (need_tmp_reg) {
+    push(tmp_reg);
+  }
+#ifdef _LP64
+  movptr(tmp_reg, swap_reg);
+  orptr(tmp_reg, r15_thread);
+#else
+  get_thread(tmp_reg);
+  orptr(tmp_reg, swap_reg);
+#endif
+  if (os::is_MP()) {
+    lock();
+  }
+  cmpxchgptr(tmp_reg, mark_addr); // compares swap_reg (rax) with the mark; installs tmp_reg on match
+  if (need_tmp_reg) {
+    pop(tmp_reg);
+  }
+  // If the biasing toward our thread failed, this means that
+  // another thread succeeded in biasing it toward itself and we
+  // need to revoke that bias. The revocation will occur in the
+  // interpreter runtime in the slow case.
+  if (counters != NULL) {
+    cond_inc32(Assembler::zero,
+               ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
+  }
+  if (slow_case != NULL) {
+    jcc(Assembler::notZero, *slow_case);
+  }
+  jmp(done);
+
+  bind(try_rebias);
+  // At this point we know the epoch has expired, meaning that the
+  // current "bias owner", if any, is actually invalid. Under these
+  // circumstances _only_, we are allowed to use the current header's
+  // value as the comparison value when doing the cas to acquire the
+  // bias in the current epoch. In other words, we allow transfer of
+  // the bias from one thread to another directly in this situation.
+  //
+  // FIXME: due to a lack of registers we currently blow away the age
+  // bits in this situation. Should attempt to preserve them.
+  if (need_tmp_reg) {
+    push(tmp_reg);
+  }
+  load_prototype_header(tmp_reg, obj_reg);
+#ifdef _LP64
+  orptr(tmp_reg, r15_thread);
+#else
+  get_thread(swap_reg);
+  orptr(tmp_reg, swap_reg);
+  movptr(swap_reg, saved_mark_addr);
+#endif
+  if (os::is_MP()) {
+    lock();
+  }
+  cmpxchgptr(tmp_reg, mark_addr); // compares swap_reg (rax) with the mark; installs tmp_reg on match
+  if (need_tmp_reg) {
+    pop(tmp_reg);
+  }
+  // If the biasing toward our thread failed, then another thread
+  // succeeded in biasing it toward itself and we need to revoke that
+  // bias. The revocation will occur in the runtime in the slow case.
+  if (counters != NULL) {
+    cond_inc32(Assembler::zero,
+               ExternalAddress((address) counters->rebiased_lock_entry_count_addr()));
+  }
+  if (slow_case != NULL) {
+    jcc(Assembler::notZero, *slow_case);
+  }
+  jmp(done);
+
+  bind(try_revoke_bias);
+  // The prototype mark in the klass doesn't have the bias bit set any
+  // more, indicating that objects of this data type are not supposed
+  // to be biased any more. We are going to try to reset the mark of
+  // this object to the prototype value and fall through to the
+  // CAS-based locking scheme. Note that if our CAS fails, it means
+  // that another thread raced us for the privilege of revoking the
+  // bias of this particular object, so it's okay to continue in the
+  // normal locking code.
+  //
+  // FIXME: due to a lack of registers we currently blow away the age
+  // bits in this situation. Should attempt to preserve them.
+  NOT_LP64( movptr(swap_reg, saved_mark_addr); )
+  if (need_tmp_reg) {
+    push(tmp_reg);
+  }
+  load_prototype_header(tmp_reg, obj_reg);
+  if (os::is_MP()) {
+    lock();
+  }
+  cmpxchgptr(tmp_reg, mark_addr); // compares swap_reg (rax) with the mark; installs tmp_reg on match
+  if (need_tmp_reg) {
+    pop(tmp_reg);
+  }
+  // Fall through to the normal CAS-based lock, because no matter what
+  // the result of the above CAS, some thread must have succeeded in
+  // removing the bias bit from the object's header.
+  if (counters != NULL) {
+    cond_inc32(Assembler::zero,
+               ExternalAddress((address) counters->revoked_lock_entry_count_addr()));
+  }
+
+  bind(cas_label);
+
+  return null_check_offset;
+}
+
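Everything biased_locking_enter does above hinges on the mark-word layout pinned down by the assert at its entry. A hedged restatement of that layout and of the bias test (constants spelled out here as assumptions rather than pulled from markOopDesc):

    // Sketch of the biased mark word, 64-bit layout:
    //   [ JavaThread* : 54 | epoch : 2 | age : 4 | biased_lock : 1 | lock : 2 ]
    const uintptr_t biased_lock_mask    = 0x7;  // bias bit plus the two lock bits
    const uintptr_t biased_lock_pattern = 0x5;  // 101b: object is biasable/biased

    static bool mark_is_biased(uintptr_t mark) {
      return (mark & biased_lock_mask) == biased_lock_pattern;
    }

    // XORing the mark against (prototype header | thread) and masking out the
    // age bits yields zero exactly when the current thread already owns the
    // bias in the current epoch -- the Assembler::equal fast exit above.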
 void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
   assert(UseBiasedLocking, "why call this otherwise?");
 
@@ -1408,6 +1273,620 @@
   jcc(Assembler::equal, done);
 }
 
+#ifdef COMPILER2
+// Fast_Lock and Fast_Unlock used by C2
+
+// Because the transitions from emitted code to the runtime
+// monitorenter/exit helper stubs are so slow it's critical that
+// we inline both the stack-locking fast-path and the inflated fast path.
+//
+// See also: cmpFastLock and cmpFastUnlock.
+//
+// What follows is a specialized inline transliteration of the code
+// in slow_enter() and slow_exit().  If we're concerned about I$ bloat
+// another option would be to emit TrySlowEnter and TrySlowExit methods
+// at startup-time.  These methods would accept arguments as
+// (rax=Obj, rbx=Self, rcx=box, rdx=Scratch) and return success-failure
+// indications in the icc.ZFlag.  Fast_Lock and Fast_Unlock would simply
+// marshal the arguments and emit calls to TrySlowEnter and TrySlowExit.
+// In practice, however, the # of lock sites is bounded and is usually small.
+// Besides the call overhead, TrySlowEnter and TrySlowExit might suffer
+// if the processor uses simple bimodal branch predictors keyed by EIP,
+// since the helper routines would be called from multiple synchronization
+// sites.
+//
+// An even better approach would be to write "MonitorEnter()" and "MonitorExit()"
+// in java - using j.u.c and unsafe - and just bind the lock and unlock sites
+// to those specialized methods.  That'd give us a mostly platform-independent
+// implementation that the JITs could optimize and inline at their pleasure.
+// Done correctly, the only time we'd need to cross to native code would be
+// to park() or unpark() threads.  We'd also need a few more unsafe operators
+// to (a) prevent compiler-JIT reordering of non-volatile accesses, and
+// (b) explicit barriers or fence operations.
+//
+// TODO:
+//
+// *  Arrange for C2 to pass "Self" into Fast_Lock and Fast_Unlock in one of the registers (scr).
+//    This avoids manifesting the Self pointer in the Fast_Lock and Fast_Unlock terminals.
+//    Given TLAB allocation, Self is usually manifested in a register, so passing it into
+//    the lock operators would typically be faster than reifying Self.
+//
+// *  Ideally I'd define the primitives as:
+//       fast_lock   (nax Obj, nax box, EAX tmp, nax scr) where box, tmp and scr are KILLED.
+//       fast_unlock (nax Obj, EAX box, nax tmp) where box and tmp are KILLED
+//    Unfortunately ADLC bugs prevent us from expressing the ideal form.
+//    Instead, we're stuck with the rather awkward and brittle register assignments below.
+//    Furthermore the register assignments are overconstrained, possibly resulting in
+//    sub-optimal code near the synchronization site.
+//
+// *  Eliminate the sp-proximity tests and just use "== Self" tests instead.
+//    Alternately, use a better sp-proximity test.
+//
+// *  Currently ObjectMonitor._Owner can hold either an sp value or a (THREAD *) value.
+//    Either one is sufficient to uniquely identify a thread.
+//    TODO: eliminate use of sp in _owner and use get_thread(tr) instead.
+//
+// *  Intrinsify notify() and notifyAll() for the common cases where the
+//    object is locked by the calling thread but the waitlist is empty.
+//    This avoids the expensive JNI calls to JVM_Notify() and JVM_NotifyAll().
+//
+// *  Use jccb and jmpb instead of jcc and jmp to improve code density.
+//    But beware of excessive branch density on AMD Opterons.
+//
+// *  Both Fast_Lock and Fast_Unlock set the ICC.ZF to indicate success
+//    or failure of the fast-path.  If the fast-path fails then we pass
+//    control to the slow-path, typically in C.  In Fast_Lock and
+//    Fast_Unlock we often branch to DONE_LABEL, just to find that C2
+//    will emit a conditional branch immediately after the node.
+//    So we have branches to branches and lots of ICC.ZF games.
+//    Instead, it might be better to have C2 pass a "FailureLabel"
+//    into Fast_Lock and Fast_Unlock.  In the case of success, control
+//    will drop through the node.  ICC.ZF is undefined at exit.
+//    In the case of failure, the node will branch directly to the
+//    FailureLabel
+
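Concretely, the ZF protocol means each cmpFastLock/cmpFastUnlock node reduces to a call into these routines followed by a conditional branch on ZF. An illustrative caller shape in the same MacroAssembler idiom (registers, labels and the counters argument are hypothetical):

    fast_lock(objReg, boxReg, rax, scrReg, counters);  // ZF=1 iff lock acquired
    jcc(Assembler::notEqual, slow_enter);              // ZF=0 -> runtime slow path
    // ... critical section ...
    fast_unlock(objReg, rax, tmpReg);                  // same ZF protocol on exit
    jcc(Assembler::notEqual, slow_exit);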
+
+// obj: object to lock
+// box: on-stack box address (displaced header location) - KILLED
+// rax: tmp -- KILLED
+// scr: tmp -- KILLED
+void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg, Register scrReg, BiasedLockingCounters* counters) {
+  // Ensure the register assignments are disjoint
+  guarantee (objReg != boxReg, "");
+  guarantee (objReg != tmpReg, "");
+  guarantee (objReg != scrReg, "");
+  guarantee (boxReg != tmpReg, "");
+  guarantee (boxReg != scrReg, "");
+  guarantee (tmpReg == rax, "");
+
+  if (counters != NULL) {
+    atomic_incl(ExternalAddress((address)counters->total_entry_count_addr()));
+  }
+  if (EmitSync & 1) {
+      // set box->dhw = unused_mark (3)
+      // Force all sync thru slow-path: slow_enter() and slow_exit()
+      movptr (Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
+      cmpptr (rsp, (int32_t)NULL_WORD);
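+      // rsp is never zero, so this compare leaves ZF=0 and control always
+      // takes the slow path.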
+  } else
+  if (EmitSync & 2) {
+      Label DONE_LABEL ;
+      if (UseBiasedLocking) {
+         // Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument.
+         biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, counters);
+      }
+
+      movptr(tmpReg, Address(objReg, 0));           // fetch markword
+      orptr (tmpReg, 0x1);
+      movptr(Address(boxReg, 0), tmpReg);           // Anticipate successful CAS
+      if (os::is_MP()) {
+        lock();
+      }
+      cmpxchgptr(boxReg, Address(objReg, 0));       // Updates tmpReg
+      jccb(Assembler::equal, DONE_LABEL);
+      // Recursive locking
+      subptr(tmpReg, rsp);
+      andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - os::vm_page_size())) );
+      movptr(Address(boxReg, 0), tmpReg);
+      bind(DONE_LABEL);
+  } else {
+    // Possible cases that we'll encounter in fast_lock
+    // ------------------------------------------------
+    // * Inflated
+    //    -- unlocked
+    //    -- Locked
+    //       = by self
+    //       = by other
+    // * biased
+    //    -- by Self
+    //    -- by other
+    // * neutral
+    // * stack-locked
+    //    -- by self
+    //       = sp-proximity test hits
+    //       = sp-proximity test generates false-negative
+    //    -- by other
+    //
+
+    Label IsInflated, DONE_LABEL;
+
+    // The object is stack-locked, biased or neutral at this point.
+    // TODO: optimize away redundant LDs of obj->mark and improve the markword triage
+    // order to reduce the number of conditional branches in the most common cases.
+    // Beware -- there's a subtle invariant that fetch of the markword
+    // at [FETCH], below, will never observe a biased encoding (*101b).
+    // If this invariant is not held we risk exclusion (safety) failure.
+    if (UseBiasedLocking && !UseOptoBiasInlining) {
+      biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, counters);
+    }
+
+    movptr(tmpReg, Address(objReg, 0));          // [FETCH]
+    testl (tmpReg, markOopDesc::monitor_value);  // inflated vs stack-locked|neutral|biased
+    jccb  (Assembler::notZero, IsInflated);
+
+    // Attempt stack-locking ...
+    orptr (tmpReg, 0x1);
+    movptr(Address(boxReg, 0), tmpReg);          // Anticipate successful CAS
+    if (os::is_MP()) {
+      lock();
+    }
+    cmpxchgptr(boxReg, Address(objReg, 0));      // Updates tmpReg
+    if (counters != NULL) {
+      cond_inc32(Assembler::equal,
+                 ExternalAddress((address)counters->fast_path_entry_count_addr()));
+    }
+    jccb(Assembler::equal, DONE_LABEL);
+
+    // Recursive locking
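+    // (mark - rsp), masked, is zero only when the mark now in tmpReg is an
+    // address within one page of rsp, i.e. a BasicLock on this thread's
+    // stack: the sp-proximity test for a recursive stack lock.  Zero is
+    // then stored as the displaced header, and ZF=1 signals success.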
+    subptr(tmpReg, rsp);
+    andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - os::vm_page_size())) );
+    movptr(Address(boxReg, 0), tmpReg);
+    if (counters != NULL) {
+      cond_inc32(Assembler::equal,
+                 ExternalAddress((address)counters->fast_path_entry_count_addr()));
+    }
+    jmpb(DONE_LABEL);
+
+    bind(IsInflated);
+#ifndef _LP64
+    // The object is inflated.
+    //
+    // TODO-FIXME: eliminate the ugly use of manifest constants:
+    //   Use markOopDesc::monitor_value instead of "2".
+    //   Use markOopDesc::unused_mark() instead of "3".
+    // The tmpReg value is an objectMonitor reference ORed with
+    // markOopDesc::monitor_value (2).   We can either convert tmpReg to an
+    // objectmonitor pointer by masking off the "2" bit or we can just
+    // use tmpReg as an objectmonitor pointer but bias the objectmonitor
+    // field offsets with "-2" to compensate for and annul the low-order tag bit.
+    //
+    // I use the latter as it avoids AGI stalls.
+    // As such, we write "mov r, [tmpReg+OFFSETOF(Owner)-2]"
+    // instead of "mov r, [tmpReg+OFFSETOF(Owner)]".
+    //
+    #define OFFSET_SKEWED(f) ((ObjectMonitor::f ## _offset_in_bytes())-2)
+
+    // boxReg refers to the on-stack BasicLock in the current frame.
+    // We'd like to write:
+    //   set box->_displaced_header = markOop::unused_mark().  Any non-0 value suffices.
+    // This is convenient but results in a ST-before-CAS penalty.  The following CAS suffers
+    // additional latency as we have another ST in the store buffer that must drain.
+
+    if (EmitSync & 8192) {
+       movptr(Address(boxReg, 0), 3);            // results in ST-before-CAS penalty
+       get_thread (scrReg);
+       movptr(boxReg, tmpReg);                    // consider: LEA box, [tmp-2]
+       movptr(tmpReg, NULL_WORD);                 // consider: xor vs mov
+       if (os::is_MP()) {
+         lock();
+       }
+       cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
+    } else
+    if ((EmitSync & 128) == 0) {                      // avoid ST-before-CAS
+       movptr(scrReg, boxReg);
+       movptr(boxReg, tmpReg);                   // consider: LEA box, [tmp-2]
+
+       // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
+       if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
+          // prefetchw [eax + Offset(_owner)-2]
+          prefetchw(Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+       }
+
+       if ((EmitSync & 64) == 0) {
+         // Optimistic form: consider XORL tmpReg,tmpReg
+         movptr(tmpReg, NULL_WORD);
+       } else {
+         // Can suffer RTS->RTO upgrades on shared or cold $ lines
+         // Test-And-CAS instead of CAS
+         movptr(tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));   // rax, = m->_owner
+         testptr(tmpReg, tmpReg);                   // Locked ?
+         jccb  (Assembler::notZero, DONE_LABEL);
+       }
+
+       // Appears unlocked - try to swing _owner from null to non-null.
+       // Ideally, I'd manifest "Self" with get_thread and then attempt
+       // to CAS the register containing Self into m->Owner.
+       // But we don't have enough registers, so instead we can either try to CAS
+       // rsp or the address of the box (in scr) into &m->owner.  If the CAS succeeds
+       // we later store "Self" into m->Owner.  Transiently storing a stack address
+       // (rsp or the address of the box) into m->owner is harmless.
+       // Invariant: tmpReg == 0.  tmpReg is EAX which is the implicit cmpxchg comparand.
+       if (os::is_MP()) {
+         lock();
+       }
+       cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
+       movptr(Address(scrReg, 0), 3);          // box->_displaced_header = 3
+       jccb  (Assembler::notZero, DONE_LABEL);
+       get_thread (scrReg);                    // beware: clobbers ICCs
+       movptr(Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2), scrReg);
+       xorptr(boxReg, boxReg);                 // set icc.ZFlag = 1 to indicate success
+
+       // If the CAS fails we can either retry or pass control to the slow-path.
+       // We use the latter tactic.
+       // Pass the CAS result in the icc.ZFlag into DONE_LABEL
+       // If the CAS was successful ...
+       //   Self has acquired the lock
+       //   Invariant: m->_recursions should already be 0, so we don't need to explicitly set it.
+       // Intentional fall-through into DONE_LABEL ...
+    } else {
+       movptr(Address(boxReg, 0), intptr_t(markOopDesc::unused_mark()));  // results in ST-before-CAS penalty
+       movptr(boxReg, tmpReg);
+
+       // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
+       if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
+          // prefetchw [eax + Offset(_owner)-2]
+          prefetchw(Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+       }
+
+       if ((EmitSync & 64) == 0) {
+         // Optimistic form
+         xorptr  (tmpReg, tmpReg);
+       } else {
+         // Can suffer RTS->RTO upgrades on shared or cold $ lines
+         movptr(tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));   // rax, = m->_owner
+         testptr(tmpReg, tmpReg);                   // Locked ?
+         jccb  (Assembler::notZero, DONE_LABEL);
+       }
+
+       // Appears unlocked - try to swing _owner from null to non-null.
+       // Use either "Self" (in scr) or rsp as thread identity in _owner.
+       // Invariant: tmpReg == 0.  tmpReg is EAX which is the implicit cmpxchg comparand.
+       get_thread (scrReg);
+       if (os::is_MP()) {
+         lock();
+       }
+       cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
+
+       // If the CAS fails we can either retry or pass control to the slow-path.
+       // We use the latter tactic.
+       // Pass the CAS result in the icc.ZFlag into DONE_LABEL
+       // If the CAS was successful ...
+       //   Self has acquired the lock
+       //   Invariant: m->_recursions should already be 0, so we don't need to explicitly set it.
+       // Intentional fall-through into DONE_LABEL ...
+    }
+#else // _LP64
+    // It's inflated
+
+    // TODO: someday avoid the ST-before-CAS penalty by
+    // relocating (deferring) the following ST.
+    // We should also think about trying a CAS without having
+    // fetched _owner.  If the CAS is successful we may
+    // avoid an RTO->RTS upgrade on the $line.
+
+    // Without the cast to int32_t a movptr would destroy r10, which typically holds obj
+    movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
+
+    mov    (boxReg, tmpReg);
+    movptr (tmpReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
+    testptr(tmpReg, tmpReg);
+    jccb   (Assembler::notZero, DONE_LABEL);
+
+    // It's inflated and appears unlocked
+    if (os::is_MP()) {
+      lock();
+    }
+    cmpxchgptr(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
+    // Intentional fall-through into DONE_LABEL ...
+
+#endif
+
+    // DONE_LABEL is a hot target - we'd really like to place it at the
+    // start of a cache line by padding with NOPs.
+    // See the AMD and Intel software optimization manuals for the
+    // most efficient "long" NOP encodings.
+    // Unfortunately none of our alignment mechanisms suffice.
+    bind(DONE_LABEL);
+
+    // At DONE_LABEL the icc ZFlag is set as follows ...
+    // Fast_Unlock uses the same protocol.
+    // ZFlag == 1 -> Success
+    // ZFlag == 0 -> Failure - force control through the slow-path
+  }
+}
+
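For reference, the EmitSync bits exercised by fast_lock above and fast_unlock below (EmitSync is a diagnostic flag that defaults to zero; the bits select alternative code shapes):

    bit     1       fast_lock: force every enter through the slow path
    bit     2       fast_lock: stack-locking-only enter, no inflated fast path
    bit     4       fast_unlock: force every exit through the slow path
    bit     8       fast_unlock: classic stack-locking exit only
    bit    16       elide the optional _succ pre-test in the contended exit
    bit    64       test-and-CAS instead of a bare CAS on m->_owner
    bit   128       select the ST-before-CAS inflated-enter variant (32-bit)
    bit   256       alternate load/branch layout in the 32-bit exit path
    bit  2048/4096  prefetchw of m->_owner in enter/exit respectively
    bit  8192       inflated enter that CASes Self directly (32-bit)
    bit 32768       trailing NOP at DONE_LABEL to avoid branch-to-branch
    bit 65536       gate the _succ-based 1-0 exit code (sense differs between
                    the 32-bit and 64-bit paths)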
+// obj: object to unlock
+// box: box address (displaced header location), killed.  Must be EAX.
+// tmp: killed, cannot be obj nor box.
+//
+// Some commentary on balanced locking:
+//
+// Fast_Lock and Fast_Unlock are emitted only for provably balanced lock sites.
+// Methods that don't have provably balanced locking are forced to run in the
+// interpreter - such methods won't be compiled to use fast_lock and fast_unlock.
+// The interpreter provides two properties:
+// I1:  At return-time the interpreter automatically and quietly unlocks any
+//      objects acquired by the current activation (frame).  Recall that the
+//      interpreter maintains an on-stack list of locks currently held by
+//      a frame.
+// I2:  If a method attempts to unlock an object that is not held by
+//      the frame, the interpreter throws IMSX (IllegalMonitorStateException).
+//
+// Let's say A(), which has provably balanced locking, acquires O and then calls B().
+// B() doesn't have provably balanced locking so it runs in the interpreter.
+// Control returns to A() and A() unlocks O.  By I1 and I2, above, we know that O
+// is still locked by A().
+//
+// The only other source of unbalanced locking would be JNI.  The "Java Native Interface:
+// Programmer's Guide and Specification" claims that an object locked by jni_monitorenter
+// should not be unlocked by "normal" java-level locking and vice-versa.  The specification
+// doesn't specify what will occur if a program engages in such mixed-mode locking, however.
+
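The recursion test at the top of fast_unlock relies on the convention fast_lock established: a recursive stack lock leaves 0 in the BasicLock's displaced-header word, so only the outermost exit needs a CAS. A sketch of the invariant (illustrative types, not VM source):

    struct BasicLockSketch { uintptr_t displaced_header; };  // illustrative

    static bool is_recursive_exit(const BasicLockSketch* box) {
      // zero => recursive stack lock: nothing to restore, exit succeeds;
      // non-zero => outermost lock: CAS the saved mark back into obj->mark.
      return box->displaced_header == 0;
    }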
+void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpReg) {
+  guarantee (objReg != boxReg, "");
+  guarantee (objReg != tmpReg, "");
+  guarantee (boxReg != tmpReg, "");
+  guarantee (boxReg == rax, "");
+
+  if (EmitSync & 4) {
+    // Disable - inhibit all inlining.  Force control through the slow-path
+    cmpptr (rsp, 0);
+  } else
+  if (EmitSync & 8) {
+    Label DONE_LABEL;
+    if (UseBiasedLocking) {
+       biased_locking_exit(objReg, tmpReg, DONE_LABEL);
+    }
+    // Classic stack-locking code ...
+    // Check whether the displaced header is 0
+    // (=> recursive unlock)
+    movptr(tmpReg, Address(boxReg, 0));
+    testptr(tmpReg, tmpReg);
+    jccb(Assembler::zero, DONE_LABEL);
+    // If not recursive lock, reset the header to displaced header
+    if (os::is_MP()) {
+      lock();
+    }
+    cmpxchgptr(tmpReg, Address(objReg, 0));   // Uses RAX which is box
+    bind(DONE_LABEL);
+  } else {
+    Label DONE_LABEL, Stacked, CheckSucc;
+
+    // Critically, the biased locking test must have precedence over
+    // and appear before the (box->dhw == 0) recursive stack-lock test.
+    if (UseBiasedLocking && !UseOptoBiasInlining) {
+       biased_locking_exit(objReg, tmpReg, DONE_LABEL);
+    }
+
+    cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD); // Examine the displaced header
+    movptr(tmpReg, Address(objReg, 0));             // Examine the object's markword
+    jccb  (Assembler::zero, DONE_LABEL);            // 0 indicates recursive stack-lock
+
+    testptr(tmpReg, 0x02);                          // Inflated?
+    jccb  (Assembler::zero, Stacked);
+
+    // It's inflated.
+    // Despite our balanced locking property we still check that m->_owner == Self
+    // as java routines or native JNI code called by this thread might
+    // have released the lock.
+    // Refer to the comments in synchronizer.cpp for how we might encode extra
+    // state in _succ so we can avoid fetching EntryList|cxq.
+    //
+    // I'd like to add more cases in fast_lock() and fast_unlock() --
+    // such as recursive enter and exit -- but we have to be wary of
+    // I$ bloat, T$ effects and BP$ effects.
+    //
+    // If there's no contention try a 1-0 exit.  That is, exit without
+    // a costly MEMBAR or CAS.  See synchronizer.cpp for details on how
+    // we detect and recover from the race that the 1-0 exit admits.
+    //
+    // Conceptually Fast_Unlock() must execute a STST|LDST "release" barrier
+    // before it STs null into _owner, releasing the lock.  Updates
+    // to data protected by the critical section must be visible before
+    // we drop the lock (and thus before any other thread could acquire
+    // the lock and observe the fields protected by the lock).
+    // IA32's memory model keeps stores in program order ("SPO", i.e. TSO-like),
+    // so STs are ordered with respect to each other and no explicit ST-ST
+    // barrier (fence) is needed before the releasing store.
+    // See also http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
+#ifndef _LP64
+    get_thread (boxReg);
+    if ((EmitSync & 4096) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
+      // prefetchw [ebx + Offset(_owner)-2]
+      prefetchw(Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+    }
+
+    // Note that we could employ various encoding schemes to reduce
+    // the number of loads below (currently 4) to just 2 or 3.
+    // Refer to the comments in synchronizer.cpp.
+    // In practice the chain of fetches doesn't seem to impact performance, however.
+    if ((EmitSync & 65536) == 0 && (EmitSync & 256)) {
+       // Attempt to reduce branch density - AMD's branch predictor.
+       xorptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+       orptr(boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2));
+       orptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2));
+       orptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2));
+       jccb  (Assembler::notZero, DONE_LABEL);
+       movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD);
+       jmpb  (DONE_LABEL);
+    } else {
+       xorptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+       orptr(boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2));
+       jccb  (Assembler::notZero, DONE_LABEL);
+       movptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2));
+       orptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2));
+       jccb  (Assembler::notZero, CheckSucc);
+       movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD);
+       jmpb  (DONE_LABEL);
+    }
+
+    // The following code fragment (EmitSync & 65536) improves the performance of
+    // contended applications and contended synchronization microbenchmarks.
+    // Unfortunately the emission of the code - even though not executed - causes regressions
+    // in scimark and jetstream, evidently because of $ effects.  Replacing the code
+    // with an equal number of never-executed NOPs results in the same regression.
+    // We leave it off by default.
+
+    if ((EmitSync & 65536) != 0) {
+       Label LSuccess, LGoSlowPath ;
+
+       bind  (CheckSucc);
+
+       // Optional pre-test ... it's safe to elide this
+       if ((EmitSync & 16) == 0) {
+          cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD);
+          jccb  (Assembler::zero, LGoSlowPath);
+       }
+
+       // We have a classic Dekker-style idiom:
+       //    ST m->_owner = 0 ; MEMBAR; LD m->_succ
+       // There are a number of ways to implement the barrier:
+       // (1) lock:andl &m->_owner, 0
+       //     is fast, but masm doesn't currently support the "ANDL M,IMM32" form.
+       //     LOCK: ANDL [ebx+Offset(_Owner)-2], 0
+       //     Encodes as 81 31 OFF32 IMM32 or 83 63 OFF8 IMM8
+       // (2) If supported, an explicit MFENCE is appealing.
+       //     In older IA32 processors MFENCE is slower than lock:add or xchg
+       //     particularly if the write-buffer is full, as might be the case
+       //     if stores closely precede the fence or fence-equivalent instruction.
+       //     In more modern implementations MFENCE appears faster, however.
+       // (3) In lieu of an explicit fence, use lock:addl to the top-of-stack
+       //     The $lines underlying the top-of-stack should be in M-state.
+       //     The locked add instruction is serializing, of course.
+       // (4) Use xchg, which is serializing
+       //     mov boxReg, 0; xchgl boxReg, [tmpReg + Offset(_owner)-2] also works
+       // (5) ST m->_owner = 0 and then execute lock:orl &m->_succ, 0.
+       //     The integer condition codes will tell us if succ was 0.
+       //     Since _succ and _owner should reside in the same $line and
+       //     we just stored into _owner, it's likely that the $line
+       //     remains in M-state for the lock:orl.
+       //
+       // We currently use (3), although it's likely that switching to (2)
+       // is correct for the future.
+
+       movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD);
+       if (os::is_MP()) {
+          if (VM_Version::supports_sse2() && 1 == FenceInstruction) {
+            mfence();
+          } else {
+            lock (); addptr(Address(rsp, 0), 0);
+          }
+       }
+       // Ratify _succ remains non-null
+       cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), 0);
+       jccb  (Assembler::notZero, LSuccess);
+
+       xorptr(boxReg, boxReg);                  // box is really EAX
+       if (os::is_MP()) { lock(); }
+       cmpxchgptr(rsp, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+       jccb  (Assembler::notEqual, LSuccess);
+       // Since we're low on registers we installed rsp as a placeholder in _owner.
+       // Now install Self over rsp.  This is safe as we're transitioning from
+       // non-null to non-null.
+       get_thread (boxReg);
+       movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), boxReg);
+       // Intentional fall-through into LGoSlowPath ...
+
+       bind  (LGoSlowPath);
+       orptr(boxReg, 1);                      // set ICC.ZF=0 to indicate failure
+       jmpb  (DONE_LABEL);
+
+       bind  (LSuccess);
+       xorptr(boxReg, boxReg);                 // set ICC.ZF=1 to indicate success
+       jmpb  (DONE_LABEL);
+    }
+
+    bind (Stacked);
+    // It's not inflated and it's not recursively stack-locked and it's not biased.
+    // It must be stack-locked.
+    // Try to reset the header to displaced header.
+    // The "box" value on the stack is stable, so we can reload
+    // and be assured we observe the same value as above.
+    movptr(tmpReg, Address(boxReg, 0));
+    if (os::is_MP()) {
+      lock();
+    }
+    cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
+    // Intentional fall-through into DONE_LABEL
+
+    // DONE_LABEL is a hot target - we'd really like to place it at the
+    // start of a cache line by padding with NOPs.
+    // See the AMD and Intel software optimization manuals for the
+    // most efficient "long" NOP encodings.
+    // Unfortunately none of our alignment mechanisms suffice.
+    if ((EmitSync & 65536) == 0) {
+       bind (CheckSucc);
+    }
+#else // _LP64
+    // It's inflated
+    movptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+    xorptr(boxReg, r15_thread);
+    orptr (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2));
+    jccb  (Assembler::notZero, DONE_LABEL);
+    movptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2));
+    orptr (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2));
+    jccb  (Assembler::notZero, CheckSucc);
+    movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD);
+    jmpb  (DONE_LABEL);
+
+    if ((EmitSync & 65536) == 0) {
+      Label LSuccess, LGoSlowPath ;
+      bind  (CheckSucc);
+      cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD);
+      jccb  (Assembler::zero, LGoSlowPath);
+
+      // I'd much rather use lock:andl m->_owner, 0 as it's faster than
+      // the explicit ST;MEMBAR combination, but masm doesn't currently support
+      // "ANDQ M,IMM".  Don't use MFENCE here.  lock:add to TOS, xchg, etc
+      // are all faster when the write buffer is populated.
+      movptr (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD);
+      if (os::is_MP()) {
+         lock (); addl (Address(rsp, 0), 0);
+      }
+      cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD);
+      jccb  (Assembler::notZero, LSuccess);
+
+      movptr (boxReg, (int32_t)NULL_WORD);                   // box is really RAX
+      if (os::is_MP()) { lock(); }
+      cmpxchgptr(r15_thread, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+      jccb  (Assembler::notEqual, LSuccess);
+      // Intentional fall-through into slow-path
+
+      bind  (LGoSlowPath);
+      orl   (boxReg, 1);                      // set ICC.ZF=0 to indicate failure
+      jmpb  (DONE_LABEL);
+
+      bind  (LSuccess);
+      testl (boxReg, 0);                      // set ICC.ZF=1 to indicate success
+      jmpb  (DONE_LABEL);
+    }
+
+    bind  (Stacked);
+    movptr(tmpReg, Address (boxReg, 0));      // re-fetch
+    if (os::is_MP()) { lock(); }
+    cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
+
+    if (EmitSync & 65536) {
+       bind (CheckSucc);
+    }
+#endif
+    bind(DONE_LABEL);
+    // Avoid branch to branch on AMD processors
+    if (EmitSync & 32768) {
+       nop();
+    }
+  }
+}
+#endif // COMPILER2
+
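The contended-exit code above is a Dekker-style handoff: the releasing store of _owner must be fenced before the load of _succ, or the exiting thread and a parking thread can each conclude the other will do the wakeup. The same 1-0 exit, sketched with std::atomic (illustrative only; the real recovery logic lives in ObjectMonitor/synchronizer.cpp):

    #include <atomic>

    struct MonitorSketch {                    // illustrative, not the VM type
      std::atomic<void*> owner;
      std::atomic<void*> succ;                // presumed heir; may self-wake
    };

    void one_zero_exit(MonitorSketch* m, void* self) {
      m->owner.store(nullptr, std::memory_order_seq_cst);   // ST; full fence
      if (m->succ.load(std::memory_order_seq_cst) != nullptr)
        return;                               // LSuccess: the heir will self-wake
      void* expected = nullptr;               // _succ vanished: try to re-own
      if (!m->owner.compare_exchange_strong(expected, self))
        return;                               // LSuccess: another thread owns it
      // LGoSlowPath: we re-own the monitor and must unpark a successor.
    }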
 void MacroAssembler::c2bool(Register x) {
   // implements x == 0 ? 0 : 1
   // note: must only look at least-significant byte of x
--- a/src/cpu/x86/vm/macroAssembler_x86.hpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/cpu/x86/vm/macroAssembler_x86.hpp	Wed Feb 26 02:38:46 2014 -0800
@@ -651,7 +651,12 @@
                            Label& done, Label* slow_case = NULL,
                            BiasedLockingCounters* counters = NULL);
   void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
-
+#ifdef COMPILER2
+  // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
+  // See the full description in macroAssembler_x86.cpp.
+  void fast_lock(Register obj, Register box, Register tmp, Register scr, BiasedLockingCounters* counters);
+  void fast_unlock(Register obj, Register box, Register tmp);
+#endif
 
   Condition negate_condition(Condition cond);
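The mach instructions bind the operands of fast_lock and fast_unlock declared above to fixed registers: per the guarantees in macroAssembler_x86.cpp, tmp must be rax for fast_lock, box must be rax for fast_unlock, and all operands must be pairwise disjoint. An illustrative call shape (register choices hypothetical):

    masm.fast_lock(robj, rbox, rax /* tmp: must be rax */, rscr, NULL);
    // ZF==1 here iff the fast path took the lock
    masm.fast_unlock(robj, rax /* box: must be rax */, rtmp);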
 
--- a/src/cpu/x86/vm/x86_32.ad	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/cpu/x86/vm/x86_32.ad	Wed Feb 26 02:38:46 2014 -0800
@@ -1542,19 +1542,6 @@
   return EBP_REG_mask();
 }
 
-const RegMask Matcher::mathExactI_result_proj_mask() {
-  return EAX_REG_mask();
-}
-
-const RegMask Matcher::mathExactL_result_proj_mask() {
-  ShouldNotReachHere();
-  return RegMask();
-}
-
-const RegMask Matcher::mathExactI_flags_proj_mask() {
-  return INT_FLAGS_mask();
-}
-
 // Returns true if the high 32 bits of the value is known to be zero.
 bool is_operand_hi32_zero(Node* n) {
   int opc = n->Opcode();
@@ -2918,542 +2905,6 @@
     emit_d8    (cbuf,0 );
   %}
 
-
-  // Because the transitions from emitted code to the runtime
-  // monitorenter/exit helper stubs are so slow it's critical that
-  // we inline both the stack-locking fast-path and the inflated fast path.
-  //
-  // See also: cmpFastLock and cmpFastUnlock.
-  //
-  // What follows is a specialized inline transliteration of the code
-  // in slow_enter() and slow_exit().  If we're concerned about I$ bloat
-  // another option would be to emit TrySlowEnter and TrySlowExit methods
-  // at startup-time.  These methods would accept arguments as
-  // (rax,=Obj, rbx=Self, rcx=box, rdx=Scratch) and return success-failure
-  // indications in the icc.ZFlag.  Fast_Lock and Fast_Unlock would simply
-  // marshal the arguments and emit calls to TrySlowEnter and TrySlowExit.
-  // In practice, however, the # of lock sites is bounded and is usually small.
-  // Besides the call overhead, TrySlowEnter and TrySlowExit might suffer
-  // if the processor uses simple bimodal branch predictors keyed by EIP
-  // Since the helper routines would be called from multiple synchronization
-  // sites.
-  //
-  // An even better approach would be write "MonitorEnter()" and "MonitorExit()"
-  // in java - using j.u.c and unsafe - and just bind the lock and unlock sites
-  // to those specialized methods.  That'd give us a mostly platform-independent
-  // implementation that the JITs could optimize and inline at their pleasure.
-  // Done correctly, the only time we'd need to cross to native could would be
-  // to park() or unpark() threads.  We'd also need a few more unsafe operators
-  // to (a) prevent compiler-JIT reordering of non-volatile accesses, and
-  // (b) explicit barriers or fence operations.
-  //
-  // TODO:
-  //
-  // *  Arrange for C2 to pass "Self" into Fast_Lock and Fast_Unlock in one of the registers (scr).
-  //    This avoids manifesting the Self pointer in the Fast_Lock and Fast_Unlock terminals.
-  //    Given TLAB allocation, Self is usually manifested in a register, so passing it into
-  //    the lock operators would typically be faster than reifying Self.
-  //
-  // *  Ideally I'd define the primitives as:
-  //       fast_lock   (nax Obj, nax box, EAX tmp, nax scr) where box, tmp and scr are KILLED.
-  //       fast_unlock (nax Obj, EAX box, nax tmp) where box and tmp are KILLED
-  //    Unfortunately ADLC bugs prevent us from expressing the ideal form.
-  //    Instead, we're stuck with a rather awkward and brittle register assignments below.
-  //    Furthermore the register assignments are overconstrained, possibly resulting in
-  //    sub-optimal code near the synchronization site.
-  //
-  // *  Eliminate the sp-proximity tests and just use "== Self" tests instead.
-  //    Alternately, use a better sp-proximity test.
-  //
-  // *  Currently ObjectMonitor._Owner can hold either an sp value or a (THREAD *) value.
-  //    Either one is sufficient to uniquely identify a thread.
-  //    TODO: eliminate use of sp in _owner and use get_thread(tr) instead.
-  //
-  // *  Intrinsify notify() and notifyAll() for the common cases where the
-  //    object is locked by the calling thread but the waitlist is empty.
-  //    avoid the expensive JNI call to JVM_Notify() and JVM_NotifyAll().
-  //
-  // *  use jccb and jmpb instead of jcc and jmp to improve code density.
-  //    But beware of excessive branch density on AMD Opterons.
-  //
-  // *  Both Fast_Lock and Fast_Unlock set the ICC.ZF to indicate success
-  //    or failure of the fast-path.  If the fast-path fails then we pass
-  //    control to the slow-path, typically in C.  In Fast_Lock and
-  //    Fast_Unlock we often branch to DONE_LABEL, just to find that C2
-  //    will emit a conditional branch immediately after the node.
-  //    So we have branches to branches and lots of ICC.ZF games.
-  //    Instead, it might be better to have C2 pass a "FailureLabel"
-  //    into Fast_Lock and Fast_Unlock.  In the case of success, control
-  //    will drop through the node.  ICC.ZF is undefined at exit.
-  //    In the case of failure, the node will branch directly to the
-  //    FailureLabel
-
-
-  // obj: object to lock
-  // box: on-stack box address (displaced header location) - KILLED
-  // rax,: tmp -- KILLED
-  // scr: tmp -- KILLED
-  enc_class Fast_Lock( eRegP obj, eRegP box, eAXRegI tmp, eRegP scr ) %{
-
-    Register objReg = as_Register($obj$$reg);
-    Register boxReg = as_Register($box$$reg);
-    Register tmpReg = as_Register($tmp$$reg);
-    Register scrReg = as_Register($scr$$reg);
-
-    // Ensure the register assignents are disjoint
-    guarantee (objReg != boxReg, "") ;
-    guarantee (objReg != tmpReg, "") ;
-    guarantee (objReg != scrReg, "") ;
-    guarantee (boxReg != tmpReg, "") ;
-    guarantee (boxReg != scrReg, "") ;
-    guarantee (tmpReg == as_Register(EAX_enc), "") ;
-
-    MacroAssembler masm(&cbuf);
-
-    if (_counters != NULL) {
-      masm.atomic_incl(ExternalAddress((address) _counters->total_entry_count_addr()));
-    }
-    if (EmitSync & 1) {
-        // set box->dhw = unused_mark (3)
-        // Force all sync thru slow-path: slow_enter() and slow_exit() 
-        masm.movptr (Address(boxReg, 0), int32_t(markOopDesc::unused_mark())) ;             
-        masm.cmpptr (rsp, (int32_t)0) ;                        
-    } else 
-    if (EmitSync & 2) { 
-        Label DONE_LABEL ;           
-        if (UseBiasedLocking) {
-           // Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument.
-           masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters);
-        }
-
-        masm.movptr(tmpReg, Address(objReg, 0)) ;          // fetch markword 
-        masm.orptr (tmpReg, 0x1);
-        masm.movptr(Address(boxReg, 0), tmpReg);           // Anticipate successful CAS 
-        if (os::is_MP()) { masm.lock();  }
-        masm.cmpxchgptr(boxReg, Address(objReg, 0));          // Updates tmpReg
-        masm.jcc(Assembler::equal, DONE_LABEL);
-        // Recursive locking
-        masm.subptr(tmpReg, rsp);
-        masm.andptr(tmpReg, (int32_t) 0xFFFFF003 );
-        masm.movptr(Address(boxReg, 0), tmpReg);
-        masm.bind(DONE_LABEL) ; 
-    } else {  
-      // Possible cases that we'll encounter in fast_lock 
-      // ------------------------------------------------
-      // * Inflated
-      //    -- unlocked
-      //    -- Locked
-      //       = by self
-      //       = by other
-      // * biased
-      //    -- by Self
-      //    -- by other
-      // * neutral
-      // * stack-locked
-      //    -- by self
-      //       = sp-proximity test hits
-      //       = sp-proximity test generates false-negative
-      //    -- by other
-      //
-
-      Label IsInflated, DONE_LABEL, PopDone ;
-
-      // TODO: optimize away redundant LDs of obj->mark and improve the markword triage
-      // order to reduce the number of conditional branches in the most common cases.
-      // Beware -- there's a subtle invariant that fetch of the markword
-      // at [FETCH], below, will never observe a biased encoding (*101b).
-      // If this invariant is not held we risk exclusion (safety) failure.
-      if (UseBiasedLocking && !UseOptoBiasInlining) {
-        masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters);
-      }
-
-      masm.movptr(tmpReg, Address(objReg, 0)) ;         // [FETCH]
-      masm.testptr(tmpReg, 0x02) ;                      // Inflated v (Stack-locked or neutral)
-      masm.jccb  (Assembler::notZero, IsInflated) ;
-
-      // Attempt stack-locking ...
-      masm.orptr (tmpReg, 0x1);
-      masm.movptr(Address(boxReg, 0), tmpReg);          // Anticipate successful CAS
-      if (os::is_MP()) { masm.lock();  }
-      masm.cmpxchgptr(boxReg, Address(objReg, 0));           // Updates tmpReg
-      if (_counters != NULL) {
-        masm.cond_inc32(Assembler::equal,
-                        ExternalAddress((address)_counters->fast_path_entry_count_addr()));
-      }
-      masm.jccb (Assembler::equal, DONE_LABEL);
-
-      // Recursive locking
-      masm.subptr(tmpReg, rsp);
-      masm.andptr(tmpReg, 0xFFFFF003 );
-      masm.movptr(Address(boxReg, 0), tmpReg);
-      if (_counters != NULL) {
-        masm.cond_inc32(Assembler::equal,
-                        ExternalAddress((address)_counters->fast_path_entry_count_addr()));
-      }
-      masm.jmp  (DONE_LABEL) ;
-
-      masm.bind (IsInflated) ;
-
-      // The object is inflated.
-      //
-      // TODO-FIXME: eliminate the ugly use of manifest constants:
-      //   Use markOopDesc::monitor_value instead of "2".
-      //   use markOop::unused_mark() instead of "3".
-      // The tmpReg value is an objectMonitor reference ORed with
-      // markOopDesc::monitor_value (2).   We can either convert tmpReg to an
-      // objectmonitor pointer by masking off the "2" bit or we can just
-      // use tmpReg as an objectmonitor pointer but bias the objectmonitor
-      // field offsets with "-2" to compensate for and annul the low-order tag bit.
-      //
-      // I use the latter as it avoids AGI stalls.
-      // As such, we write "mov r, [tmpReg+OFFSETOF(Owner)-2]"
-      // instead of "mov r, [tmpReg+OFFSETOF(Owner)]".
-      //
-      #define OFFSET_SKEWED(f) ((ObjectMonitor::f ## _offset_in_bytes())-2)
-
-      // boxReg refers to the on-stack BasicLock in the current frame.
-      // We'd like to write:
-      //   set box->_displaced_header = markOop::unused_mark().  Any non-0 value suffices.
-      // This is convenient but results a ST-before-CAS penalty.  The following CAS suffers
-      // additional latency as we have another ST in the store buffer that must drain.
-
-      if (EmitSync & 8192) { 
-         masm.movptr(Address(boxReg, 0), 3) ;            // results in ST-before-CAS penalty
-         masm.get_thread (scrReg) ; 
-         masm.movptr(boxReg, tmpReg);                    // consider: LEA box, [tmp-2] 
-         masm.movptr(tmpReg, NULL_WORD);                 // consider: xor vs mov
-         if (os::is_MP()) { masm.lock(); } 
-         masm.cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; 
-      } else 
-      if ((EmitSync & 128) == 0) {                      // avoid ST-before-CAS
-         masm.movptr(scrReg, boxReg) ; 
-         masm.movptr(boxReg, tmpReg);                   // consider: LEA box, [tmp-2] 
-
-         // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
-         if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
-            // prefetchw [eax + Offset(_owner)-2]
-            masm.prefetchw(Address(rax, ObjectMonitor::owner_offset_in_bytes()-2));
-         }
-
-         if ((EmitSync & 64) == 0) {
-           // Optimistic form: consider XORL tmpReg,tmpReg
-           masm.movptr(tmpReg, NULL_WORD) ; 
-         } else { 
-           // Can suffer RTS->RTO upgrades on shared or cold $ lines
-           // Test-And-CAS instead of CAS
-           masm.movptr(tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;   // rax, = m->_owner
-           masm.testptr(tmpReg, tmpReg) ;                   // Locked ? 
-           masm.jccb  (Assembler::notZero, DONE_LABEL) ;                   
-         }
-
-         // Appears unlocked - try to swing _owner from null to non-null.
-         // Ideally, I'd manifest "Self" with get_thread and then attempt
-         // to CAS the register containing Self into m->Owner.
-         // But we don't have enough registers, so instead we can either try to CAS
-         // rsp or the address of the box (in scr) into &m->owner.  If the CAS succeeds
-         // we later store "Self" into m->Owner.  Transiently storing a stack address
-         // (rsp or the address of the box) into  m->owner is harmless.
-         // Invariant: tmpReg == 0.  tmpReg is EAX which is the implicit cmpxchg comparand.
-         if (os::is_MP()) { masm.lock();  }
-         masm.cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; 
-         masm.movptr(Address(scrReg, 0), 3) ;          // box->_displaced_header = 3
-         masm.jccb  (Assembler::notZero, DONE_LABEL) ; 
-         masm.get_thread (scrReg) ;                    // beware: clobbers ICCs
-         masm.movptr(Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2), scrReg) ; 
-         masm.xorptr(boxReg, boxReg) ;                 // set icc.ZFlag = 1 to indicate success
-                       
-         // If the CAS fails we can either retry or pass control to the slow-path.  
-         // We use the latter tactic.  
-         // Pass the CAS result in the icc.ZFlag into DONE_LABEL
-         // If the CAS was successful ...
-         //   Self has acquired the lock
-         //   Invariant: m->_recursions should already be 0, so we don't need to explicitly set it.
-         // Intentional fall-through into DONE_LABEL ...
-      } else {
-         masm.movptr(Address(boxReg, 0), 3) ;       // results in ST-before-CAS penalty
-         masm.movptr(boxReg, tmpReg) ; 
-
-         // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
-         if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
-            // prefetchw [eax + Offset(_owner)-2]
-            masm.prefetchw(Address(rax, ObjectMonitor::owner_offset_in_bytes()-2));
-         }
-
-         if ((EmitSync & 64) == 0) {
-           // Optimistic form
-           masm.xorptr  (tmpReg, tmpReg) ; 
-         } else { 
-           // Can suffer RTS->RTO upgrades on shared or cold $ lines
-           masm.movptr(tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;   // rax, = m->_owner
-           masm.testptr(tmpReg, tmpReg) ;                   // Locked ? 
-           masm.jccb  (Assembler::notZero, DONE_LABEL) ;                   
-         }
-
-         // Appears unlocked - try to swing _owner from null to non-null.
-         // Use either "Self" (in scr) or rsp as thread identity in _owner.
-         // Invariant: tmpReg == 0.  tmpReg is EAX which is the implicit cmpxchg comparand.
-         masm.get_thread (scrReg) ;
-         if (os::is_MP()) { masm.lock(); }
-         masm.cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
-
-         // If the CAS fails we can either retry or pass control to the slow-path.
-         // We use the latter tactic.
-         // Pass the CAS result in the icc.ZFlag into DONE_LABEL
-         // If the CAS was successful ...
-         //   Self has acquired the lock
-         //   Invariant: m->_recursions should already be 0, so we don't need to explicitly set it.
-         // Intentional fall-through into DONE_LABEL ...
-      }
-
-      // DONE_LABEL is a hot target - we'd really like to place it at the
-      // start of cache line by padding with NOPs.
-      // See the AMD and Intel software optimization manuals for the
-      // most efficient "long" NOP encodings.
-      // Unfortunately none of our alignment mechanisms suffice.
-      masm.bind(DONE_LABEL);
-
-      // Avoid branch-to-branch on AMD processors
-      // This appears to be superstition.
-      if (EmitSync & 32) masm.nop() ;
-
-
-      // At DONE_LABEL the icc ZFlag is set as follows ...
-      // Fast_Unlock uses the same protocol.
-      // ZFlag == 1 -> Success
-      // ZFlag == 0 -> Failure - force control through the slow-path
-    }
-  %}
-
-  // obj: object to unlock
-  // box: box address (displaced header location), killed.  Must be EAX.
-  // rbx,: killed tmp; cannot be obj nor box.
-  //
-  // Some commentary on balanced locking:
-  //
-  // Fast_Lock and Fast_Unlock are emitted only for provably balanced lock sites.
-  // Methods that don't have provably balanced locking are forced to run in the
-  // interpreter - such methods won't be compiled to use fast_lock and fast_unlock.
-  // The interpreter provides two properties:
-  // I1:  At return-time the interpreter automatically and quietly unlocks any
-  //      objects acquired the current activation (frame).  Recall that the
-  //      interpreter maintains an on-stack list of locks currently held by
-  //      a frame.
-  // I2:  If a method attempts to unlock an object that is not held by the
-  //      the frame the interpreter throws IMSX.
-  //
-  // Lets say A(), which has provably balanced locking, acquires O and then calls B().
-  // B() doesn't have provably balanced locking so it runs in the interpreter.
-  // Control returns to A() and A() unlocks O.  By I1 and I2, above, we know that O
-  // is still locked by A().
-  //
-  // The only other source of unbalanced locking would be JNI.  The "Java Native Interface:
-  // Programmer's Guide and Specification" claims that an object locked by jni_monitorenter
-  // should not be unlocked by "normal" java-level locking and vice-versa.  The specification
-  // doesn't specify what will occur if a program engages in such mixed-mode locking, however.
-
-  enc_class Fast_Unlock( nabxRegP obj, eAXRegP box, eRegP tmp) %{
-
-    Register objReg = as_Register($obj$$reg);
-    Register boxReg = as_Register($box$$reg);
-    Register tmpReg = as_Register($tmp$$reg);
-
-    guarantee (objReg != boxReg, "") ;
-    guarantee (objReg != tmpReg, "") ;
-    guarantee (boxReg != tmpReg, "") ;
-    guarantee (boxReg == as_Register(EAX_enc), "") ;
-    MacroAssembler masm(&cbuf);
-
-    if (EmitSync & 4) {
-      // Disable - inhibit all inlining.  Force control through the slow-path
-      masm.cmpptr (rsp, 0) ; 
-    } else 
-    if (EmitSync & 8) {
-      Label DONE_LABEL ;
-      if (UseBiasedLocking) {
-         masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
-      }
-      // classic stack-locking code ...
-      masm.movptr(tmpReg, Address(boxReg, 0)) ;
-      masm.testptr(tmpReg, tmpReg) ;
-      masm.jcc   (Assembler::zero, DONE_LABEL) ;
-      if (os::is_MP()) { masm.lock(); }
-      masm.cmpxchgptr(tmpReg, Address(objReg, 0));          // Uses EAX which is box
-      masm.bind(DONE_LABEL);
-    } else {
-      Label DONE_LABEL, Stacked, CheckSucc, Inflated ;
-
-      // Critically, the biased locking test must have precedence over
-      // and appear before the (box->dhw == 0) recursive stack-lock test.
-      if (UseBiasedLocking && !UseOptoBiasInlining) {
-         masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
-      }
-      
-      masm.cmpptr(Address(boxReg, 0), 0) ;            // Examine the displaced header
-      masm.movptr(tmpReg, Address(objReg, 0)) ;       // Examine the object's markword
-      masm.jccb  (Assembler::zero, DONE_LABEL) ;      // 0 indicates recursive stack-lock
-
-      masm.testptr(tmpReg, 0x02) ;                     // Inflated? 
-      masm.jccb  (Assembler::zero, Stacked) ;
-
-      masm.bind  (Inflated) ;
-      // It's inflated.
-      // Despite our balanced locking property we still check that m->_owner == Self
-      // as java routines or native JNI code called by this thread might
-      // have released the lock.
-      // Refer to the comments in synchronizer.cpp for how we might encode extra
-      // state in _succ so we can avoid fetching EntryList|cxq.
-      //
-      // I'd like to add more cases in fast_lock() and fast_unlock() --
-      // such as recursive enter and exit -- but we have to be wary of
-      // I$ bloat, T$ effects and BP$ effects.
-      //
-      // If there's no contention try a 1-0 exit.  That is, exit without
-      // a costly MEMBAR or CAS.  See synchronizer.cpp for details on how
-      // we detect and recover from the race that the 1-0 exit admits.
-      //
-      // Conceptually Fast_Unlock() must execute a STST|LDST "release" barrier
-      // before it STs null into _owner, releasing the lock.  Updates
-      // to data protected by the critical section must be visible before
-      // we drop the lock (and thus before any other thread could acquire
-      // the lock and observe the fields protected by the lock).
-      // IA32's memory-model is SPO, so STs are ordered with respect to
-      // each other and there's no need for an explicit barrier (fence).
-      // See also http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
-
-      masm.get_thread (boxReg) ;
-      if ((EmitSync & 4096) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
-        // prefetchw [ebx + Offset(_owner)-2]
-        masm.prefetchw(Address(rbx, ObjectMonitor::owner_offset_in_bytes()-2));
-      }
-
-      // Note that we could employ various encoding schemes to reduce
-      // the number of loads below (currently 4) to just 2 or 3.
-      // Refer to the comments in synchronizer.cpp.
-      // In practice the chain of fetches doesn't seem to impact performance, however.
-      if ((EmitSync & 65536) == 0 && (EmitSync & 256)) {
-         // Attempt to reduce branch density - AMD's branch predictor.
-         masm.xorptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;  
-         masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
-         masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ; 
-         masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ; 
-         masm.jccb  (Assembler::notZero, DONE_LABEL) ; 
-         masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD) ; 
-         masm.jmpb  (DONE_LABEL) ; 
-      } else { 
-         masm.xorptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;  
-         masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
-         masm.jccb  (Assembler::notZero, DONE_LABEL) ; 
-         masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ; 
-         masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ; 
-         masm.jccb  (Assembler::notZero, CheckSucc) ; 
-         masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD) ; 
-         masm.jmpb  (DONE_LABEL) ; 
-      }
-
-      // The following code fragment (EmitSync & 65536) improves the performance of
-      // contended applications and contended synchronization microbenchmarks.
-      // Unfortunately the emission of the code - even though not executed - causes regressions
-      // in scimark and jetstream, evidently because of $ effects.  Replacing the code
-      // with an equal number of never-executed NOPs results in the same regression.
-      // We leave it off by default.
-
-      if ((EmitSync & 65536) != 0) {
-         Label LSuccess, LGoSlowPath ;
-
-         masm.bind  (CheckSucc) ;
-
-         // Optional pre-test ... it's safe to elide this
-         if ((EmitSync & 16) == 0) { 
-            masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), 0) ; 
-            masm.jccb  (Assembler::zero, LGoSlowPath) ; 
-         }
-
-         // We have a classic Dekker-style idiom:
-         //    ST m->_owner = 0 ; MEMBAR; LD m->_succ
-         // There are a number of ways to implement the barrier:
-         // (1) lock:andl &m->_owner, 0
-         //     is fast, but masm doesn't currently support the "ANDL M,IMM32" form.
-         //     LOCK: ANDL [ebx+Offset(_Owner)-2], 0
-         //     Encodes as 81 31 OFF32 IMM32 or 83 63 OFF8 IMM8
-         // (2) If supported, an explicit MFENCE is appealing.
-         //     In older IA32 processors MFENCE is slower than lock:add or xchg
-         //     particularly if the write-buffer is full, as might be the case
-         //     if stores closely precede the fence or fence-equivalent instruction.
-         //     In more modern implementations MFENCE appears faster, however.
-         // (3) In lieu of an explicit fence, use lock:addl to the top-of-stack
-         //     The $lines underlying the top-of-stack should be in M-state.
-         //     The locked add instruction is serializing, of course.
-         // (4) Use xchg, which is serializing
-         //     mov boxReg, 0; xchgl boxReg, [tmpReg + Offset(_owner)-2] also works
-         // (5) ST m->_owner = 0 and then execute lock:orl &m->_succ, 0.
-         //     The integer condition codes will tell us if succ was 0.
-         //     Since _succ and _owner should reside in the same $line and
-         //     we just stored into _owner, it's likely that the $line
-         //     remains in M-state for the lock:orl.
-         //
-         // We currently use (3), although it's likely that switching to (2)
-         // is correct for the future.
-            
-         masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD) ; 
-         if (os::is_MP()) { 
-            if (VM_Version::supports_sse2() && 1 == FenceInstruction) { 
-              masm.mfence();
-            } else { 
-              masm.lock () ; masm.addptr(Address(rsp, 0), 0) ; 
-            }
-         }
-         // Ratify _succ remains non-null
-         masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), 0) ; 
-         masm.jccb  (Assembler::notZero, LSuccess) ; 
-
-         masm.xorptr(boxReg, boxReg) ;                  // box is really EAX
-         if (os::is_MP()) { masm.lock(); }
-         masm.cmpxchgptr(rsp, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
-         masm.jccb  (Assembler::notEqual, LSuccess) ;
-         // Since we're low on registers we installed rsp as a placeholder in _owner.
-         // Now install Self over rsp.  This is safe as we're transitioning from
-         // non-null to non-null.
-         masm.get_thread (boxReg) ;
-         masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), boxReg) ;
-         // Intentional fall-through into LGoSlowPath ...
-
-         masm.bind  (LGoSlowPath) ; 
-         masm.orptr(boxReg, 1) ;                      // set ICC.ZF=0 to indicate failure
-         masm.jmpb  (DONE_LABEL) ; 
-
-         masm.bind  (LSuccess) ; 
-         masm.xorptr(boxReg, boxReg) ;                 // set ICC.ZF=1 to indicate success
-         masm.jmpb  (DONE_LABEL) ; 
-      }
-
-      masm.bind (Stacked) ;
-      // It's not inflated and it's not recursively stack-locked and it's not biased.
-      // It must be stack-locked.
-      // Try to reset the header to displaced header.
-      // The "box" value on the stack is stable, so we can reload
-      // and be assured we observe the same value as above.
-      masm.movptr(tmpReg, Address(boxReg, 0)) ;
-      if (os::is_MP()) {   masm.lock();    }
-      masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses EAX which is box
-      // Intentional fall-through into DONE_LABEL
-
-
-      // DONE_LABEL is a hot target - we'd really like to place it at the
-      // start of a cache line by padding with NOPs.
-      // See the AMD and Intel software optimization manuals for the
-      // most efficient "long" NOP encodings.
-      // Unfortunately none of our alignment mechanisms suffice.
-      if ((EmitSync & 65536) == 0) {
-         masm.bind (CheckSucc) ;
-      }
-      masm.bind(DONE_LABEL);
-
-      // Avoid branch to branch on AMD processors
-      if (EmitSync & 32768) { masm.nop() ; }
-    }
-  %}
-
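A compilable sketch of the 1-0 exit and Dekker-style ST;MEMBAR;LD idiom described in the comments above, under a deliberately simplified model -- the Monitor struct, its field names, and fast_exit_1_0 are illustrative stand-ins, not HotSpot's ObjectMonitor:

    #include <atomic>

    // Stand-in for ObjectMonitor: just the two fields the protocol touches.
    struct Monitor {
      std::atomic<void*> owner{nullptr};   // owning thread, or null
      std::atomic<void*> succ{nullptr};    // heir presumptive, if any
    };

    // 1-0 exit: release without a CAS on the uncontended path.  Returns
    // true when no further action is needed, false when the caller must
    // take the slow path and wake a waiter.
    bool fast_exit_1_0(Monitor* m, void* self) {
      m->owner.store(nullptr, std::memory_order_release);   // ST _owner = 0
      std::atomic_thread_fence(std::memory_order_seq_cst);  // MEMBAR
      if (m->succ.load(std::memory_order_relaxed) != nullptr)
        return true;                      // a successor will retry; done
      // Race window: a thread may have queued after the _succ check.
      // Try to retake the lock; whoever wins owns the wakeup duty.
      void* expected = nullptr;
      if (!m->owner.compare_exchange_strong(expected, self))
        return true;                      // someone else got in; their job
      return false;                       // reacquired: go to the slow path
    }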
-
   enc_class enc_pop_rdx() %{
     emit_opcode(cbuf,0x5A);
   %}
@@ -7545,44 +6996,6 @@
 //----------Arithmetic Instructions--------------------------------------------
 //----------Addition Instructions----------------------------------------------
 
-instruct addExactI_eReg(eAXRegI dst, rRegI src, eFlagsReg cr)
-%{
-  match(AddExactI dst src);
-  effect(DEF cr);
-
-  format %{ "ADD    $dst, $src\t# addExact int" %}
-  ins_encode %{
-    __ addl($dst$$Register, $src$$Register);
-  %}
-  ins_pipe(ialu_reg_reg);
-%}
-
-instruct addExactI_eReg_imm(eAXRegI dst, immI src, eFlagsReg cr)
-%{
-  match(AddExactI dst src);
-  effect(DEF cr);
-
-  format %{ "ADD    $dst, $src\t# addExact int" %}
-  ins_encode %{
-    __ addl($dst$$Register, $src$$constant);
-  %}
-  ins_pipe(ialu_reg_reg);
-%}
-
-instruct addExactI_eReg_mem(eAXRegI dst, memory src, eFlagsReg cr)
-%{
-  match(AddExactI dst (LoadI src));
-  effect(DEF cr);
-
-  ins_cost(125);
-  format %{ "ADD    $dst,$src\t# addExact int" %}
-  ins_encode %{
-    __ addl($dst$$Register, $src$$Address);
-  %}
-  ins_pipe( ialu_reg_mem );
-%}
-
-
 // Integer Addition Instructions
 instruct addI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{
   match(Set dst (AddI dst src));
@@ -7892,43 +7305,6 @@
 
 //----------Subtraction Instructions-------------------------------------------
 
-instruct subExactI_eReg(eAXRegI dst, rRegI src, eFlagsReg cr)
-%{
-  match(SubExactI dst src);
-  effect(DEF cr);
-
-  format %{ "SUB    $dst, $src\t# subExact int" %}
-  ins_encode %{
-    __ subl($dst$$Register, $src$$Register);
-  %}
-  ins_pipe(ialu_reg_reg);
-%}
-
-instruct subExactI_eReg_imm(eAXRegI dst, immI src, eFlagsReg cr)
-%{
-  match(SubExactI dst src);
-  effect(DEF cr);
-
-  format %{ "SUB    $dst, $src\t# subExact int" %}
-  ins_encode %{
-    __ subl($dst$$Register, $src$$constant);
-  %}
-  ins_pipe(ialu_reg_reg);
-%}
-
-instruct subExactI_eReg_mem(eAXRegI dst, memory src, eFlagsReg cr)
-%{
-  match(SubExactI dst (LoadI src));
-  effect(DEF cr);
-
-  ins_cost(125);
-  format %{ "SUB    $dst,$src\t# subExact int" %}
-  ins_encode %{
-    __ subl($dst$$Register, $src$$Address);
-  %}
-  ins_pipe( ialu_reg_mem );
-%}
-
 // Integer Subtraction Instructions
 instruct subI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{
   match(Set dst (SubI dst src));
@@ -7997,17 +7373,6 @@
   ins_pipe( ialu_reg );
 %}
 
-instruct negExactI_eReg(eAXRegI dst, eFlagsReg cr) %{
-  match(NegExactI dst);
-  effect(DEF cr);
-
-  format %{ "NEG    $dst\t# negExact int"%}
-  ins_encode %{
-    __ negl($dst$$Register);
-  %}
-  ins_pipe(ialu_reg);
-%}
-
 //----------Multiplication/Division Instructions-------------------------------
 // Integer Multiplication Instructions
 // Multiply Register
@@ -8219,46 +7584,6 @@
   ins_pipe( pipe_slow );
 %}
 
-instruct mulExactI_eReg(eAXRegI dst, rRegI src, eFlagsReg cr)
-%{
-  match(MulExactI dst src);
-  effect(DEF cr);
-
-  ins_cost(300);
-  format %{ "IMUL   $dst, $src\t# mulExact int" %}
-  ins_encode %{
-    __ imull($dst$$Register, $src$$Register);
-  %}
-  ins_pipe(ialu_reg_reg_alu0);
-%}
-
-instruct mulExactI_eReg_imm(eAXRegI dst, rRegI src, immI imm, eFlagsReg cr)
-%{
-  match(MulExactI src imm);
-  effect(DEF cr);
-
-  ins_cost(300);
-  format %{ "IMUL   $dst, $src, $imm\t# mulExact int" %}
-  ins_encode %{
-    __ imull($dst$$Register, $src$$Register, $imm$$constant);
-  %}
-  ins_pipe(ialu_reg_reg_alu0);
-%}
-
-instruct mulExactI_eReg_mem(eAXRegI dst, memory src, eFlagsReg cr)
-%{
-  match(MulExactI dst (LoadI src));
-  effect(DEF cr);
-
-  ins_cost(350);
-  format %{ "IMUL   $dst, $src\t# mulExact int" %}
-  ins_encode %{
-    __ imull($dst$$Register, $src$$Address);
-  %}
-  ins_pipe(ialu_reg_mem_alu0);
-%}
-
-
 // Integer DIV with Register
 instruct divI_eReg(eAXRegI rax, eDXRegI rdx, eCXRegI div, eFlagsReg cr) %{
   match(Set rax (DivI rax div));
@@ -9124,6 +8449,91 @@
 instruct cadd_cmpLTMask_mem(ncxRegI p, ncxRegI q, memory y, eCXRegI tmp, eFlagsReg cr) %{
   match(Set p (AddI (AndI (CmpLTMask p q) (LoadI y)) (SubI p q)));
 */
+//----------Overflow Math Instructions-----------------------------------------
+
+instruct overflowAddI_eReg(eFlagsReg cr, eAXRegI op1, rRegI op2)
+%{
+  match(Set cr (OverflowAddI op1 op2));
+  effect(DEF cr, USE_KILL op1, USE op2);
+
+  format %{ "ADD    $op1, $op2\t# overflow check int" %}
+
+  ins_encode %{
+    __ addl($op1$$Register, $op2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct overflowAddI_rReg_imm(eFlagsReg cr, eAXRegI op1, immI op2)
+%{
+  match(Set cr (OverflowAddI op1 op2));
+  effect(DEF cr, USE_KILL op1, USE op2);
+
+  format %{ "ADD    $op1, $op2\t# overflow check int" %}
+
+  ins_encode %{
+    __ addl($op1$$Register, $op2$$constant);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct overflowSubI_rReg(eFlagsReg cr, rRegI op1, rRegI op2)
+%{
+  match(Set cr (OverflowSubI op1 op2));
+
+  format %{ "CMP    $op1, $op2\t# overflow check int" %}
+  ins_encode %{
+    __ cmpl($op1$$Register, $op2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct overflowSubI_rReg_imm(eFlagsReg cr, rRegI op1, immI op2)
+%{
+  match(Set cr (OverflowSubI op1 op2));
+
+  format %{ "CMP    $op1, $op2\t# overflow check int" %}
+  ins_encode %{
+    __ cmpl($op1$$Register, $op2$$constant);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct overflowNegI_rReg(eFlagsReg cr, immI0 zero, eAXRegI op2)
+%{
+  match(Set cr (OverflowSubI zero op2));
+  effect(DEF cr, USE_KILL op2);
+
+  format %{ "NEG    $op2\t# overflow check int" %}
+  ins_encode %{
+    __ negl($op2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct overflowMulI_rReg(eFlagsReg cr, eAXRegI op1, rRegI op2)
+%{
+  match(Set cr (OverflowMulI op1 op2));
+  effect(DEF cr, USE_KILL op1, USE op2);
+
+  format %{ "IMUL    $op1, $op2\t# overflow check int" %}
+  ins_encode %{
+    __ imull($op1$$Register, $op2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg_alu0);
+%}
+
+instruct overflowMulI_rReg_imm(eFlagsReg cr, rRegI op1, immI op2, rRegI tmp)
+%{
+  match(Set cr (OverflowMulI op1 op2));
+  effect(DEF cr, TEMP tmp, USE op1, USE op2);
+
+  format %{ "IMUL    $tmp, $op1, $op2\t# overflow check int" %}
+  ins_encode %{
+    __ imull($tmp$$Register, $op1$$Register, $op2$$constant);
+  %}
+  ins_pipe(ialu_reg_reg_alu0);
+%}
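Each Overflow* node above produces only a flags result; the matched instruction is the ordinary ADD/CMP/NEG/IMUL and a later branch consumes OF. For reference, the same shape in C++ -- a sketch assuming GCC/Clang, whose __builtin_add_overflow lowers to exactly this add-plus-branch-on-OF pattern on x86:

    #include <climits>
    #include <cstdio>

    // Same shape as OverflowAddI: perform the add, then branch on the
    // overflow flag.  __builtin_add_overflow stores the wrapped sum and
    // returns true on overflow.
    static bool add_exact(int a, int b, int* out) {
      return !__builtin_add_overflow(a, b, out);   // addl + jo on x86
    }

    int main() {
      int r;
      if (add_exact(INT_MAX, 1, &r)) std::printf("ok: %d\n", r);
      else                           std::printf("overflow\n");  // taken
      return 0;
    }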
 
 //----------Long Instructions------------------------------------------------
 // Add Long Register with Register
@@ -13157,23 +12567,26 @@
 
 // inlined locking and unlocking
 
-
-instruct cmpFastLock( eFlagsReg cr, eRegP object, eBXRegP box, eAXRegI tmp, eRegP scr) %{
-  match( Set cr (FastLock object box) );
-  effect( TEMP tmp, TEMP scr, USE_KILL box );
+instruct cmpFastLock(eFlagsReg cr, eRegP object, eBXRegP box, eAXRegI tmp, eRegP scr) %{
+  match(Set cr (FastLock object box));
+  effect(TEMP tmp, TEMP scr, USE_KILL box);
   ins_cost(300);
   format %{ "FASTLOCK $object,$box\t! kills $box,$tmp,$scr" %}
-  ins_encode( Fast_Lock(object,box,tmp,scr) );
-  ins_pipe( pipe_slow );
-%}
-
-instruct cmpFastUnlock( eFlagsReg cr, eRegP object, eAXRegP box, eRegP tmp ) %{
-  match( Set cr (FastUnlock object box) );
-  effect( TEMP tmp, USE_KILL box );
+  ins_encode %{
+    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register, _counters);
+  %}
+  ins_pipe(pipe_slow);
+%}
+
+instruct cmpFastUnlock(eFlagsReg cr, eRegP object, eAXRegP box, eRegP tmp ) %{
+  match(Set cr (FastUnlock object box));
+  effect(TEMP tmp, USE_KILL box);
   ins_cost(300);
   format %{ "FASTUNLOCK $object,$box\t! kills $box,$tmp" %}
-  ins_encode( Fast_Unlock(object,box,tmp) );
-  ins_pipe( pipe_slow );
+  ins_encode %{
+    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
+  %}
+  ins_pipe(pipe_slow);
 %}
 
 
--- a/src/cpu/x86/vm/x86_64.ad	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/cpu/x86/vm/x86_64.ad	Wed Feb 26 02:38:46 2014 -0800
@@ -1657,18 +1657,6 @@
   return PTR_RBP_REG_mask();
 }
 
-const RegMask Matcher::mathExactI_result_proj_mask() {
-  return INT_RAX_REG_mask();
-}
-
-const RegMask Matcher::mathExactL_result_proj_mask() {
-  return LONG_RAX_REG_mask();
-}
-
-const RegMask Matcher::mathExactI_flags_proj_mask() {
-  return INT_FLAGS_mask();
-}
-
 %}
 
 //----------ENCODING BLOCK-----------------------------------------------------
@@ -2599,231 +2587,6 @@
   %}
 
 
-  // obj: object to lock
-  // box: box address (header location) -- killed
-  // tmp: rax -- killed
-  // scr: rbx -- killed
-  //
-  // What follows is a direct transliteration of fast_lock() and fast_unlock()
-  // from i486.ad.  See that file for comments.
-  // TODO: where possible switch from movq (r, 0) to movl(r,0) and
-  // use the shorter encoding.  (Movl clears the high-order 32-bits).
-
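The TODO rests on the x86-64 rule that a 32-bit register write zero-extends into the full 64-bit register, so a 32-bit move of zero clears the whole register and encodes shorter (no REX.W prefix). A tiny demonstration of the zero-extension, as a sketch:

    #include <cstdint>

    // A 32-bit write zero-extends: 'lo' lands in the low half and the
    // high 32 bits of the destination register are cleared by hardware.
    uint64_t low_word(uint64_t x) {
      uint32_t lo = static_cast<uint32_t>(x);   // 32-bit mov
      return lo;                                // upper bits already zero
    }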
-
-  enc_class Fast_Lock(rRegP obj, rRegP box, rax_RegI tmp, rRegP scr)
-  %{
-    Register objReg = as_Register((int)$obj$$reg);
-    Register boxReg = as_Register((int)$box$$reg);
-    Register tmpReg = as_Register($tmp$$reg);
-    Register scrReg = as_Register($scr$$reg);
-    MacroAssembler masm(&cbuf);
-
-    // Verify uniqueness of register assignments -- necessary but not sufficient
-    assert (objReg != boxReg && objReg != tmpReg &&
-            objReg != scrReg && tmpReg != scrReg, "invariant") ;
-
-    if (_counters != NULL) {
-      masm.atomic_incl(ExternalAddress((address) _counters->total_entry_count_addr()));
-    }
-    if (EmitSync & 1) {
-        // Without cast to int32_t a movptr will destroy r10 which is typically obj
-        masm.movptr (Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ;
-        masm.cmpptr(rsp, (int32_t)NULL_WORD) ;
-    } else
-    if (EmitSync & 2) {
-        Label DONE_LABEL;
-        if (UseBiasedLocking) {
-           // Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument.
-          masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters);
-        }
-        // QQQ was movl...
-        masm.movptr(tmpReg, 0x1);
-        masm.orptr(tmpReg, Address(objReg, 0));
-        masm.movptr(Address(boxReg, 0), tmpReg);
-        if (os::is_MP()) {
-          masm.lock();
-        }
-        masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
-        masm.jcc(Assembler::equal, DONE_LABEL);
-
-        // Recursive locking
-        masm.subptr(tmpReg, rsp);
-        masm.andptr(tmpReg, 7 - os::vm_page_size());
-        masm.movptr(Address(boxReg, 0), tmpReg);
-
-        masm.bind(DONE_LABEL);
-        masm.nop(); // avoid branch to branch
-    } else {
-        Label DONE_LABEL, IsInflated, Egress;
-
-        masm.movptr(tmpReg, Address(objReg, 0)) ;
-        masm.testl (tmpReg, 0x02) ;         // inflated vs stack-locked|neutral|biased
-        masm.jcc   (Assembler::notZero, IsInflated) ;
-
-        // it's stack-locked, biased or neutral
-        // TODO: optimize markword triage order to reduce the number of
-        // conditional branches in the most common cases.
-        // Beware -- there's a subtle invariant that fetch of the markword
-        // at [FETCH], below, will never observe a biased encoding (*101b).
-        // If this invariant is not held we'll suffer exclusion (safety) failure.
-
-        if (UseBiasedLocking && !UseOptoBiasInlining) {
-          masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, _counters);
-          masm.movptr(tmpReg, Address(objReg, 0)) ;        // [FETCH]
-        }
-
-        // was the q form; will the 32-bit or destroy the high-order bits?
-        masm.orl   (tmpReg, 1) ;
-        masm.movptr(Address(boxReg, 0), tmpReg) ;
-        if (os::is_MP()) { masm.lock(); }
-        masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
-        if (_counters != NULL) {
-           masm.cond_inc32(Assembler::equal,
-                           ExternalAddress((address) _counters->fast_path_entry_count_addr()));
-        }
-        masm.jcc   (Assembler::equal, DONE_LABEL);
-
-        // Recursive locking
-        masm.subptr(tmpReg, rsp);
-        masm.andptr(tmpReg, 7 - os::vm_page_size());
-        masm.movptr(Address(boxReg, 0), tmpReg);
-        if (_counters != NULL) {
-           masm.cond_inc32(Assembler::equal,
-                           ExternalAddress((address) _counters->fast_path_entry_count_addr()));
-        }
-        masm.jmp   (DONE_LABEL) ;
-
-        masm.bind  (IsInflated) ;
-        // It's inflated
-
-        // TODO: someday avoid the ST-before-CAS penalty by
-        // relocating (deferring) the following ST.
-        // We should also think about trying a CAS without having
-        // fetched _owner.  If the CAS is successful we may
-        // avoid an RTO->RTS upgrade on the $line.
-        // Without cast to int32_t a movptr will destroy r10 which is typically obj
-        masm.movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ;
-
-        masm.mov    (boxReg, tmpReg) ;
-        masm.movptr (tmpReg, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
-        masm.testptr(tmpReg, tmpReg) ;
-        masm.jcc    (Assembler::notZero, DONE_LABEL) ;
-
-        // It's inflated and appears unlocked
-        if (os::is_MP()) { masm.lock(); }
-        masm.cmpxchgptr(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
-        // Intentional fall-through into DONE_LABEL ...
-
-        masm.bind  (DONE_LABEL) ;
-        masm.nop   () ;                 // avoid jmp to jmp
-    }
-  %}
-
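A sketch of the stack-locking fast path this encoder emits, under a simplified markword model (low bit set = neutral/unlocked; a box address = stack-locked); biased locking, counters, and the inflated path are omitted, and all names are illustrative:

    #include <atomic>
    #include <cstdint>

    bool try_stack_lock(std::atomic<uintptr_t>* mark, uintptr_t* box) {
      uintptr_t displaced = mark->load(std::memory_order_relaxed) | 1;
      *box = displaced;                    // displaced header into the box
      uintptr_t expected = displaced;
      if (mark->compare_exchange_strong(expected,
                                        reinterpret_cast<uintptr_t>(box)))
        return true;                       // lock; cmpxchg installed our box
      // CAS failed: 'expected' now holds the current mark.  If it is an
      // address on this thread's stack this is a recursive enter -- the
      // real code detects that with the subtract-from-rsp-and-mask trick.
      return false;                        // take the slow path
    }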
-  // obj: object to unlock
-  // box: box address (displaced header location), killed
-  // RBX: killed tmp; cannot be obj nor box
-  enc_class Fast_Unlock(rRegP obj, rax_RegP box, rRegP tmp)
-  %{
-
-    Register objReg = as_Register($obj$$reg);
-    Register boxReg = as_Register($box$$reg);
-    Register tmpReg = as_Register($tmp$$reg);
-    MacroAssembler masm(&cbuf);
-
-    if (EmitSync & 4) {
-       masm.cmpptr(rsp, 0) ;
-    } else
-    if (EmitSync & 8) {
-       Label DONE_LABEL;
-       if (UseBiasedLocking) {
-         masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
-       }
-
-       // Check whether the displaced header is 0
-       // (=> recursive unlock)
-       masm.movptr(tmpReg, Address(boxReg, 0));
-       masm.testptr(tmpReg, tmpReg);
-       masm.jcc(Assembler::zero, DONE_LABEL);
-
-       // If not recursive lock, reset the header to displaced header
-       if (os::is_MP()) {
-         masm.lock();
-       }
-       masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
-       masm.bind(DONE_LABEL);
-       masm.nop(); // avoid branch to branch
-    } else {
-       Label DONE_LABEL, Stacked, CheckSucc ;
-
-       if (UseBiasedLocking && !UseOptoBiasInlining) {
-         masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
-       }
-
-       masm.movptr(tmpReg, Address(objReg, 0)) ;
-       masm.cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD) ;
-       masm.jcc   (Assembler::zero, DONE_LABEL) ;
-       masm.testl (tmpReg, 0x02) ;
-       masm.jcc   (Assembler::zero, Stacked) ;
-
-       // It's inflated
-       masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
-       masm.xorptr(boxReg, r15_thread) ;
-       masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
-       masm.jcc   (Assembler::notZero, DONE_LABEL) ;
-       masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
-       masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
-       masm.jcc   (Assembler::notZero, CheckSucc) ;
-       masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
-       masm.jmp   (DONE_LABEL) ;
-
-       if ((EmitSync & 65536) == 0) {
-         Label LSuccess, LGoSlowPath ;
-         masm.bind  (CheckSucc) ;
-         masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
-         masm.jcc   (Assembler::zero, LGoSlowPath) ;
-
-         // I'd much rather use lock:andl m->_owner, 0 as it's faster than
-         // the explicit ST;MEMBAR combination, but masm doesn't currently support
-         // "ANDQ M,IMM".  Don't use MFENCE here.  lock:add to TOS, xchg, etc.
-         // are all faster when the write buffer is populated.
-         masm.movptr (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
-         if (os::is_MP()) {
-            masm.lock () ; masm.addl (Address(rsp, 0), 0) ;
-         }
-         masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
-         masm.jcc   (Assembler::notZero, LSuccess) ;
-
-         masm.movptr (boxReg, (int32_t)NULL_WORD) ;                   // box is really EAX
-         if (os::is_MP()) { masm.lock(); }
-         masm.cmpxchgptr(r15_thread, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
-         masm.jcc   (Assembler::notEqual, LSuccess) ;
-         // Intentional fall-through into slow-path
-
-         masm.bind  (LGoSlowPath) ;
-         masm.orl   (boxReg, 1) ;                      // set ICC.ZF=0 to indicate failure
-         masm.jmp   (DONE_LABEL) ;
-
-         masm.bind  (LSuccess) ;
-         masm.testl (boxReg, 0) ;                      // set ICC.ZF=1 to indicate success
-         masm.jmp   (DONE_LABEL) ;
-       }
-
-       masm.bind  (Stacked) ;
-       masm.movptr(tmpReg, Address (boxReg, 0)) ;      // re-fetch
-       if (os::is_MP()) { masm.lock(); }
-       masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
-
-       if (EmitSync & 65536) {
-          masm.bind (CheckSucc) ;
-       }
-       masm.bind(DONE_LABEL);
-       if (EmitSync & 32768) {
-          masm.nop();                      // avoid branch to branch
-       }
-    }
-  %}
-
-
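The matching stack-unlock fast path in the same simplified model; a zero displaced header in the box marks a recursive enter, so there is nothing to restore:

    #include <atomic>
    #include <cstdint>

    bool try_stack_unlock(std::atomic<uintptr_t>* mark, uintptr_t* box) {
      uintptr_t displaced = *box;          // re-fetch the displaced header
      if (displaced == 0)
        return true;                       // recursive unlock: box->dhw == 0
      uintptr_t expected = reinterpret_cast<uintptr_t>(box);
      return mark->compare_exchange_strong(expected, displaced);  // restore
    }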
   enc_class enc_rethrow()
   %{
     cbuf.set_insts_mark();
@@ -6963,82 +6726,6 @@
 //----------Arithmetic Instructions--------------------------------------------
 //----------Addition Instructions----------------------------------------------
 
-instruct addExactI_rReg(rax_RegI dst, rRegI src, rFlagsReg cr)
-%{
-  match(AddExactI dst src);
-  effect(DEF cr);
-
-  format %{ "addl    $dst, $src\t# addExact int" %}
-  ins_encode %{
-    __ addl($dst$$Register, $src$$Register);
-  %}
-  ins_pipe(ialu_reg_reg);
-%}
-
-instruct addExactI_rReg_imm(rax_RegI dst, immI src, rFlagsReg cr)
-%{
-  match(AddExactI dst src);
-  effect(DEF cr);
-
-  format %{ "addl    $dst, $src\t# addExact int" %}
-  ins_encode %{
-    __ addl($dst$$Register, $src$$constant);
-  %}
-  ins_pipe(ialu_reg_reg);
-%}
-
-instruct addExactI_rReg_mem(rax_RegI dst, memory src, rFlagsReg cr)
-%{
-  match(AddExactI dst (LoadI src));
-  effect(DEF cr);
-
-  ins_cost(125); // XXX
-  format %{ "addl    $dst, $src\t# addExact int" %}
-  ins_encode %{
-    __ addl($dst$$Register, $src$$Address);
-  %}
-
-  ins_pipe(ialu_reg_mem);
-%}
-
-instruct addExactL_rReg(rax_RegL dst, rRegL src, rFlagsReg cr)
-%{
-  match(AddExactL dst src);
-  effect(DEF cr);
-
-  format %{ "addq    $dst, $src\t# addExact long" %}
-  ins_encode %{
-    __ addq($dst$$Register, $src$$Register);
-  %}
-  ins_pipe(ialu_reg_reg);
-%}
-
-instruct addExactL_rReg_imm(rax_RegL dst, immL32 src, rFlagsReg cr)
-%{
-  match(AddExactL dst src);
-  effect(DEF cr);
-
-  format %{ "addq    $dst, $src\t# addExact long" %}
-  ins_encode %{
-    __ addq($dst$$Register, $src$$constant);
-  %}
-  ins_pipe(ialu_reg_reg);
-%}
-
-instruct addExactL_rReg_mem(rax_RegL dst, memory src, rFlagsReg cr)
-%{
-  match(AddExactL dst (LoadL src));
-  effect(DEF cr);
-
-  ins_cost(125); // XXX
-  format %{ "addq    $dst, $src\t# addExact long" %}
-  ins_encode %{
-    __ addq($dst$$Register, $src$$Address);
-  %}
-
-  ins_pipe(ialu_reg_mem);
-%}
-
 instruct addI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
 %{
   match(Set dst (AddI dst src));
@@ -7651,80 +7338,6 @@
   ins_pipe(ialu_mem_imm);
 %}
 
-instruct subExactI_rReg(rax_RegI dst, rRegI src, rFlagsReg cr)
-%{
-  match(SubExactI dst src);
-  effect(DEF cr);
-
-  format %{ "subl    $dst, $src\t# subExact int" %}
-  ins_encode %{
-    __ subl($dst$$Register, $src$$Register);
-  %}
-  ins_pipe(ialu_reg_reg);
-%}
-
-instruct subExactI_rReg_imm(rax_RegI dst, immI src, rFlagsReg cr)
-%{
-  match(SubExactI dst src);
-  effect(DEF cr);
-
-  format %{ "subl    $dst, $src\t# subExact int" %}
-  ins_encode %{
-    __ subl($dst$$Register, $src$$constant);
-  %}
-  ins_pipe(ialu_reg_reg);
-%}
-
-instruct subExactI_rReg_mem(rax_RegI dst, memory src, rFlagsReg cr)
-%{
-  match(SubExactI dst (LoadI src));
-  effect(DEF cr);
-
-  ins_cost(125);
-  format %{ "subl    $dst, $src\t# subExact int" %}
-  ins_encode %{
-    __ subl($dst$$Register, $src$$Address);
-  %}
-  ins_pipe(ialu_reg_mem);
-%}
-
-instruct subExactL_rReg(rax_RegL dst, rRegL src, rFlagsReg cr)
-%{
-  match(SubExactL dst src);
-  effect(DEF cr);
-
-  format %{ "subq    $dst, $src\t# subExact long" %}
-  ins_encode %{
-    __ subq($dst$$Register, $src$$Register);
-  %}
-  ins_pipe(ialu_reg_reg);
-%}
-
-instruct subExactL_rReg_imm(rax_RegL dst, immL32 src, rFlagsReg cr)
-%{
-  match(SubExactL dst src);
-  effect(DEF cr);
-
-  format %{ "subq    $dst, $src\t# subExact long" %}
-  ins_encode %{
-    __ subq($dst$$Register, $src$$constant);
-  %}
-  ins_pipe(ialu_reg_reg);
-%}
-
-instruct subExactL_rReg_mem(rax_RegL dst, memory src, rFlagsReg cr)
-%{
-  match(SubExactL dst (LoadL src));
-  effect(DEF cr);
-
-  ins_cost(125);
-  format %{ "subq    $dst, $src\t# subExact long" %}
-  ins_encode %{
-    __ subq($dst$$Register, $src$$Address);
-  %}
-  ins_pipe(ialu_reg_mem);
-%}
-
 instruct subL_rReg(rRegL dst, rRegL src, rFlagsReg cr)
 %{
   match(Set dst (SubL dst src));
@@ -7841,31 +7454,6 @@
   ins_pipe(ialu_reg);
 %}
 
-instruct negExactI_rReg(rax_RegI dst, rFlagsReg cr)
-%{
-  match(NegExactI dst);
-  effect(KILL cr);
-
-  format %{ "negl    $dst\t# negExact int" %}
-  ins_encode %{
-    __ negl($dst$$Register);
-  %}
-  ins_pipe(ialu_reg);
-%}
-
-instruct negExactL_rReg(rax_RegL dst, rFlagsReg cr)
-%{
-  match(NegExactL dst);
-  effect(KILL cr);
-
-  format %{ "negq    $dst\t# negExact long" %}
-  ins_encode %{
-    __ negq($dst$$Register);
-  %}
-  ins_pipe(ialu_reg);
-%}
-
-
 //----------Multiplication/Division Instructions-------------------------------
 // Integer Multiplication Instructions
 // Multiply Register
@@ -7982,86 +7570,6 @@
   ins_pipe(ialu_reg_reg_alu0);
 %}
 
-
-instruct mulExactI_rReg(rax_RegI dst, rRegI src, rFlagsReg cr)
-%{
-  match(MulExactI dst src);
-  effect(DEF cr);
-
-  ins_cost(300);
-  format %{ "imull   $dst, $src\t# mulExact int" %}
-  ins_encode %{
-    __ imull($dst$$Register, $src$$Register);
-  %}
-  ins_pipe(ialu_reg_reg_alu0);
-%}
-
-
-instruct mulExactI_rReg_imm(rax_RegI dst, rRegI src, immI imm, rFlagsReg cr)
-%{
-  match(MulExactI src imm);
-  effect(DEF cr);
-
-  ins_cost(300);
-  format %{ "imull   $dst, $src, $imm\t# mulExact int" %}
-  ins_encode %{
-    __ imull($dst$$Register, $src$$Register, $imm$$constant);
-  %}
-  ins_pipe(ialu_reg_reg_alu0);
-%}
-
-instruct mulExactI_rReg_mem(rax_RegI dst, memory src, rFlagsReg cr)
-%{
-  match(MulExactI dst (LoadI src));
-  effect(DEF cr);
-
-  ins_cost(350);
-  format %{ "imull   $dst, $src\t# mulExact int" %}
-  ins_encode %{
-    __ imull($dst$$Register, $src$$Address);
-  %}
-  ins_pipe(ialu_reg_mem_alu0);
-%}
-
-instruct mulExactL_rReg(rax_RegL dst, rRegL src, rFlagsReg cr)
-%{
-  match(MulExactL dst src);
-  effect(DEF cr);
-
-  ins_cost(300);
-  format %{ "imulq   $dst, $src\t# mulExact long" %}
-  ins_encode %{
-    __ imulq($dst$$Register, $src$$Register);
-  %}
-  ins_pipe(ialu_reg_reg_alu0);
-%}
-
-instruct mulExactL_rReg_imm(rax_RegL dst, rRegL src, immL32 imm, rFlagsReg cr)
-%{
-  match(MulExactL src imm);
-  effect(DEF cr);
-
-  ins_cost(300);
-  format %{ "imulq   $dst, $src, $imm\t# mulExact long" %}
-  ins_encode %{
-    __ imulq($dst$$Register, $src$$Register, $imm$$constant);
-  %}
-  ins_pipe(ialu_reg_reg_alu0);
-%}
-
-instruct mulExactL_rReg_mem(rax_RegL dst, memory src, rFlagsReg cr)
-%{
-  match(MulExactL dst (LoadL src));
-  effect(DEF cr);
-
-  ins_cost(350);
-  format %{ "imulq   $dst, $src\t# mulExact long" %}
-  ins_encode %{
-    __ imulq($dst$$Register, $src$$Address);
-  %}
-  ins_pipe(ialu_reg_mem_alu0);
-%}
-
 instruct divI_rReg(rax_RegI rax, rdx_RegI rdx, no_rax_rdx_RegI div,
                    rFlagsReg cr)
 %{
@@ -10670,6 +10178,174 @@
   ins_pipe( pipe_slow );
 %}
 
+//----------Overflow Math Instructions-----------------------------------------
+
+instruct overflowAddI_rReg(rFlagsReg cr, rax_RegI op1, rRegI op2)
+%{
+  match(Set cr (OverflowAddI op1 op2));
+  effect(DEF cr, USE_KILL op1, USE op2);
+
+  format %{ "addl    $op1, $op2\t# overflow check int" %}
+
+  ins_encode %{
+    __ addl($op1$$Register, $op2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct overflowAddI_rReg_imm(rFlagsReg cr, rax_RegI op1, immI op2)
+%{
+  match(Set cr (OverflowAddI op1 op2));
+  effect(DEF cr, USE_KILL op1, USE op2);
+
+  format %{ "addl    $op1, $op2\t# overflow check int" %}
+
+  ins_encode %{
+    __ addl($op1$$Register, $op2$$constant);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct overflowAddL_rReg(rFlagsReg cr, rax_RegL op1, rRegL op2)
+%{
+  match(Set cr (OverflowAddL op1 op2));
+  effect(DEF cr, USE_KILL op1, USE op2);
+
+  format %{ "addq    $op1, $op2\t# overflow check long" %}
+  ins_encode %{
+    __ addq($op1$$Register, $op2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct overflowAddL_rReg_imm(rFlagsReg cr, rax_RegL op1, immL32 op2)
+%{
+  match(Set cr (OverflowAddL op1 op2));
+  effect(DEF cr, USE_KILL op1, USE op2);
+
+  format %{ "addq    $op1, $op2\t# overflow check long" %}
+  ins_encode %{
+    __ addq($op1$$Register, $op2$$constant);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct overflowSubI_rReg(rFlagsReg cr, rRegI op1, rRegI op2)
+%{
+  match(Set cr (OverflowSubI op1 op2));
+
+  format %{ "cmpl    $op1, $op2\t# overflow check int" %}
+  ins_encode %{
+    __ cmpl($op1$$Register, $op2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct overflowSubI_rReg_imm(rFlagsReg cr, rRegI op1, immI op2)
+%{
+  match(Set cr (OverflowSubI op1 op2));
+
+  format %{ "cmpl    $op1, $op2\t# overflow check int" %}
+  ins_encode %{
+    __ cmpl($op1$$Register, $op2$$constant);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct overflowSubL_rReg(rFlagsReg cr, rRegL op1, rRegL op2)
+%{
+  match(Set cr (OverflowSubL op1 op2));
+
+  format %{ "cmpq    $op1, $op2\t# overflow check long" %}
+  ins_encode %{
+    __ cmpq($op1$$Register, $op2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct overflowSubL_rReg_imm(rFlagsReg cr, rRegL op1, immL32 op2)
+%{
+  match(Set cr (OverflowSubL op1 op2));
+
+  format %{ "cmpq    $op1, $op2\t# overflow check long" %}
+  ins_encode %{
+    __ cmpq($op1$$Register, $op2$$constant);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct overflowNegI_rReg(rFlagsReg cr, immI0 zero, rax_RegI op2)
+%{
+  match(Set cr (OverflowSubI zero op2));
+  effect(DEF cr, USE_KILL op2);
+
+  format %{ "negl    $op2\t# overflow check int" %}
+  ins_encode %{
+    __ negl($op2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct overflowNegL_rReg(rFlagsReg cr, immL0 zero, rax_RegL op2)
+%{
+  match(Set cr (OverflowSubL zero op2));
+  effect(DEF cr, USE_KILL op2);
+
+  format %{ "negq    $op2\t# overflow check long" %}
+  ins_encode %{
+    __ negq($op2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct overflowMulI_rReg(rFlagsReg cr, rax_RegI op1, rRegI op2)
+%{
+  match(Set cr (OverflowMulI op1 op2));
+  effect(DEF cr, USE_KILL op1, USE op2);
+
+  format %{ "imull    $op1, $op2\t# overflow check int" %}
+  ins_encode %{
+    __ imull($op1$$Register, $op2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg_alu0);
+%}
+
+instruct overflowMulI_rReg_imm(rFlagsReg cr, rRegI op1, immI op2, rRegI tmp)
+%{
+  match(Set cr (OverflowMulI op1 op2));
+  effect(DEF cr, TEMP tmp, USE op1, USE op2);
+
+  format %{ "imull    $tmp, $op1, $op2\t# overflow check int" %}
+  ins_encode %{
+    __ imull($tmp$$Register, $op1$$Register, $op2$$constant);
+  %}
+  ins_pipe(ialu_reg_reg_alu0);
+%}
+
+instruct overflowMulL_rReg(rFlagsReg cr, rax_RegL op1, rRegL op2)
+%{
+  match(Set cr (OverflowMulL op1 op2));
+  effect(DEF cr, USE_KILL op1, USE op2);
+
+  format %{ "imulq    $op1, $op2\t# overflow check long" %}
+  ins_encode %{
+    __ imulq($op1$$Register, $op2$$Register);
+  %}
+  ins_pipe(ialu_reg_reg_alu0);
+%}
+
+instruct overflowMulL_rReg_imm(rFlagsReg cr, rRegL op1, immL32 op2, rRegL tmp)
+%{
+  match(Set cr (OverflowMulL op1 op2));
+  effect(DEF cr, TEMP tmp, USE op1, USE op2);
+
+  format %{ "imulq    $tmp, $op1, $op2\t# overflow check long" %}
+  ins_encode %{
+    __ imulq($tmp$$Register, $op1$$Register, $op2$$constant);
+  %}
+  ins_pipe(ialu_reg_reg_alu0);
+%}
+
 
 //----------Control Flow Instructions------------------------------------------
 // Signed compare Instructions
@@ -11453,27 +11129,25 @@
 // ============================================================================
 // inlined locking and unlocking
 
-instruct cmpFastLock(rFlagsReg cr,
-                     rRegP object, rbx_RegP box, rax_RegI tmp, rRegP scr)
-%{
+instruct cmpFastLock(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI tmp, rRegP scr) %{
   match(Set cr (FastLock object box));
   effect(TEMP tmp, TEMP scr, USE_KILL box);
-
   ins_cost(300);
   format %{ "fastlock $object,$box\t! kills $box,$tmp,$scr" %}
-  ins_encode(Fast_Lock(object, box, tmp, scr));
+  ins_encode %{
+    __ fast_lock($object$$Register, $box$$Register, $tmp$$Register, $scr$$Register, _counters);
+  %}
   ins_pipe(pipe_slow);
 %}
 
-instruct cmpFastUnlock(rFlagsReg cr,
-                       rRegP object, rax_RegP box, rRegP tmp)
-%{
+instruct cmpFastUnlock(rFlagsReg cr, rRegP object, rax_RegP box, rRegP tmp) %{
   match(Set cr (FastUnlock object box));
   effect(TEMP tmp, USE_KILL box);
-
   ins_cost(300);
   format %{ "fastunlock $object,$box\t! kills $box,$tmp" %}
-  ins_encode(Fast_Unlock(object, box, tmp));
+  ins_encode %{
+    __ fast_unlock($object$$Register, $box$$Register, $tmp$$Register);
+  %}
   ins_pipe(pipe_slow);
 %}
 
--- a/src/os/aix/vm/os_aix.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/os/aix/vm/os_aix.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -1496,6 +1496,10 @@
   return res;
 }
 
+void* os::get_default_process_handle() {
+  return (void*)::dlopen(NULL, RTLD_LAZY);
+}
+
 void os::print_dll_info(outputStream *st) {
   st->print_cr("Dynamic libraries:");
   LoadedLibraries::print(st);
--- a/src/os/bsd/vm/os_bsd.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/os/bsd/vm/os_bsd.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -1788,7 +1788,7 @@
         jrelib_p = buf + len;
         snprintf(jrelib_p, buflen-len, "/%s", COMPILER_VARIANT);
         if (0 != access(buf, F_OK)) {
-          snprintf(jrelib_p, buflen-len, "");
+          snprintf(jrelib_p, buflen-len, "%s", "");
         }
 
         // If the path exists within JAVA_HOME, add the JVM library name
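The one-line change above presumably sidesteps GCC's -Wformat-zero-length warning: a literal empty format string is flagged under -Wformat, while routing it through "%s" yields the same result. In isolation:

    #include <cstdio>

    int main() {
      char buf[16];
      // std::snprintf(buf, sizeof buf, "");       // -Wformat-zero-length
      std::snprintf(buf, sizeof buf, "%s", "");    // same effect, no warning
      return buf[0];                               // buf is now ""
    }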
--- a/src/share/vm/adlc/archDesc.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/adlc/archDesc.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -1167,15 +1167,12 @@
          || strcmp(idealName,"CmpF") == 0
          || strcmp(idealName,"FastLock") == 0
          || strcmp(idealName,"FastUnlock") == 0
-         || strcmp(idealName,"AddExactI") == 0
-         || strcmp(idealName,"AddExactL") == 0
-         || strcmp(idealName,"SubExactI") == 0
-         || strcmp(idealName,"SubExactL") == 0
-         || strcmp(idealName,"MulExactI") == 0
-         || strcmp(idealName,"MulExactL") == 0
-         || strcmp(idealName,"NegExactI") == 0
-         || strcmp(idealName,"NegExactL") == 0
-         || strcmp(idealName,"FlagsProj") == 0
+         || strcmp(idealName,"OverflowAddI") == 0
+         || strcmp(idealName,"OverflowAddL") == 0
+         || strcmp(idealName,"OverflowSubI") == 0
+         || strcmp(idealName,"OverflowSubL") == 0
+         || strcmp(idealName,"OverflowMulI") == 0
+         || strcmp(idealName,"OverflowMulL") == 0
          || strcmp(idealName,"Bool") == 0
          || strcmp(idealName,"Binary") == 0 ) {
       // Removed ConI from the must_clone list.  CPUs that cannot use
--- a/src/share/vm/ci/ciClassList.hpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/ci/ciClassList.hpp	Wed Feb 26 02:38:46 2014 -0800
@@ -103,6 +103,7 @@
 friend class ciMethodType;             \
 friend class ciReceiverTypeData;       \
 friend class ciTypeEntries;            \
+friend class ciSpeculativeTrapData;    \
 friend class ciSymbol;                 \
 friend class ciArray;                  \
 friend class ciObjArray;               \
--- a/src/share/vm/ci/ciMethodData.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/ci/ciMethodData.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -78,6 +78,35 @@
   _parameters = NULL;
 }
 
+void ciMethodData::load_extra_data() {
+  MethodData* mdo = get_MethodData();
+
+  // speculative trap entries also hold a pointer to a Method, so they need to be translated
+  DataLayout* dp_src  = mdo->extra_data_base();
+  DataLayout* end_src = mdo->extra_data_limit();
+  DataLayout* dp_dst  = extra_data_base();
+  for (;; dp_src = MethodData::next_extra(dp_src), dp_dst = MethodData::next_extra(dp_dst)) {
+    assert(dp_src < end_src, "moved past end of extra data");
+    assert(dp_src->tag() == dp_dst->tag(), err_msg("should be same tags %d != %d", dp_src->tag(), dp_dst->tag()));
+    switch(dp_src->tag()) {
+    case DataLayout::speculative_trap_data_tag: {
+      ciSpeculativeTrapData* data_dst = new ciSpeculativeTrapData(dp_dst);
+      SpeculativeTrapData* data_src = new SpeculativeTrapData(dp_src);
+      data_dst->translate_from(data_src);
+      break;
+    }
+    case DataLayout::bit_data_tag:
+      break;
+    case DataLayout::no_tag:
+    case DataLayout::arg_info_data_tag:
+      // An empty slot or ArgInfoData entry marks the end of the trap data
+      return;
+    default:
+      fatal(err_msg("bad tag = %d", dp_src->tag()));
+    }
+  }
+}
+
 void ciMethodData::load_data() {
   MethodData* mdo = get_MethodData();
   if (mdo == NULL) {
@@ -116,6 +145,8 @@
     parameters->translate_from(mdo->parameters_type_data());
   }
 
+  load_extra_data();
+
   // Note:  Extra data are all BitData, and do not need translation.
   _current_mileage = MethodData::mileage_of(mdo->method());
   _invocation_counter = mdo->invocation_count();
@@ -156,6 +187,12 @@
   set_type(translate_klass(k));
 }
 
+void ciSpeculativeTrapData::translate_from(const ProfileData* data) {
+  Method* m = data->as_SpeculativeTrapData()->method();
+  ciMethod* ci_m = CURRENT_ENV->get_method(m);
+  set_method(ci_m);
+}
+
 // Get the data at an arbitrary (sort of) data index.
 ciProfileData* ciMethodData::data_at(int data_index) {
   if (out_of_bounds(data_index)) {
@@ -203,33 +240,65 @@
   return next;
 }
 
-// Translate a bci to its corresponding data, or NULL.
-ciProfileData* ciMethodData::bci_to_data(int bci) {
-  ciProfileData* data = data_before(bci);
-  for ( ; is_valid(data); data = next_data(data)) {
-    if (data->bci() == bci) {
-      set_hint_di(dp_to_di(data->dp()));
-      return data;
-    } else if (data->bci() > bci) {
-      break;
-    }
-  }
+ciProfileData* ciMethodData::bci_to_extra_data(int bci, ciMethod* m, bool& two_free_slots) {
   // bci_to_extra_data(bci) ...
   DataLayout* dp  = data_layout_at(data_size());
   DataLayout* end = data_layout_at(data_size() + extra_data_size());
-  for (; dp < end; dp = MethodData::next_extra(dp)) {
-    if (dp->tag() == DataLayout::no_tag) {
+  two_free_slots = false;
+  for (; dp < end; dp = MethodData::next_extra(dp)) {
+    switch(dp->tag()) {
+    case DataLayout::no_tag:
       _saw_free_extra_data = true;  // observed an empty slot (common case)
+      two_free_slots = (MethodData::next_extra(dp)->tag() == DataLayout::no_tag);
       return NULL;
+    case DataLayout::arg_info_data_tag:
+      return NULL; // ArgInfoData is at the end of extra data section.
+    case DataLayout::bit_data_tag:
+      if (m == NULL && dp->bci() == bci) {
+        return new ciBitData(dp);
+      }
+      break;
+    case DataLayout::speculative_trap_data_tag: {
+      ciSpeculativeTrapData* data = new ciSpeculativeTrapData(dp);
+      // data->method() might be null if the MDO is snapshotted
+      // concurrently with a trap
+      if (m != NULL && data->method() == m && dp->bci() == bci) {
+        return data;
+      }
+      break;
     }
-    if (dp->tag() == DataLayout::arg_info_data_tag) {
-      break; // ArgInfoData is at the end of extra data section.
+    default:
+      fatal(err_msg("bad tag = %d", dp->tag()));
     }
-    if (dp->bci() == bci) {
-      assert(dp->tag() == DataLayout::bit_data_tag, "sane");
-      return new ciBitData(dp);
+  }
+  return NULL;
+}
+
+// Translate a bci to its corresponding data, or NULL.
+ciProfileData* ciMethodData::bci_to_data(int bci, ciMethod* m) {
+  // If m is not NULL we look for a SpeculativeTrapData entry
+  if (m == NULL) {
+    ciProfileData* data = data_before(bci);
+    for ( ; is_valid(data); data = next_data(data)) {
+      if (data->bci() == bci) {
+        set_hint_di(dp_to_di(data->dp()));
+        return data;
+      } else if (data->bci() > bci) {
+        break;
+      }
     }
   }
+  bool two_free_slots = false;
+  ciProfileData* result = bci_to_extra_data(bci, m, two_free_slots);
+  if (result != NULL) {
+    return result;
+  }
+  if (m != NULL && !two_free_slots) {
+    // We were looking for a SpeculativeTrapData entry we didn't
+    // find. There is no room left for more SpeculativeTrapData
+    // entries, so look among the non-SpeculativeTrapData entries.
+    return bci_to_data(bci, NULL);
+  }
   return NULL;
 }
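In outline, the reworked bci_to_data is a two-tier search: method-specific (speculative) entries win, and when the extra area is full the lookup retries method-agnostically. A self-contained sketch of the same control flow over a stubbed profile store (Entry, Profile, and the bci < 0 empty-slot convention are all illustrative assumptions, not HotSpot types):

    // Illustrative stand-ins, not HotSpot types.
    struct Entry { int bci = -1; const void* method = nullptr; };

    struct Profile {
      static const int N = 4;
      Entry regular[N];        // ordinary per-bci records
      Entry extra[N];          // overflow area, shared with speculative records

      const Entry* find_regular(int bci) const {
        for (int i = 0; i < N; i++)
          if (regular[i].bci == bci) return &regular[i];
        return nullptr;
      }

      const Entry* find_extra(int bci, const void* m, bool& two_free) const {
        two_free = false;
        for (int i = 0; i < N; i++) {
          if (extra[i].bci < 0) {                  // empty slot: end of data
            two_free = (i + 1 < N && extra[i + 1].bci < 0);
            return nullptr;
          }
          if (extra[i].bci == bci && extra[i].method == m) return &extra[i];
        }
        return nullptr;                            // area completely full
      }

      // Mirrors the shape of ciMethodData::bci_to_data.
      const Entry* lookup(int bci, const void* m) const {
        if (m == nullptr) {
          if (const Entry* e = find_regular(bci)) return e;
        }
        bool two_free = false;
        if (const Entry* e = find_extra(bci, m, two_free)) return e;
        if (m != nullptr && !two_free)
          return lookup(bci, nullptr);             // no room: fall back
        return nullptr;
      }
    };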
 
@@ -525,18 +594,25 @@
   st->print_cr("--- Extra data:");
   DataLayout* dp  = data_layout_at(data_size());
   DataLayout* end = data_layout_at(data_size() + extra_data_size());
-  for (; dp < end; dp = MethodData::next_extra(dp)) {
-    if (dp->tag() == DataLayout::no_tag)  continue;
-    if (dp->tag() == DataLayout::bit_data_tag) {
+  for (;; dp = MethodData::next_extra(dp)) {
+    assert(dp < end, "moved past end of extra data");
+    switch (dp->tag()) {
+    case DataLayout::no_tag:
+      continue;
+    case DataLayout::bit_data_tag:
       data = new BitData(dp);
-    } else {
-      assert(dp->tag() == DataLayout::arg_info_data_tag, "must be BitData or ArgInfo");
+      break;
+    case DataLayout::arg_info_data_tag:
       data = new ciArgInfoData(dp);
       dp = end; // ArgInfoData is at the end of extra data section.
+      break;
+    default:
+      fatal(err_msg("unexpected tag %d", dp->tag()));
     }
     st->print("%d", dp_to_di(data->dp()));
     st->fill_to(6);
     data->print_data_on(st);
+    if (dp >= end) return;
   }
 }
 
@@ -569,8 +645,8 @@
   st->cr();
 }
 
-void ciCallTypeData::print_data_on(outputStream* st) const {
-  print_shared(st, "ciCallTypeData");
+void ciCallTypeData::print_data_on(outputStream* st, const char* extra) const {
+  print_shared(st, "ciCallTypeData", extra);
   if (has_arguments()) {
     tab(st, true);
     st->print("argument types");
@@ -599,18 +675,18 @@
   }
 }
 
-void ciReceiverTypeData::print_data_on(outputStream* st) const {
-  print_shared(st, "ciReceiverTypeData");
+void ciReceiverTypeData::print_data_on(outputStream* st, const char* extra) const {
+  print_shared(st, "ciReceiverTypeData", extra);
   print_receiver_data_on(st);
 }
 
-void ciVirtualCallData::print_data_on(outputStream* st) const {
-  print_shared(st, "ciVirtualCallData");
+void ciVirtualCallData::print_data_on(outputStream* st, const char* extra) const {
+  print_shared(st, "ciVirtualCallData", extra);
   rtd_super()->print_receiver_data_on(st);
 }
 
-void ciVirtualCallTypeData::print_data_on(outputStream* st) const {
-  print_shared(st, "ciVirtualCallTypeData");
+void ciVirtualCallTypeData::print_data_on(outputStream* st, const char* extra) const {
+  print_shared(st, "ciVirtualCallTypeData", extra);
   rtd_super()->print_receiver_data_on(st);
   if (has_arguments()) {
     tab(st, true);
@@ -624,8 +700,15 @@
   }
 }
 
-void ciParametersTypeData::print_data_on(outputStream* st) const {
-  st->print_cr("Parametertypes");
+void ciParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
+  st->print_cr("ciParametersTypeData");
   parameters()->print_data_on(st);
 }
+
+void ciSpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
+  st->print_cr("ciSpeculativeTrapData");
+  tab(st);
+  method()->print_short_name(st);
+  st->cr();
+}
 #endif
--- a/src/share/vm/ci/ciMethodData.hpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/ci/ciMethodData.hpp	Wed Feb 26 02:38:46 2014 -0800
@@ -31,6 +31,7 @@
 #include "ci/ciUtilities.hpp"
 #include "oops/methodData.hpp"
 #include "oops/oop.inline.hpp"
+#include "runtime/deoptimization.hpp"
 
 class ciBitData;
 class ciCounterData;
@@ -44,6 +45,7 @@
 class ciCallTypeData;
 class ciVirtualCallTypeData;
 class ciParametersTypeData;
+class ciSpeculativeTrapData;
 
 typedef ProfileData ciProfileData;
 
@@ -173,7 +175,7 @@
   }
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st) const;
+  void print_data_on(outputStream* st, const char* extra) const;
 #endif
 };
 
@@ -200,7 +202,7 @@
   }
   void translate_receiver_data_from(const ProfileData* data);
 #ifndef PRODUCT
-  void print_data_on(outputStream* st) const;
+  void print_data_on(outputStream* st, const char* extra) const;
   void print_receiver_data_on(outputStream* st) const;
 #endif
 };
@@ -225,7 +227,7 @@
     rtd_super()->translate_receiver_data_from(data);
   }
 #ifndef PRODUCT
-  void print_data_on(outputStream* st) const;
+  void print_data_on(outputStream* st, const char* extra) const;
 #endif
 };
 
@@ -287,7 +289,7 @@
   }
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st) const;
+  void print_data_on(outputStream* st, const char* extra) const;
 #endif
 };
 
@@ -336,7 +338,26 @@
   }
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st) const;
+  void print_data_on(outputStream* st, const char* extra) const;
+#endif
+};
+
+class ciSpeculativeTrapData : public SpeculativeTrapData {
+public:
+  ciSpeculativeTrapData(DataLayout* layout) : SpeculativeTrapData(layout) {}
+
+  virtual void translate_from(const ProfileData* data);
+
+  ciMethod* method() const {
+    return (ciMethod*)intptr_at(method_offset);
+  }
+
+  void set_method(ciMethod* m) {
+    set_intptr_at(method_offset, (intptr_t)m);
+  }
+
+#ifndef PRODUCT
+  void print_data_on(outputStream* st, const char* extra) const;
 #endif
 };
 
@@ -436,6 +457,16 @@
 
   ciArgInfoData *arg_info() const;
 
+  address data_base() const {
+    return (address) _data;
+  }
+  DataLayout* limit_data_position() const {
+    return (DataLayout*)((address)data_base() + _data_size);
+  }
+
+  void load_extra_data();
+  ciProfileData* bci_to_extra_data(int bci, ciMethod* m, bool& two_free_slots);
+
 public:
   bool is_method_data() const { return true; }
 
@@ -475,9 +506,11 @@
   ciProfileData* next_data(ciProfileData* current);
   bool is_valid(ciProfileData* current) { return current != NULL; }
 
-  // Get the data at an arbitrary bci, or NULL if there is none.
-  ciProfileData* bci_to_data(int bci);
-  ciProfileData* bci_to_extra_data(int bci, bool create_if_missing);
+  DataLayout* extra_data_base() const { return limit_data_position(); }
+
+  // Get the data at an arbitrary bci, or NULL if there is none. If m
+  // is not NULL look for a SpeculativeTrapData if any first.
+  ciProfileData* bci_to_data(int bci, ciMethod* m = NULL);
 
   uint overflow_trap_count() const {
     return _orig.overflow_trap_count();
@@ -496,12 +529,13 @@
 
   // Helpful query functions that decode trap_state.
   int has_trap_at(ciProfileData* data, int reason);
-  int has_trap_at(int bci, int reason) {
-    return has_trap_at(bci_to_data(bci), reason);
+  int has_trap_at(int bci, ciMethod* m, int reason) {
+    assert((m != NULL) == Deoptimization::reason_is_speculate(reason), "inconsistent method/reason");
+    return has_trap_at(bci_to_data(bci, m), reason);
   }
   int trap_recompiled_at(ciProfileData* data);
-  int trap_recompiled_at(int bci) {
-    return trap_recompiled_at(bci_to_data(bci));
+  int trap_recompiled_at(int bci, ciMethod* m) {
+    return trap_recompiled_at(bci_to_data(bci, m));
   }
 
   void clear_escape_info();
--- a/src/share/vm/classfile/classLoaderData.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/classfile/classLoaderData.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -520,6 +520,13 @@
   }
 }
 
+bool ClassLoaderData::contains_klass(Klass* klass) {
+  for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
+    if (k == klass) return true;
+  }
+  return false;
+}
+
 
 // GC root of class loader data created.
 ClassLoaderData* ClassLoaderDataGraph::_head = NULL;
--- a/src/share/vm/classfile/classLoaderData.hpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/classfile/classLoaderData.hpp	Wed Feb 26 02:38:46 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -260,6 +260,7 @@
   jobject add_handle(Handle h);
   void add_class(Klass* k);
   void remove_class(Klass* k);
+  bool contains_klass(Klass* k);
   void record_dependency(Klass* to, TRAPS);
   void init_dependencies(TRAPS);
 
--- a/src/share/vm/classfile/dictionary.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/classfile/dictionary.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -707,7 +707,7 @@
                 loader_data->class_loader() == NULL ||
                 loader_data->class_loader()->is_instance(),
                 "checking type of class_loader");
-      e->verify(/*check_dictionary*/false);
+      e->verify();
       probe->verify_protection_domain_set();
       element_count++;
     }
--- a/src/share/vm/classfile/systemDictionary.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/classfile/systemDictionary.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2650,23 +2650,6 @@
   constraints()->verify(dictionary(), placeholders());
 }
 
-
-void SystemDictionary::verify_obj_klass_present(Symbol* class_name,
-                                                ClassLoaderData* loader_data) {
-  GCMutexLocker mu(SystemDictionary_lock);
-  Symbol* name;
-
-  Klass* probe = find_class(class_name, loader_data);
-  if (probe == NULL) {
-    probe = SystemDictionary::find_shared_class(class_name);
-    if (probe == NULL) {
-      name = find_placeholder(class_name, loader_data);
-    }
-  }
-  guarantee(probe != NULL || name != NULL,
-            "Loaded klasses should be in SystemDictionary");
-}
-
 // utility function for class load event
 void SystemDictionary::post_class_load_event(const Ticks& start_time,
                                              instanceKlassHandle k,
--- a/src/share/vm/classfile/systemDictionary.hpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/classfile/systemDictionary.hpp	Wed Feb 26 02:38:46 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -375,10 +375,6 @@
   static bool is_internal_format(Symbol* class_name);
 #endif
 
-  // Verify class is in dictionary
-  static void verify_obj_klass_present(Symbol* class_name,
-                                       ClassLoaderData* loader_data);
-
   // Initialization
   static void initialize(TRAPS);
 
--- a/src/share/vm/code/nmethod.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/code/nmethod.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -50,27 +50,6 @@
 
 // Only bother with this argument setup if dtrace is available
 
-#ifndef USDT2
-HS_DTRACE_PROBE_DECL8(hotspot, compiled__method__load,
-  const char*, int, const char*, int, const char*, int, void*, size_t);
-
-HS_DTRACE_PROBE_DECL6(hotspot, compiled__method__unload,
-  char*, int, char*, int, char*, int);
-
-#define DTRACE_METHOD_UNLOAD_PROBE(method)                                \
-  {                                                                       \
-    Method* m = (method);                                                 \
-    if (m != NULL) {                                                      \
-      Symbol* klass_name = m->klass_name();                               \
-      Symbol* name = m->name();                                           \
-      Symbol* signature = m->signature();                                 \
-      HS_DTRACE_PROBE6(hotspot, compiled__method__unload,                 \
-        klass_name->bytes(), klass_name->utf8_length(),                   \
-        name->bytes(), name->utf8_length(),                               \
-        signature->bytes(), signature->utf8_length());                    \
-    }                                                                     \
-  }
-#else /* USDT2 */
 #define DTRACE_METHOD_UNLOAD_PROBE(method)                                \
   {                                                                       \
     Method* m = (method);                                                 \
@@ -84,7 +63,6 @@
         (char *) signature->bytes(), signature->utf8_length());                    \
     }                                                                     \
   }
-#endif /* USDT2 */
 
 #else //  ndef DTRACE_ENABLED
 
@@ -1520,16 +1498,6 @@
 void nmethod::post_compiled_method_load_event() {
 
   Method* moop = method();
-#ifndef USDT2
-  HS_DTRACE_PROBE8(hotspot, compiled__method__load,
-      moop->klass_name()->bytes(),
-      moop->klass_name()->utf8_length(),
-      moop->name()->bytes(),
-      moop->name()->utf8_length(),
-      moop->signature()->bytes(),
-      moop->signature()->utf8_length(),
-      insts_begin(), insts_size());
-#else /* USDT2 */
   HOTSPOT_COMPILED_METHOD_LOAD(
       (char *) moop->klass_name()->bytes(),
       moop->klass_name()->utf8_length(),
@@ -1538,7 +1506,6 @@
       (char *) moop->signature()->bytes(),
       moop->signature()->utf8_length(),
       insts_begin(), insts_size());
-#endif /* USDT2 */
 
   if (JvmtiExport::should_post_compiled_method_load() ||
       JvmtiExport::should_post_compiled_method_unload()) {
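
The hunks above in nmethod.cpp drop the legacy USDT1 probe path and keep only the USDT2 form, in which a generated HOTSPOT_* macro receives raw UTF-8 bytes plus explicit lengths. The sketch below mirrors the shape of the surviving DTRACE_METHOD_UNLOAD_PROBE with toy stand-ins for Symbol, Method, and the probe itself; the real macro is generated from the dtrace provider definition, not hand-written like this.

    // Minimal stand-alone sketch of the USDT2-style probe call shape.
    #include <cstdio>
    #include <cstring>

    struct Symbol {                       // stand-in for HotSpot's Symbol
      const char* _s;
      const unsigned char* bytes() const { return (const unsigned char*)_s; }
      int utf8_length() const { return (int)strlen(_s); }
    };

    struct Method {                       // stand-in for HotSpot's Method
      Symbol _klass, _name, _sig;
      Symbol* klass_name() { return &_klass; }
      Symbol* name()       { return &_name; }
      Symbol* signature()  { return &_sig; }
    };

    // Stub probe: a real USDT2 probe compiles down to a cheap trap site.
    static void HOTSPOT_COMPILED_METHOD_UNLOAD(char* k, int kl,
                                               char* n, int nl,
                                               char* s, int sl) {
      printf("unload %.*s.%.*s%.*s\n", kl, k, nl, n, sl, s);
    }

    #define DTRACE_METHOD_UNLOAD_PROBE(method)                          \
      {                                                                 \
        Method* m = (method);                                           \
        if (m != NULL) {                                                \
          Symbol* klass_name = m->klass_name();                         \
          Symbol* name = m->name();                                     \
          Symbol* signature = m->signature();                           \
          HOTSPOT_COMPILED_METHOD_UNLOAD(                               \
            (char*) klass_name->bytes(), klass_name->utf8_length(),     \
            (char*) name->bytes(), name->utf8_length(),                 \
            (char*) signature->bytes(), signature->utf8_length());      \
        }                                                               \
      }

    int main() {
      Method m = { {"java/lang/String"}, {"hashCode"}, {"()I"} };
      DTRACE_METHOD_UNLOAD_PROBE(&m);   // expands to the guarded probe call
      return 0;
    }
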
--- a/src/share/vm/compiler/compileBroker.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/compiler/compileBroker.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -60,38 +60,6 @@
 
 // Only bother with this argument setup if dtrace is available
 
-#ifndef USDT2
-HS_DTRACE_PROBE_DECL8(hotspot, method__compile__begin,
-  char*, intptr_t, char*, intptr_t, char*, intptr_t, char*, intptr_t);
-HS_DTRACE_PROBE_DECL9(hotspot, method__compile__end,
-  char*, intptr_t, char*, intptr_t, char*, intptr_t, char*, intptr_t, bool);
-
-#define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name)             \
-  {                                                                      \
-    Symbol* klass_name = (method)->klass_name();                         \
-    Symbol* name = (method)->name();                                     \
-    Symbol* signature = (method)->signature();                           \
-    HS_DTRACE_PROBE8(hotspot, method__compile__begin,                    \
-      comp_name, strlen(comp_name),                                      \
-      klass_name->bytes(), klass_name->utf8_length(),                    \
-      name->bytes(), name->utf8_length(),                                \
-      signature->bytes(), signature->utf8_length());                     \
-  }
-
-#define DTRACE_METHOD_COMPILE_END_PROBE(method, comp_name, success)      \
-  {                                                                      \
-    Symbol* klass_name = (method)->klass_name();                         \
-    Symbol* name = (method)->name();                                     \
-    Symbol* signature = (method)->signature();                           \
-    HS_DTRACE_PROBE9(hotspot, method__compile__end,                      \
-      comp_name, strlen(comp_name),                                      \
-      klass_name->bytes(), klass_name->utf8_length(),                    \
-      name->bytes(), name->utf8_length(),                                \
-      signature->bytes(), signature->utf8_length(), (success));          \
-  }
-
-#else /* USDT2 */
-
 #define DTRACE_METHOD_COMPILE_BEGIN_PROBE(method, comp_name)             \
   {                                                                      \
     Symbol* klass_name = (method)->klass_name();                         \
@@ -115,7 +83,6 @@
       (char *) name->bytes(), name->utf8_length(),                       \
       (char *) signature->bytes(), signature->utf8_length(), (success)); \
   }
-#endif /* USDT2 */
 
 #else //  ndef DTRACE_ENABLED
 
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -1730,8 +1730,8 @@
   _dictionary->return_chunk(chunk);
 #ifndef PRODUCT
   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
-    TreeChunk<FreeChunk, AdaptiveFreeList>* tc = TreeChunk<FreeChunk, AdaptiveFreeList>::as_TreeChunk(chunk);
-    TreeList<FreeChunk, AdaptiveFreeList>* tl = tc->list();
+    TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
+    TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
     tl->verify_stats();
   }
 #endif // PRODUCT
@@ -2541,10 +2541,10 @@
 
 #ifndef PRODUCT
 void CompactibleFreeListSpace::check_free_list_consistency() const {
-  assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size() <= IndexSetSize),
+  assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
     "Some sizes can't be allocated without recourse to"
     " linear allocation buffers");
-  assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList>)),
+  assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
     "else MIN_TREE_CHUNK_SIZE is wrong");
   assert(IndexSetStart != 0, "IndexSetStart not initialized");
   assert(IndexSetStride != 0, "IndexSetStride not initialized");
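
The substitution running through this file, AdaptiveFreeList becoming AdaptiveFreeList<FreeChunk> at every use, follows from a change in binaryTreeDictionary further below: the second template parameter is now an ordinary class instead of a template template parameter, so callers must name a fully instantiated list type. A minimal sketch of the two parameterizations, with toy types standing in for FreeChunk and AdaptiveFreeList; the portability motivation is an assumption here, though template template parameters were a common trouble spot for older C++ compilers.

    template <class C> struct List { C* head; };
    struct Chunk { int size; };

    // Old style: a template template parameter; the dictionary applies
    // ListT to its chunk type itself.
    template <class C, template <class> class ListT>
    struct OldDict { ListT<C> lists; };

    // New style: an ordinary class parameter; the caller supplies the
    // already-instantiated list type.
    template <class C, class ListT>
    struct NewDict { ListT lists; };

    int main() {
      OldDict<Chunk, List> a;          // old: pass the template itself
      NewDict<Chunk, List<Chunk> > b;  // new: pass the instantiated type
      (void)a; (void)b;
      return 0;
    }
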
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -3035,7 +3035,6 @@
                                 true,   // activate StrongRootsScope
                                 SharedHeap::ScanningOption(roots_scanning_options()),
                                 &notOlder,
-                                true,   // walk code active on stacks
                                 NULL,
                                 NULL); // SSS: Provide correct closure
 
@@ -3102,7 +3101,6 @@
                                 true,   // activate StrongRootsScope
                                 SharedHeap::ScanningOption(roots_scanning_options()),
                                 &notOlder,
-                                true,   // walk code active on stacks
                                 NULL,
                                 &klass_closure);
 
@@ -3680,12 +3678,6 @@
   ResourceMark rm;
   HandleMark  hm;
 
-  FalseClosure falseClosure;
-  // In the case of a synchronous collection, we will elide the
-  // remark step, so it's important to catch all the nmethod oops
-  // in this step.
-  // The final 'true' flag to gen_process_strong_roots will ensure this.
-  // If 'async' is true, we can relax the nmethod tracing.
   MarkRefsIntoClosure notOlder(_span, &_markBitMap);
   GenCollectedHeap* gch = GenCollectedHeap::heap();
 
@@ -3738,7 +3730,6 @@
                                     true,   // activate StrongRootsScope
                                     SharedHeap::ScanningOption(roots_scanning_options()),
                                     &notOlder,
-                                    true,   // walk all of code cache if (so & SO_AllCodeCache)
                                     NULL,
                                     &klass_closure);
     }
@@ -5237,7 +5228,6 @@
                                 false,     // this is parallel code
                                 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
                                 &par_mri_cl,
-                                true,   // walk all of code cache if (so & SO_AllCodeCache)
                                 NULL,
                                 &klass_closure);
   assert(_collector->should_unload_classes()
@@ -5373,7 +5363,6 @@
                                 false,     // this is parallel code
                                 SharedHeap::ScanningOption(_collector->CMSCollector::roots_scanning_options()),
                                 &par_mrias_cl,
-                                true,   // walk all of code cache if (so & SO_AllCodeCache)
                                 NULL,
                                 NULL);     // The dirty klasses will be handled below
   assert(_collector->should_unload_classes()
@@ -5963,7 +5952,6 @@
                                   false, // use the local StrongRootsScope
                                   SharedHeap::ScanningOption(roots_scanning_options()),
                                   &mrias_cl,
-                                  true,   // walk code active on stacks
                                   NULL,
                                   NULL);  // The dirty klasses will be handled below
 
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed Feb 26 02:38:46 2014 -0800
@@ -1383,13 +1383,6 @@
 // Closures of various sorts used by CMS to accomplish its work
 //
 
-// This closure is used to check that a certain set of oops is empty.
-class FalseClosure: public OopClosure {
- public:
-  void do_oop(oop* p)       { guarantee(false, "Should be an empty set"); }
-  void do_oop(narrowOop* p) { guarantee(false, "Should be an empty set"); }
-};
-
 // This closure is used to do concurrent marking from the roots
 // following the first checkpoint.
 class MarkFromRootsClosure: public BitMapClosure {
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -35,14 +35,6 @@
 #include "utilities/dtrace.hpp"
 
 
-#ifndef USDT2
-HS_DTRACE_PROBE_DECL(hs_private, cms__initmark__begin);
-HS_DTRACE_PROBE_DECL(hs_private, cms__initmark__end);
-
-HS_DTRACE_PROBE_DECL(hs_private, cms__remark__begin);
-HS_DTRACE_PROBE_DECL(hs_private, cms__remark__end);
-#endif /* !USDT2 */
-
 //////////////////////////////////////////////////////////
 // Methods in abstract class VM_CMS_Operation
 //////////////////////////////////////////////////////////
@@ -138,11 +130,7 @@
     // Nothing to do.
     return;
   }
-#ifndef USDT2
-  HS_DTRACE_PROBE(hs_private, cms__initmark__begin);
-#else /* USDT2 */
   HS_PRIVATE_CMS_INITMARK_BEGIN();
-#endif /* USDT2 */
 
   _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark");
 
@@ -158,11 +146,7 @@
 
   _collector->_gc_timer_cm->register_gc_pause_end();
 
-#ifndef USDT2
-  HS_DTRACE_PROBE(hs_private, cms__initmark__end);
-#else /* USDT2 */
   HS_PRIVATE_CMS_INITMARK_END();
-#endif /* USDT2 */
 }
 
 //////////////////////////////////////////////////////////
@@ -173,11 +157,7 @@
     // Nothing to do.
     return;
   }
-#ifndef USDT2
-  HS_DTRACE_PROBE(hs_private, cms__remark__begin);
-#else /* USDT2 */
   HS_PRIVATE_CMS_REMARK_BEGIN();
-#endif /* USDT2 */
 
   _collector->_gc_timer_cm->register_gc_pause_start("Final Mark");
 
@@ -194,11 +174,7 @@
   _collector->save_heap_summary();
   _collector->_gc_timer_cm->register_gc_pause_end();
 
-#ifndef USDT2
-  HS_DTRACE_PROBE(hs_private, cms__remark__end);
-#else /* USDT2 */
   HS_PRIVATE_CMS_REMARK_END();
-#endif /* USDT2 */
 }
 
 // VM operation to invoke a concurrent collection of a
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmStructs_cms.hpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmStructs_cms.hpp	Wed Feb 26 02:38:46 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,7 +41,7 @@
   nonstatic_field(LinearAllocBlock,            _word_size,                                    size_t)                                \
   nonstatic_field(AFLBinaryTreeDictionary,     _total_size,                                   size_t)                                \
   nonstatic_field(CompactibleFreeListSpace,    _dictionary,                                   AFLBinaryTreeDictionary*)              \
-  nonstatic_field(CompactibleFreeListSpace,    _indexedFreeList[0],                           FreeList<FreeChunk>)                   \
+  nonstatic_field(CompactibleFreeListSpace,    _indexedFreeList[0],                           AdaptiveFreeList<FreeChunk>)           \
   nonstatic_field(CompactibleFreeListSpace,    _smallLinearAllocBlock,                        LinearAllocBlock)
 
 
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -3394,13 +3394,12 @@
 
     if (!silent) { gclog_or_tty->print("Roots "); }
     VerifyRootsClosure rootsCl(vo);
-    G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
-    G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
     VerifyKlassClosure klassCl(this, &rootsCl);
 
     // We apply the relevant closures to all the oops in the
-    // system dictionary, the string table and the code cache.
-    const int so = SO_AllClasses | SO_Strings | SO_AllCodeCache;
+    // system dictionary, class loader data graph and the string table.
+    // Don't verify the code cache here, since it's verified below.
+    const int so = SO_AllClasses | SO_Strings;
 
     // Need cleared claim bits for the strong roots processing
     ClassLoaderDataGraph::clear_claimed_marks();
@@ -3408,10 +3407,14 @@
     process_strong_roots(true,      // activate StrongRootsScope
                          ScanningOption(so),  // roots scanning options
                          &rootsCl,
-                         &blobsCl,
                          &klassCl
                          );
 
+    // Verify the nmethods in the code cache.
+    G1VerifyCodeRootOopClosure codeRootsCl(this, &rootsCl, vo);
+    G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl);
+    CodeCache::blobs_do(&blobsCl);
+
     bool failures = rootsCl.failures() || codeRootsCl.failures();
 
     if (vo != VerifyOption_G1UseMarkWord) {
@@ -5115,12 +5118,9 @@
 
   BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
 
-  CodeBlobToOopClosure scan_code_roots(&buf_scan_non_heap_roots, true /* do_marking */);
-
   process_strong_roots(false, // no scoping; this is parallel code
                        so,
                        &buf_scan_non_heap_roots,
-                       &scan_code_roots,
                        scan_klasses
                        );
 
@@ -5180,12 +5180,6 @@
   _process_strong_tasks->all_tasks_completed();
 }
 
-void
-G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure) {
-  CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
-  SharedHeap::process_weak_roots(root_closure, &roots_in_blobs);
-}
-
 class G1StringSymbolTableUnlinkTask : public AbstractGangTask {
 private:
   BoolObjectClosure* _is_alive;
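
The verification change above pulls the code cache out of the strong-roots walk and checks it in a separate pass, handing a blob closure to CodeCache::blobs_do. A stripped-down sketch of that visitor pattern, with stand-in types rather than the real CodeBlobClosure/CodeCache API:

    #include <cstdio>
    #include <vector>

    struct CodeBlob { const char* name; };

    // Visitor interface, analogous to CodeBlobClosure.
    struct BlobClosure {
      virtual void do_blob(CodeBlob* b) = 0;
      virtual ~BlobClosure() {}
    };

    // Stand-in for CodeCache: owns blobs and applies a closure to each,
    // the way CodeCache::blobs_do(&blobsCl) does in the hunk above.
    struct ToyCodeCache {
      std::vector<CodeBlob> blobs;
      void blobs_do(BlobClosure* cl) {
        for (size_t i = 0; i < blobs.size(); i++) cl->do_blob(&blobs[i]);
      }
    };

    struct VerifyBlobClosure : BlobClosure {
      int failures;
      VerifyBlobClosure() : failures(0) {}
      virtual void do_blob(CodeBlob* b) {
        // Real code would verify each nmethod's embedded oops here.
        printf("verifying %s\n", b->name);
      }
    };

    int main() {
      ToyCodeCache cache;
      CodeBlob b1 = { "nmethod-1" }, b2 = { "nmethod-2" };
      cache.blobs.push_back(b1); cache.blobs.push_back(b2);
      VerifyBlobClosure v;
      cache.blobs_do(&v);   // separate pass, decoupled from root scanning
      return 0;
    }
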
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Feb 26 02:38:46 2014 -0800
@@ -833,11 +833,6 @@
                                G1KlassScanClosure* scan_klasses,
                                int worker_i);
 
-  // Apply "blk" to all the weak roots of the system.  These include
-  // JNI weak roots, the code cache, system dictionary, symbol table,
-  // string table, and referents of reachable weak refs.
-  void g1_process_weak_roots(OopClosure* root_closure);
-
   // Frees a non-humongous region by initializing its contents and
   // adding it to the free list that's passed as a parameter (this is
   // usually a local list which will be appended to the master free
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -133,7 +133,6 @@
   sh->process_strong_roots(true,  // activate StrongRootsScope
                            SharedHeap::SO_SystemClasses,
                            &GenMarkSweep::follow_root_closure,
-                           &GenMarkSweep::follow_code_root_closure,
                            &GenMarkSweep::follow_klass_closure);
 
   // Process reference objects found during marking
@@ -307,9 +306,8 @@
   ClassLoaderDataGraph::clear_claimed_marks();
 
   sh->process_strong_roots(true,  // activate StrongRootsScope
-                           SharedHeap::SO_AllClasses,
+                           SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache,
                            &GenMarkSweep::adjust_pointer_closure,
-                           NULL,  // do not touch code cache here
                            &GenMarkSweep::adjust_klass_closure);
 
   assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
@@ -317,7 +315,7 @@
 
   // Now adjust pointers in remaining weak roots.  (All of which should
   // have been cleared if they pointed to non-surviving objects.)
-  g1h->g1_process_weak_roots(&GenMarkSweep::adjust_pointer_closure);
+  sh->process_weak_roots(&GenMarkSweep::adjust_pointer_closure);
 
   GenMarkSweep::adjust_marks();
 
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -621,7 +621,6 @@
                                 false, // no scope; this is parallel code
                                 SharedHeap::ScanningOption(so),
                                 &par_scan_state.to_space_root_closure(),
-                                true,   // walk *all* scavengable nmethods
                                 &par_scan_state.older_gen_closure(),
                                 &klass_scan_closure);
   par_scan_state.end_strong_roots();
--- a/src/share/vm/gc_implementation/shared/markSweep.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/markSweep.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -47,7 +47,6 @@
 SerialOldTracer*        MarkSweep::_gc_tracer       = NULL;
 
 MarkSweep::FollowRootClosure  MarkSweep::follow_root_closure;
-CodeBlobToOopClosure MarkSweep::follow_code_root_closure(&MarkSweep::follow_root_closure, /*do_marking=*/ true);
 
 void MarkSweep::FollowRootClosure::do_oop(oop* p)       { follow_root(p); }
 void MarkSweep::FollowRootClosure::do_oop(narrowOop* p) { follow_root(p); }
--- a/src/share/vm/gc_implementation/shared/markSweep.hpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/markSweep.hpp	Wed Feb 26 02:38:46 2014 -0800
@@ -143,7 +143,6 @@
   // Public closures
   static IsAliveClosure       is_alive;
   static FollowRootClosure    follow_root_closure;
-  static CodeBlobToOopClosure follow_code_root_closure; // => follow_root_closure
   static MarkAndPushClosure   mark_and_push_closure;
   static FollowKlassClosure   follow_klass_closure;
   static FollowStackClosure   follow_stack_closure;
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -41,33 +41,18 @@
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #endif // INCLUDE_ALL_GCS
 
-#ifndef USDT2
-HS_DTRACE_PROBE_DECL1(hotspot, gc__begin, bool);
-HS_DTRACE_PROBE_DECL(hotspot, gc__end);
-#endif /* !USDT2 */
-
 // The same dtrace probe can't be inserted in two different files, so we
 // have to call it here to keep it in one file.  New probes can't be created
 // for the other file anymore; the dtrace probes have to remain stable.
 void VM_GC_Operation::notify_gc_begin(bool full) {
-#ifndef USDT2
-  HS_DTRACE_PROBE1(hotspot, gc__begin, full);
-  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
-#else /* USDT2 */
   HOTSPOT_GC_BEGIN(
                    full);
   HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
-#endif /* USDT2 */
 }
 
 void VM_GC_Operation::notify_gc_end() {
-#ifndef USDT2
-  HS_DTRACE_PROBE(hotspot, gc__end);
-  HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
-#else /* USDT2 */
   HOTSPOT_GC_END();
   HS_DTRACE_WORKAROUND_TAIL_CALL_BUG();
-#endif /* USDT2 */
 }
 
 void VM_GC_Operation::acquire_pending_list_lock() {
--- a/src/share/vm/interpreter/bytecodeInterpreter.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -415,10 +415,10 @@
  * On some architectures/platforms it should be possible to do this implicitly
  */
 #undef CHECK_NULL
-#define CHECK_NULL(obj_)                                                                       \
-        if ((obj_) == NULL) {                                                                  \
-          VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), "", note_nullCheck_trap); \
-        }                                                                                      \
+#define CHECK_NULL(obj_)                                                                         \
+        if ((obj_) == NULL) {                                                                    \
+          VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), NULL, note_nullCheck_trap); \
+        }                                                                                        \
         VERIFY_OOP(obj_)
 
 #define VMdoubleConstZero() 0.0
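
The CHECK_NULL hunk above changes the NullPointerException detail message from an empty string to NULL (no message) and realigns the macro's continuation backslashes. A self-contained toy version of such a guard macro, with a stub error routine in place of VM_JAVA_ERROR, which in the real interpreter raises the exception and jumps to the handler:

    #include <cstdio>
    #include <cstdlib>

    // Stub error path: a NULL detail means "no message", which is what
    // the hunk above now passes instead of an empty string.
    static void vm_java_error(const char* exception, const char* detail) {
      fprintf(stderr, "throw %s%s%s\n", exception,
              detail ? ": " : "", detail ? detail : "");
      exit(1);
    }

    #define CHECK_NULL(obj_)                                          \
      if ((obj_) == NULL) {                                           \
        vm_java_error("java/lang/NullPointerException", NULL);        \
      }

    int main() {
      int x = 42;
      int* p = &x;
      CHECK_NULL(p);          // non-NULL: falls through
      printf("%d\n", *p);
      CHECK_NULL((int*)NULL); // NULL: reports and exits
      return 0;
    }
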
--- a/src/share/vm/interpreter/bytecodeTracer.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/interpreter/bytecodeTracer.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -596,7 +596,7 @@
     if (data != NULL) {
       st->print("  %d", mdo->dp_to_di(data->dp()));
       st->fill_to(6);
-      data->print_data_on(st);
+      data->print_data_on(st, mdo);
     }
   }
 }
--- a/src/share/vm/memory/binaryTreeDictionary.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/memory/binaryTreeDictionary.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -44,16 +44,16 @@
 // This is currently used in the Concurrent Mark&Sweep implementation.
 ////////////////////////////////////////////////////////////////////////////////
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 size_t TreeChunk<Chunk_t, FreeList_t>::_min_tree_chunk_size = sizeof(TreeChunk<Chunk_t,  FreeList_t>)/HeapWordSize;
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 TreeChunk<Chunk_t, FreeList_t>* TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(Chunk_t* fc) {
   // Do some assertion checking here.
   return (TreeChunk<Chunk_t, FreeList_t>*) fc;
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void TreeChunk<Chunk_t, FreeList_t>::verify_tree_chunk_list() const {
   TreeChunk<Chunk_t, FreeList_t>* nextTC = (TreeChunk<Chunk_t, FreeList_t>*)next();
   if (prev() != NULL) { // interior list node shouldn't have tree fields
@@ -67,11 +67,11 @@
   }
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 TreeList<Chunk_t, FreeList_t>::TreeList() : _parent(NULL),
   _left(NULL), _right(NULL) {}
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 TreeList<Chunk_t, FreeList_t>*
 TreeList<Chunk_t, FreeList_t>::as_TreeList(TreeChunk<Chunk_t,FreeList_t>* tc) {
   // This first free chunk in the list will be the tree list.
@@ -88,20 +88,7 @@
   return tl;
 }
 
-
-template <class Chunk_t, template <class> class FreeList_t>
-TreeList<Chunk_t, FreeList_t>*
-get_chunk(size_t size, enum FreeBlockDictionary<Chunk_t>::Dither dither) {
-  FreeBlockDictionary<Chunk_t>::verify_par_locked();
-  Chunk_t* res = get_chunk_from_tree(size, dither);
-  assert(res == NULL || res->is_free(),
-         "Should be returning a free chunk");
-  assert(dither != FreeBlockDictionary<Chunk_t>::exactly ||
-         res->size() == size, "Not correct size");
-  return res;
-}
-
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 TreeList<Chunk_t, FreeList_t>*
 TreeList<Chunk_t, FreeList_t>::as_TreeList(HeapWord* addr, size_t size) {
   TreeChunk<Chunk_t, FreeList_t>* tc = (TreeChunk<Chunk_t, FreeList_t>*) addr;
@@ -125,17 +112,17 @@
 // an over populated size.  The general get_better_list() just returns
 // the current list.
 template <>
-TreeList<FreeChunk, AdaptiveFreeList>*
-TreeList<FreeChunk, AdaptiveFreeList>::get_better_list(
-  BinaryTreeDictionary<FreeChunk, ::AdaptiveFreeList>* dictionary) {
+TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >*
+TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >::get_better_list(
+  BinaryTreeDictionary<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* dictionary) {
   // A candidate chunk has been found.  If it is already under
   // populated, get a chunk associated with the hint for this
   // chunk.
 
-  TreeList<FreeChunk, ::AdaptiveFreeList>* curTL = this;
+  TreeList<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* curTL = this;
   if (surplus() <= 0) {
     /* Use the hint to find a size with a surplus, and reset the hint. */
-    TreeList<FreeChunk, ::AdaptiveFreeList>* hintTL = this;
+    TreeList<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* hintTL = this;
     while (hintTL->hint() != 0) {
       assert(hintTL->hint() > hintTL->size(),
         "hint points in the wrong direction");
@@ -163,14 +150,14 @@
 }
 #endif // INCLUDE_ALL_GCS
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 TreeList<Chunk_t, FreeList_t>*
 TreeList<Chunk_t, FreeList_t>::get_better_list(
   BinaryTreeDictionary<Chunk_t, FreeList_t>* dictionary) {
   return this;
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 TreeList<Chunk_t, FreeList_t>* TreeList<Chunk_t, FreeList_t>::remove_chunk_replace_if_needed(TreeChunk<Chunk_t, FreeList_t>* tc) {
 
   TreeList<Chunk_t, FreeList_t>* retTL = this;
@@ -286,7 +273,7 @@
   return retTL;
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void TreeList<Chunk_t, FreeList_t>::return_chunk_at_tail(TreeChunk<Chunk_t, FreeList_t>* chunk) {
   assert(chunk != NULL, "returning NULL chunk");
   assert(chunk->list() == this, "list should be set for chunk");
@@ -301,7 +288,7 @@
   this->link_tail(chunk);
 
   assert(!tail() || size() == tail()->size(), "Wrong sized chunk in list");
-  FreeList_t<Chunk_t>::increment_count();
+  FreeList_t::increment_count();
   debug_only(this->increment_returned_bytes_by(chunk->size()*sizeof(HeapWord));)
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
@@ -311,7 +298,7 @@
 // is defined to be after the chunk pointed to by head().  This is
 // because the TreeList<Chunk_t, FreeList_t> is embedded in the first TreeChunk<Chunk_t, FreeList_t> in the
 // list.  See the definition of TreeChunk<Chunk_t, FreeList_t>.
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void TreeList<Chunk_t, FreeList_t>::return_chunk_at_head(TreeChunk<Chunk_t, FreeList_t>* chunk) {
   assert(chunk->list() == this, "list should be set for chunk");
   assert(head() != NULL, "The tree list is embedded in the first chunk");
@@ -329,13 +316,13 @@
   }
   head()->link_after(chunk);
   assert(!head() || size() == head()->size(), "Wrong sized chunk in list");
-  FreeList_t<Chunk_t>::increment_count();
+  FreeList_t::increment_count();
   debug_only(this->increment_returned_bytes_by(chunk->size()*sizeof(HeapWord));)
   assert(head() == NULL || head()->prev() == NULL, "list invariant");
   assert(tail() == NULL || tail()->next() == NULL, "list invariant");
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void TreeChunk<Chunk_t, FreeList_t>::assert_is_mangled() const {
   assert((ZapUnusedHeapArea &&
           SpaceMangler::is_mangled((HeapWord*) Chunk_t::size_addr()) &&
@@ -345,14 +332,14 @@
     "Space should be clear or mangled");
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 TreeChunk<Chunk_t, FreeList_t>* TreeList<Chunk_t, FreeList_t>::head_as_TreeChunk() {
   assert(head() == NULL || (TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(head())->list() == this),
     "Wrong type of chunk?");
   return TreeChunk<Chunk_t, FreeList_t>::as_TreeChunk(head());
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 TreeChunk<Chunk_t, FreeList_t>* TreeList<Chunk_t, FreeList_t>::first_available() {
   assert(head() != NULL, "The head of the list cannot be NULL");
   Chunk_t* fc = head()->next();
@@ -369,7 +356,7 @@
 // Returns the block with the largest heap address amongst
 // those in the list for this size; potentially slow and expensive,
 // use with caution!
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 TreeChunk<Chunk_t, FreeList_t>* TreeList<Chunk_t, FreeList_t>::largest_address() {
   assert(head() != NULL, "The head of the list cannot be NULL");
   Chunk_t* fc = head()->next();
@@ -392,7 +379,7 @@
   return retTC;
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 BinaryTreeDictionary<Chunk_t, FreeList_t>::BinaryTreeDictionary(MemRegion mr) {
   assert((mr.byte_size() > min_size()), "minimum chunk size");
 
@@ -405,17 +392,17 @@
   assert(total_free_blocks() == 1, "reset check failed");
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::inc_total_size(size_t inc) {
   _total_size = _total_size + inc;
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::dec_total_size(size_t dec) {
   _total_size = _total_size - dec;
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::reset(MemRegion mr) {
   assert((mr.byte_size() > min_size()), "minimum chunk size");
   set_root(TreeList<Chunk_t, FreeList_t>::as_TreeList(mr.start(), mr.word_size()));
@@ -423,13 +410,13 @@
   set_total_free_blocks(1);
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::reset(HeapWord* addr, size_t byte_size) {
   MemRegion mr(addr, heap_word_size(byte_size));
   reset(mr);
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::reset() {
   set_root(NULL);
   set_total_size(0);
@@ -437,7 +424,7 @@
 }
 
 // Get a free block of size at least size from tree, or NULL.
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 TreeChunk<Chunk_t, FreeList_t>*
 BinaryTreeDictionary<Chunk_t, FreeList_t>::get_chunk_from_tree(
                               size_t size,
@@ -496,7 +483,7 @@
   return retTC;
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 TreeList<Chunk_t, FreeList_t>* BinaryTreeDictionary<Chunk_t, FreeList_t>::find_list(size_t size) const {
   TreeList<Chunk_t, FreeList_t>* curTL;
   for (curTL = root(); curTL != NULL;) {
@@ -515,7 +502,7 @@
 }
 
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 bool BinaryTreeDictionary<Chunk_t, FreeList_t>::verify_chunk_in_free_list(Chunk_t* tc) const {
   size_t size = tc->size();
   TreeList<Chunk_t, FreeList_t>* tl = find_list(size);
@@ -526,7 +513,7 @@
   }
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 Chunk_t* BinaryTreeDictionary<Chunk_t, FreeList_t>::find_largest_dict() const {
   TreeList<Chunk_t, FreeList_t> *curTL = root();
   if (curTL != NULL) {
@@ -541,7 +528,7 @@
 // chunk in a list on a tree node, just unlink it.
 // If it is the last chunk in the list (the next link is NULL),
 // remove the node and repair the tree.
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 TreeChunk<Chunk_t, FreeList_t>*
 BinaryTreeDictionary<Chunk_t, FreeList_t>::remove_chunk_from_tree(TreeChunk<Chunk_t, FreeList_t>* tc) {
   assert(tc != NULL, "Should not call with a NULL chunk");
@@ -682,7 +669,7 @@
 // Remove the leftmost node (lm) in the tree and return it.
 // If lm has a right child, link it to the left node of
 // the parent of lm.
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 TreeList<Chunk_t, FreeList_t>* BinaryTreeDictionary<Chunk_t, FreeList_t>::remove_tree_minimum(TreeList<Chunk_t, FreeList_t>* tl) {
   assert(tl != NULL && tl->parent() != NULL, "really need a proper sub-tree");
   // locate the subtree minimum by walking down left branches
@@ -717,7 +704,7 @@
   return curTL;
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::insert_chunk_in_tree(Chunk_t* fc) {
   TreeList<Chunk_t, FreeList_t> *curTL, *prevTL;
   size_t size = fc->size();
@@ -783,7 +770,7 @@
   }
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::max_chunk_size() const {
   FreeBlockDictionary<Chunk_t>::verify_par_locked();
   TreeList<Chunk_t, FreeList_t>* tc = root();
@@ -792,7 +779,7 @@
   return tc->size();
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_list_length(TreeList<Chunk_t, FreeList_t>* tl) const {
   size_t res;
   res = tl->count();
@@ -805,7 +792,7 @@
   return res;
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_size_in_tree(TreeList<Chunk_t, FreeList_t>* tl) const {
   if (tl == NULL)
     return 0;
@@ -814,7 +801,7 @@
          total_size_in_tree(tl->right());
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 double BinaryTreeDictionary<Chunk_t, FreeList_t>::sum_of_squared_block_sizes(TreeList<Chunk_t, FreeList_t>* const tl) const {
   if (tl == NULL) {
     return 0.0;
@@ -826,7 +813,7 @@
   return curr;
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_free_blocks_in_tree(TreeList<Chunk_t, FreeList_t>* tl) const {
   if (tl == NULL)
     return 0;
@@ -835,14 +822,14 @@
          total_free_blocks_in_tree(tl->right());
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::num_free_blocks() const {
   assert(total_free_blocks_in_tree(root()) == total_free_blocks(),
          "_total_free_blocks inconsistency");
   return total_free_blocks();
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::tree_height_helper(TreeList<Chunk_t, FreeList_t>* tl) const {
   if (tl == NULL)
     return 0;
@@ -850,12 +837,12 @@
                   tree_height_helper(tl->right()));
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::tree_height() const {
   return tree_height_helper(root());
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_nodes_helper(TreeList<Chunk_t, FreeList_t>* tl) const {
   if (tl == NULL) {
     return 0;
@@ -864,18 +851,18 @@
     total_nodes_helper(tl->right());
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_nodes_in_tree(TreeList<Chunk_t, FreeList_t>* tl) const {
   return total_nodes_helper(root());
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::dict_census_update(size_t size, bool split, bool birth){}
 
 #if INCLUDE_ALL_GCS
 template <>
-void AFLBinaryTreeDictionary::dict_census_update(size_t size, bool split, bool birth){
-  TreeList<FreeChunk, AdaptiveFreeList>* nd = find_list(size);
+void AFLBinaryTreeDictionary::dict_census_update(size_t size, bool split, bool birth) {
+  TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* nd = find_list(size);
   if (nd) {
     if (split) {
       if (birth) {
@@ -903,7 +890,7 @@
 }
 #endif // INCLUDE_ALL_GCS
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 bool BinaryTreeDictionary<Chunk_t, FreeList_t>::coal_dict_over_populated(size_t size) {
   // For the general type of freelists, encourage coalescing by
   // returning true.
@@ -915,7 +902,7 @@
 bool AFLBinaryTreeDictionary::coal_dict_over_populated(size_t size) {
   if (FLSAlwaysCoalesceLarge) return true;
 
-  TreeList<FreeChunk, AdaptiveFreeList>* list_of_size = find_list(size);
+  TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* list_of_size = find_list(size);
   // Absence of a list of the requested size implies it is overpopulated.
   return list_of_size == NULL || list_of_size->coal_desired() <= 0 ||
          list_of_size->count() > list_of_size->coal_desired();
@@ -928,15 +915,15 @@
 //   do_tree() walks the nodes in the binary tree applying do_list()
 //     to each list at each node.
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class TreeCensusClosure : public StackObj {
  protected:
-  virtual void do_list(FreeList_t<Chunk_t>* fl) = 0;
+  virtual void do_list(FreeList_t* fl) = 0;
  public:
   virtual void do_tree(TreeList<Chunk_t, FreeList_t>* tl) = 0;
 };
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class AscendTreeCensusClosure : public TreeCensusClosure<Chunk_t, FreeList_t> {
  public:
   void do_tree(TreeList<Chunk_t, FreeList_t>* tl) {
@@ -948,7 +935,7 @@
   }
 };
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class DescendTreeCensusClosure : public TreeCensusClosure<Chunk_t, FreeList_t> {
  public:
   void do_tree(TreeList<Chunk_t, FreeList_t>* tl) {
@@ -962,7 +949,7 @@
 
 // For each list in the tree, calculate the desired count, desired
 // coalesce count, count before sweep, and surplus before sweep.
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class BeginSweepClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
   double _percentage;
   float _inter_sweep_current;
@@ -995,16 +982,16 @@
 // Similar to TreeCensusClosure but searches the
 // tree and returns promptly when found.
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class TreeSearchClosure : public StackObj {
  protected:
-  virtual bool do_list(FreeList_t<Chunk_t>* fl) = 0;
+  virtual bool do_list(FreeList_t* fl) = 0;
  public:
   virtual bool do_tree(TreeList<Chunk_t, FreeList_t>* tl) = 0;
 };
 
 #if 0 //  Don't need this yet but here for symmetry.
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class AscendTreeSearchClosure : public TreeSearchClosure<Chunk_t> {
  public:
   bool do_tree(TreeList<Chunk_t, FreeList_t>* tl) {
@@ -1018,7 +1005,7 @@
 };
 #endif
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class DescendTreeSearchClosure : public TreeSearchClosure<Chunk_t, FreeList_t> {
  public:
   bool do_tree(TreeList<Chunk_t, FreeList_t>* tl) {
@@ -1033,14 +1020,14 @@
 
 // Searches the tree for a chunk that ends at the
 // specified address.
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class EndTreeSearchClosure : public DescendTreeSearchClosure<Chunk_t, FreeList_t> {
   HeapWord* _target;
   Chunk_t* _found;
 
  public:
   EndTreeSearchClosure(HeapWord* target) : _target(target), _found(NULL) {}
-  bool do_list(FreeList_t<Chunk_t>* fl) {
+  bool do_list(FreeList_t* fl) {
     Chunk_t* item = fl->head();
     while (item != NULL) {
       if (item->end() == (uintptr_t*) _target) {
@@ -1054,7 +1041,7 @@
   Chunk_t* found() { return _found; }
 };
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 Chunk_t* BinaryTreeDictionary<Chunk_t, FreeList_t>::find_chunk_ends_at(HeapWord* target) const {
   EndTreeSearchClosure<Chunk_t, FreeList_t> etsc(target);
   bool found_target = etsc.do_tree(root());
@@ -1063,7 +1050,7 @@
   return etsc.found();
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::begin_sweep_dict_census(double coalSurplusPercent,
   float inter_sweep_current, float inter_sweep_estimate, float intra_sweep_estimate) {
   BeginSweepClosure<Chunk_t, FreeList_t> bsc(coalSurplusPercent, inter_sweep_current,
@@ -1075,32 +1062,32 @@
 // Closures and methods for calculating total bytes returned to the
 // free lists in the tree.
 #ifndef PRODUCT
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class InitializeDictReturnedBytesClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
    public:
-  void do_list(FreeList_t<Chunk_t>* fl) {
+  void do_list(FreeList_t* fl) {
     fl->set_returned_bytes(0);
   }
 };
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::initialize_dict_returned_bytes() {
   InitializeDictReturnedBytesClosure<Chunk_t, FreeList_t> idrb;
   idrb.do_tree(root());
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class ReturnedBytesClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
   size_t _dict_returned_bytes;
  public:
   ReturnedBytesClosure() { _dict_returned_bytes = 0; }
-  void do_list(FreeList_t<Chunk_t>* fl) {
+  void do_list(FreeList_t* fl) {
     _dict_returned_bytes += fl->returned_bytes();
   }
   size_t dict_returned_bytes() { return _dict_returned_bytes; }
 };
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::sum_dict_returned_bytes() {
   ReturnedBytesClosure<Chunk_t, FreeList_t> rbc;
   rbc.do_tree(root());
@@ -1109,17 +1096,17 @@
 }
 
 // Count the number of entries in the tree.
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class treeCountClosure : public DescendTreeCensusClosure<Chunk_t, FreeList_t> {
  public:
   uint count;
   treeCountClosure(uint c) { count = c; }
-  void do_list(FreeList_t<Chunk_t>* fl) {
+  void do_list(FreeList_t* fl) {
     count++;
   }
 };
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::total_count() {
   treeCountClosure<Chunk_t, FreeList_t> ctc(0);
   ctc.do_tree(root());
@@ -1128,7 +1115,7 @@
 #endif // PRODUCT
 
 // Calculate surpluses for the lists in the tree.
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class setTreeSurplusClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
   double percentage;
  public:
@@ -1144,14 +1131,14 @@
 #endif // INCLUDE_ALL_GCS
 };
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::set_tree_surplus(double splitSurplusPercent) {
   setTreeSurplusClosure<Chunk_t, FreeList_t> sts(splitSurplusPercent);
   sts.do_tree(root());
 }
 
 // Set hints for the lists in the tree.
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class setTreeHintsClosure : public DescendTreeCensusClosure<Chunk_t, FreeList_t> {
   size_t hint;
  public:
@@ -1170,14 +1157,14 @@
 #endif // INCLUDE_ALL_GCS
 };
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::set_tree_hints(void) {
   setTreeHintsClosure<Chunk_t, FreeList_t> sth(0);
   sth.do_tree(root());
 }
 
 // Save count before previous sweep and splits and coalesces.
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class clearTreeCensusClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
   void do_list(FreeList<Chunk_t>* fl) {}
 
@@ -1192,14 +1179,14 @@
 #endif // INCLUDE_ALL_GCS
 };
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::clear_tree_census(void) {
   clearTreeCensusClosure<Chunk_t, FreeList_t> ctc;
   ctc.do_tree(root());
 }
 
 // Do reporting and post-sweep cleanup.
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::end_sweep_dict_census(double splitSurplusPercent) {
   // Does walking the tree 3 times hurt?
   set_tree_surplus(splitSurplusPercent);
@@ -1211,7 +1198,7 @@
 }
 
 // Print summary statistics
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::report_statistics() const {
   FreeBlockDictionary<Chunk_t>::verify_par_locked();
   gclog_or_tty->print("Statistics for BinaryTreeDictionary:\n"
@@ -1230,22 +1217,22 @@
 // Print census information - counts, births, deaths, etc.
 // for each list in the tree.  Also print some summary
 // information.
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class PrintTreeCensusClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
   int _print_line;
   size_t _total_free;
-  FreeList_t<Chunk_t> _total;
+  FreeList_t _total;
 
  public:
   PrintTreeCensusClosure() {
     _print_line = 0;
     _total_free = 0;
   }
-  FreeList_t<Chunk_t>* total() { return &_total; }
+  FreeList_t* total() { return &_total; }
   size_t total_free() { return _total_free; }
   void do_list(FreeList<Chunk_t>* fl) {
     if (++_print_line >= 40) {
-      FreeList_t<Chunk_t>::print_labels_on(gclog_or_tty, "size");
+      FreeList_t::print_labels_on(gclog_or_tty, "size");
       _print_line = 0;
     }
     fl->print_on(gclog_or_tty);
@@ -1256,7 +1243,7 @@
 #if INCLUDE_ALL_GCS
   void do_list(AdaptiveFreeList<Chunk_t>* fl) {
     if (++_print_line >= 40) {
-      FreeList_t<Chunk_t>::print_labels_on(gclog_or_tty, "size");
+      FreeList_t::print_labels_on(gclog_or_tty, "size");
       _print_line = 0;
     }
     fl->print_on(gclog_or_tty);
@@ -1275,16 +1262,16 @@
 #endif // INCLUDE_ALL_GCS
 };
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::print_dict_census(void) const {
 
   gclog_or_tty->print("\nBinaryTree\n");
-  FreeList_t<Chunk_t>::print_labels_on(gclog_or_tty, "size");
+  FreeList_t::print_labels_on(gclog_or_tty, "size");
   PrintTreeCensusClosure<Chunk_t, FreeList_t> ptc;
   ptc.do_tree(root());
 
-  FreeList_t<Chunk_t>* total = ptc.total();
-  FreeList_t<Chunk_t>::print_labels_on(gclog_or_tty, " ");
+  FreeList_t* total = ptc.total();
+  FreeList_t::print_labels_on(gclog_or_tty, " ");
 }
 
 #if INCLUDE_ALL_GCS
@@ -1293,7 +1280,7 @@
 
   gclog_or_tty->print("\nBinaryTree\n");
   AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
-  PrintTreeCensusClosure<FreeChunk, AdaptiveFreeList> ptc;
+  PrintTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > ptc;
   ptc.do_tree(root());
 
   AdaptiveFreeList<FreeChunk>* total = ptc.total();
@@ -1311,7 +1298,7 @@
 }
 #endif // INCLUDE_ALL_GCS
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class PrintFreeListsClosure : public AscendTreeCensusClosure<Chunk_t, FreeList_t> {
   outputStream* _st;
   int _print_line;
@@ -1321,9 +1308,9 @@
     _st = st;
     _print_line = 0;
   }
-  void do_list(FreeList_t<Chunk_t>* fl) {
+  void do_list(FreeList_t* fl) {
     if (++_print_line >= 40) {
-      FreeList_t<Chunk_t>::print_labels_on(_st, "size");
+      FreeList_t::print_labels_on(_st, "size");
       _print_line = 0;
     }
     fl->print_on(gclog_or_tty);
@@ -1337,10 +1324,10 @@
   }
 };
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::print_free_lists(outputStream* st) const {
 
-  FreeList_t<Chunk_t>::print_labels_on(st, "size");
+  FreeList_t::print_labels_on(st, "size");
   PrintFreeListsClosure<Chunk_t, FreeList_t> pflc(st);
   pflc.do_tree(root());
 }
@@ -1349,7 +1336,7 @@
 // . _root has no parent
 // . parent and child point to each other
 // . each node's key is correctly related to that of its child(ren)
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::verify_tree() const {
   guarantee(root() == NULL || total_free_blocks() == 0 ||
     total_size() != 0, "_total_size shouldn't be 0?");
@@ -1357,7 +1344,7 @@
   verify_tree_helper(root());
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 size_t BinaryTreeDictionary<Chunk_t, FreeList_t>::verify_prev_free_ptrs(TreeList<Chunk_t, FreeList_t>* tl) {
   size_t ct = 0;
   for (Chunk_t* curFC = tl->head(); curFC != NULL; curFC = curFC->next()) {
@@ -1371,7 +1358,7 @@
 // Note: this helper is recursive rather than iterative, so use with
 // caution on very deep trees, and watch out for stack overflow errors.
 // In general, to be used only for debugging.
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::verify_tree_helper(TreeList<Chunk_t, FreeList_t>* tl) const {
   if (tl == NULL)
     return;
@@ -1400,25 +1387,25 @@
   verify_tree_helper(tl->right());
 }
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 void BinaryTreeDictionary<Chunk_t, FreeList_t>::verify() const {
   verify_tree();
   guarantee(total_size() == total_size_in_tree(root()), "Total Size inconsistency");
 }
 
-template class TreeList<Metablock, FreeList>;
-template class BinaryTreeDictionary<Metablock, FreeList>;
-template class TreeChunk<Metablock, FreeList>;
+template class TreeList<Metablock, FreeList<Metablock> >;
+template class BinaryTreeDictionary<Metablock, FreeList<Metablock> >;
+template class TreeChunk<Metablock, FreeList<Metablock> >;
 
-template class TreeList<Metachunk, FreeList>;
-template class BinaryTreeDictionary<Metachunk, FreeList>;
-template class TreeChunk<Metachunk, FreeList>;
+template class TreeList<Metachunk, FreeList<Metachunk> >;
+template class BinaryTreeDictionary<Metachunk, FreeList<Metachunk> >;
+template class TreeChunk<Metachunk, FreeList<Metachunk> >;
 
 
 #if INCLUDE_ALL_GCS
 // Explicitly instantiate these types for FreeChunk.
-template class TreeList<FreeChunk, AdaptiveFreeList>;
-template class BinaryTreeDictionary<FreeChunk, AdaptiveFreeList>;
-template class TreeChunk<FreeChunk, AdaptiveFreeList>;
+template class TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >;
+template class BinaryTreeDictionary<FreeChunk, AdaptiveFreeList<FreeChunk> >;
+template class TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >;
 
 #endif // INCLUDE_ALL_GCS
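
The rewritten explicit instantiations at the end of this file spell out the full list type, e.g. FreeList<Metablock>. The trailing "> >" is deliberate: in pre-C++11 code, ">>" closing a nested template-id lexes as the right-shift operator, so the space is required. A compilable toy example of the same instantiation shape:

    // Toy types; not the HotSpot classes.
    template <class T> struct FL { T* head; };
    template <class C, class L> struct Tree { L list; };
    struct Block { int size; };

    // Explicit instantiation with the fully-applied list type, mirroring
    // "template class BinaryTreeDictionary<Metablock, FreeList<Metablock> >;"
    template struct Tree<Block, FL<Block> >;  // note the "> >" spacing

    int main() { return 0; }
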
--- a/src/share/vm/memory/binaryTreeDictionary.hpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/memory/binaryTreeDictionary.hpp	Wed Feb 26 02:38:46 2014 -0800
@@ -37,18 +37,18 @@
 // A TreeList is a FreeList which can be used to maintain a
 // binary tree of free lists.
 
-template <class Chunk_t, template <class> class FreeList_t> class TreeChunk;
-template <class Chunk_t, template <class> class FreeList_t> class BinaryTreeDictionary;
-template <class Chunk_t, template <class> class FreeList_t> class AscendTreeCensusClosure;
-template <class Chunk_t, template <class> class FreeList_t> class DescendTreeCensusClosure;
-template <class Chunk_t, template <class> class FreeList_t> class DescendTreeSearchClosure;
+template <class Chunk_t, class FreeList_t> class TreeChunk;
+template <class Chunk_t, class FreeList_t> class BinaryTreeDictionary;
+template <class Chunk_t, class FreeList_t> class AscendTreeCensusClosure;
+template <class Chunk_t, class FreeList_t> class DescendTreeCensusClosure;
+template <class Chunk_t, class FreeList_t> class DescendTreeSearchClosure;
 
 class FreeChunk;
 template <class> class AdaptiveFreeList;
-typedef BinaryTreeDictionary<FreeChunk, AdaptiveFreeList> AFLBinaryTreeDictionary;
+typedef BinaryTreeDictionary<FreeChunk, AdaptiveFreeList<FreeChunk> > AFLBinaryTreeDictionary;
 
-template <class Chunk_t, template <class> class FreeList_t>
-class TreeList : public FreeList_t<Chunk_t> {
+template <class Chunk_t, class FreeList_t>
+class TreeList : public FreeList_t {
   friend class TreeChunk<Chunk_t, FreeList_t>;
   friend class BinaryTreeDictionary<Chunk_t, FreeList_t>;
   friend class AscendTreeCensusClosure<Chunk_t, FreeList_t>;
@@ -66,12 +66,12 @@
   TreeList<Chunk_t, FreeList_t>* right()  const { return _right;  }
 
   // Wrapper on call to base class, to get the template to compile.
-  Chunk_t* head() const { return FreeList_t<Chunk_t>::head(); }
-  Chunk_t* tail() const { return FreeList_t<Chunk_t>::tail(); }
-  void set_head(Chunk_t* head) { FreeList_t<Chunk_t>::set_head(head); }
-  void set_tail(Chunk_t* tail) { FreeList_t<Chunk_t>::set_tail(tail); }
+  Chunk_t* head() const { return FreeList_t::head(); }
+  Chunk_t* tail() const { return FreeList_t::tail(); }
+  void set_head(Chunk_t* head) { FreeList_t::set_head(head); }
+  void set_tail(Chunk_t* tail) { FreeList_t::set_tail(tail); }
 
-  size_t size() const { return FreeList_t<Chunk_t>::size(); }
+  size_t size() const { return FreeList_t::size(); }
 
   // Accessors for links in tree.
 
@@ -90,7 +90,7 @@
   void clear_left()               { _left = NULL;   }
   void clear_right()              { _right = NULL;  }
   void clear_parent()             { _parent = NULL; }
-  void initialize()               { clear_left(); clear_right(), clear_parent(); FreeList_t<Chunk_t>::initialize(); }
+  void initialize()               { clear_left(); clear_right(), clear_parent(); FreeList_t::initialize(); }
 
   // For constructing a TreeList from a Tree chunk or
   // address and size.
@@ -139,7 +139,7 @@
 // on the free list for a node in the tree and is only removed if
 // it is the last chunk on the free list.
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class TreeChunk : public Chunk_t {
   friend class TreeList<Chunk_t, FreeList_t>;
   TreeList<Chunk_t, FreeList_t>* _list;
@@ -173,7 +173,7 @@
 };
 
 
-template <class Chunk_t, template <class> class FreeList_t>
+template <class Chunk_t, class FreeList_t>
 class BinaryTreeDictionary: public FreeBlockDictionary<Chunk_t> {
   friend class VMStructs;
   size_t     _total_size;
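
The hunks above drop the template-template parameter (`template <class> class FreeList_t`) in favor of a plain class parameter, so every use site now names a fully instantiated list type such as `AdaptiveFreeList<FreeChunk>`, and the wrappers call `FreeList_t::head()` instead of `FreeList_t<Chunk_t>::head()`. A minimal, self-contained sketch of the same refactoring pattern, using hypothetical Chunk/FreeList types rather than the HotSpot ones:

    #include <cstddef>

    struct Chunk { Chunk* next; };

    // Stand-in for HotSpot's FreeList<Chunk_t>.
    template <class Chunk_t>
    class FreeList {
      Chunk_t* _head;
     public:
      FreeList() : _head(NULL) {}
      Chunk_t* head() const { return _head; }
    };

    // Before: template-template parameter; TreeList instantiates the list itself.
    template <class Chunk_t, template <class> class FreeList_t>
    class OldTreeList : public FreeList_t<Chunk_t> {
     public:
      Chunk_t* head() const { return FreeList_t<Chunk_t>::head(); }
    };

    // After: plain class parameter; callers pass FreeList<Chunk> already applied.
    template <class Chunk_t, class FreeList_t>
    class NewTreeList : public FreeList_t {
     public:
      Chunk_t* head() const { return FreeList_t::head(); }
    };

    int main() {
      OldTreeList<Chunk, FreeList> a;          // old spelling
      NewTreeList<Chunk, FreeList<Chunk> > b;  // new spelling, as in the typedefs above
      return (a.head() == NULL && b.head() == NULL) ? 0 : 1;
    }

The plain-class form spells the element type twice at each use site, but inside the template the base class and its members no longer need the `FreeList_t<Chunk_t>` qualification.
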
--- a/src/share/vm/memory/defNewGeneration.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/memory/defNewGeneration.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -626,7 +626,6 @@
                                 true,  // activate StrongRootsScope
                                 SharedHeap::ScanningOption(so),
                                 &fsc_with_no_gc_barrier,
-                                true,   // walk *all* scavengable nmethods
                                 &fsc_with_gc_barrier,
                                 &klass_scan_closure);
 
--- a/src/share/vm/memory/genCollectedHeap.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/memory/genCollectedHeap.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -594,20 +594,12 @@
                          bool activate_scope,
                          SharedHeap::ScanningOption so,
                          OopsInGenClosure* not_older_gens,
-                         bool do_code_roots,
                          OopsInGenClosure* older_gens,
                          KlassClosure* klass_closure) {
   // General strong roots.
 
-  if (!do_code_roots) {
-    SharedHeap::process_strong_roots(activate_scope, so,
-                                     not_older_gens, NULL, klass_closure);
-  } else {
-    bool do_code_marking = (activate_scope || nmethod::oops_do_marking_is_active());
-    CodeBlobToOopClosure code_roots(not_older_gens, /*do_marking=*/ do_code_marking);
-    SharedHeap::process_strong_roots(activate_scope, so,
-                                     not_older_gens, &code_roots, klass_closure);
-  }
+  SharedHeap::process_strong_roots(activate_scope, so,
+                                   not_older_gens, klass_closure);
 
   if (younger_gens_as_roots) {
     if (!_gen_process_strong_tasks->is_task_claimed(GCH_PS_younger_gens)) {
@@ -629,9 +621,8 @@
   _gen_process_strong_tasks->all_tasks_completed();
 }
 
-void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure,
-                                              CodeBlobClosure* code_roots) {
-  SharedHeap::process_weak_roots(root_closure, code_roots);
+void GenCollectedHeap::gen_process_weak_roots(OopClosure* root_closure) {
+  SharedHeap::process_weak_roots(root_closure);
   // "Local" "weak" refs
   for (int i = 0; i < _n_gens; i++) {
     _gens[i]->ref_processor()->weak_oops_do(root_closure);
--- a/src/share/vm/memory/genCollectedHeap.hpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/memory/genCollectedHeap.hpp	Wed Feb 26 02:38:46 2014 -0800
@@ -414,15 +414,13 @@
                                 bool activate_scope,
                                 SharedHeap::ScanningOption so,
                                 OopsInGenClosure* not_older_gens,
-                                bool do_code_roots,
                                 OopsInGenClosure* older_gens,
                                 KlassClosure* klass_closure);
 
-  // Apply "blk" to all the weak roots of the system.  These include
-  // JNI weak roots, the code cache, system dictionary, symbol table,
-  // string table, and referents of reachable weak refs.
-  void gen_process_weak_roots(OopClosure* root_closure,
-                              CodeBlobClosure* code_roots);
+  // Apply "root_closure" to all the weak roots of the system.
+  // These include JNI weak roots, string table,
+  // and referents of reachable weak refs.
+  void gen_process_weak_roots(OopClosure* root_closure);
 
   // Set the saved marks of generations, if that makes sense.
   // In particular, if any generation might iterate over the oops
--- a/src/share/vm/memory/genMarkSweep.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/memory/genMarkSweep.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -212,7 +212,6 @@
                                 true,  // activate StrongRootsScope
                                 SharedHeap::SO_SystemClasses,
                                 &follow_root_closure,
-                                true,   // walk code active on stacks
                                 &follow_root_closure,
                                 &follow_klass_closure);
 
@@ -295,18 +294,12 @@
   gch->gen_process_strong_roots(level,
                                 false, // Younger gens are not roots.
                                 true,  // activate StrongRootsScope
-                                SharedHeap::SO_AllClasses,
+                                SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache,
                                 &adjust_pointer_closure,
-                                false, // do not walk code
                                 &adjust_pointer_closure,
                                 &adjust_klass_closure);
 
-  // Now adjust pointers in remaining weak roots.  (All of which should
-  // have been cleared if they pointed to non-surviving objects.)
-  CodeBlobToOopClosure adjust_code_pointer_closure(&adjust_pointer_closure,
-                                                   /*do_marking=*/ false);
-  gch->gen_process_weak_roots(&adjust_pointer_closure,
-                              &adjust_code_pointer_closure);
+  gch->gen_process_weak_roots(&adjust_pointer_closure);
 
   adjust_marks();
   GenAdjustPointersClosure blk;
--- a/src/share/vm/memory/metaspace.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/memory/metaspace.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -46,8 +46,8 @@
 #include "utilities/copy.hpp"
 #include "utilities/debug.hpp"
 
-typedef BinaryTreeDictionary<Metablock, FreeList> BlockTreeDictionary;
-typedef BinaryTreeDictionary<Metachunk, FreeList> ChunkTreeDictionary;
+typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary;
+typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary;
 
 // Set this constant to enable slow integrity checking of the free chunk lists
 const bool metaspace_slow_verify = false;
@@ -790,7 +790,7 @@
     return NULL;
   }
 
-  if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
+  if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
     // Dark matter.  Too small for dictionary.
     return NULL;
   }
@@ -810,7 +810,7 @@
   MetaWord* new_block = (MetaWord*)free_block;
   assert(block_size >= word_size, "Incorrect size of block from freelist");
   const size_t unused = block_size - word_size;
-  if (unused >= TreeChunk<Metablock, FreeList>::min_size()) {
+  if (unused >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
     return_block(new_block + word_size, unused);
   }
 
@@ -2240,7 +2240,7 @@
 void SpaceManager::deallocate(MetaWord* p, size_t word_size) {
   assert_lock_strong(_lock);
   size_t raw_word_size = get_raw_word_size(word_size);
-  size_t min_size = TreeChunk<Metablock, FreeList>::min_size();
+  size_t min_size = TreeChunk<Metablock, FreeList<Metablock> >::min_size();
   assert(raw_word_size >= min_size,
          err_msg("Should not deallocate dark matter " SIZE_FORMAT "<" SIZE_FORMAT, word_size, min_size));
   block_freelists()->return_block(p, raw_word_size);
@@ -2296,7 +2296,7 @@
 void SpaceManager::retire_current_chunk() {
   if (current_chunk() != NULL) {
     size_t remaining_words = current_chunk()->free_word_size();
-    if (remaining_words >= TreeChunk<Metablock, FreeList>::min_size()) {
+    if (remaining_words >= TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
       block_freelists()->return_block(current_chunk()->allocate(remaining_words), remaining_words);
       inc_used_metrics(remaining_words);
     }
@@ -3279,7 +3279,7 @@
     assert(Thread::current()->is_VM_thread(), "should be the VM thread");
     // Don't take Heap_lock
     MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
-    if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
+    if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
       // Dark matter.  Too small for dictionary.
 #ifdef ASSERT
       Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
@@ -3294,7 +3294,7 @@
   } else {
     MutexLockerEx ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);
 
-    if (word_size < TreeChunk<Metablock, FreeList>::min_size()) {
+    if (word_size < TreeChunk<Metablock, FreeList<Metablock> >::min_size()) {
       // Dark matter.  Too small for dictionary.
 #ifdef ASSERT
       Copy::fill_to_words((HeapWord*)ptr, word_size, 0xf5f5f5f5);
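
These metaspace hunks only respell the second template argument (`FreeList` becomes `FreeList<Metablock>` or `FreeList<Metachunk>`) to match the new BinaryTreeDictionary signature; the allocation logic around them is unchanged. That logic is worth noting: a block taken from the dictionary is split, and the unused tail goes back to the free lists only when it is at least `TreeChunk::min_size()` words, while anything smaller is "dark matter" that cannot be tracked. A sketch of that split decision, with a hypothetical minimum size:

    #include <cstdio>

    // Hypothetical stand-in for TreeChunk<Metablock, FreeList<Metablock> >::min_size().
    const unsigned long MIN_CHUNK_WORDS = 4;

    // Decide how much of a 'block_size'-word free block is consumed by a
    // 'word_size'-word request; '*returned' receives the reusable tail.
    unsigned long split_block(unsigned long block_size, unsigned long word_size,
                              unsigned long* returned) {
      unsigned long unused = block_size - word_size;
      if (unused >= MIN_CHUNK_WORDS) {
        *returned = unused;   // big enough to track on a free list
        return word_size;
      }
      *returned = 0;          // too small: "dark matter" stays with the allocation
      return block_size;
    }

    int main() {
      unsigned long tail = 0;
      unsigned long used = split_block(16, 10, &tail);
      printf("%lu consumed, %lu returned\n", used, tail);  // 10 consumed, 6 returned
      used = split_block(12, 10, &tail);
      printf("%lu consumed, %lu returned\n", used, tail);  // 12 consumed, 0 returned
      return 0;
    }
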
--- a/src/share/vm/memory/sharedHeap.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/memory/sharedHeap.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -139,7 +139,6 @@
 void SharedHeap::process_strong_roots(bool activate_scope,
                                       ScanningOption so,
                                       OopClosure* roots,
-                                      CodeBlobClosure* code_roots,
                                       KlassClosure* klass_closure) {
   StrongRootsScope srs(this, activate_scope);
 
@@ -156,15 +155,17 @@
   if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
     JNIHandles::oops_do(roots);
 
+  CodeBlobToOopClosure code_roots(roots, true);
+
   CLDToOopClosure roots_from_clds(roots);
   // If we limit class scanning to SO_SystemClasses we need to apply a CLD closure to
   // CLDs which are strongly reachable from the thread stacks.
   CLDToOopClosure* roots_from_clds_p = ((so & SO_SystemClasses) ? &roots_from_clds : NULL);
   // All threads execute this; the individual threads are task groups.
   if (CollectedHeap::use_parallel_gc_threads()) {
-    Threads::possibly_parallel_oops_do(roots, roots_from_clds_p, code_roots);
+    Threads::possibly_parallel_oops_do(roots, roots_from_clds_p, &code_roots);
   } else {
-    Threads::oops_do(roots, roots_from_clds_p, code_roots);
+    Threads::oops_do(roots, roots_from_clds_p, &code_roots);
   }
 
   if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
@@ -206,17 +207,17 @@
 
   if (!_process_strong_tasks->is_task_claimed(SH_PS_CodeCache_oops_do)) {
     if (so & SO_ScavengeCodeCache) {
-      assert(code_roots != NULL, "must supply closure for code cache");
+      // code_roots is stack-allocated above, so a closure is always supplied.
 
       // We only visit parts of the CodeCache when scavenging.
-      CodeCache::scavenge_root_nmethods_do(code_roots);
+      CodeCache::scavenge_root_nmethods_do(&code_roots);
     }
     if (so & SO_AllCodeCache) {
-      assert(code_roots != NULL, "must supply closure for code cache");
+      // code_roots is stack-allocated above, so a closure is always supplied.
 
       // CMSCollector uses this to do intermediate-strength collections.
       // We scan the entire code cache, since CodeCache::do_unloading is not called.
-      CodeCache::blobs_do(code_roots);
+      CodeCache::blobs_do(&code_roots);
     }
     // Verify that the code cache contents are not subject to
     // movement by a scavenging collection.
@@ -233,13 +234,9 @@
 };
 static AlwaysTrueClosure always_true;
 
-void SharedHeap::process_weak_roots(OopClosure* root_closure,
-                                    CodeBlobClosure* code_roots) {
+void SharedHeap::process_weak_roots(OopClosure* root_closure) {
   // Global (weak) JNI handles
   JNIHandles::weak_oops_do(&always_true, root_closure);
-
-  CodeCache::blobs_do(code_roots);
-  StringTable::oops_do(root_closure);
 }
 
 void SharedHeap::set_barrier_set(BarrierSet* bs) {
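
With the `code_roots` parameter removed, `process_strong_roots` now builds the `CodeBlobToOopClosure` on its own stack frame and hands its address to the thread walkers and the code cache, so the closure can no longer be NULL and the old NULL checks became redundant. A reduced sketch of the ownership move, with hypothetical closure types standing in for the HotSpot ones:

    #include <cstdio>

    // Hypothetical stand-ins for HotSpot's closure types.
    struct OopClosure { const char* name; };

    struct CodeBlobToOopClosureSketch {
      OopClosure* _oops;
      bool        _do_marking;
      CodeBlobToOopClosureSketch(OopClosure* oops, bool do_marking)
        : _oops(oops), _do_marking(do_marking) {}
    };

    static void walk_scavengable_nmethods(CodeBlobToOopClosureSketch* blk) {
      // In HotSpot this would visit nmethods; here we just show the wiring.
      printf("walking code roots for closure '%s'\n", blk->_oops->name);
    }

    // After the change, the root processor owns the code-roots closure:
    // callers pass only the oop closure, and the wrapper lives on this frame.
    static void process_strong_roots_sketch(OopClosure* roots) {
      CodeBlobToOopClosureSketch code_roots(roots, /*do_marking=*/ true);
      walk_scavengable_nmethods(&code_roots);
    }

    int main() {
      OopClosure roots = { "adjust_pointer_closure" };
      process_strong_roots_sketch(&roots);
      return 0;
    }
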
--- a/src/share/vm/memory/sharedHeap.hpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/memory/sharedHeap.hpp	Wed Feb 26 02:38:46 2014 -0800
@@ -238,14 +238,10 @@
   void process_strong_roots(bool activate_scope,
                             ScanningOption so,
                             OopClosure* roots,
-                            CodeBlobClosure* code_roots,
                             KlassClosure* klass_closure);
 
-  // Apply "blk" to all the weak roots of the system.  These include
-  // JNI weak roots, the code cache, system dictionary, symbol table,
-  // string table.
-  void process_weak_roots(OopClosure* root_closure,
-                          CodeBlobClosure* code_roots);
+  // Apply "root_closure" to the JNI weak roots.
+  void process_weak_roots(OopClosure* root_closure);
 
   // The functions below are helper functions that a subclass of
   // "SharedHeap" can use in the implementation of its virtual
@@ -275,4 +271,8 @@
                              size_t capacity);
 };
 
+inline SharedHeap::ScanningOption operator|(SharedHeap::ScanningOption so0, SharedHeap::ScanningOption so1) {
+  return static_cast<SharedHeap::ScanningOption>(static_cast<int>(so0) | static_cast<int>(so1));
+}
+
 #endif // SHARE_VM_MEMORY_SHAREDHEAP_HPP
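
The new inline `operator|` exists so call sites can combine `ScanningOption` flags without casting through `int` by hand, as genMarkSweep.cpp now does with `SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_AllCodeCache`. The idiom in isolation (the enum values here are hypothetical, for the sketch only):

    #include <cassert>

    struct SharedHeapSketch {
      enum ScanningOption {        // hypothetical bit values, for the sketch only
        SO_None          = 0x0,
        SO_AllClasses    = 0x1,
        SO_SystemClasses = 0x2,
        SO_Strings       = 0x4,
        SO_AllCodeCache  = 0x8
      };
    };

    // Same shape as the inline operator added above: fold through int and
    // cast back, so enum | enum stays a ScanningOption instead of decaying to int.
    inline SharedHeapSketch::ScanningOption operator|(SharedHeapSketch::ScanningOption so0,
                                                      SharedHeapSketch::ScanningOption so1) {
      return static_cast<SharedHeapSketch::ScanningOption>(
          static_cast<int>(so0) | static_cast<int>(so1));
    }

    int main() {
      SharedHeapSketch::ScanningOption so =
          SharedHeapSketch::SO_AllClasses | SharedHeapSketch::SO_Strings;
      assert((so & SharedHeapSketch::SO_Strings) != 0);
      return 0;
    }
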
--- a/src/share/vm/oops/arrayKlass.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/oops/arrayKlass.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -214,8 +214,8 @@
 
 // Verification
 
-void ArrayKlass::verify_on(outputStream* st, bool check_dictionary) {
-  Klass::verify_on(st, check_dictionary);
+void ArrayKlass::verify_on(outputStream* st) {
+  Klass::verify_on(st);
 
   if (component_mirror() != NULL) {
     guarantee(component_mirror()->klass() != NULL, "should have a class");
--- a/src/share/vm/oops/arrayKlass.hpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/oops/arrayKlass.hpp	Wed Feb 26 02:38:46 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -146,7 +146,7 @@
   void oop_print_on(oop obj, outputStream* st);
 
   // Verification
-  void verify_on(outputStream* st, bool check_dictionary);
+  void verify_on(outputStream* st);
 
   void oop_verify_on(oop obj, outputStream* st);
 };
--- a/src/share/vm/oops/instanceKlass.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/oops/instanceKlass.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -77,51 +77,6 @@
 
 #ifdef DTRACE_ENABLED
 
-#ifndef USDT2
-
-HS_DTRACE_PROBE_DECL4(hotspot, class__initialization__required,
-  char*, intptr_t, oop, intptr_t);
-HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__recursive,
-  char*, intptr_t, oop, intptr_t, int);
-HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__concurrent,
-  char*, intptr_t, oop, intptr_t, int);
-HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__erroneous,
-  char*, intptr_t, oop, intptr_t, int);
-HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__super__failed,
-  char*, intptr_t, oop, intptr_t, int);
-HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__clinit,
-  char*, intptr_t, oop, intptr_t, int);
-HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__error,
-  char*, intptr_t, oop, intptr_t, int);
-HS_DTRACE_PROBE_DECL5(hotspot, class__initialization__end,
-  char*, intptr_t, oop, intptr_t, int);
-
-#define DTRACE_CLASSINIT_PROBE(type, clss, thread_type)          \
-  {                                                              \
-    char* data = NULL;                                           \
-    int len = 0;                                                 \
-    Symbol* name = (clss)->name();                               \
-    if (name != NULL) {                                          \
-      data = (char*)name->bytes();                               \
-      len = name->utf8_length();                                 \
-    }                                                            \
-    HS_DTRACE_PROBE4(hotspot, class__initialization__##type,     \
-      data, len, SOLARIS_ONLY((void *))(clss)->class_loader(), thread_type);           \
-  }
-
-#define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait) \
-  {                                                              \
-    char* data = NULL;                                           \
-    int len = 0;                                                 \
-    Symbol* name = (clss)->name();                               \
-    if (name != NULL) {                                          \
-      data = (char*)name->bytes();                               \
-      len = name->utf8_length();                                 \
-    }                                                            \
-    HS_DTRACE_PROBE5(hotspot, class__initialization__##type,     \
-      data, len, SOLARIS_ONLY((void *))(clss)->class_loader(), thread_type, wait);     \
-  }
-#else /* USDT2 */
 
 #define HOTSPOT_CLASS_INITIALIZATION_required HOTSPOT_CLASS_INITIALIZATION_REQUIRED
 #define HOTSPOT_CLASS_INITIALIZATION_recursive HOTSPOT_CLASS_INITIALIZATION_RECURSIVE
@@ -156,7 +111,6 @@
     HOTSPOT_CLASS_INITIALIZATION_##type(                         \
       data, len, (clss)->class_loader(), thread_type, wait);     \
   }
-#endif /* USDT2 */
 
 #else //  ndef DTRACE_ENABLED
 
@@ -2238,15 +2192,7 @@
   for (int m = 0; m < methods()->length(); m++) {
     MethodData* mdo = methods()->at(m)->method_data();
     if (mdo != NULL) {
-      for (ProfileData* data = mdo->first_data();
-           mdo->is_valid(data);
-           data = mdo->next_data(data)) {
-        data->clean_weak_klass_links(is_alive);
-      }
-      ParametersTypeData* parameters = mdo->parameters_type_data();
-      if (parameters != NULL) {
-        parameters->clean_weak_klass_links(is_alive);
-      }
+      mdo->clean_method_data(is_alive);
     }
   }
 }
@@ -3184,7 +3130,7 @@
   virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); }
 };
 
-void InstanceKlass::verify_on(outputStream* st, bool check_dictionary) {
+void InstanceKlass::verify_on(outputStream* st) {
 #ifndef PRODUCT
   // Avoid redundant verifies, this really should be in product.
   if (_verify_count == Universe::verify_count()) return;
@@ -3192,14 +3138,11 @@
 #endif
 
   // Verify Klass
-  Klass::verify_on(st, check_dictionary);
-
-  // Verify that klass is present in SystemDictionary if not already
-  // verifying the SystemDictionary.
-  if (is_loaded() && !is_anonymous() && check_dictionary) {
-    Symbol* h_name = name();
-    SystemDictionary::verify_obj_klass_present(h_name, class_loader_data());
-  }
+  Klass::verify_on(st);
+
+  // Verify that klass is present in ClassLoaderData
+  guarantee(class_loader_data()->contains_klass(this),
+            "this class isn't found in class loader data");
 
   // Verify vtables
   if (is_linked()) {
--- a/src/share/vm/oops/instanceKlass.hpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/oops/instanceKlass.hpp	Wed Feb 26 02:38:46 2014 -0800
@@ -306,7 +306,7 @@
   //   three cases:
   //     NULL: no implementor.
   //     A Klass* that's not itself: one implementor.
-  //     Itsef: more than one implementors.
+  //     Itself: more than one implementor.
   // embedded host klass follows here
   //   The embedded host klass only exists in an anonymous class for
   //   dynamic language support (JSR 292 enabled). The host class grants
@@ -1087,7 +1087,7 @@
   const char* internal_name() const;
 
   // Verification
-  void verify_on(outputStream* st, bool check_dictionary);
+  void verify_on(outputStream* st);
 
   void oop_verify_on(oop obj, outputStream* st);
 };
--- a/src/share/vm/oops/klass.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/oops/klass.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -638,7 +638,7 @@
 
 // Verification
 
-void Klass::verify_on(outputStream* st, bool check_dictionary) {
+void Klass::verify_on(outputStream* st) {
 
   // This can be expensive, but it is worth checking that this klass is actually
   // in the CLD graph but not in production.
--- a/src/share/vm/oops/klass.hpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/oops/klass.hpp	Wed Feb 26 02:38:46 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -695,8 +695,8 @@
   virtual const char* internal_name() const = 0;
 
   // Verification
-  virtual void verify_on(outputStream* st, bool check_dictionary);
-  void verify(bool check_dictionary = true) { verify_on(tty, check_dictionary); }
+  virtual void verify_on(outputStream* st);
+  void verify() { verify_on(tty); }
 
 #ifndef PRODUCT
   bool verify_vtable_index(int index);
--- a/src/share/vm/oops/methodData.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/oops/methodData.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -80,8 +80,42 @@
   _data = NULL;
 }
 
+char* ProfileData::print_data_on_helper(const MethodData* md) const {
+  DataLayout* dp  = md->extra_data_base();
+  DataLayout* end = md->extra_data_limit();
+  stringStream ss;
+  for (;; dp = MethodData::next_extra(dp)) {
+    assert(dp < end, "moved past end of extra data");
+    switch(dp->tag()) {
+    case DataLayout::speculative_trap_data_tag:
+      if (dp->bci() == bci()) {
+        SpeculativeTrapData* data = new SpeculativeTrapData(dp);
+        int trap = data->trap_state();
+        char buf[100];
+        ss.print("trap/");
+        data->method()->print_short_name(&ss);
+        ss.print("(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
+      }
+      break;
+    case DataLayout::bit_data_tag:
+      break;
+    case DataLayout::no_tag:
+    case DataLayout::arg_info_data_tag:
+      return ss.as_string();
+      break;
+    default:
+      fatal(err_msg("unexpected tag %d", dp->tag()));
+    }
+  }
+  return NULL;
+}
+
+void ProfileData::print_data_on(outputStream* st, const MethodData* md) const {
+  print_data_on(st, print_data_on_helper(md));
+}
+
 #ifndef PRODUCT
-void ProfileData::print_shared(outputStream* st, const char* name) const {
+void ProfileData::print_shared(outputStream* st, const char* name, const char* extra) const {
   st->print("bci: %d", bci());
   st->fill_to(tab_width_one);
   st->print("%s", name);
@@ -91,9 +125,13 @@
     char buf[100];
     st->print("trap(%s) ", Deoptimization::format_trap_state(buf, sizeof(buf), trap));
   }
+  if (extra != NULL) {
+    st->print("%s", extra);
+  }
   int flags = data()->flags();
-  if (flags != 0)
+  if (flags != 0) {
     st->print("flags(%d) ", flags);
+  }
 }
 
 void ProfileData::tab(outputStream* st, bool first) const {
@@ -109,8 +147,8 @@
 
 
 #ifndef PRODUCT
-void BitData::print_data_on(outputStream* st) const {
-  print_shared(st, "BitData");
+void BitData::print_data_on(outputStream* st, const char* extra) const {
+  print_shared(st, "BitData", extra);
 }
 #endif // !PRODUCT
 
@@ -120,8 +158,8 @@
 // A CounterData corresponds to a simple counter.
 
 #ifndef PRODUCT
-void CounterData::print_data_on(outputStream* st) const {
-  print_shared(st, "CounterData");
+void CounterData::print_data_on(outputStream* st, const char* extra) const {
+  print_shared(st, "CounterData", extra);
   st->print_cr("count(%u)", count());
 }
 #endif // !PRODUCT
@@ -150,8 +188,8 @@
 }
 
 #ifndef PRODUCT
-void JumpData::print_data_on(outputStream* st) const {
-  print_shared(st, "JumpData");
+void JumpData::print_data_on(outputStream* st, const char* extra) const {
+  print_shared(st, "JumpData", extra);
   st->print_cr("taken(%u) displacement(%d)", taken(), displacement());
 }
 #endif // !PRODUCT
@@ -332,8 +370,8 @@
   st->cr();
 }
 
-void CallTypeData::print_data_on(outputStream* st) const {
-  CounterData::print_data_on(st);
+void CallTypeData::print_data_on(outputStream* st, const char* extra) const {
+  CounterData::print_data_on(st, extra);
   if (has_arguments()) {
     tab(st, true);
     st->print("argument types");
@@ -346,8 +384,8 @@
   }
 }
 
-void VirtualCallTypeData::print_data_on(outputStream* st) const {
-  VirtualCallData::print_data_on(st);
+void VirtualCallTypeData::print_data_on(outputStream* st, const char* extra) const {
+  VirtualCallData::print_data_on(st, extra);
   if (has_arguments()) {
     tab(st, true);
     st->print("argument types");
@@ -400,12 +438,12 @@
     }
   }
 }
-void ReceiverTypeData::print_data_on(outputStream* st) const {
-  print_shared(st, "ReceiverTypeData");
+void ReceiverTypeData::print_data_on(outputStream* st, const char* extra) const {
+  print_shared(st, "ReceiverTypeData", extra);
   print_receiver_data_on(st);
 }
-void VirtualCallData::print_data_on(outputStream* st) const {
-  print_shared(st, "VirtualCallData");
+void VirtualCallData::print_data_on(outputStream* st, const char* extra) const {
+  print_shared(st, "VirtualCallData", extra);
   print_receiver_data_on(st);
 }
 #endif // !PRODUCT
@@ -461,8 +499,8 @@
 #endif // CC_INTERP
 
 #ifndef PRODUCT
-void RetData::print_data_on(outputStream* st) const {
-  print_shared(st, "RetData");
+void RetData::print_data_on(outputStream* st, const char* extra) const {
+  print_shared(st, "RetData", extra);
   uint row;
   int entries = 0;
   for (row = 0; row < row_limit(); row++) {
@@ -496,8 +534,8 @@
 }
 
 #ifndef PRODUCT
-void BranchData::print_data_on(outputStream* st) const {
-  print_shared(st, "BranchData");
+void BranchData::print_data_on(outputStream* st, const char* extra) const {
+  print_shared(st, "BranchData", extra);
   st->print_cr("taken(%u) displacement(%d)",
                taken(), displacement());
   tab(st);
@@ -570,8 +608,8 @@
 }
 
 #ifndef PRODUCT
-void MultiBranchData::print_data_on(outputStream* st) const {
-  print_shared(st, "MultiBranchData");
+void MultiBranchData::print_data_on(outputStream* st, const char* extra) const {
+  print_shared(st, "MultiBranchData", extra);
   st->print_cr("default_count(%u) displacement(%d)",
                default_count(), default_displacement());
   int cases = number_of_cases();
@@ -584,8 +622,8 @@
 #endif
 
 #ifndef PRODUCT
-void ArgInfoData::print_data_on(outputStream* st) const {
-  print_shared(st, "ArgInfoData");
+void ArgInfoData::print_data_on(outputStream* st, const char* extra) const {
+  print_shared(st, "ArgInfoData", extra);
   int nargs = number_of_args();
   for (int i = 0; i < nargs; i++) {
     st->print("  0x%x", arg_modified(i));
@@ -616,10 +654,17 @@
 }
 
 #ifndef PRODUCT
-void ParametersTypeData::print_data_on(outputStream* st) const {
-  st->print("parameter types");
+void ParametersTypeData::print_data_on(outputStream* st, const char* extra) const {
+  st->print("parameter types", extra);
   _parameters.print_data_on(st);
 }
+
+void SpeculativeTrapData::print_data_on(outputStream* st, const char* extra) const {
+  print_shared(st, "SpeculativeTrapData", extra);
+  tab(st);
+  method()->print_short_name(st);
+  st->cr();
+}
 #endif
 
 // ==================================================================
@@ -745,7 +790,27 @@
   return DataLayout::compute_size_in_bytes(cell_count);
 }
 
-int MethodData::compute_extra_data_count(int data_size, int empty_bc_count) {
+bool MethodData::is_speculative_trap_bytecode(Bytecodes::Code code) {
+  // Bytecodes for which we may use speculation
+  switch (code) {
+  case Bytecodes::_checkcast:
+  case Bytecodes::_instanceof:
+  case Bytecodes::_aastore:
+  case Bytecodes::_invokevirtual:
+  case Bytecodes::_invokeinterface:
+  case Bytecodes::_if_acmpeq:
+  case Bytecodes::_if_acmpne:
+  case Bytecodes::_invokestatic:
+#ifdef COMPILER2
+    return UseTypeSpeculation;
+#endif
+  default:
+    return false;
+  }
+  return false;
+}
+
+int MethodData::compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps) {
   if (ProfileTraps) {
     // Assume that up to 3% of BCIs with no MDP will need to allocate one.
     int extra_data_count = (uint)(empty_bc_count * 3) / 128 + 1;
@@ -756,7 +821,18 @@
       extra_data_count = one_percent_of_data;
     if (extra_data_count > empty_bc_count)
       extra_data_count = empty_bc_count;  // no need for more
-    return extra_data_count;
+
+    // Make sure we have a minimum number of extra data slots to
+    // allocate SpeculativeTrapData entries. We would want to have one
+    // entry per compilation that inlines this method and for which
+    // some type speculation assumption fails. So the room we need for
+    // the SpeculativeTrapData entries doesn't directly depend on the
+    // size of the method. Because it's hard to estimate, we reserve
+    // space for an arbitrary number of entries.
+    int spec_data_count = (needs_speculative_traps ? SpecTrapLimitExtraEntries : 0) *
+      (SpeculativeTrapData::static_cell_count() + DataLayout::header_size_in_cells());
+
+    return MAX2(extra_data_count, spec_data_count);
   } else {
     return 0;
   }
@@ -769,15 +845,17 @@
   BytecodeStream stream(method);
   Bytecodes::Code c;
   int empty_bc_count = 0;  // number of bytecodes lacking data
+  bool needs_speculative_traps = false;
   while ((c = stream.next()) >= 0) {
     int size_in_bytes = compute_data_size(&stream);
     data_size += size_in_bytes;
     if (size_in_bytes == 0)  empty_bc_count += 1;
+    needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
   }
   int object_size = in_bytes(data_offset()) + data_size;
 
   // Add some extra DataLayout cells (at least one) to track stray traps.
-  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count);
+  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
   object_size += extra_data_count * DataLayout::compute_size_in_bytes(0);
 
   // Add a cell to record information about modified arguments.
@@ -1009,18 +1087,23 @@
   _data[0] = 0;  // apparently not set below.
   BytecodeStream stream(method);
   Bytecodes::Code c;
+  bool needs_speculative_traps = false;
   while ((c = stream.next()) >= 0) {
     int size_in_bytes = initialize_data(&stream, data_size);
     data_size += size_in_bytes;
     if (size_in_bytes == 0)  empty_bc_count += 1;
+    needs_speculative_traps = needs_speculative_traps || is_speculative_trap_bytecode(c);
   }
   _data_size = data_size;
   int object_size = in_bytes(data_offset()) + data_size;
 
   // Add some extra DataLayout cells (at least one) to track stray traps.
-  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count);
+  int extra_data_count = compute_extra_data_count(data_size, empty_bc_count, needs_speculative_traps);
   int extra_size = extra_data_count * DataLayout::compute_size_in_bytes(0);
 
+  // Let's zero the space for the extra data
+  Copy::zero_to_bytes(((address)_data) + data_size, extra_size);
+
   // Add a cell to record information about modified arguments.
   // Set up _args_modified array after traps cells so that
   // the code for traps cells works.
@@ -1032,17 +1115,17 @@
   int arg_data_size = DataLayout::compute_size_in_bytes(arg_size+1);
   object_size += extra_size + arg_data_size;
 
-  int args_cell = ParametersTypeData::compute_cell_count(method());
+  int parms_cell = ParametersTypeData::compute_cell_count(method());
   // If we are profiling parameters, we reserve an area near the end
   // of the MDO after the slots for bytecodes (because there's no bci
   // for method entry so they don't fit with the framework for the
   // profiling of bytecodes). We store the offset within the MDO of
   // this area (or -1 if no parameter is profiled)
-  if (args_cell > 0) {
-    object_size += DataLayout::compute_size_in_bytes(args_cell);
+  if (parms_cell > 0) {
+    object_size += DataLayout::compute_size_in_bytes(parms_cell);
     _parameters_type_data_di = data_size + extra_size + arg_data_size;
     DataLayout *dp = data_layout_at(data_size + extra_size + arg_data_size);
-    dp->initialize(DataLayout::parameters_type_data_tag, 0, args_cell);
+    dp->initialize(DataLayout::parameters_type_data_tag, 0, parms_cell);
   } else {
     _parameters_type_data_di = -1;
   }
@@ -1133,39 +1216,113 @@
       break;
     }
   }
-  return bci_to_extra_data(bci, false);
+  return bci_to_extra_data(bci, NULL, false);
 }
 
-// Translate a bci to its corresponding extra data, or NULL.
-ProfileData* MethodData::bci_to_extra_data(int bci, bool create_if_missing) {
-  DataLayout* dp    = extra_data_base();
-  DataLayout* end   = extra_data_limit();
-  DataLayout* avail = NULL;
-  for (; dp < end; dp = next_extra(dp)) {
+DataLayout* MethodData::next_extra(DataLayout* dp) {
+  int nb_cells = 0;
+  switch(dp->tag()) {
+  case DataLayout::bit_data_tag:
+  case DataLayout::no_tag:
+    nb_cells = BitData::static_cell_count();
+    break;
+  case DataLayout::speculative_trap_data_tag:
+    nb_cells = SpeculativeTrapData::static_cell_count();
+    break;
+  default:
+    fatal(err_msg("unexpected tag %d", dp->tag()));
+  }
+  return (DataLayout*)((address)dp + DataLayout::compute_size_in_bytes(nb_cells));
+}
+
+ProfileData* MethodData::bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp) {
+  DataLayout* end = extra_data_limit();
+
+  for (;; dp = next_extra(dp)) {
+    assert(dp < end, "moved past end of extra data");
     // No need for "OrderAccess::load_acquire" ops,
     // since the data structure is monotonic.
-    if (dp->tag() == DataLayout::no_tag)  break;
-    if (dp->tag() == DataLayout::arg_info_data_tag) {
-      dp = end; // ArgInfoData is at the end of extra data section.
+    switch(dp->tag()) {
+    case DataLayout::no_tag:
+      return NULL;
+    case DataLayout::arg_info_data_tag:
+      dp = end;
+      return NULL; // ArgInfoData is at the end of extra data section.
+    case DataLayout::bit_data_tag:
+      if (m == NULL && dp->bci() == bci) {
+        return new BitData(dp);
+      }
       break;
-    }
-    if (dp->bci() == bci) {
-      assert(dp->tag() == DataLayout::bit_data_tag, "sane");
-      return new BitData(dp);
+    case DataLayout::speculative_trap_data_tag:
+      if (m != NULL) {
+        SpeculativeTrapData* data = new SpeculativeTrapData(dp);
+        // data->method() may be null in case of a concurrent
+        // allocation. Assume it's for the same method and use that
+        // entry in that case.
+        if (dp->bci() == bci) {
+          if (data->method() == NULL) {
+            return NULL;
+          } else if (data->method() == m) {
+            return data;
+          }
+        }
+      }
+      break;
+    default:
+      fatal(err_msg("unexpected tag %d", dp->tag()));
     }
   }
-  if (create_if_missing && dp < end) {
-    // Allocate this one.  There is no mutual exclusion,
-    // so two threads could allocate different BCIs to the
-    // same data layout.  This means these extra data
-    // records, like most other MDO contents, must not be
-    // trusted too much.
-    DataLayout temp;
-    temp.initialize(DataLayout::bit_data_tag, bci, 0);
-    dp->release_set_header(temp.header());
-    assert(dp->tag() == DataLayout::bit_data_tag, "sane");
-    //NO: assert(dp->bci() == bci, "no concurrent allocation");
-    return new BitData(dp);
+  return NULL;
+}
+
+
+// Translate a bci to its corresponding extra data, or NULL.
+ProfileData* MethodData::bci_to_extra_data(int bci, Method* m, bool create_if_missing) {
+  // This code assumes an entry for a SpeculativeTrapData is 2 cells
+  assert(2*DataLayout::compute_size_in_bytes(BitData::static_cell_count()) ==
+         DataLayout::compute_size_in_bytes(SpeculativeTrapData::static_cell_count()),
+         "code needs to be adjusted");
+
+  DataLayout* dp  = extra_data_base();
+  DataLayout* end = extra_data_limit();
+
+  // Allocation in the extra data space has to be atomic because not
+  // all entries have the same size and non atomic concurrent
+  // allocation would result in a corrupted extra data space.
+  while (true) {
+    ProfileData* result = bci_to_extra_data_helper(bci, m, dp);
+    if (result != NULL) {
+      return result;
+    }
+
+    if (create_if_missing && dp < end) {
+      assert(dp->tag() == DataLayout::no_tag || (dp->tag() == DataLayout::speculative_trap_data_tag && m != NULL), "should be free");
+      assert(next_extra(dp)->tag() == DataLayout::no_tag || next_extra(dp)->tag() == DataLayout::arg_info_data_tag, "should be free or arg info");
+      u1 tag = m == NULL ? DataLayout::bit_data_tag : DataLayout::speculative_trap_data_tag;
+      // SpeculativeTrapData is 2 slots. Make sure we have room.
+      if (m != NULL && next_extra(dp)->tag() != DataLayout::no_tag) {
+        return NULL;
+      }
+      DataLayout temp;
+      temp.initialize(tag, bci, 0);
+      // May have been set concurrently
+      if (dp->header() != temp.header() && !dp->atomic_set_header(temp.header())) {
+        // Allocation failure because of concurrent allocation. Try
+        // again.
+        continue;
+      }
+      assert(dp->tag() == tag, "sane");
+      assert(dp->bci() == bci, "no concurrent allocation");
+      if (tag == DataLayout::bit_data_tag) {
+        return new BitData(dp);
+      } else {
+        // If being allocated concurrently, one trap may be lost
+        SpeculativeTrapData* data = new SpeculativeTrapData(dp);
+        data->set_method(m);
+        return data;
+      }
+    }
+    return NULL;
   }
   return NULL;
 }
@@ -1210,25 +1367,35 @@
   for ( ; is_valid(data); data = next_data(data)) {
     st->print("%d", dp_to_di(data->dp()));
     st->fill_to(6);
-    data->print_data_on(st);
+    data->print_data_on(st, this);
   }
   st->print_cr("--- Extra data:");
   DataLayout* dp    = extra_data_base();
   DataLayout* end   = extra_data_limit();
-  for (; dp < end; dp = next_extra(dp)) {
+  for (;; dp = next_extra(dp)) {
+    assert(dp < end, "moved past end of extra data");
     // No need for "OrderAccess::load_acquire" ops,
     // since the data structure is monotonic.
-    if (dp->tag() == DataLayout::no_tag)  continue;
-    if (dp->tag() == DataLayout::bit_data_tag) {
+    switch(dp->tag()) {
+    case DataLayout::no_tag:
+      continue;
+    case DataLayout::bit_data_tag:
       data = new BitData(dp);
-    } else {
-      assert(dp->tag() == DataLayout::arg_info_data_tag, "must be BitData or ArgInfo");
+      break;
+    case DataLayout::speculative_trap_data_tag:
+      data = new SpeculativeTrapData(dp);
+      break;
+    case DataLayout::arg_info_data_tag:
       data = new ArgInfoData(dp);
       dp = end; // ArgInfoData is at the end of extra data section.
+      break;
+    default:
+      fatal(err_msg("unexpected tag %d", dp->tag()));
     }
     st->print("%d", dp_to_di(data->dp()));
     st->fill_to(6);
     data->print_data_on(st);
+    if (dp >= end) return;
   }
 }
 #endif
@@ -1351,3 +1518,110 @@
   assert(profile_parameters_jsr292_only(), "inconsistent");
   return m->is_compiled_lambda_form();
 }
+
+void MethodData::clean_extra_data_helper(DataLayout* dp, int shift, bool reset) {
+  if (shift == 0) {
+    return;
+  }
+  if (!reset) {
+    // Move all cells of trap entry at dp left by "shift" cells
+    intptr_t* start = (intptr_t*)dp;
+    intptr_t* end = (intptr_t*)next_extra(dp);
+    for (intptr_t* ptr = start; ptr < end; ptr++) {
+      *(ptr-shift) = *ptr;
+    }
+  } else {
+    // Reset "shift" cells stopping at dp
+    intptr_t* start = ((intptr_t*)dp) - shift;
+    intptr_t* end = (intptr_t*)dp;
+    for (intptr_t* ptr = start; ptr < end; ptr++) {
+      *ptr = 0;
+    }
+  }
+}
+
+// Remove SpeculativeTrapData entries that reference an unloaded
+// method
+void MethodData::clean_extra_data(BoolObjectClosure* is_alive) {
+  DataLayout* dp  = extra_data_base();
+  DataLayout* end = extra_data_limit();
+
+  int shift = 0;
+  for (; dp < end; dp = next_extra(dp)) {
+    switch(dp->tag()) {
+    case DataLayout::speculative_trap_data_tag: {
+      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
+      Method* m = data->method();
+      assert(m != NULL, "should have a method");
+      if (!m->method_holder()->is_loader_alive(is_alive)) {
+        // "shift" accumulates the number of cells for dead
+        // SpeculativeTrapData entries that have been seen so
+        // far. Following entries must be shifted left by that many
+        // cells to remove the dead SpeculativeTrapData entries.
+        shift += (int)((intptr_t*)next_extra(dp) - (intptr_t*)dp);
+      } else {
+        // Shift this entry left if it follows dead
+        // SpeculativeTrapData entries
+        clean_extra_data_helper(dp, shift);
+      }
+      break;
+    }
+    case DataLayout::bit_data_tag:
+      // Shift this entry left if it follows dead SpeculativeTrapData
+      // entries
+      clean_extra_data_helper(dp, shift);
+      continue;
+    case DataLayout::no_tag:
+    case DataLayout::arg_info_data_tag:
+      // We are at end of the live trap entries. The previous "shift"
+      // cells contain entries that are either dead or were shifted
+      // left. They need to be reset to no_tag
+      clean_extra_data_helper(dp, shift, true);
+      return;
+    default:
+      fatal(err_msg("unexpected tag %d", dp->tag()));
+    }
+  }
+}
+
+// Verify there's no unloaded method referenced by a
+// SpeculativeTrapData entry
+void MethodData::verify_extra_data_clean(BoolObjectClosure* is_alive) {
+#ifdef ASSERT
+  DataLayout* dp  = extra_data_base();
+  DataLayout* end = extra_data_limit();
+
+  for (; dp < end; dp = next_extra(dp)) {
+    switch(dp->tag()) {
+    case DataLayout::speculative_trap_data_tag: {
+      SpeculativeTrapData* data = new SpeculativeTrapData(dp);
+      Method* m = data->method();
+      assert(m != NULL && m->method_holder()->is_loader_alive(is_alive), "Method should exist");
+      break;
+    }
+    case DataLayout::bit_data_tag:
+      continue;
+    case DataLayout::no_tag:
+    case DataLayout::arg_info_data_tag:
+      return;
+    default:
+      fatal(err_msg("unexpected tag %d", dp->tag()));
+    }
+  }
+#endif
+}
+
+void MethodData::clean_method_data(BoolObjectClosure* is_alive) {
+  for (ProfileData* data = first_data();
+       is_valid(data);
+       data = next_data(data)) {
+    data->clean_weak_klass_links(is_alive);
+  }
+  ParametersTypeData* parameters = parameters_type_data();
+  if (parameters != NULL) {
+    parameters->clean_weak_klass_links(is_alive);
+  }
+
+  clean_extra_data(is_alive);
+  verify_extra_data_clean(is_alive);
+}
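
`clean_extra_data()` compacts the extra-data area in a single left-to-right pass: `shift` accumulates the width (in cells) of dead SpeculativeTrapData entries seen so far, each surviving entry is slid left by `shift` cells, and once the live entries end the freed tail is zeroed back to `no_tag`. A standalone sketch of that pass over a flat array of cells (the two-cell entry width and the markers are hypothetical):

    #include <cstdio>

    const int ENTRY_CELLS = 2;   // sketch: every entry is two cells wide
    const long DEAD = -1;        // sketch: marks an entry whose method was unloaded

    // Slide one live entry left by 'shift' cells (mirrors clean_extra_data_helper
    // with reset == false).
    static void shift_entry(long* cells, int at, int shift) {
      if (shift == 0) return;
      for (int i = 0; i < ENTRY_CELLS; i++) {
        cells[at - shift + i] = cells[at + i];
      }
    }

    int main() {
      long cells[8] = { 10, 11, DEAD, DEAD, 20, 21, 30, 31 };
      int shift = 0;
      for (int at = 0; at < 8; at += ENTRY_CELLS) {
        if (cells[at] == DEAD) {
          shift += ENTRY_CELLS;            // later entries must move this far left
        } else {
          shift_entry(cells, at, shift);
        }
      }
      for (int i = 8 - shift; i < 8; i++) cells[i] = 0;    // reset freed tail to "no_tag"
      for (int i = 0; i < 8; i++) printf("%ld ", cells[i]); // 10 11 20 21 30 31 0 0
      printf("\n");
      return 0;
    }
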
--- a/src/share/vm/oops/methodData.hpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/oops/methodData.hpp	Wed Feb 26 02:38:46 2014 -0800
@@ -120,7 +120,8 @@
     arg_info_data_tag,
     call_type_data_tag,
     virtual_call_type_data_tag,
-    parameters_type_data_tag
+    parameters_type_data_tag,
+    speculative_trap_data_tag
   };
 
   enum {
@@ -189,8 +190,11 @@
   void set_header(intptr_t value) {
     _header._bits = value;
   }
-  void release_set_header(intptr_t value) {
-    OrderAccess::release_store_ptr(&_header._bits, value);
+  bool atomic_set_header(intptr_t value) {
+    if (Atomic::cmpxchg_ptr(value, (volatile intptr_t*)&_header._bits, 0) == 0) {
+      return true;
+    }
+    return false;
   }
   intptr_t header() {
     return _header._bits;
@@ -271,6 +275,7 @@
 class     MultiBranchData;
 class     ArgInfoData;
 class     ParametersTypeData;
+class   SpeculativeTrapData;
 
 // ProfileData
 //
@@ -291,6 +296,8 @@
   // This is a pointer to a section of profiling data.
   DataLayout* _data;
 
+  char* print_data_on_helper(const MethodData* md) const;
+
 protected:
   DataLayout* data() { return _data; }
   const DataLayout* data() const { return _data; }
@@ -440,6 +447,7 @@
   virtual bool is_CallTypeData()    const { return false; }
   virtual bool is_VirtualCallTypeData()const { return false; }
   virtual bool is_ParametersTypeData() const { return false; }
+  virtual bool is_SpeculativeTrapData()const { return false; }
 
 
   BitData* as_BitData() const {
@@ -494,6 +502,10 @@
     assert(is_ParametersTypeData(), "wrong type");
     return is_ParametersTypeData() ? (ParametersTypeData*)this : NULL;
   }
+  SpeculativeTrapData* as_SpeculativeTrapData() const {
+    assert(is_SpeculativeTrapData(), "wrong type");
+    return is_SpeculativeTrapData() ? (SpeculativeTrapData*)this : NULL;
+  }
 
 
   // Subclass specific initialization
@@ -509,12 +521,14 @@
   // translation here, and the required translators are in the ci subclasses.
   virtual void translate_from(const ProfileData* data) {}
 
-  virtual void print_data_on(outputStream* st) const {
+  virtual void print_data_on(outputStream* st, const char* extra = NULL) const {
     ShouldNotReachHere();
   }
 
+  void print_data_on(outputStream* st, const MethodData* md) const;
+
 #ifndef PRODUCT
-  void print_shared(outputStream* st, const char* name) const;
+  void print_shared(outputStream* st, const char* name, const char* extra) const;
   void tab(outputStream* st, bool first = false) const;
 #endif
 };
@@ -576,7 +590,7 @@
 #endif // CC_INTERP
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st) const;
+  void print_data_on(outputStream* st, const char* extra = NULL) const;
 #endif
 };
 
@@ -639,7 +653,7 @@
 #endif // CC_INTERP
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st) const;
+  void print_data_on(outputStream* st, const char* extra = NULL) const;
 #endif
 };
 
@@ -726,7 +740,7 @@
   void post_initialize(BytecodeStream* stream, MethodData* mdo);
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st) const;
+  void print_data_on(outputStream* st, const char* extra = NULL) const;
 #endif
 };
 
@@ -1137,7 +1151,7 @@
   }
 
 #ifndef PRODUCT
-  virtual void print_data_on(outputStream* st) const;
+  virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
 #endif
 };
 
@@ -1282,7 +1296,7 @@
 
 #ifndef PRODUCT
   void print_receiver_data_on(outputStream* st) const;
-  void print_data_on(outputStream* st) const;
+  void print_data_on(outputStream* st, const char* extra = NULL) const;
 #endif
 };
 
@@ -1325,7 +1339,7 @@
 #endif // CC_INTERP
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st) const;
+  void print_data_on(outputStream* st, const char* extra = NULL) const;
 #endif
 };
 
@@ -1451,7 +1465,7 @@
   }
 
 #ifndef PRODUCT
-  virtual void print_data_on(outputStream* st) const;
+  virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
 #endif
 };
 
@@ -1554,7 +1568,7 @@
   void post_initialize(BytecodeStream* stream, MethodData* mdo);
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st) const;
+  void print_data_on(outputStream* st, const char* extra = NULL) const;
 #endif
 };
 
@@ -1632,7 +1646,7 @@
   void post_initialize(BytecodeStream* stream, MethodData* mdo);
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st) const;
+  void print_data_on(outputStream* st, const char* extra = NULL) const;
 #endif
 };
 
@@ -1825,7 +1839,7 @@
   void post_initialize(BytecodeStream* stream, MethodData* mdo);
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st) const;
+  void print_data_on(outputStream* st, const char* extra = NULL) const;
 #endif
 };
 
@@ -1852,7 +1866,7 @@
   }
 
 #ifndef PRODUCT
-  void print_data_on(outputStream* st) const;
+  void print_data_on(outputStream* st, const char* extra = NULL) const;
 #endif
 };
 
@@ -1913,7 +1927,7 @@
   }
 
 #ifndef PRODUCT
-  virtual void print_data_on(outputStream* st) const;
+  virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
 #endif
 
   static ByteSize stack_slot_offset(int i) {
@@ -1925,6 +1939,54 @@
   }
 };
 
+// SpeculativeTrapData
+//
+// A SpeculativeTrapData is used to record traps due to type
+// speculation. It records the root method of the compilation, because
+// a speculation that fails in the context of one compilation (of
+// method1) is not necessarily wrong in the context of another (of
+// method2): type speculation may see more, or different, profile data
+// when method2 is compiled, so an optimization that failed during the
+// compilation of method1 may still be worth trying during the
+// compilation of method2.
+// Space for SpeculativeTrapData entries is allocated from the extra
+// data space in the MDO. If we run out of space, the trap data for
+// the ProfileData at that bci is updated.
+class SpeculativeTrapData : public ProfileData {
+protected:
+  enum {
+    method_offset,
+    speculative_trap_cell_count
+  };
+public:
+  SpeculativeTrapData(DataLayout* layout) : ProfileData(layout) {
+    assert(layout->tag() == DataLayout::speculative_trap_data_tag, "wrong type");
+  }
+
+  virtual bool is_SpeculativeTrapData() const { return true; }
+
+  static int static_cell_count() {
+    return speculative_trap_cell_count;
+  }
+
+  virtual int cell_count() const {
+    return static_cell_count();
+  }
+
+  // Direct accessor
+  Method* method() const {
+    return (Method*)intptr_at(method_offset);
+  }
+
+  void set_method(Method* m) {
+    set_intptr_at(method_offset, (intptr_t)m);
+  }
+
+#ifndef PRODUCT
+  virtual void print_data_on(outputStream* st, const char* extra = NULL) const;
+#endif
+};
+
 // MethodData*
 //
 // A MethodData* holds information which has been collected about
@@ -1994,7 +2056,7 @@
 
   // Whole-method sticky bits and flags
   enum {
-    _trap_hist_limit    = 17,   // decoupled from Deoptimization::Reason_LIMIT
+    _trap_hist_limit    = 18,   // decoupled from Deoptimization::Reason_LIMIT
     _trap_hist_mask     = max_jubyte,
     _extra_data_count   = 4     // extra DataLayout headers, for trap history
   }; // Public flag values
@@ -2049,6 +2111,7 @@
   // Helper for size computation
   static int compute_data_size(BytecodeStream* stream);
   static int bytecode_cell_count(Bytecodes::Code code);
+  static bool is_speculative_trap_bytecode(Bytecodes::Code code);
   enum { no_profile_data = -1, variable_cell_count = -2 };
 
   // Helper for initialization
@@ -2092,8 +2155,9 @@
   // What is the index of the first data entry?
   int first_di() const { return 0; }
 
+  ProfileData* bci_to_extra_data_helper(int bci, Method* m, DataLayout*& dp);
   // Find or create an extra ProfileData:
-  ProfileData* bci_to_extra_data(int bci, bool create_if_missing);
+  ProfileData* bci_to_extra_data(int bci, Method* m, bool create_if_missing);
 
   // return the argument info cell
   ArgInfoData *arg_info();
@@ -2116,6 +2180,10 @@
   static bool profile_parameters_jsr292_only();
   static bool profile_all_parameters();
 
+  void clean_extra_data(BoolObjectClosure* is_alive);
+  void clean_extra_data_helper(DataLayout* dp, int shift, bool reset = false);
+  void verify_extra_data_clean(BoolObjectClosure* is_alive);
+
 public:
   static int header_size() {
     return sizeof(MethodData)/wordSize;
@@ -2124,7 +2192,7 @@
   // Compute the size of a MethodData* before it is created.
   static int compute_allocation_size_in_bytes(methodHandle method);
   static int compute_allocation_size_in_words(methodHandle method);
-  static int compute_extra_data_count(int data_size, int empty_bc_count);
+  static int compute_extra_data_count(int data_size, int empty_bc_count, bool needs_speculative_traps);
 
   // Determine if a given bytecode can have profile information.
   static bool bytecode_has_profile(Bytecodes::Code code) {
@@ -2265,9 +2333,26 @@
   ProfileData* bci_to_data(int bci);
 
   // Same, but try to create an extra_data record if one is needed:
-  ProfileData* allocate_bci_to_data(int bci) {
-    ProfileData* data = bci_to_data(bci);
-    return (data != NULL) ? data : bci_to_extra_data(bci, true);
+  ProfileData* allocate_bci_to_data(int bci, Method* m) {
+    ProfileData* data = NULL;
+    // If m is not NULL, try to allocate a SpeculativeTrapData entry
+    if (m == NULL) {
+      data = bci_to_data(bci);
+    }
+    if (data != NULL) {
+      return data;
+    }
+    data = bci_to_extra_data(bci, m, true);
+    if (data != NULL) {
+      return data;
+    }
+    // If SpeculativeTrapData allocation fails, try to allocate a
+    // regular entry
+    data = bci_to_data(bci);
+    if (data != NULL) {
+      return data;
+    }
+    return bci_to_extra_data(bci, NULL, true);
   }
 
   // Add a handful of extra data records, for trap tracking.
@@ -2275,7 +2360,7 @@
   DataLayout* extra_data_limit() const { return (DataLayout*)((address)this + size_in_bytes()); }
   int extra_data_size() const { return (address)extra_data_limit()
                                - (address)extra_data_base(); }
-  static DataLayout* next_extra(DataLayout* dp) { return (DataLayout*)((address)dp + in_bytes(DataLayout::cell_offset(0))); }
+  static DataLayout* next_extra(DataLayout* dp);
 
   // Return (uint)-1 for overflow.
   uint trap_count(int reason) const {
@@ -2375,6 +2460,8 @@
   static bool profile_return();
   static bool profile_parameters();
   static bool profile_return_jsr292_only();
+
+  void clean_method_data(BoolObjectClosure* is_alive);
 };
 
 #endif // SHARE_VM_OOPS_METHODDATAOOP_HPP
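
`atomic_set_header` is the linchpin of the new extra-data allocation: where the old `release_set_header` let two racing threads both claim the same free slot, the compare-and-swap against 0 guarantees a single winner, and the loser retries via the `continue` in `bci_to_extra_data`. A reduced sketch of the claim step, using the GCC/Clang `__sync_bool_compare_and_swap` builtin as a stand-in for `Atomic::cmpxchg_ptr`:

    #include <cstdio>
    #include <stdint.h>

    // One header word per extra-data slot; 0 means "free" (no_tag).
    static volatile intptr_t slot_header = 0;

    // Mirrors DataLayout::atomic_set_header: succeed only if the slot is
    // still unclaimed. (HotSpot uses Atomic::cmpxchg_ptr; this sketch uses
    // a compiler builtin instead.)
    static bool atomic_set_header(volatile intptr_t* header, intptr_t value) {
      return __sync_bool_compare_and_swap(header, (intptr_t)0, value);
    }

    int main() {
      intptr_t mine   = 0x1234;   // hypothetical packed (tag, bci) header
      intptr_t theirs = 0x5678;
      bool a = atomic_set_header(&slot_header, mine);    // claims the slot
      bool b = atomic_set_header(&slot_header, theirs);  // loses the race
      printf("a=%d b=%d header=%#lx\n", a, b, (unsigned long)slot_header);
      return 0;  // prints a=1 b=0 header=0x1234
    }
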
--- a/src/share/vm/oops/objArrayKlass.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/oops/objArrayKlass.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -674,8 +674,8 @@
 
 // Verification
 
-void ObjArrayKlass::verify_on(outputStream* st, bool check_dictionary) {
-  ArrayKlass::verify_on(st, check_dictionary);
+void ObjArrayKlass::verify_on(outputStream* st) {
+  ArrayKlass::verify_on(st);
   guarantee(element_klass()->is_klass(), "should be klass");
   guarantee(bottom_klass()->is_klass(), "should be klass");
   Klass* bk = bottom_klass();
--- a/src/share/vm/oops/objArrayKlass.hpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/oops/objArrayKlass.hpp	Wed Feb 26 02:38:46 2014 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -151,7 +151,7 @@
   const char* internal_name() const;
 
   // Verification
-  void verify_on(outputStream* st, bool check_dictionary);
+  void verify_on(outputStream* st);
 
   void oop_verify_on(oop obj, outputStream* st);
 };
--- a/src/share/vm/opto/block.hpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/opto/block.hpp	Wed Feb 26 02:38:46 2014 -0800
@@ -90,9 +90,9 @@
 class CFGElement : public ResourceObj {
   friend class VMStructs;
  public:
-  float _freq; // Execution frequency (estimate)
+  double _freq; // Execution frequency (estimate)
 
-  CFGElement() : _freq(0.0f) {}
+  CFGElement() : _freq(0.0) {}
   virtual bool is_block() { return false; }
   virtual bool is_loop()  { return false; }
   Block*   as_Block() { assert(is_block(), "must be block"); return (Block*)this; }
@@ -202,7 +202,7 @@
   // BLOCK_FREQUENCY is a sentinel to mark uses of constant block frequencies.
   // It is currently also used to scale such frequencies relative to
   // FreqCountInvocations relative to the old value of 1500.
-#define BLOCK_FREQUENCY(f) ((f * (float) 1500) / FreqCountInvocations)
+#define BLOCK_FREQUENCY(f) ((f * (double) 1500) / FreqCountInvocations)
 
   // Register Pressure (estimate) for Splitting heuristic
   uint _reg_pressure;
@@ -393,7 +393,7 @@
   CFGLoop* _root_loop;
 
   // Outmost loop frequency
-  float _outer_loop_frequency;
+  double _outer_loop_frequency;
 
   // Per node latency estimation, valid only during GCM
   GrowableArray<uint>* _node_latency;
@@ -508,7 +508,7 @@
   }
 
   // Get the outer most frequency
-  float get_outer_loop_frequency() const {
+  double get_outer_loop_frequency() const {
     return _outer_loop_frequency;
   }
 
@@ -656,13 +656,13 @@
 class BlockProbPair VALUE_OBJ_CLASS_SPEC {
 protected:
   Block* _target;      // block target
-  float  _prob;        // probability of edge to block
+  double _prob;        // probability of edge to block
 public:
   BlockProbPair() : _target(NULL), _prob(0.0) {}
-  BlockProbPair(Block* b, float p) : _target(b), _prob(p) {}
+  BlockProbPair(Block* b, double p) : _target(b), _prob(p) {}
 
   Block* get_target() const { return _target; }
-  float get_prob() const { return _prob; }
+  double get_prob() const { return _prob; }
 };
 
 //------------------------------CFGLoop-------------------------------------------
@@ -675,8 +675,8 @@
   CFGLoop *_child;       // first child, use child's sibling to visit all immediately nested loops
   GrowableArray<CFGElement*> _members; // list of members of loop
   GrowableArray<BlockProbPair> _exits; // list of successor blocks and their probabilities
-  float _exit_prob;       // probability any loop exit is taken on a single loop iteration
-  void update_succ_freq(Block* b, float freq);
+  double _exit_prob;       // probability any loop exit is taken on a single loop iteration
+  void update_succ_freq(Block* b, double freq);
 
  public:
   CFGLoop(int id) :
@@ -702,9 +702,9 @@
   void compute_loop_depth(int depth);
   void compute_freq(); // compute frequency with loop assuming head freq 1.0f
   void scale_freq();   // scale frequency by loop trip count (including outer loops)
-  float outer_loop_freq() const; // frequency of outer loop
+  double outer_loop_freq() const; // frequency of outer loop
   bool in_loop_nest(Block* b);
-  float trip_count() const { return 1.0f / _exit_prob; }
+  double trip_count() const { return 1.0 / _exit_prob; }
   virtual bool is_loop()  { return true; }
   int id() { return _id; }
 
@@ -723,7 +723,7 @@
  private:
   Block * _from;        // Source basic block
   Block * _to;          // Destination basic block
-  float _freq;          // Execution frequency (estimate)
+  double _freq;         // Execution frequency (estimate)
   int   _state;
   bool  _infrequent;
   int   _from_pct;
@@ -742,13 +742,13 @@
     interior            // edge is interior to trace (could be backedge)
   };
 
-  CFGEdge(Block *from, Block *to, float freq, int from_pct, int to_pct) :
+  CFGEdge(Block *from, Block *to, double freq, int from_pct, int to_pct) :
     _from(from), _to(to), _freq(freq),
     _from_pct(from_pct), _to_pct(to_pct), _state(open) {
     _infrequent = from_infrequent() || to_infrequent();
   }
 
-  float  freq() const { return _freq; }
+  double freq() const { return _freq; }
   Block* from() const { return _from; }
   Block* to  () const { return _to;   }
   int  infrequent() const { return _infrequent; }
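
The float-to-double widening above is not cosmetic: block frequencies are accumulated through repeated multiplications and additions across nested loops, and a float's 24-bit mantissa silently drops contributions once the running value reaches 2^24. A minimal sketch of the failure mode (illustrative C++ only, not part of the changeset):

#include <cstdio>

int main() {
  // Frequencies accumulate via repeated adds and multiplies.
  // float drops increments once the running value hits 2^24:
  float  f = 16777216.0f;  // 2^24
  double d = 16777216.0;
  f += 1.0f;               // lost: f is still 16777216
  d += 1.0;                // kept: d is now 16777217
  printf("float:  %.1f\ndouble: %.1f\n", (double)f, d);
  return 0;
}
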
--- a/src/share/vm/opto/c2_globals.hpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/opto/c2_globals.hpp	Wed Feb 26 02:38:46 2014 -0800
@@ -644,7 +644,7 @@
   diagnostic(bool, OptimizeExpensiveOps, true,                              \
           "Find best control for expensive operations")                     \
                                                                             \
-  experimental(bool, UseMathExactIntrinsics, false,                         \
+  product(bool, UseMathExactIntrinsics, true,                               \
           "Enables intrinsification of various java.lang.Math functions")   \
                                                                             \
   experimental(bool, ReplaceInParentMaps, false,                            \
--- a/src/share/vm/opto/chaitin.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/opto/chaitin.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -210,7 +210,7 @@
 {
   NOT_PRODUCT( Compile::TracePhase t3("ctorChaitin", &_t_ctorChaitin, TimeCompiler); )
 
-  _high_frequency_lrg = MIN2(float(OPTO_LRG_HIGH_FREQ), _cfg.get_outer_loop_frequency());
+  _high_frequency_lrg = MIN2(double(OPTO_LRG_HIGH_FREQ), _cfg.get_outer_loop_frequency());
 
   // Build a list of basic blocks, sorted by frequency
   _blks = NEW_RESOURCE_ARRAY(Block *, _cfg.number_of_blocks());
@@ -1799,7 +1799,7 @@
           Block *phi_block = _cfg.get_block_for_node(phi);
           if (_cfg.get_block_for_node(phi_block->pred(2)) == block) {
             const RegMask *mask = C->matcher()->idealreg2spillmask[Op_RegI];
-            Node *spill = new (C) MachSpillCopyNode( phi, *mask, *mask );
+            Node *spill = new (C) MachSpillCopyNode(MachSpillCopyNode::LoopPhiInput, phi, *mask, *mask);
             insert_proj( phi_block, 1, spill, maxlrg++ );
             n->set_req(1,spill);
             must_recompute_live = true;
--- a/src/share/vm/opto/chaitin.hpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/opto/chaitin.hpp	Wed Feb 26 02:38:46 2014 -0800
@@ -34,10 +34,9 @@
 #include "opto/phase.hpp"
 #include "opto/regalloc.hpp"
 #include "opto/regmask.hpp"
+#include "opto/machnode.hpp"
 
 class LoopTree;
-class MachCallNode;
-class MachSafePointNode;
 class Matcher;
 class PhaseCFG;
 class PhaseLive;
@@ -424,8 +423,8 @@
   uint _simplified;             // Linked list head of simplified LRGs
 
   // Helper functions for Split()
-  uint split_DEF( Node *def, Block *b, int loc, uint max, Node **Reachblock, Node **debug_defs, GrowableArray<uint> splits, int slidx );
-  uint split_USE( Node *def, Block *b, Node *use, uint useidx, uint max, bool def_down, bool cisc_sp, GrowableArray<uint> splits, int slidx );
+  uint split_DEF(Node *def, Block *b, int loc, uint max, Node **Reachblock, Node **debug_defs, GrowableArray<uint> splits, int slidx );
+  uint split_USE(MachSpillCopyNode::SpillType spill_type, Node *def, Block *b, Node *use, uint useidx, uint max, bool def_down, bool cisc_sp, GrowableArray<uint> splits, int slidx );
 
   //------------------------------clone_projs------------------------------------
   // After cloning some rematerialized instruction, clone any MachProj's that
@@ -447,7 +446,7 @@
                             int slidx, uint *lrg2reach, Node **Reachblock, bool walkThru);
   // True if lidx is used before any real register is def'd in the block
   bool prompt_use( Block *b, uint lidx );
-  Node *get_spillcopy_wide( Node *def, Node *use, uint uidx );
+  Node *get_spillcopy_wide(MachSpillCopyNode::SpillType spill_type, Node *def, Node *use, uint uidx );
   // Insert the spill at chosen location.  Skip over any intervening Proj's or
   // Phis.  Skip over a CatchNode and projs, inserting in the fall-through block
   // instead.  Update high-pressure indices.  Create a new live range.
@@ -501,8 +500,9 @@
   // Used for aggressive coalescing.
   void build_ifg_virtual( );
 
+  // Used when computing the register pressure for each block in the CFG.
+  // This is done during IFG creation.
   class Pressure {
-    public:
       // keeps track of the register pressure at the current
       // instruction (used when stepping backwards in the block)
       uint _current_pressure;
@@ -518,6 +518,7 @@
 
       // number of live ranges that constitute high register pressure
       const uint _high_pressure_limit;
+    public:
 
       // lower the register pressure and look for a low to high pressure
       // transition
@@ -525,9 +526,6 @@
         _current_pressure -= lrg.reg_pressure();
         if (_current_pressure == _high_pressure_limit) {
           _high_pressure_index = location;
-          if (_current_pressure > _final_pressure) {
-            _final_pressure = _current_pressure + 1;
-          }
         }
       }
 
@@ -540,6 +538,45 @@
         }
       }
 
+      uint high_pressure_index() const {
+        return _high_pressure_index;
+      }
+
+      uint final_pressure() const {
+        return _final_pressure;
+      }
+
+      uint current_pressure() const {
+        return _current_pressure;
+      }
+
+      uint high_pressure_limit() const {
+        return _high_pressure_limit;
+      }
+
+      void lower_high_pressure_index() {
+        _high_pressure_index--;
+      }
+
+      void set_high_pressure_index_to_block_start() {
+        _high_pressure_index = 0;
+      }
+
+      void check_pressure_at_fatproj(uint fatproj_location, RegMask& fatproj_mask) {
+        // this pressure is only valid at this instruction, i.e. we don't need to lower
+        // the register pressure since the fat proj was never live before (going backwards)
+        uint new_pressure = current_pressure() + fatproj_mask.Size();
+        if (new_pressure > final_pressure()) {
+          _final_pressure = new_pressure;
+        }
+
+        // if we were at low pressure and the fat proj takes us to high pressure, record the fat proj location
+        // as coming from a low to high (to low again)
+        if (current_pressure() <= high_pressure_limit() && new_pressure > high_pressure_limit()) {
+          _high_pressure_index = fatproj_location;
+        }
+      }
+
       Pressure(uint high_pressure_index, uint high_pressure_limit)
       : _current_pressure(0)
       , _high_pressure_index(high_pressure_index)
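
The Pressure changes above turn a plain struct that ifg.cpp poked directly into a small class with query and update methods, so invariants such as "final pressure only ever grows" are enforced in one place instead of being re-derived at every call site. A stripped-down sketch of the same encapsulation (simplified names and assumed shape; not the HotSpot class itself):

class Pressure {
  unsigned _current;        // pressure at the instruction being scanned
  unsigned _final;          // maximum pressure seen so far in the block
  const unsigned _limit;    // threshold for "high pressure"
public:
  explicit Pressure(unsigned limit) : _current(0), _final(0), _limit(limit) {}

  unsigned current_pressure() const { return _current; }
  unsigned final_pressure() const   { return _final; }
  bool is_high() const              { return _current > _limit; }

  void raise(unsigned regs) {
    _current += regs;
    if (_current > _final) _final = _current;  // invariant lives here
  }
  void lower(unsigned regs) { _current -= regs; }
};
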
--- a/src/share/vm/opto/classes.hpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/opto/classes.hpp	Wed Feb 26 02:38:46 2014 -0800
@@ -29,8 +29,6 @@
 macro(AbsF)
 macro(AbsI)
 macro(AddD)
-macro(AddExactI)
-macro(AddExactL)
 macro(AddF)
 macro(AddI)
 macro(AddL)
@@ -135,7 +133,6 @@
 macro(ExpD)
 macro(FastLock)
 macro(FastUnlock)
-macro(FlagsProj)
 macro(Goto)
 macro(Halt)
 macro(If)
@@ -170,9 +167,6 @@
 macro(LoopLimit)
 macro(Mach)
 macro(MachProj)
-macro(MathExact)
-macro(MathExactI)
-macro(MathExactL)
 macro(MaxI)
 macro(MemBarAcquire)
 macro(LoadFence)
@@ -194,22 +188,24 @@
 macro(MoveL2D)
 macro(MoveD2L)
 macro(MulD)
-macro(MulExactI)
-macro(MulExactL)
 macro(MulF)
 macro(MulHiL)
 macro(MulI)
 macro(MulL)
 macro(Multi)
 macro(NegD)
-macro(NegExactI)
-macro(NegExactL)
 macro(NegF)
 macro(NeverBranch)
 macro(Opaque1)
 macro(Opaque2)
 macro(OrI)
 macro(OrL)
+macro(OverflowAddI)
+macro(OverflowSubI)
+macro(OverflowMulI)
+macro(OverflowAddL)
+macro(OverflowSubL)
+macro(OverflowMulL)
 macro(PCTable)
 macro(Parm)
 macro(PartialSubtypeCheck)
@@ -253,8 +249,6 @@
 macro(StrEquals)
 macro(StrIndexOf)
 macro(SubD)
-macro(SubExactI)
-macro(SubExactL)
 macro(SubF)
 macro(SubI)
 macro(SubL)
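
The classes.hpp hunk replaces the projection-based MathExact* nodes with boolean Overflow* nodes: the arithmetic result and the overflow test become two independent ideal nodes instead of one node with result and flags projections, which is what made these operations hard for the optimizer to reason about. The overflow predicate itself is the standard two's-complement sign test; a hedged plain-C++ sketch (the real node logic lives in mathexactnode.cpp):

#include <cstdint>
#include <cstdlib>

// Signed 32-bit add overflows iff the result's sign differs from the
// signs of both operands.
static bool overflow_add(int32_t x, int32_t y) {
  int32_t r = (int32_t)((uint32_t)x + (uint32_t)y);  // well-defined wrapping add
  return ((x ^ r) & (y ^ r)) < 0;
}

// Mirrors java.lang.Math.addExact semantics: compute or bail out.
static int32_t add_exact(int32_t x, int32_t y) {
  if (overflow_add(x, y)) {
    abort();  // the compiled intrinsic deoptimizes; plain Java throws ArithmeticException
  }
  return (int32_t)((uint32_t)x + (uint32_t)y);
}
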
--- a/src/share/vm/opto/coalesce.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/opto/coalesce.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -291,7 +291,7 @@
               _phc.clone_projs(pred, pred->end_idx(), m, copy, _phc._lrg_map);
             } else {
               const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
-              copy = new (C) MachSpillCopyNode(m, *rm, *rm);
+              copy = new (C) MachSpillCopyNode(MachSpillCopyNode::PhiInput, m, *rm, *rm);
               // Find a good place to insert.  Kinda tricky, use a subroutine
               insert_copy_with_overlap(pred,copy,phi_name,src_name);
             }
@@ -325,7 +325,7 @@
               l += _phc.clone_projs(b, l, m, copy, _phc._lrg_map);
             } else {
               const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
-              copy = new (C) MachSpillCopyNode(m, *rm, *rm);
+              copy = new (C) MachSpillCopyNode(MachSpillCopyNode::TwoAddress, m, *rm, *rm);
               // Insert the copy in the basic block, just before us
               b->insert_node(copy, l++);
             }
@@ -372,7 +372,7 @@
                 continue;     // Live out; do not pre-split
               // Split the lrg at this use
               const RegMask *rm = C->matcher()->idealreg2spillmask[inp->ideal_reg()];
-              Node *copy = new (C) MachSpillCopyNode( inp, *rm, *rm );
+              Node* copy = new (C) MachSpillCopyNode(MachSpillCopyNode::DebugUse, inp, *rm, *rm);
               // Insert the copy in the use-def chain
               n->set_req(inpidx, copy );
               // Insert the copy in the basic block, just before us
--- a/src/share/vm/opto/compile.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/opto/compile.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -3028,42 +3028,6 @@
       n->set_req(MemBarNode::Precedent, top());
     }
     break;
-    // Must set a control edge on all nodes that produce a FlagsProj
-    // so they can't escape the block that consumes the flags.
-    // Must also set the non throwing branch as the control
-    // for all nodes that depends on the result. Unless the node
-    // already have a control that isn't the control of the
-    // flag producer
-  case Op_FlagsProj:
-    {
-      MathExactNode* math = (MathExactNode*)  n->in(0);
-      Node* ctrl = math->control_node();
-      Node* non_throwing = math->non_throwing_branch();
-      math->set_req(0, ctrl);
-
-      Node* result = math->result_node();
-      if (result != NULL) {
-        for (DUIterator_Fast jmax, j = result->fast_outs(jmax); j < jmax; j++) {
-          Node* out = result->fast_out(j);
-          // Phi nodes shouldn't be moved. They would only match below if they
-          // had the same control as the MathExactNode. The only time that
-          // would happen is if the Phi is also an input to the MathExact
-          //
-          // Cmp nodes shouldn't have control set at all.
-          if (out->is_Phi() ||
-              out->is_Cmp()) {
-            continue;
-          }
-
-          if (out->in(0) == NULL) {
-            out->set_req(0, non_throwing);
-          } else if (out->in(0) == ctrl) {
-            out->set_req(0, non_throwing);
-          }
-        }
-      }
-    }
-    break;
   default:
     assert( !n->is_Call(), "" );
     assert( !n->is_Mem(), "" );
@@ -3285,7 +3249,8 @@
     // because of a transient condition during start-up in the interpreter.
     return false;
   }
-  if (md->has_trap_at(bci, reason) != 0) {
+  ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : NULL;
+  if (md->has_trap_at(bci, m, reason) != 0) {
     // Assume PerBytecodeTrapLimit==0, for a more conservative heuristic.
     // Also, if there are multiple reasons, or if there is no per-BCI record,
     // assume the worst.
@@ -3303,7 +3268,7 @@
 // Less-accurate variant which does not require a method and bci.
 bool Compile::too_many_traps(Deoptimization::DeoptReason reason,
                              ciMethodData* logmd) {
- if (trap_count(reason) >= (uint)PerMethodTrapLimit) {
+  if (trap_count(reason) >= Deoptimization::per_method_trap_limit(reason)) {
     // Too many traps globally.
     // Note that we use cumulative trap_count, not just md->trap_count.
     if (log()) {
@@ -3338,10 +3303,11 @@
   uint m_cutoff  = (uint) PerMethodRecompilationCutoff / 2 + 1;  // not zero
   Deoptimization::DeoptReason per_bc_reason
     = Deoptimization::reason_recorded_per_bytecode_if_any(reason);
+  ciMethod* m = Deoptimization::reason_is_speculate(reason) ? this->method() : NULL;
   if ((per_bc_reason == Deoptimization::Reason_none
-       || md->has_trap_at(bci, reason) != 0)
+       || md->has_trap_at(bci, m, reason) != 0)
       // The trap frequency measure we care about is the recompile count:
-      && md->trap_recompiled_at(bci)
+      && md->trap_recompiled_at(bci, m)
       && md->overflow_recompile_count() >= bc_cutoff) {
     // Do not emit a trap here if it has already caused recompilations.
     // Also, if there are multiple reasons, or if there is no per-BCI record,
--- a/src/share/vm/opto/doCall.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/opto/doCall.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -250,7 +250,7 @@
           CallGenerator* miss_cg;
           Deoptimization::DeoptReason reason = morphism == 2 ?
                                     Deoptimization::Reason_bimorphic :
-                                    Deoptimization::Reason_class_check;
+                                    (speculative_receiver_type == NULL ? Deoptimization::Reason_class_check : Deoptimization::Reason_speculate_class_check);
           if ((morphism == 1 || (morphism == 2 && next_hit_cg != NULL)) &&
               !too_many_traps(jvms->method(), jvms->bci(), reason)
              ) {
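
This hunk and the matching ones in compile.cpp split the deoptimization bookkeeping: a class check derived from a speculative (profile-guessed) receiver type is recorded as Reason_speculate_class_check rather than Reason_class_check, so failed speculation can be throttled per method without poisoning the ordinary class-check heuristics. The selection pattern, sketched with stand-in types (hypothetical names, not the HotSpot API):

enum DeoptReason { Reason_class_check, Reason_speculate_class_check };

// If the type being checked came from speculation, account for a
// failure under the separate speculative reason.
static DeoptReason class_check_reason(const void* speculative_receiver_type) {
  return speculative_receiver_type == nullptr
      ? Reason_class_check
      : Reason_speculate_class_check;
}
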
--- a/src/share/vm/opto/gcm.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/opto/gcm.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -1661,10 +1661,10 @@
   }
   assert (_members.length() > 0, "no empty loops");
   Block* hd = head();
-  hd->_freq = 1.0f;
+  hd->_freq = 1.0;
   for (int i = 0; i < _members.length(); i++) {
     CFGElement* s = _members.at(i);
-    float freq = s->_freq;
+    double freq = s->_freq;
     if (s->is_block()) {
       Block* b = s->as_Block();
       for (uint j = 0; j < b->_num_succs; j++) {
@@ -1676,7 +1676,7 @@
       assert(lp->_parent == this, "immediate child");
       for (int k = 0; k < lp->_exits.length(); k++) {
         Block* eb = lp->_exits.at(k).get_target();
-        float prob = lp->_exits.at(k).get_prob();
+        double prob = lp->_exits.at(k).get_prob();
         update_succ_freq(eb, freq * prob);
       }
     }
@@ -1688,7 +1688,7 @@
   // inner blocks do not get erroneously scaled.
   if (_depth != 0) {
     // Total the exit probabilities for this loop.
-    float exits_sum = 0.0f;
+    double exits_sum = 0.0;
     for (int i = 0; i < _exits.length(); i++) {
       exits_sum += _exits.at(i).get_prob();
     }
@@ -1935,7 +1935,7 @@
 //------------------------------update_succ_freq-------------------------------
 // Update the appropriate frequency associated with block 'b', a successor of
 // a block in this loop.
-void CFGLoop::update_succ_freq(Block* b, float freq) {
+void CFGLoop::update_succ_freq(Block* b, double freq) {
   if (b->_loop == this) {
     if (b == head()) {
       // back branch within the loop
@@ -1976,11 +1976,11 @@
 // Scale frequency of loops and blocks by trip counts from outer loops
 // Do a top down traversal of loop tree (visit outer loops first.)
 void CFGLoop::scale_freq() {
-  float loop_freq = _freq * trip_count();
+  double loop_freq = _freq * trip_count();
   _freq = loop_freq;
   for (int i = 0; i < _members.length(); i++) {
     CFGElement* s = _members.at(i);
-    float block_freq = s->_freq * loop_freq;
+    double block_freq = s->_freq * loop_freq;
     if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY)
       block_freq = MIN_BLOCK_FREQUENCY;
     s->_freq = block_freq;
@@ -1993,7 +1993,7 @@
 }
 
 // Frequency of outer loop
-float CFGLoop::outer_loop_freq() const {
+double CFGLoop::outer_loop_freq() const {
   if (_child != NULL) {
     return _child->_freq;
   }
@@ -2042,7 +2042,7 @@
       k = 0;
     }
     Block *blk = _exits.at(i).get_target();
-    float prob = _exits.at(i).get_prob();
+    double prob = _exits.at(i).get_prob();
     tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100));
   }
   tty->print("\n");
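
compute_freq() and scale_freq() above propagate frequencies in two passes: each loop body is first solved assuming a head frequency of 1.0, then scale_freq() walks the loop tree from the outside in, multiplying members by the enclosing trip count and clamping NaN or vanishing values to MIN_BLOCK_FREQUENCY. A compact sketch of the scaling pass (assumed simplified structures, not the C2 types):

#include <cmath>
#include <vector>

struct LoopElem {
  double freq;                      // local frequency, head assumed 1.0
  std::vector<LoopElem*> members;   // blocks and nested loops
  double trip_count;                // 1.0 / exit probability
};

static const double MIN_BLOCK_FREQ = 1e-35;  // stand-in for MIN_BLOCK_FREQUENCY

static void scale_freq(LoopElem* loop) {
  double loop_freq = loop->freq * loop->trip_count;
  loop->freq = loop_freq;
  for (LoopElem* m : loop->members) {
    double f = m->freq * loop_freq;
    if (std::isnan(f) || f < MIN_BLOCK_FREQ) f = MIN_BLOCK_FREQ;  // clamp
    m->freq = f;
    if (!m->members.empty()) scale_freq(m);  // recurse into nested loops
  }
}
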
--- a/src/share/vm/opto/graphKit.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/opto/graphKit.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -612,9 +612,10 @@
   // Usual case:  Bail to interpreter.
   // Reserve the right to recompile if we haven't seen anything yet.
 
+  assert(!Deoptimization::reason_is_speculate(reason), "unsupported");
   Deoptimization::DeoptAction action = Deoptimization::Action_maybe_recompile;
   if (treat_throw_as_hot
-      && (method()->method_data()->trap_recompiled_at(bci())
+      && (method()->method_data()->trap_recompiled_at(bci(), NULL)
           || C->too_many_traps(reason))) {
     // We cannot afford to take more traps here.  Suffer in the interpreter.
     if (C->log() != NULL)
@@ -2145,7 +2146,7 @@
  *
  * @param n  receiver node
  *
- * @return           node with improved type
+ * @return   node with improved type
  */
 Node* GraphKit::record_profiled_receiver_for_speculation(Node* n) {
   if (!UseTypeSpeculation) {
@@ -2739,12 +2740,14 @@
 // Subsequent type checks will always fold up.
 Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
                                              ciKlass* require_klass,
-                                            ciKlass* spec_klass,
+                                             ciKlass* spec_klass,
                                              bool safe_for_replace) {
   if (!UseTypeProfile || !TypeProfileCasts) return NULL;
 
+  Deoptimization::DeoptReason reason = spec_klass == NULL ? Deoptimization::Reason_class_check : Deoptimization::Reason_speculate_class_check;
+
   // Make sure we haven't already deoptimized from this tactic.
-  if (too_many_traps(Deoptimization::Reason_class_check))
+  if (too_many_traps(reason))
     return NULL;
 
   // (No, this isn't a call, but it's enough like a virtual call
@@ -2766,7 +2769,7 @@
                                             &exact_obj);
       { PreserveJVMState pjvms(this);
         set_control(slow_ctl);
-        uncommon_trap(Deoptimization::Reason_class_check,
+        uncommon_trap(reason,
                       Deoptimization::Action_maybe_recompile);
       }
       if (safe_for_replace) {
@@ -2793,8 +2796,10 @@
                                         bool not_null) {
   // type == NULL if profiling tells us this object is always null
   if (type != NULL) {
-    if (!too_many_traps(Deoptimization::Reason_null_check) &&
-        !too_many_traps(Deoptimization::Reason_class_check)) {
+    Deoptimization::DeoptReason class_reason = Deoptimization::Reason_speculate_class_check;
+    Deoptimization::DeoptReason null_reason = Deoptimization::Reason_null_check;
+    if (!too_many_traps(null_reason) &&
+        !too_many_traps(class_reason)) {
       Node* not_null_obj = NULL;
       // not_null is true if we know the object is not null and
       // there's no need for a null check
@@ -2813,7 +2818,7 @@
       {
         PreserveJVMState pjvms(this);
         set_control(slow_ctl);
-        uncommon_trap(Deoptimization::Reason_class_check,
+        uncommon_trap(class_reason,
                       Deoptimization::Action_maybe_recompile);
       }
       replace_in_map(not_null_obj, exact_obj);
@@ -2882,7 +2887,7 @@
   }
 
   if (known_statically && UseTypeSpeculation) {
-    // If we know the type check always succeed then we don't use the
+    // If we know the type check always succeeds then we don't use the
     // profiling data at this bytecode. Don't lose it, feed it to the
     // type system as a speculative type.
     not_null_obj = record_profiled_receiver_for_speculation(not_null_obj);
--- a/src/share/vm/opto/graphKit.hpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/opto/graphKit.hpp	Wed Feb 26 02:38:46 2014 -0800
@@ -406,7 +406,7 @@
   // Use the type profile to narrow an object type.
   Node* maybe_cast_profiled_receiver(Node* not_null_obj,
                                      ciKlass* require_klass,
-                                    ciKlass* spec,
+                                     ciKlass* spec,
                                      bool safe_for_replace);
 
   // Cast obj to type and emit guard unless we had too many traps here already
--- a/src/share/vm/opto/ifg.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/opto/ifg.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -439,8 +439,8 @@
       }
     }
   }
-  assert(int_pressure._current_pressure == count_int_pressure(liveout), "the int pressure is incorrect");
-  assert(float_pressure._current_pressure == count_float_pressure(liveout), "the float pressure is incorrect");
+  assert(int_pressure.current_pressure() == count_int_pressure(liveout), "the int pressure is incorrect");
+  assert(float_pressure.current_pressure() == count_float_pressure(liveout), "the float pressure is incorrect");
 }
 
 /* Go to the first non-phi index in a block */
@@ -513,8 +513,8 @@
     raise_pressure(b, lrg, int_pressure, float_pressure);
     lid = elements.next();
   }
-  assert(int_pressure._current_pressure == count_int_pressure(liveout), "the int pressure is incorrect");
-  assert(float_pressure._current_pressure == count_float_pressure(liveout), "the float pressure is incorrect");
+  assert(int_pressure.current_pressure() == count_int_pressure(liveout), "the int pressure is incorrect");
+  assert(float_pressure.current_pressure() == count_float_pressure(liveout), "the float pressure is incorrect");
 }
 
 /*
@@ -548,17 +548,7 @@
 void PhaseChaitin::check_for_high_pressure_transition_at_fatproj(uint& block_reg_pressure, uint location, LRG& lrg, Pressure& pressure, const int op_regtype) {
   RegMask mask_tmp = lrg.mask();
   mask_tmp.AND(*Matcher::idealreg2regmask[op_regtype]);
-  // this pressure is only valid at this instruction, i.e. we don't need to lower
-  // the register pressure since the fat proj was never live before (going backwards)
-  uint new_pressure = pressure._current_pressure + mask_tmp.Size();
-  if (new_pressure > pressure._final_pressure) {
-    pressure._final_pressure = new_pressure;
-  }
-  // if we were at a low pressure and now at the fat proj is at high pressure, record the fat proj location
-  // as coming from a low to high (to low again)
-  if (pressure._current_pressure <= pressure._high_pressure_limit && new_pressure > pressure._high_pressure_limit) {
-    pressure._high_pressure_index = location;
-  }
+  pressure.check_pressure_at_fatproj(location, mask_tmp);
 }
 
 /*
@@ -700,23 +690,23 @@
       // Newly live things assumed live from here to top of block
       lrg._area += cost;
       raise_pressure(b, lrg, int_pressure, float_pressure);
-      assert(int_pressure._current_pressure == count_int_pressure(liveout), "the int pressure is incorrect");
-      assert(float_pressure._current_pressure == count_float_pressure(liveout), "the float pressure is incorrect");
+      assert(int_pressure.current_pressure() == count_int_pressure(liveout), "the int pressure is incorrect");
+      assert(float_pressure.current_pressure() == count_float_pressure(liveout), "the float pressure is incorrect");
     }
-    assert(!(lrg._area < 0.0), "negative spill area" );
+    assert(lrg._area >= 0.0, "negative spill area" );
   }
 }
 
 /*
  * If we run off the top of the block with high pressure just record that the
  * whole block is high pressure. (Even though we might have a transition
- * lower down in the block)
+ * later in the block)
  */
 void PhaseChaitin::check_for_high_pressure_block(Pressure& pressure) {
   // current pressure now means the pressure before the first instruction in the block
   // (since we have stepped through all instructions backwards)
-  if (pressure._current_pressure > pressure._high_pressure_limit) {
-    pressure._high_pressure_index = 0;
+  if (pressure.current_pressure() > pressure.high_pressure_limit()) {
+    pressure.set_high_pressure_index_to_block_start();
   }
 }
 
@@ -725,7 +715,7 @@
  * and set the high pressure index for the block
  */
 void PhaseChaitin::adjust_high_pressure_index(Block* b, uint& block_hrp_index, Pressure& pressure) {
-  uint i = pressure._high_pressure_index;
+  uint i = pressure.high_pressure_index();
   if (i < b->number_of_nodes() && i < b->end_idx() + 1) {
     Node* cur = b->get_node(i);
     while (cur->is_Proj() || (cur->is_MachNullCheck()) || cur->is_Catch()) {
@@ -772,7 +762,7 @@
 
     int inst_count = last_inst - first_inst;
     double cost = (inst_count <= 0) ? 0.0 : block->_freq * double(inst_count);
-    assert(!(cost < 0.0), "negative spill cost" );
+    assert(cost >= 0.0, "negative spill cost" );
 
     compute_initial_block_pressure(block, &liveout, int_pressure, float_pressure, cost);
 
@@ -789,8 +779,8 @@
 
         if (!liveout.member(lid) && n->Opcode() != Op_SafePoint) {
           if (remove_node_if_not_used(block, location, n, lid, &liveout)) {
-            float_pressure._high_pressure_index--;
-            int_pressure._high_pressure_index--;
+            float_pressure.lower_high_pressure_index();
+            int_pressure.lower_high_pressure_index();
             continue;
           }
           if (lrg._fat_proj) {
@@ -799,7 +789,11 @@
           }
         } else {
           // A live range ends at its definition, remove the remaining area.
-          lrg._area -= cost;
+          // If the cost is +Inf (which might happen in extreme cases), the lrg area will also be +Inf,
+          // and +Inf - +Inf = NaN. So let's not do that subtraction.
+          if (g_isfinite(cost)) {
+            lrg._area -= cost;
+          }
           assert(lrg._area >= 0.0, "negative spill area" );
 
           assign_high_score_to_immediate_copies(block, n, lrg, location + 1, last_inst);
@@ -837,13 +831,13 @@
     adjust_high_pressure_index(block, block->_ihrp_index, int_pressure);
     adjust_high_pressure_index(block, block->_fhrp_index, float_pressure);
     // set the final_pressure as the register pressure for the block
-    block->_reg_pressure = int_pressure._final_pressure;
-    block->_freg_pressure = float_pressure._final_pressure;
+    block->_reg_pressure = int_pressure.final_pressure();
+    block->_freg_pressure = float_pressure.final_pressure();
 
 #ifndef PRODUCT
     // Gather Register Pressure Statistics
     if (PrintOptoStatistics) {
-      if (block->_reg_pressure > int_pressure._high_pressure_limit || block->_freg_pressure > float_pressure._high_pressure_limit) {
+      if (block->_reg_pressure > int_pressure.high_pressure_limit() || block->_freg_pressure > float_pressure.high_pressure_limit()) {
         _high_pressure++;
       } else {
         _low_pressure++;
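
The g_isfinite guard added above exists because cost is block frequency times instruction count: once a frequency has saturated to +Inf, the old code computed lrg._area -= cost as Inf - Inf, which is NaN, and NaN then fails the "negative spill area" assert because NaN >= 0.0 is false. The IEEE behaviour being avoided, in a few lines:

#include <cmath>
#include <cstdio>

int main() {
  double area = INFINITY;   // lrg._area after infinite costs were accumulated
  double cost = INFINITY;
  double diff = area - cost;
  printf("inf - inf = %f\n", diff);            // prints nan
  printf("nan >= 0.0 -> %d\n", diff >= 0.0);   // 0: the assert would fire
  if (std::isfinite(cost)) {                   // the fix: skip the subtraction
    area -= cost;
  }
  return 0;
}
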
--- a/src/share/vm/opto/ifnode.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/opto/ifnode.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -76,7 +76,6 @@
   if( !i1->is_Bool() ) return NULL;
   BoolNode *b = i1->as_Bool();
   Node *cmp = b->in(1);
-  if( cmp->is_FlagsProj() ) return NULL;
   if( !cmp->is_Cmp() ) return NULL;
   i1 = cmp->in(1);
   if( i1 == NULL || !i1->is_Phi() ) return NULL;
--- a/src/share/vm/opto/lcm.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/opto/lcm.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -520,13 +520,6 @@
           break;
         }
 
-        // For nodes that produce a FlagsProj, make the node adjacent to the
-        // use of the FlagsProj
-        if (use->is_FlagsProj() && get_block_for_node(use) == block) {
-          found_machif = true;
-          break;
-        }
-
         // More than this instruction pending for successor to be ready,
         // don't choose this if other opportunities are ready
         if (ready_cnt.at(use->_idx) > 1)
--- a/src/share/vm/opto/library_call.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/opto/library_call.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -203,7 +203,9 @@
   bool inline_math_native(vmIntrinsics::ID id);
   bool inline_trig(vmIntrinsics::ID id);
   bool inline_math(vmIntrinsics::ID id);
-  void inline_math_mathExact(Node* math);
+  template <typename OverflowOp>
+  bool inline_math_overflow(Node* arg1, Node* arg2);
+  void inline_math_mathExact(Node* math, Node* test);
   bool inline_math_addExactI(bool is_increment);
   bool inline_math_addExactL(bool is_increment);
   bool inline_math_multiplyExactI();
@@ -517,31 +519,31 @@
 
   case vmIntrinsics::_incrementExactI:
   case vmIntrinsics::_addExactI:
-    if (!Matcher::match_rule_supported(Op_AddExactI) || !UseMathExactIntrinsics) return NULL;
+    if (!Matcher::match_rule_supported(Op_OverflowAddI) || !UseMathExactIntrinsics) return NULL;
     break;
   case vmIntrinsics::_incrementExactL:
   case vmIntrinsics::_addExactL:
-    if (!Matcher::match_rule_supported(Op_AddExactL) || !UseMathExactIntrinsics) return NULL;
+    if (!Matcher::match_rule_supported(Op_OverflowAddL) || !UseMathExactIntrinsics) return NULL;
     break;
   case vmIntrinsics::_decrementExactI:
   case vmIntrinsics::_subtractExactI:
-    if (!Matcher::match_rule_supported(Op_SubExactI) || !UseMathExactIntrinsics) return NULL;
+    if (!Matcher::match_rule_supported(Op_OverflowSubI) || !UseMathExactIntrinsics) return NULL;
     break;
   case vmIntrinsics::_decrementExactL:
   case vmIntrinsics::_subtractExactL:
-    if (!Matcher::match_rule_supported(Op_SubExactL) || !UseMathExactIntrinsics) return NULL;
+    if (!Matcher::match_rule_supported(Op_OverflowSubL) || !UseMathExactIntrinsics) return NULL;
     break;
   case vmIntrinsics::_negateExactI:
-    if (!Matcher::match_rule_supported(Op_NegExactI) || !UseMathExactIntrinsics) return NULL;
+    if (!Matcher::match_rule_supported(Op_OverflowSubI) || !UseMathExactIntrinsics) return NULL;
     break;
   case vmIntrinsics::_negateExactL:
-    if (!Matcher::match_rule_supported(Op_NegExactL) || !UseMathExactIntrinsics) return NULL;
+    if (!Matcher::match_rule_supported(Op_OverflowSubL) || !UseMathExactIntrinsics) return NULL;
     break;
   case vmIntrinsics::_multiplyExactI:
-    if (!Matcher::match_rule_supported(Op_MulExactI) || !UseMathExactIntrinsics) return NULL;
+    if (!Matcher::match_rule_supported(Op_OverflowMulI) || !UseMathExactIntrinsics) return NULL;
     break;
   case vmIntrinsics::_multiplyExactL:
-    if (!Matcher::match_rule_supported(Op_MulExactL) || !UseMathExactIntrinsics) return NULL;
+    if (!Matcher::match_rule_supported(Op_OverflowMulL) || !UseMathExactIntrinsics) return NULL;
     break;
 
  default:
@@ -1937,7 +1939,7 @@
     runtime_math(OptoRuntime::Math_D_D_Type(), FN_PTR(SharedRuntime::dlog10), "LOG10");
 
     // These intrinsics are supported on all hardware
-  case vmIntrinsics::_dsqrt:  return Matcher::has_match_rule(Op_SqrtD)  ? inline_math(id) : false;
+  case vmIntrinsics::_dsqrt:  return Matcher::match_rule_supported(Op_SqrtD) ? inline_math(id) : false;
   case vmIntrinsics::_dabs:   return Matcher::has_match_rule(Op_AbsD)   ? inline_math(id) : false;
 
   case vmIntrinsics::_dexp:   return Matcher::has_match_rule(Op_ExpD)   ? inline_exp()    :
@@ -1970,18 +1972,8 @@
   return true;
 }
 
-void LibraryCallKit::inline_math_mathExact(Node* math) {
-  // If we didn't get the expected opcode it means we have optimized
-  // the node to something else and don't need the exception edge.
-  if (!math->is_MathExact()) {
-    set_result(math);
-    return;
-  }
-
-  Node* result = _gvn.transform( new(C) ProjNode(math, MathExactNode::result_proj_node));
-  Node* flags = _gvn.transform( new(C) FlagsProjNode(math, MathExactNode::flags_proj_node));
-
-  Node* bol = _gvn.transform( new (C) BoolNode(flags, BoolTest::overflow) );
+void LibraryCallKit::inline_math_mathExact(Node* math, Node* test) {
+  Node* bol = _gvn.transform( new (C) BoolNode(test, BoolTest::overflow) );
   IfNode* check = create_and_map_if(control(), bol, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN);
   Node* fast_path = _gvn.transform( new (C) IfFalseNode(check));
   Node* slow_path = _gvn.transform( new (C) IfTrueNode(check) );
@@ -1999,108 +1991,50 @@
   }
 
   set_control(fast_path);
-  set_result(result);
+  set_result(math);
 }
 
-bool LibraryCallKit::inline_math_addExactI(bool is_increment) {
-  Node* arg1 = argument(0);
-  Node* arg2 = NULL;
-
-  if (is_increment) {
-    arg2 = intcon(1);
-  } else {
-    arg2 = argument(1);
-  }
-
-  Node* add = _gvn.transform( new(C) AddExactINode(NULL, arg1, arg2) );
-  inline_math_mathExact(add);
+template <typename OverflowOp>
+bool LibraryCallKit::inline_math_overflow(Node* arg1, Node* arg2) {
+  typedef typename OverflowOp::MathOp MathOp;
+
+  MathOp* mathOp = new(C) MathOp(arg1, arg2);
+  Node* operation = _gvn.transform( mathOp );
+  Node* ofcheck = _gvn.transform( new(C) OverflowOp(arg1, arg2) );
+  inline_math_mathExact(operation, ofcheck);
   return true;
 }
 
+bool LibraryCallKit::inline_math_addExactI(bool is_increment) {
+  return inline_math_overflow<OverflowAddINode>(argument(0), is_increment ? intcon(1) : argument(1));
+}
+
 bool LibraryCallKit::inline_math_addExactL(bool is_increment) {
-  Node* arg1 = argument(0); // type long
-  // argument(1) == TOP
-  Node* arg2 = NULL;
-
-  if (is_increment) {
-    arg2 = longcon(1);
-  } else {
-    arg2 = argument(2); // type long
-    // argument(3) == TOP
-  }
-
-  Node* add = _gvn.transform(new(C) AddExactLNode(NULL, arg1, arg2));
-  inline_math_mathExact(add);
-  return true;
+  return inline_math_overflow<OverflowAddLNode>(argument(0), is_increment ? longcon(1) : argument(2));
 }
 
 bool LibraryCallKit::inline_math_subtractExactI(bool is_decrement) {
-  Node* arg1 = argument(0);
-  Node* arg2 = NULL;
-
-  if (is_decrement) {
-    arg2 = intcon(1);
-  } else {
-    arg2 = argument(1);
-  }
-
-  Node* sub = _gvn.transform(new(C) SubExactINode(NULL, arg1, arg2));
-  inline_math_mathExact(sub);
-  return true;
+  return inline_math_overflow<OverflowSubINode>(argument(0), is_decrement ? intcon(1) : argument(1));
 }
 
 bool LibraryCallKit::inline_math_subtractExactL(bool is_decrement) {
-  Node* arg1 = argument(0); // type long
-  // argument(1) == TOP
-  Node* arg2 = NULL;
-
-  if (is_decrement) {
-    arg2 = longcon(1);
-  } else {
-    arg2 = argument(2); // type long
-    // argument(3) == TOP
-  }
-
-  Node* sub = _gvn.transform(new(C) SubExactLNode(NULL, arg1, arg2));
-  inline_math_mathExact(sub);
-  return true;
+  return inline_math_overflow<OverflowSubLNode>(argument(0), is_decrement ? longcon(1) : argument(2));
 }
 
 bool LibraryCallKit::inline_math_negateExactI() {
-  Node* arg1 = argument(0);
-
-  Node* neg = _gvn.transform(new(C) NegExactINode(NULL, arg1));
-  inline_math_mathExact(neg);
-  return true;
+  return inline_math_overflow<OverflowSubINode>(intcon(0), argument(0));
 }
 
 bool LibraryCallKit::inline_math_negateExactL() {
-  Node* arg1 = argument(0);
-  // argument(1) == TOP
-
-  Node* neg = _gvn.transform(new(C) NegExactLNode(NULL, arg1));
-  inline_math_mathExact(neg);
-  return true;
+  return inline_math_overflow<OverflowSubLNode>(longcon(0), argument(0));
 }
 
 bool LibraryCallKit::inline_math_multiplyExactI() {
-  Node* arg1 = argument(0);
-  Node* arg2 = argument(1);
-
-  Node* mul = _gvn.transform(new(C) MulExactINode(NULL, arg1, arg2));
-  inline_math_mathExact(mul);
-  return true;
+  return inline_math_overflow<OverflowMulINode>(argument(0), argument(1));
 }
 
 bool LibraryCallKit::inline_math_multiplyExactL() {
-  Node* arg1 = argument(0);
-  // argument(1) == TOP
-  Node* arg2 = argument(2);
-  // argument(3) == TOP
-
-  Node* mul = _gvn.transform(new(C) MulExactLNode(NULL, arg1, arg2));
-  inline_math_mathExact(mul);
-  return true;
+  return inline_math_overflow<OverflowMulLNode>(argument(0), argument(2));
 }
 
 Node*
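
All of the rewritten exact-math intrinsics above funnel into one template: each Overflow*Node names its companion arithmetic node through a nested MathOp typedef, so inline_math_overflow can build both the result node and the overflow-test node from a single type parameter, and negateExact simply reuses the subtract variant with a zero constant. A self-contained sketch of the pattern (toy types, not the C2 node classes):

#include <cstdio>

struct AddI { static int apply(int a, int b) { return a + b; } };

struct OverflowAddI {
  typedef AddI MathOp;  // companion arithmetic op, as in the Overflow*Node classes
  static bool test(int a, int b) {
    int r = (int)((unsigned)a + (unsigned)b);
    return ((a ^ r) & (b ^ r)) < 0;   // signed-add overflow check
  }
};

// One template body serves addExact, subtractExact, negateExact, ...
template <typename OverflowOp>
static int math_exact(int a, int b) {
  typedef typename OverflowOp::MathOp MathOp;
  if (OverflowOp::test(a, b)) return 0;  // stand-in for the deoptimization path
  return MathOp::apply(a, b);
}

int main() {
  printf("%d\n", math_exact<OverflowAddI>(2, 3));  // prints 5
  return 0;
}
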
--- a/src/share/vm/opto/loopTransform.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/opto/loopTransform.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -713,10 +713,6 @@
       case Op_ModL: body_size += 30; break;
       case Op_DivL: body_size += 30; break;
       case Op_MulL: body_size += 10; break;
-      case Op_FlagsProj:
-        // Can't handle unrolling of loops containing
-        // nodes that generate a FlagsProj at the moment
-        return false;
       case Op_StrComp:
       case Op_StrEquals:
       case Op_StrIndexOf:
@@ -780,10 +776,6 @@
         continue; // not RC
 
       Node *cmp = bol->in(1);
-      if (cmp->is_FlagsProj()) {
-        continue;
-      }
-
       Node *rc_exp = cmp->in(1);
       Node *limit = cmp->in(2);
 
--- a/src/share/vm/opto/loopopts.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/opto/loopopts.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -43,12 +43,6 @@
     return NULL;
   }
 
-  if (n->is_MathExact()) {
-    // MathExact has projections that are not correctly handled in the code
-    // below.
-    return NULL;
-  }
-
   int wins = 0;
   assert(!n->is_CFG(), "");
   assert(region->is_Region(), "");
@@ -2362,8 +2356,7 @@
         opc == Op_Catch     ||
         opc == Op_CatchProj ||
         opc == Op_Jump      ||
-        opc == Op_JumpProj  ||
-        opc == Op_FlagsProj) {
+        opc == Op_JumpProj) {
 #if !defined(PRODUCT)
       if (TracePartialPeeling) {
         tty->print_cr("\nExit control too complex: lp: %d", head->_idx);
--- a/src/share/vm/opto/machnode.hpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/opto/machnode.hpp	Wed Feb 26 02:38:46 2014 -0800
@@ -520,12 +520,33 @@
 // Machine SpillCopy Node.  Copies 1 or 2 words from any location to any
 // location (stack or register).
 class MachSpillCopyNode : public MachIdealNode {
+public:
+  enum SpillType {
+    TwoAddress,                        // Inserted when coalescing of a two-address-instruction node and its input fails
+    PhiInput,                          // Inserted when coalescing of a phi node and its input fails
+    DebugUse,                          // Inserted as debug info spills to safepoints in non-frequent blocks
+    LoopPhiInput,                      // Pre-split compares of loop-phis
+    Definition,                        // An lrg marked as spilled will be spilled to memory right after its definition,
+                                       // if in a high-pressure region or if the lrg is bound
+    RegToReg,                          // A register to register move
+    RegToMem,                          // A register to memory move
+    MemToReg,                          // A memory to register move
+    PhiLocationDifferToInputLocation,  // When coalescing phi nodes in PhaseChaitin::Split(), a move spill is inserted if
+                                       // the phi and its input reside at different locations (i.e. reg or mem)
+    BasePointerToMem,                  // Spill base pointer to memory at safepoint
+    InputToRematerialization,          // When rematerializing a node we stretch the inputs' live ranges, and they might be
+                                       // stretched beyond a new definition point, therefore we split out new copies instead
+    CallUse,                           // Spill use at a call
+    Bound                              // An lrg marked as spill that is bound and needs to be spilled at a use
+  };
+private:
   const RegMask *_in;           // RegMask for input
   const RegMask *_out;          // RegMask for output
   const Type *_type;
+  const SpillType _spill_type;
 public:
-  MachSpillCopyNode( Node *n, const RegMask &in, const RegMask &out ) :
-    MachIdealNode(), _in(&in), _out(&out), _type(n->bottom_type()) {
+  MachSpillCopyNode(SpillType spill_type, Node *n, const RegMask &in, const RegMask &out ) :
+    MachIdealNode(), _spill_type(spill_type), _in(&in), _out(&out), _type(n->bottom_type()) {
     init_class_id(Class_MachSpillCopy);
     init_flags(Flag_is_Copy);
     add_req(NULL);
@@ -544,8 +565,42 @@
   virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
   virtual uint size(PhaseRegAlloc *ra_) const;
 
+
 #ifndef PRODUCT
-  virtual const char *Name() const { return "MachSpillCopy"; }
+  virtual const char *Name() const {
+    switch (_spill_type) {
+      case TwoAddress:
+        return "TwoAddressSpillCopy";
+      case PhiInput:
+        return "PhiInputSpillCopy";
+      case DebugUse:
+        return "DebugUseSpillCopy";
+      case LoopPhiInput:
+        return "LoopPhiInputSpillCopy";
+      case Definition:
+        return "DefinitionSpillCopy";
+      case RegToReg:
+        return "RegToRegSpillCopy";
+      case RegToMem:
+        return "RegToMemSpillCopy";
+      case MemToReg:
+        return "MemToRegSpillCopy";
+      case PhiLocationDifferToInputLocation:
+        return "PhiLocationDifferToInputLocationSpillCopy";
+      case BasePointerToMem:
+        return "BasePointerToMemSpillCopy";
+      case InputToRematerialization:
+        return "InputToRematerializationSpillCopy";
+      case CallUse:
+        return "CallUseSpillCopy";
+      case Bound:
+        return "BoundSpillCopy";
+      default:
+        assert(false, "Must have valid spill type");
+        return "MachSpillCopy";
+    }
+  }
+
   virtual void format( PhaseRegAlloc *, outputStream *st ) const;
 #endif
 };
--- a/src/share/vm/opto/matcher.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/opto/matcher.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -1998,7 +1998,6 @@
       case Op_Catch:
       case Op_CatchProj:
       case Op_CProj:
-      case Op_FlagsProj:
       case Op_JumpProj:
       case Op_JProj:
       case Op_NeverBranch:
--- a/src/share/vm/opto/matcher.hpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/opto/matcher.hpp	Wed Feb 26 02:38:46 2014 -0800
@@ -340,10 +340,6 @@
   // Register for MODL projection of divmodL
   static RegMask modL_proj_mask();
 
-  static const RegMask mathExactI_result_proj_mask();
-  static const RegMask mathExactL_result_proj_mask();
-  static const RegMask mathExactI_flags_proj_mask();
-
   // Use hardware DIV instruction when it is faster than
   // a code which use multiply for division by constant.
   static bool use_asm_for_ldiv_by_con( jlong divisor );
--- a/src/share/vm/opto/mathexactnode.cpp	Wed Feb 26 11:29:47 2014 +0100
+++ b/src/share/vm/opto/mathexactnode.cpp	Wed Feb 26 02:38:46 2014 -0800
@@ -31,358 +31,93 @@
 #include "opto/mathexactnode.hpp"
 #include "opto/subnode.hpp"
 
-MathExactNode::MathExactNode(Node* ctrl, Node* in1) : MultiNode(2) {