changeset 28538:48d009ddfd46

Merge
author prr
date Mon, 05 Jan 2015 13:30:23 -0800
parents 0a030920b0f5 ad51f784a352
children bb420dbad9d2 a492b5c5dc53
files hotspot/test/testlibrary/whitebox/Makefile hotspot/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java hotspot/test/testlibrary/whitebox/sun/hotspot/code/BlobType.java hotspot/test/testlibrary/whitebox/sun/hotspot/code/CodeBlob.java hotspot/test/testlibrary/whitebox/sun/hotspot/code/NMethod.java hotspot/test/testlibrary/whitebox/sun/hotspot/cpuinfo/CPUInfo.java hotspot/test/testlibrary/whitebox/sun/hotspot/parser/DiagnosticCommand.java langtools/test/tools/sjavac/SJavac.java
diffstat 517 files changed, 10769 insertions(+), 4842 deletions(-)
--- a/.hgtags	Tue Dec 30 22:13:53 2014 +0300
+++ b/.hgtags	Mon Jan 05 13:30:23 2015 -0800
@@ -285,3 +285,5 @@
 82f4cb44b2d7af2352f48568a64b7b6a5ae960cd jdk9-b40
 9fffb959eb4197ff806e4ac12244761815b4deee jdk9-b41
 3107be2ba9c6e208a0b86bc7100a141abbc5b5fb jdk9-b42
+6494b13f88a867026ee316b444d9a4fa589dd6bd jdk9-b43
+abbfccd659b91a7bb815d5e36fed635dcdd40f31 jdk9-b44
--- a/.hgtags-top-repo	Tue Dec 30 22:13:53 2014 +0300
+++ b/.hgtags-top-repo	Mon Jan 05 13:30:23 2015 -0800
@@ -285,3 +285,5 @@
 cf136458ee747e151a27aa9ea0c1492ea55ef3e7 jdk9-b40
 67395f7ca2db3b52e3a62a84888487de5cb9210a jdk9-b41
 f7c11da0b0481d49cc7a65a453336c108191e821 jdk9-b42
+02ee8c65622e8bd97496d584e22fc7dcf0edc4ae jdk9-b43
+8994f5d87b3bb5e8d317d4e8ccb326da1a73684a jdk9-b44
--- a/common/autoconf/generated-configure.sh	Tue Dec 30 22:13:53 2014 +0300
+++ b/common/autoconf/generated-configure.sh	Mon Jan 05 13:30:23 2015 -0800
@@ -4329,7 +4329,7 @@
 #CUSTOM_AUTOCONF_INCLUDE
 
 # Do not change or remove the following line, it is needed for consistency checks:
-DATE_WHEN_GENERATED=1418036274
+DATE_WHEN_GENERATED=1418395009
 
 ###############################################################################
 #
@@ -13965,7 +13965,8 @@
 
   # ZERO_ARCHDEF is used to enable architecture-specific code
   case "${OPENJDK_TARGET_CPU}" in
-    ppc*)    ZERO_ARCHDEF=PPC   ;;
+    ppc)     ZERO_ARCHDEF=PPC32 ;;
+    ppc64)   ZERO_ARCHDEF=PPC64 ;;
     s390*)   ZERO_ARCHDEF=S390  ;;
     sparc*)  ZERO_ARCHDEF=SPARC ;;
     x86_64*) ZERO_ARCHDEF=AMD64 ;;
--- a/common/autoconf/platform.m4	Tue Dec 30 22:13:53 2014 +0300
+++ b/common/autoconf/platform.m4	Mon Jan 05 13:30:23 2015 -0800
@@ -367,7 +367,8 @@
 
   # ZERO_ARCHDEF is used to enable architecture-specific code
   case "${OPENJDK_TARGET_CPU}" in
-    ppc*)    ZERO_ARCHDEF=PPC   ;;
+    ppc)     ZERO_ARCHDEF=PPC32 ;;
+    ppc64)   ZERO_ARCHDEF=PPC64 ;;
     s390*)   ZERO_ARCHDEF=S390  ;;
     sparc*)  ZERO_ARCHDEF=SPARC ;;
     x86_64*) ZERO_ARCHDEF=AMD64 ;;
--- a/common/bin/hgforest.sh	Tue Dec 30 22:13:53 2014 +0300
+++ b/common/bin/hgforest.sh	Mon Jan 05 13:30:23 2015 -0800
@@ -106,12 +106,15 @@
   echo "# Mercurial command: ${command}" > ${status_output}
 fi
 
-
-# capture command options and arguments (if any)
-command_args="${@:-}"
+# At this point all command options and args are in "$@".
+# Always use "$@" (within double quotes) to avoid breaking
+# args with spaces into separate args.
 
 if [ ${vflag} = "true" ] ; then
-  echo "# Mercurial command arguments: ${command_args}" > ${status_output}
+  echo "# Mercurial command argument count: $#" > ${status_output}
+  for cmdarg in "$@" ; do
+    echo "# Mercurial command argument: ${cmdarg}" > ${status_output}
+  done
 fi
 
 # Clean out the temporary directory that stores the pid files.
@@ -205,13 +208,14 @@
 
   pull_default_tail=`echo ${pull_default} | sed -e 's@^.*://[^/]*/\(.*\)@\1@'`
 
-  if [ -n "${command_args}" ] ; then
+  if [ $# -gt 0 ] ; then
     # if there is an "extra sources" path then reparent "extra" repos to that path
     if [ "x${pull_default}" = "x${pull_default_tail}" ] ; then
       echo "ERROR: Need initial clone from non-local source" > ${status_output}
       exit 1
     fi
-    pull_extra="${command_args}/${pull_default_tail}"
+    # assume that "extra sources" path is the first arg
+    pull_extra="${1}/${pull_default_tail}"
 
     # determine which extra subrepos need to be cloned.
     for i in ${subrepos_extra} ; do
@@ -356,8 +360,8 @@
           (PYTHONUNBUFFERED=true hg${global_opts} clone ${clone_newrepo} ${i}; echo "$?" > ${tmp}/${repopidfile}.pid.rc ) 2>&1 &
         else
           # run the command.
-          echo "cd ${i} && hg${global_opts} ${command} ${command_args}" > ${status_output}
-          cd ${i} && (PYTHONUNBUFFERED=true hg${global_opts} ${command} ${command_args}; echo "$?" > ${tmp}/${repopidfile}.pid.rc ) 2>&1 &
+          echo "cd ${i} && hg${global_opts} ${command} ${@}" > ${status_output}
+          cd ${i} && (PYTHONUNBUFFERED=true hg${global_opts} ${command} "${@}"; echo "$?" > ${tmp}/${repopidfile}.pid.rc ) 2>&1 &
         fi
 
         echo $! > ${tmp}/${repopidfile}.pid
--- a/corba/.hgtags	Tue Dec 30 22:13:53 2014 +0300
+++ b/corba/.hgtags	Mon Jan 05 13:30:23 2015 -0800
@@ -285,3 +285,5 @@
 e27c725d6c9d155667b35255f442d4ceb8c3c084 jdk9-b40
 1908b886ba1eda46fa725cf1160fe5d30fd1a7e5 jdk9-b41
 078bb11af876fe528d4b516f33ad4dd9bb60549e jdk9-b42
+9645e35616b60c5c07b4fdf11a132afc8081dfa8 jdk9-b43
+1f57bd728c9e6865ccb9d43ccd80a1c11230a32f jdk9-b44
--- a/hotspot/.hgtags	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/.hgtags	Mon Jan 05 13:30:23 2015 -0800
@@ -445,3 +445,5 @@
 6b09b3193d731e3288e2a240c504a20d0a06c766 jdk9-b40
 1d29b13e8a515a7ea3b882f140576d5d675bc11f jdk9-b41
 38cb4fbd47e3472bd1b5ebac83bda96fe4869c4f jdk9-b42
+65a9747147b8090037541040ba67156ec914db6a jdk9-b43
+43a44b56dca61a4d766a20f0528fdd8b5ceff873 jdk9-b44
--- a/hotspot/agent/src/os/win32/windbg/sawindbg.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/agent/src/os/win32/windbg/sawindbg.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -22,6 +22,9 @@
  *
  */
 
+// Disable CRT security warning against strcpy/strcat
+#pragma warning(disable: 4996)
+
 // this is the source code of the windbg-based SA debugger agent used to debug
 // Dr. Watson dump files and process snapshots.
 
--- a/hotspot/agent/src/share/native/sadis.c	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/agent/src/share/native/sadis.c	Mon Jan 05 13:30:23 2015 -0800
@@ -33,6 +33,8 @@
  */
 
 #ifdef _WINDOWS
+// Disable CRT security warning against _snprintf
+#pragma warning (disable : 4996)
 
 #define snprintf  _snprintf
 #define vsnprintf _vsnprintf
@@ -90,12 +92,8 @@
     if (errno != 0)
     {
       /* C runtime error that has no corresponding DOS error code */
-      const char *s = strerror(errno);
-      size_t n = strlen(s);
-      if (n >= len) n = len - 1;
-      strncpy(buf, s, n);
-      buf[n] = '\0';
-      return (int)n;
+      strerror_s(buf, len, errno);
+      return strlen(buf);
     }
     return 0;
 }
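
The hunk above swaps a hand-rolled strncpy/NUL-terminate sequence for the CRT's strerror_s, which truncates and terminates in one call. A minimal sketch of the same pattern, assuming the Microsoft CRT is available (the helper name describe_errno is illustrative):

    #include <string.h>

    /* strerror_s always NUL-terminates and truncates to the buffer size,
       which the removed strncpy/buf[n] = '\0' sequence emulated by hand. */
    static int describe_errno(char *buf, size_t len, int err) {
      strerror_s(buf, len, err);   /* errno_t result ignored for brevity */
      return (int)strlen(buf);
    }
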
@@ -111,16 +109,30 @@
                                                                            jstring jrepath_s,
                                                                            jstring libname_s) {
   uintptr_t func = 0;
-  const char* error_message = NULL;
-  jboolean isCopy;
+  const char *error_message = NULL;
+  const char *jrepath = NULL;
+  const char *libname = NULL;
+  char buffer[128];
 
-  const char * jrepath = (*env)->GetStringUTFChars(env, jrepath_s, &isCopy); // like $JAVA_HOME/jre/lib/sparc/
-  const char * libname = (*env)->GetStringUTFChars(env, libname_s, &isCopy);
-  char buffer[128];
+#ifdef _WINDOWS
+  HINSTANCE hsdis_handle = (HINSTANCE) NULL;
+#else
+  void* hsdis_handle = NULL;
+#endif
+
+  jrepath = (*env)->GetStringUTFChars(env, jrepath_s, NULL); // like $JAVA_HOME/jre/lib/sparc/
+  if (jrepath == NULL || (*env)->ExceptionOccurred(env)) {
+    return 0;
+  }
+
+  libname = (*env)->GetStringUTFChars(env, libname_s, NULL);
+  if (libname == NULL || (*env)->ExceptionOccurred(env)) {
+    (*env)->ReleaseStringUTFChars(env, jrepath_s, jrepath);
+    return 0;
+  }
 
   /* Load the hsdis library */
 #ifdef _WINDOWS
-  HINSTANCE hsdis_handle;
   hsdis_handle = LoadLibrary(libname);
   if (hsdis_handle == NULL) {
     snprintf(buffer, sizeof(buffer), "%s%s", jrepath, libname);
@@ -134,7 +146,6 @@
     error_message = buffer;
   }
 #else
-  void* hsdis_handle;
   hsdis_handle = dlopen(libname, RTLD_LAZY | RTLD_GLOBAL);
   if (hsdis_handle == NULL) {
     snprintf(buffer, sizeof(buffer), "%s%s", jrepath, libname);
@@ -156,6 +167,11 @@
      * platform dependent error message.
      */
     jclass eclass = (*env)->FindClass(env, "sun/jvm/hotspot/debugger/DebuggerException");
+    if ((*env)->ExceptionOccurred(env)) {
+      /* Can't throw exception, probably OOM, so silently return 0 */
+      return (jlong) 0;
+    }
+
     (*env)->ThrowNew(env, eclass, error_message);
   }
   return (jlong)func;
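
The JNI changes in this file follow one discipline: pass NULL for the unused isCopy out-parameter, check for a pending exception after every allocating JNI call, and release everything already acquired on each early-return path. A condensed sketch of that pattern (with_utf_strings is an illustrative name, not part of the source):

    #include <jni.h>

    static jlong with_utf_strings(JNIEnv *env, jstring a_s, jstring b_s) {
      const char *a = (*env)->GetStringUTFChars(env, a_s, NULL);
      if (a == NULL || (*env)->ExceptionOccurred(env)) {
        return 0;                                    /* nothing acquired yet */
      }
      const char *b = (*env)->GetStringUTFChars(env, b_s, NULL);
      if (b == NULL || (*env)->ExceptionOccurred(env)) {
        (*env)->ReleaseStringUTFChars(env, a_s, a);  /* undo on the error path */
        return 0;
      }
      /* ... use a and b ... */
      (*env)->ReleaseStringUTFChars(env, b_s, b);
      (*env)->ReleaseStringUTFChars(env, a_s, a);
      return 1;
    }
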
@@ -184,16 +200,22 @@
 
 /* event callback binding to Disassembler.handleEvent */
 static void* event_to_env(void* env_pv, const char* event, void* arg) {
+  jlong result = 0;
   decode_env* denv = (decode_env*)env_pv;
   JNIEnv* env = denv->env;
   jstring event_string = (*env)->NewStringUTF(env, event);
-  jlong result = (*env)->CallLongMethod(env, denv->dis, denv->handle_event, denv->visitor,
-                                        event_string, (jlong) (uintptr_t)arg);
-  if ((*env)->ExceptionOccurred(env) != NULL) {
+  if ((*env)->ExceptionOccurred(env)) {
+    return NULL;
+  }
+
+  result = (*env)->CallLongMethod(env, denv->dis, denv->handle_event, denv->visitor,
+                                  event_string, (jlong) (uintptr_t)arg);
+  if ((*env)->ExceptionOccurred(env)) {
     /* ignore exceptions for now */
     (*env)->ExceptionClear(env);
-    result = 0;
+    return NULL;
   }
+
   return (void*)(uintptr_t)result;
 }
 
@@ -219,10 +241,13 @@
   }
   if (raw != NULL) {
     jstring output = (*env)->NewStringUTF(env, raw);
-    (*env)->CallVoidMethod(env, denv->dis, denv->raw_print, denv->visitor, output);
-    if ((*env)->ExceptionOccurred(env) != NULL) {
+    if (!(*env)->ExceptionOccurred(env)) {
+      /* make sure that UTF allocation doesn't cause OOM */
+      (*env)->CallVoidMethod(env, denv->dis, denv->raw_print, denv->visitor, output);
+    }
+    if ((*env)->ExceptionOccurred(env)) {
       /* ignore exceptions for now */
-      (*env)->ExceptionClear(env);
+        (*env)->ExceptionClear(env);
     }
     return (int) flen;
   }
@@ -231,11 +256,16 @@
   va_end(ap);
 
   output = (*env)->NewStringUTF(env, denv->buffer);
-  (*env)->CallVoidMethod(env, denv->dis, denv->raw_print, denv->visitor, output);
-  if ((*env)->ExceptionOccurred(env) != NULL) {
+  if (!(*env)->ExceptionOccurred(env)) {
+    /* make sure that UTF allocation doesn't cause OOM */
+    (*env)->CallVoidMethod(env, denv->dis, denv->raw_print, denv->visitor, output);
+  }
+
+  if ((*env)->ExceptionOccurred(env)) {
     /* ignore exceptions for now */
     (*env)->ExceptionClear(env);
   }
+
   return cnt;
 }
 
@@ -251,13 +281,24 @@
                                                                     jbyteArray code,
                                                                     jstring options_s,
                                                                     jlong decode_instructions_virtual) {
-  jboolean isCopy;
-  jbyte* start = (*env)->GetByteArrayElements(env, code, &isCopy);
-  jbyte* end = start + (*env)->GetArrayLength(env, code);
-  const char * options = (*env)->GetStringUTFChars(env, options_s, &isCopy);
-  jclass disclass = (*env)->GetObjectClass(env, dis);
+  jbyte *start = NULL;
+  jbyte *end = NULL;
+  jclass disclass = NULL;
+  const char *options = NULL;
+  decode_env denv;
 
-  decode_env denv;
+  start = (*env)->GetByteArrayElements(env, code, NULL);
+  if ((*env)->ExceptionOccurred(env)) {
+    return;
+  }
+  end = start + (*env)->GetArrayLength(env, code);
+  options = (*env)->GetStringUTFChars(env, options_s, NULL);
+  if ((*env)->ExceptionOccurred(env)) {
+    (*env)->ReleaseByteArrayElements(env, code, start, JNI_ABORT);
+    return;
+  }
+  disclass = (*env)->GetObjectClass(env, dis);
+
   denv.env = env;
   denv.dis = dis;
   denv.visitor = visitor;
@@ -266,6 +307,8 @@
   denv.handle_event = (*env)->GetMethodID(env, disclass, "handleEvent",
                                           "(Lsun/jvm/hotspot/asm/InstructionVisitor;Ljava/lang/String;J)J");
   if ((*env)->ExceptionOccurred(env)) {
+    (*env)->ReleaseByteArrayElements(env, code, start, JNI_ABORT);
+    (*env)->ReleaseStringUTFChars(env, options_s, options);
     return;
   }
 
@@ -273,11 +316,13 @@
   denv.raw_print = (*env)->GetMethodID(env, disclass, "rawPrint",
                                        "(Lsun/jvm/hotspot/asm/InstructionVisitor;Ljava/lang/String;)V");
   if ((*env)->ExceptionOccurred(env)) {
+    (*env)->ReleaseByteArrayElements(env, code, start, JNI_ABORT);
+    (*env)->ReleaseStringUTFChars(env, options_s, options);
     return;
   }
 
   /* decode the buffer */
-  (*(decode_func)(uintptr_t)decode_instructions_virtual)(startPc,
+  (*(decode_func)(uintptr_t)decode_instructions_virtual)((uintptr_t) startPc,
                                                          startPc + end - start,
                                                          (unsigned char*)start,
                                                          end - start,
--- a/hotspot/make/aix/makefiles/mapfile-vers-debug	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/make/aix/makefiles/mapfile-vers-debug	Mon Jan 05 13:30:23 2015 -0800
@@ -72,7 +72,6 @@
                 JVM_FillInStackTrace;
                 JVM_FindClassFromCaller;
                 JVM_FindClassFromClass;
-                JVM_FindClassFromClassLoader;
                 JVM_FindClassFromBootLoader;
                 JVM_FindLibraryEntry;
                 JVM_FindLoadedClass;
--- a/hotspot/make/aix/makefiles/mapfile-vers-product	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/make/aix/makefiles/mapfile-vers-product	Mon Jan 05 13:30:23 2015 -0800
@@ -72,7 +72,6 @@
                 JVM_FillInStackTrace;
                 JVM_FindClassFromCaller;
                 JVM_FindClassFromClass;
-                JVM_FindClassFromClassLoader;
                 JVM_FindClassFromBootLoader;
                 JVM_FindLibraryEntry;
                 JVM_FindLoadedClass;
--- a/hotspot/make/bsd/makefiles/mapfile-vers-darwin-debug	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/make/bsd/makefiles/mapfile-vers-darwin-debug	Mon Jan 05 13:30:23 2015 -0800
@@ -70,7 +70,6 @@
                 _JVM_FillInStackTrace
                 _JVM_FindClassFromCaller
                 _JVM_FindClassFromClass
-                _JVM_FindClassFromClassLoader
                 _JVM_FindClassFromBootLoader
                 _JVM_FindLibraryEntry
                 _JVM_FindLoadedClass
--- a/hotspot/make/bsd/makefiles/mapfile-vers-darwin-product	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/make/bsd/makefiles/mapfile-vers-darwin-product	Mon Jan 05 13:30:23 2015 -0800
@@ -70,7 +70,6 @@
                 _JVM_FillInStackTrace
                 _JVM_FindClassFromCaller
                 _JVM_FindClassFromClass
-                _JVM_FindClassFromClassLoader
                 _JVM_FindClassFromBootLoader
                 _JVM_FindLibraryEntry
                 _JVM_FindLoadedClass
--- a/hotspot/make/bsd/makefiles/mapfile-vers-debug	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/make/bsd/makefiles/mapfile-vers-debug	Mon Jan 05 13:30:23 2015 -0800
@@ -72,7 +72,6 @@
                 JVM_FillInStackTrace;
                 JVM_FindClassFromCaller;
                 JVM_FindClassFromClass;
-                JVM_FindClassFromClassLoader;
                 JVM_FindClassFromBootLoader;
                 JVM_FindLibraryEntry;
                 JVM_FindLoadedClass;
--- a/hotspot/make/bsd/makefiles/mapfile-vers-product	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/make/bsd/makefiles/mapfile-vers-product	Mon Jan 05 13:30:23 2015 -0800
@@ -72,7 +72,6 @@
                 JVM_FillInStackTrace;
                 JVM_FindClassFromCaller;
                 JVM_FindClassFromClass;
-                JVM_FindClassFromClassLoader;
                 JVM_FindClassFromBootLoader;
                 JVM_FindLibraryEntry;
                 JVM_FindLoadedClass;
--- a/hotspot/make/linux/makefiles/mapfile-vers-debug	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/make/linux/makefiles/mapfile-vers-debug	Mon Jan 05 13:30:23 2015 -0800
@@ -72,7 +72,6 @@
                 JVM_FillInStackTrace;
                 JVM_FindClassFromCaller;
                 JVM_FindClassFromClass;
-                JVM_FindClassFromClassLoader;
                 JVM_FindClassFromBootLoader;
                 JVM_FindLibraryEntry;
                 JVM_FindLoadedClass;
--- a/hotspot/make/linux/makefiles/mapfile-vers-product	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/make/linux/makefiles/mapfile-vers-product	Mon Jan 05 13:30:23 2015 -0800
@@ -72,7 +72,6 @@
                 JVM_FillInStackTrace;
                 JVM_FindClassFromCaller;
                 JVM_FindClassFromClass;
-                JVM_FindClassFromClassLoader;
                 JVM_FindClassFromBootLoader;
                 JVM_FindLibraryEntry;
                 JVM_FindLoadedClass;
--- a/hotspot/make/solaris/makefiles/mapfile-vers	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/make/solaris/makefiles/mapfile-vers	Mon Jan 05 13:30:23 2015 -0800
@@ -72,7 +72,6 @@
                 JVM_FillInStackTrace;
                 JVM_FindClassFromCaller;
                 JVM_FindClassFromClass;
-                JVM_FindClassFromClassLoader;
                 JVM_FindClassFromBootLoader;
                 JVM_FindLibraryEntry;
                 JVM_FindLoadedClass;
--- a/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -544,6 +544,9 @@
   cmplw(CCR0, Rindex, Rlength);
   sldi(RsxtIndex, RsxtIndex, index_shift);
   blt(CCR0, LnotOOR);
+  // Index should be in R17_tos, array should be in R4_ARG2.
+  mr(R17_tos, Rindex);
+  mr(R4_ARG2, Rarray);
   load_dispatch_table(Rtmp, (address*)Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
   mtctr(Rtmp);
   bctr();
@@ -1678,6 +1681,228 @@
   }
 }
 
+// Argument and return type profiling.
+// kills: tmp, tmp2, R0, CR0, CR1
+void InterpreterMacroAssembler::profile_obj_type(Register obj, Register mdo_addr_base,
+                                                 RegisterOrConstant mdo_addr_offs, Register tmp, Register tmp2) {
+  Label do_nothing, do_update;
+
+  // tmp2 = obj is allowed
+  assert_different_registers(obj, mdo_addr_base, tmp, R0);
+  assert_different_registers(tmp2, mdo_addr_base, tmp, R0);
+  const Register klass = tmp2;
+
+  verify_oop(obj);
+
+  ld(tmp, mdo_addr_offs, mdo_addr_base);
+
+  // Set null_seen if obj is 0.
+  cmpdi(CCR0, obj, 0);
+  ori(R0, tmp, TypeEntries::null_seen);
+  beq(CCR0, do_update);
+
+  load_klass(klass, obj);
+
+  clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
+  // Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
+  cmpd(CCR1, R0, klass);
+  // Klass seen before, nothing to do (regardless of unknown bit).
+  //beq(CCR1, do_nothing);
+
+  andi_(R0, klass, TypeEntries::type_unknown);
+  // Already unknown. Nothing to do anymore.
+  //bne(CCR0, do_nothing);
+  crorc(/*CCR0 eq*/2, /*CCR1 eq*/4+2, /*CCR0 eq*/2); // cr0 eq = cr1 eq or cr0 ne
+  beq(CCR0, do_nothing);
+
+  clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
+  orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
+  beq(CCR0, do_update); // First time here. Set profile type.
+
+  // Different than before. Cannot keep accurate profile.
+  ori(R0, tmp, TypeEntries::type_unknown);
+
+  bind(do_update);
+  // update profile
+  std(R0, mdo_addr_offs, mdo_addr_base);
+
+  align(32, 12);
+  bind(do_nothing);
+}
+
+void InterpreterMacroAssembler::profile_arguments_type(Register callee, Register tmp1, Register tmp2, bool is_virtual) {
+  if (!ProfileInterpreter) {
+    return;
+  }
+
+  assert_different_registers(callee, tmp1, tmp2, R28_mdx);
+
+  if (MethodData::profile_arguments() || MethodData::profile_return()) {
+    Label profile_continue;
+
+    test_method_data_pointer(profile_continue);
+
+    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
+
+    lbz(tmp1, in_bytes(DataLayout::tag_offset()) - off_to_start, R28_mdx);
+    cmpwi(CCR0, tmp1, is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
+    bne(CCR0, profile_continue);
+
+    if (MethodData::profile_arguments()) {
+      Label done;
+      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
+      add(R28_mdx, off_to_args, R28_mdx);
+
+      for (int i = 0; i < TypeProfileArgsLimit; i++) {
+        if (i > 0 || MethodData::profile_return()) {
+          // If return value type is profiled we may have no argument to profile.
+          ld(tmp1, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, R28_mdx);
+          cmpdi(CCR0, tmp1, (i+1)*TypeStackSlotEntries::per_arg_count());
+          addi(tmp1, tmp1, -i*TypeStackSlotEntries::per_arg_count());
+          blt(CCR0, done);
+        }
+        ld(tmp1, in_bytes(Method::const_offset()), callee);
+        lhz(tmp1, in_bytes(ConstMethod::size_of_parameters_offset()), tmp1);
+        // Stack offset o (zero based) from the start of the argument
+        // list, for n arguments translates into offset n - o - 1 from
+        // the end of the argument list. But there's an extra slot at
+        // the top of the stack. So the offset is n - o from Lesp.
+        ld(tmp2, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args, R28_mdx);
+        subf(tmp1, tmp2, tmp1);
+
+        sldi(tmp1, tmp1, Interpreter::logStackElementSize);
+        ldx(tmp1, tmp1, R15_esp);
+
+        profile_obj_type(tmp1, R28_mdx, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args, tmp2, tmp1);
+
+        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
+        addi(R28_mdx, R28_mdx, to_add);
+        off_to_args += to_add;
+      }
+
+      if (MethodData::profile_return()) {
+        ld(tmp1, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, R28_mdx);
+        addi(tmp1, tmp1, -TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
+      }
+
+      bind(done);
+
+      if (MethodData::profile_return()) {
+        // We're right after the type profile for the last
+        // argument. tmp1 is the number of cells left in the
+        // CallTypeData/VirtualCallTypeData to reach its end. Non-null
+        // if there's a return to profile.
+        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
+        sldi(tmp1, tmp1, exact_log2(DataLayout::cell_size));
+        add(R28_mdx, tmp1, R28_mdx);
+      }
+    } else {
+      assert(MethodData::profile_return(), "either profile call args or call ret");
+      update_mdp_by_constant(in_bytes(TypeEntriesAtCall::return_only_size()));
+    }
+
+    // Mdp points right after the end of the
+    // CallTypeData/VirtualCallTypeData, right after the cells for the
+    // return value type if there's one.
+    align(32, 12);
+    bind(profile_continue);
+  }
+}
+
+void InterpreterMacroAssembler::profile_return_type(Register ret, Register tmp1, Register tmp2) {
+  assert_different_registers(ret, tmp1, tmp2);
+  if (ProfileInterpreter && MethodData::profile_return()) {
+    Label profile_continue;
+
+    test_method_data_pointer(profile_continue);
+
+    if (MethodData::profile_return_jsr292_only()) {
+      // If we don't profile all invoke bytecodes we must make sure
+      // it's a bytecode we indeed profile. We can't go back to the
+      // beginning of the ProfileData we intend to update to check its
+      // type because we're right after it and we don't know its
+      // length.
+      lbz(tmp1, 0, R14_bcp);
+      lbz(tmp2, Method::intrinsic_id_offset_in_bytes(), R19_method);
+      cmpwi(CCR0, tmp1, Bytecodes::_invokedynamic);
+      cmpwi(CCR1, tmp1, Bytecodes::_invokehandle);
+      cror(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2);
+      cmpwi(CCR1, tmp2, vmIntrinsics::_compiledLambdaForm);
+      cror(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2);
+      bne(CCR0, profile_continue);
+    }
+
+    profile_obj_type(ret, R28_mdx, -in_bytes(ReturnTypeEntry::size()), tmp1, tmp2);
+
+    align(32, 12);
+    bind(profile_continue);
+  }
+}
+
+void InterpreterMacroAssembler::profile_parameters_type(Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
+  if (ProfileInterpreter && MethodData::profile_parameters()) {
+    Label profile_continue, done;
+
+    test_method_data_pointer(profile_continue);
+
+    // Load the offset of the area within the MDO used for
+    // parameters. If it's negative we're not profiling any parameters.
+    lwz(tmp1, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset()), R28_mdx);
+    cmpwi(CCR0, tmp1, 0);
+    blt(CCR0, profile_continue);
+
+    // Compute a pointer to the area for parameters from the offset
+    // and move the pointer to the slot for the last
+    // parameters. Collect profiling from last parameter down.
+    // mdo start + parameters offset + array length - 1
+
+    // Pointer to the parameter area in the MDO.
+    const Register mdp = tmp1;
+    add(mdp, tmp1, R28_mdx);
+
+    // Offset of the current profile entry to update.
+    const Register entry_offset = tmp2;
+    // entry_offset = array len in number of cells
+    ld(entry_offset, in_bytes(ArrayData::array_len_offset()), mdp);
+
+    int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
+    assert(off_base % DataLayout::cell_size == 0, "should be a number of cells");
+
+    // entry_offset (number of cells) = array len - size of 1 entry + offset of the stack slot field
+    addi(entry_offset, entry_offset, -TypeStackSlotEntries::per_arg_count() + (off_base / DataLayout::cell_size));
+    // entry_offset in bytes
+    sldi(entry_offset, entry_offset, exact_log2(DataLayout::cell_size));
+
+    Label loop;
+    align(32, 12);
+    bind(loop);
+
+    // Load offset on the stack from the slot for this parameter.
+    ld(tmp3, entry_offset, mdp);
+    sldi(tmp3, tmp3, Interpreter::logStackElementSize);
+    neg(tmp3, tmp3);
+    // Read the parameter from the local area.
+    ldx(tmp3, tmp3, R18_locals);
+
+    // Make entry_offset now point to the type field for this parameter.
+    int type_base = in_bytes(ParametersTypeData::type_offset(0));
+    assert(type_base > off_base, "unexpected");
+    addi(entry_offset, entry_offset, type_base - off_base);
+
+    // Profile the parameter.
+    profile_obj_type(tmp3, mdp, entry_offset, tmp4, tmp3);
+
+    // Go to next parameter.
+    int delta = TypeStackSlotEntries::per_arg_count() * DataLayout::cell_size + (type_base - off_base);
+    cmpdi(CCR0, entry_offset, off_base + delta);
+    addi(entry_offset, entry_offset, -delta);
+    bge(CCR0, loop);
+
+    align(32, 12);
+    bind(profile_continue);
+  }
+}
+
 // Add a InterpMonitorElem to stack (see frame_sparc.hpp).
 void InterpreterMacroAssembler::add_monitor_to_stack(bool stack_is_empty, Register Rtemp1, Register Rtemp2) {
 
@@ -2039,20 +2264,19 @@
   bne(CCR0, test);
 
   address fd = CAST_FROM_FN_PTR(address, verify_return_address);
-  unsigned int nbytes_save = 10*8; // 10 volatile gprs
-
-  save_LR_CR(Rtmp);
+  const int nbytes_save = 11*8; // volatile gprs except R0
+  save_volatile_gprs(R1_SP, -nbytes_save); // except R0
+  save_LR_CR(Rtmp); // Save in old frame.
   push_frame_reg_args(nbytes_save, Rtmp);
-  save_volatile_gprs(R1_SP, 112); // except R0
 
   load_const_optimized(Rtmp, fd, R0);
   mr_if_needed(R4_ARG2, reg);
   mr(R3_ARG1, R19_method);
   call_c(Rtmp); // call C
 
-  restore_volatile_gprs(R1_SP, 112); // except R0
   pop_frame();
   restore_LR_CR(Rtmp);
+  restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
   b(skip);
 
   // Perform a more elaborate out-of-line call.
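
The new profile_obj_type above encodes, in PPC instructions, a small state machine over a single MethodData cell: one flag bit records that a null reference was seen (TypeEntries::null_seen), another marks the entry as polluted by conflicting types (type_unknown), and the remaining bits hold the one klass observed so far. A C sketch of the update rule the comments describe (the constants mirror TypeEntries, but the exact values here are illustrative):

    #include <stdint.h>

    #define NULL_SEEN       ((intptr_t)1)  /* a null reference was observed */
    #define TYPE_UNKNOWN    ((intptr_t)2)  /* conflicting types were seen   */
    #define TYPE_KLASS_MASK (~(NULL_SEEN | TYPE_UNKNOWN))
    #define TYPE_MASK       (~NULL_SEEN)

    static intptr_t update_type_cell(intptr_t cell, intptr_t klass /* 0 for null */) {
      if (klass == 0)
        return cell | NULL_SEEN;            /* only record the null         */
      if ((cell & TYPE_KLASS_MASK) == klass)
        return cell;                        /* same klass seen before       */
      if (cell & TYPE_UNKNOWN)
        return cell;                        /* already polluted, keep as is */
      if ((cell & TYPE_MASK) == 0)
        return klass | (cell & NULL_SEEN);  /* first klass: record it       */
      return cell | TYPE_UNKNOWN;           /* a second, different klass    */
    }
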
--- a/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/cpu/ppc/vm/interp_masm_ppc_64.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -255,6 +255,12 @@
   void record_klass_in_profile(Register receiver, Register scratch1, Register scratch2, bool is_virtual_call);
   void record_klass_in_profile_helper(Register receiver, Register scratch1, Register scratch2, int start_row, Label& done, bool is_virtual_call);
 
+  // Argument and return type profiling.
+  void profile_obj_type(Register obj, Register mdo_addr_base, RegisterOrConstant mdo_addr_offs, Register tmp, Register tmp2);
+  void profile_arguments_type(Register callee, Register tmp1, Register tmp2, bool is_virtual);
+  void profile_return_type(Register ret, Register tmp1, Register tmp2);
+  void profile_parameters_type(Register tmp1, Register tmp2, Register tmp3, Register tmp4);
+
 #endif // !CC_INTERP
 
   // Debugging
--- a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -807,6 +807,7 @@
 
 // For verify_oops.
 void MacroAssembler::save_volatile_gprs(Register dst, int offset) {
+  std(R2,  offset, dst);   offset += 8;
   std(R3,  offset, dst);   offset += 8;
   std(R4,  offset, dst);   offset += 8;
   std(R5,  offset, dst);   offset += 8;
@@ -821,6 +822,7 @@
 
 // For verify_oops.
 void MacroAssembler::restore_volatile_gprs(Register src, int offset) {
+  ld(R2,  offset, src);   offset += 8;
   ld(R3,  offset, src);   offset += 8;
   ld(R4,  offset, src);   offset += 8;
   ld(R5,  offset, src);   offset += 8;
@@ -1187,6 +1189,16 @@
   call_VM(oop_result, entry_point, check_exceptions);
 }
 
+void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3,
+                             bool check_exceptions) {
+  // R3_ARG1 is reserved for the thread
+  mr_if_needed(R4_ARG2, arg_1);
+  assert(arg_2 != R4_ARG2, "smashed argument");
+  mr_if_needed(R5_ARG3, arg_2);
+  mr_if_needed(R6_ARG4, arg_3);
+  call_VM(oop_result, entry_point, check_exceptions);
+}
+
 void MacroAssembler::call_VM_leaf(address entry_point) {
   call_VM_leaf_base(entry_point);
 }
@@ -3059,35 +3071,27 @@
   if (!VerifyOops) {
     return;
   }
-  // Will be preserved.
-  Register tmp = R11;
-  assert(oop != tmp, "precondition");
-  unsigned int nbytes_save = 10*8; // 10 volatile gprs
+
   address/* FunctionDescriptor** */fd = StubRoutines::verify_oop_subroutine_entry_address();
-  // save tmp
-  mr(R0, tmp);
-  // kill tmp
-  save_LR_CR(tmp);
+  const Register tmp = R11; // Will be preserved.
+  const int nbytes_save = 11*8; // Volatile gprs except R0.
+  save_volatile_gprs(R1_SP, -nbytes_save); // except R0
+
+  if (oop == tmp) mr(R4_ARG2, oop);
+  save_LR_CR(tmp); // save in old frame
   push_frame_reg_args(nbytes_save, tmp);
-  // restore tmp
-  mr(tmp, R0);
-  save_volatile_gprs(R1_SP, 112); // except R0
   // load FunctionDescriptor** / entry_address *
-  load_const(tmp, fd);
+  load_const_optimized(tmp, fd, R0);
   // load FunctionDescriptor* / entry_address
   ld(tmp, 0, tmp);
-  mr(R4_ARG2, oop);
-  load_const(R3_ARG1, (address)msg);
-  // call destination for its side effect
+  if (oop != tmp) mr_if_needed(R4_ARG2, oop);
+  load_const_optimized(R3_ARG1, (address)msg, R0);
+  // Call destination for its side effect.
   call_c(tmp);
-  restore_volatile_gprs(R1_SP, 112); // except R0
+
   pop_frame();
-  // save tmp
-  mr(R0, tmp);
-  // kill tmp
   restore_LR_CR(tmp);
-  // restore tmp
-  mr(tmp, R0);
+  restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
 }
 
 const char* stop_types[] = {
--- a/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/cpu/ppc/vm/macroAssembler_ppc.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -369,6 +369,7 @@
   void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
   void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
   void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
+  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg3, bool check_exceptions = true);
   void call_VM_leaf(address entry_point);
   void call_VM_leaf(address entry_point, Register arg_1);
   void call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
--- a/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/cpu/ppc/vm/nativeInst_ppc.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -100,10 +100,7 @@
   MacroAssembler* a = new MacroAssembler(&cb);
 
   // Patch the call.
-  if (ReoptimizeCallSequences &&
-      a->is_within_range_of_b(dest, addr_call)) {
-    a->bl(dest);
-  } else {
+  if (!ReoptimizeCallSequences || !a->is_within_range_of_b(dest, addr_call)) {
     address trampoline_stub_addr = get_trampoline();
 
     // We did not find a trampoline stub because the current codeblob
@@ -115,9 +112,12 @@
 
     // Patch the constant in the call's trampoline stub.
     NativeCallTrampolineStub_at(trampoline_stub_addr)->set_destination(dest);
+    dest = trampoline_stub_addr;
+  }
 
-    a->bl(trampoline_stub_addr);
-  }
+  OrderAccess::release();
+  a->bl(dest);
+
   ICache::ppc64_flush_icache_bytes(addr_call, code_size);
 }
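
The call-patching rework above also fixes the publication order: when the target is out of direct-branch range, the trampoline's destination constant is written first, an OrderAccess::release barrier is issued, and only then is the bl redirected, so a thread racing through the old code never follows the new branch to a stale constant. A sketch of the same publish-then-point ordering in portable C11 atomics (all names here are illustrative):

    #include <stdatomic.h>
    #include <stdint.h>

    static void patch_call(uint64_t *trampoline_dest, _Atomic uint32_t *branch_insn,
                           uint64_t new_dest, uint32_t new_insn) {
      *trampoline_dest = new_dest;                 /* 1. publish the constant       */
      atomic_thread_fence(memory_order_release);   /* 2. order it before ...        */
      atomic_store_explicit(branch_insn, new_insn,
                            memory_order_relaxed); /* 3. ... redirecting the branch */
      /* the real code additionally flushes the icache over the patched range */
    }
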
 
--- a/hotspot/src/cpu/ppc/vm/ppc.ad	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/cpu/ppc/vm/ppc.ad	Mon Jan 05 13:30:23 2015 -0800
@@ -1936,8 +1936,9 @@
   // --------------------------------------------------------------------
   // Check for hi bits still needing moving. Only happens for misaligned
   // arguments to native calls.
-  if (src_hi == dst_hi)
+  if (src_hi == dst_hi) {
     return ppc64Opcode_none;               // Self copy; no move.
+  }
 
   ShouldNotReachHere();
   return ppc64Opcode_undefined;
@@ -1959,14 +1960,15 @@
 }
 
 uint MachNopNode::size(PhaseRegAlloc *ra_) const {
-   return _count * 4;
+  return _count * 4;
 }
 
 #ifndef PRODUCT
 void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const {
   int offset = ra_->reg2offset(in_RegMask(0).find_first_elem());
-  int reg = ra_->get_reg_first(this);
-  st->print("ADDI %s, SP, %d \t// box node", Matcher::regName[reg], offset);
+  char reg_str[128];
+  ra_->dump_register(this, reg_str);
+  st->print("ADDI    %s, SP, %d \t// box node", reg_str, offset);
 }
 #endif
 
--- a/hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/cpu/ppc/vm/templateInterpreter_ppc.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -91,7 +91,7 @@
 
   // Thread will be loaded to R3_ARG1.
   // Target class oop is in register R5_ARG3 by convention!
-  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose, R17_tos, R5_ARG3));
+  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_ClassCastException_verbose), R17_tos, R5_ARG3);
   // Above call must not return here since exception pending.
   DEBUG_ONLY(__ should_not_reach_here();)
   return entry;
@@ -172,6 +172,10 @@
   // Compiled code destroys templateTableBase, reload.
   __ load_const_optimized(R25_templateTableBase, (address)Interpreter::dispatch_table((TosState)0), R12_scratch2);
 
+  if (state == atos) {
+    __ profile_return_type(R3_RET, R11_scratch1, R12_scratch2);
+  }
+
   const Register cache = R11_scratch1;
   const Register size  = R12_scratch2;
   __ get_cache_and_index_at_bcp(cache, 1, index_size);
@@ -1189,6 +1193,10 @@
       __ li(R0, 1);
       __ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
     }
+
+    // Argument and return type profiling.
+    __ profile_parameters_type(R3_ARG1, R4_ARG2, R5_ARG3, R6_ARG4);
+
     // Increment invocation counter and check for overflow.
     if (inc_counter) {
       generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
@@ -1469,6 +1477,8 @@
     __ resize_frame_absolute(R12_scratch2, R11_scratch1, R0);
     if (ProfileInterpreter) {
       __ set_method_data_pointer_for_bcp();
+      __ ld(R11_scratch1, 0, R1_SP);
+      __ std(R28_mdx, _ijava_state_neg(mdx), R11_scratch1);
     }
 #if INCLUDE_JVMTI
     Label L_done;
@@ -1480,13 +1490,11 @@
     // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
     // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
     __ ld(R4_ARG2, 0, R18_locals);
-    __ call_VM(R11_scratch1, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null),
-               R4_ARG2, R19_method, R14_bcp);
-
-    __ cmpdi(CCR0, R11_scratch1, 0);
+    __ MacroAssembler::call_VM(R4_ARG2, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), R4_ARG2, R19_method, R14_bcp, false);
+    __ restore_interpreter_state(R11_scratch1, /*bcp_and_mdx_only*/ true);
+    __ cmpdi(CCR0, R4_ARG2, 0);
     __ beq(CCR0, L_done);
-
-    __ std(R11_scratch1, wordSize, R15_esp);
+    __ std(R4_ARG2, wordSize, R15_esp);
     __ bind(L_done);
 #endif // INCLUDE_JVMTI
     __ dispatch_next(vtos);
--- a/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/cpu/ppc/vm/templateTable_ppc_64.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -3235,6 +3235,8 @@
   // Load target.
   __ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes());
   __ ldx(Rtarget_method, Rindex, Rrecv_klass);
+  // Argument and return type profiling.
+  __ profile_arguments_type(Rtarget_method, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */, true);
   __ call_from_interpreter(Rtarget_method, Rret, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */);
 }
 
@@ -3318,6 +3320,8 @@
   __ null_check_throw(Rrecv, -1, Rscratch1);
 
   __ profile_final_call(Rrecv, Rscratch1);
+  // Argument and return type profiling.
+  __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
 
   // Do the call.
   __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1, Rscratch2);
@@ -3339,6 +3343,8 @@
   __ null_check_throw(Rreceiver, -1, R11_scratch1);
 
   __ profile_call(R11_scratch1, R12_scratch2);
+  // Argument and return type profiling.
+  __ profile_arguments_type(Rmethod, R11_scratch1, R12_scratch2, false);
   __ call_from_interpreter(Rmethod, Rret_addr, R11_scratch1, R12_scratch2);
 }
 
@@ -3353,6 +3359,8 @@
   prepare_invoke(byte_no, R19_method, Rret_addr, noreg, noreg, Rflags, R11_scratch1);
 
   __ profile_call(R11_scratch1, R12_scratch2);
+  // Argument and return type profiling.
+  __ profile_arguments_type(R19_method, R11_scratch1, R12_scratch2, false);
   __ call_from_interpreter(R19_method, Rret_addr, R11_scratch1, R12_scratch2);
 }
 
@@ -3374,6 +3382,8 @@
 
   // Final call case.
   __ profile_final_call(Rtemp1, Rscratch);
+  // Argument and return type profiling.
+  __ profile_arguments_type(Rindex, Rscratch, Rrecv_klass /* scratch */, true);
   // Do the final call - the index (f2) contains the method.
   __ call_from_interpreter(Rindex, Rret, Rscratch, Rrecv_klass /* scratch */);
 
@@ -3425,6 +3435,8 @@
   __ cmpdi(CCR0, Rindex, 0);
   __ beq(CCR0, Lthrow_ame);
   // Found entry. Jump off!
+  // Argument and return type profiling.
+  __ profile_arguments_type(Rindex, Rscratch1, Rscratch2, true);
   __ call_from_interpreter(Rindex, Rret_addr, Rscratch1, Rscratch2);
 
   // Vtable entry was NULL => Throw abstract method error.
@@ -3468,6 +3480,8 @@
   // to be the callsite object the bootstrap method returned. This is passed to a
   // "link" method which does the dispatch (Most likely just grabs the MH stored
   // inside the callsite and does an invokehandle).
+  // Argument and return type profiling.
+  __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, false);
   __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
 }
 
@@ -3488,6 +3502,8 @@
   __ profile_final_call(Rrecv, Rscratch1);
 
   // Still no call from handle => We call the method handle interpreter here.
+  // Argument and return type profiling.
+  __ profile_arguments_type(Rmethod, Rscratch1, Rscratch2, true);
   __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
 }
 
--- a/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/cpu/ppc/vm/vm_version_ppc.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -134,13 +134,44 @@
   }
 
   assert(AllocatePrefetchLines > 0, "invalid value");
-  if (AllocatePrefetchLines < 1) // Set valid value in product VM.
+  if (AllocatePrefetchLines < 1) { // Set valid value in product VM.
     AllocatePrefetchLines = 1; // Conservative value.
+  }
 
-  if (AllocatePrefetchStyle == 3 && AllocatePrefetchDistance < cache_line_size)
+  if (AllocatePrefetchStyle == 3 && AllocatePrefetchDistance < cache_line_size) {
     AllocatePrefetchStyle = 1; // Fall back if inappropriate.
+  }
 
   assert(AllocatePrefetchStyle >= 0, "AllocatePrefetchStyle should be positive");
+
+  if (UseCRC32Intrinsics) {
+    if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics))
+      warning("CRC32 intrinsics  are not available on this CPU");
+    FLAG_SET_DEFAULT(UseCRC32Intrinsics, false);
+  }
+
+  // The AES intrinsic stubs require AES instruction support.
+  if (UseAES) {
+    warning("AES instructions are not available on this CPU");
+    FLAG_SET_DEFAULT(UseAES, false);
+  }
+  if (UseAESIntrinsics) {
+    if (!FLAG_IS_DEFAULT(UseAESIntrinsics))
+      warning("AES intrinsics are not available on this CPU");
+    FLAG_SET_DEFAULT(UseAESIntrinsics, false);
+  }
+
+  if (UseSHA) {
+    warning("SHA instructions are not available on this CPU");
+    FLAG_SET_DEFAULT(UseSHA, false);
+  }
+  if (UseSHA1Intrinsics || UseSHA256Intrinsics || UseSHA512Intrinsics) {
+    warning("SHA intrinsics are not available on this CPU");
+    FLAG_SET_DEFAULT(UseSHA1Intrinsics, false);
+    FLAG_SET_DEFAULT(UseSHA256Intrinsics, false);
+    FLAG_SET_DEFAULT(UseSHA512Intrinsics, false);
+  }
+
 }
 
 void VM_Version::print_features() {
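
The vm_version_ppc.cpp additions apply one policy to every feature this CPU lacks: quietly reset the flag when it still has its default value, but warn first when the user set it explicitly on the command line. A sketch of that policy with stand-in flag helpers (the variables below are illustrative, not HotSpot's real flag macros):

    #include <stdbool.h>
    #include <stdio.h>

    static bool use_crc32_intrinsics = true;  /* stand-in for the VM flag         */
    static bool flag_is_default      = true;  /* false if set on the command line */

    static void disable_unsupported(const char *what) {
      if (use_crc32_intrinsics) {
        if (!flag_is_default)                 /* user asked explicitly: warn      */
          fprintf(stderr, "warning: %s are not available on this CPU\n", what);
        use_crc32_intrinsics = false;         /* fall back either way             */
      }
    }
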
--- a/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -675,7 +675,7 @@
   case handle_exception_nofpu_id:
   case handle_exception_id:
     // At this point all registers MAY be live.
-    oop_map = save_live_registers(sasm, 1 /*thread*/, id == handle_exception_nofpu_id);
+    oop_map = save_live_registers(sasm, 1 /*thread*/, id != handle_exception_nofpu_id);
     break;
   case handle_exception_from_callee_id: {
     // At this point all registers except exception oop (RAX) and
@@ -748,7 +748,7 @@
   case handle_exception_nofpu_id:
   case handle_exception_id:
     // Restore the registers that were saved at the beginning.
-    restore_live_registers(sasm, id == handle_exception_nofpu_id);
+    restore_live_registers(sasm, id != handle_exception_nofpu_id);
     break;
   case handle_exception_from_callee_id:
     // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP
--- a/hotspot/src/os/aix/vm/osThread_aix.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/os/aix/vm/osThread_aix.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2012, 2013 SAP AG. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -45,7 +45,8 @@
 
   sigemptyset(&_caller_sigmask);
 
-  _startThread_lock = new Monitor(Mutex::event, "startThread_lock", true);
+  _startThread_lock = new Monitor(Mutex::event, "startThread_lock", true,
+                                  Monitor::_safepoint_check_never);
   assert(_startThread_lock !=NULL, "check");
 }
 
--- a/hotspot/src/os/aix/vm/os_aix.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/os/aix/vm/os_aix.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -124,12 +124,6 @@
 }
 #endif
 
-// Excerpts from systemcfg.h definitions newer than AIX 5.3
-#ifndef PV_7
-# define PV_7 0x200000          // Power PC 7
-# define PV_7_Compat 0x208000   // Power PC 7
-#endif
-
 #define MAX_PATH (2 * K)
 
 // for timer info max values which include all bits
@@ -140,17 +134,40 @@
 #define ERROR_MP_VMGETINFO_FAILED                    102
 #define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
 
-// the semantics in this file are thus that codeptr_t is a *real code ptr*
+// The semantics in this file are thus that codeptr_t is a *real code ptr*.
 // This means that any function taking codeptr_t as arguments will assume
 // a real codeptr and won't handle function descriptors (eg getFuncName),
 // whereas functions taking address as args will deal with function
-// descriptors (eg os::dll_address_to_library_name)
+// descriptors (eg os::dll_address_to_library_name).
 typedef unsigned int* codeptr_t;
 
-// typedefs for stackslots, stack pointers, pointers to op codes
+// Typedefs for stackslots, stack pointers, pointers to op codes.
 typedef unsigned long stackslot_t;
 typedef stackslot_t* stackptr_t;
 
+// Excerpts from systemcfg.h definitions newer than AIX 5.3.
+#ifndef PV_7
+#define PV_7 0x200000          /* Power PC 7 */
+#define PV_7_Compat 0x208000   /* Power PC 7 */
+#endif
+#ifndef PV_8
+#define PV_8 0x300000          /* Power PC 8 */
+#define PV_8_Compat 0x308000   /* Power PC 8 */
+#endif
+
+#define trcVerbose(fmt, ...) { /* PPC port */  \
+  if (Verbose) { \
+    fprintf(stderr, fmt, ##__VA_ARGS__); \
+    fputc('\n', stderr); fflush(stderr); \
+  } \
+}
+#define trc(fmt, ...)        /* PPC port */
+
+#define ERRBYE(s) { \
+    trcVerbose(s); \
+    return -1; \
+}
+
 // query dimensions of the stack of the calling thread
 static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);
 
@@ -182,12 +199,12 @@
   return true;
 }
 
-// macro to check a given stack pointer against given stack limits and to die if test fails
+// Macro to check a given stack pointer against given stack limits and to die if test fails.
 #define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
     guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
 }
 
-// macro to check the current stack pointer against given stacklimits
+// Macro to check the current stack pointer against given stacklimits.
 #define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
   address sp; \
   sp = os::current_stack_pointer(); \
@@ -221,7 +238,7 @@
 static pid_t    _initial_pid       = 0;
 static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
 static sigset_t SR_sigset;
-static pthread_mutex_t dl_mutex;           // Used to protect dlsym() calls */
+static pthread_mutex_t dl_mutex;              // Used to protect dlsym() calls.
 
 julong os::available_memory() {
   return Aix::available_memory();
@@ -253,7 +270,6 @@
   return false;
 }
 
-
 // Return true if user is running as root.
 
 bool os::have_special_privileges() {
@@ -284,8 +300,7 @@
 
   for (int i = 0; i < numFullDisclaimsNeeded; i ++) {
     if (::disclaim(p, maxDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
-      //if (Verbose)
-      fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
+      trc("Cannot disclaim %p - %p (errno %d)\n", p, p + maxDisclaimSize, errno);
       return false;
     }
     p += maxDisclaimSize;
@@ -293,8 +308,7 @@
 
   if (lastDisclaimSize > 0) {
     if (::disclaim(p, lastDisclaimSize, DISCLAIM_ZEROMEM) != 0) {
-      //if (Verbose)
-        fprintf(stderr, "Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
+      trc("Cannot disclaim %p - %p (errno %d)\n", p, p + lastDisclaimSize, errno);
       return false;
     }
   }
@@ -334,11 +348,11 @@
 
 void os::Aix::initialize_system_info() {
 
-  // get the number of online(logical) cpus instead of configured
+  // Get the number of online (logical) cpus instead of configured.
   os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
   assert(_processor_count > 0, "_processor_count must be > 0");
 
-  // retrieve total physical storage
+  // Retrieve total physical storage.
   os::Aix::meminfo_t mi;
   if (!os::Aix::get_meminfo(&mi)) {
     fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);
@@ -513,7 +527,6 @@
 
 } // end os::Aix::query_multipage_support()
 
-// The code for this method was initially derived from the version in os_linux.cpp.
 void os::init_system_properties_values() {
 
 #define DEFAULT_LIBPATH "/usr/lib:/lib"
@@ -603,10 +616,11 @@
   sigaction(sig, (struct sigaction*)NULL, &oact);
   void* ohlr = oact.sa_sigaction ? CAST_FROM_FN_PTR(void*, oact.sa_sigaction)
     : CAST_FROM_FN_PTR(void*, oact.sa_handler);
-  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN))
+  if (ohlr == CAST_FROM_FN_PTR(void*, SIG_IGN)) {
     return true;
-  else
+  } else {
     return false;
+  }
 }
 
 void os::Aix::signal_sets_init() {
@@ -780,6 +794,9 @@
 
   // get the processor version from _system_configuration
   switch (_system_configuration.version) {
+  case PV_8:
+    strcpy(pci->version, "Power PC 8");
+    break;
   case PV_7:
     strcpy(pci->version, "Power PC 7");
     break;
@@ -807,6 +824,9 @@
   case PV_7_Compat:
     strcpy(pci->version, "PV_7_Compat");
     break;
+  case PV_8_Compat:
+    strcpy(pci->version, "PV_8_Compat");
+    break;
   default:
     strcpy(pci->version, "unknown");
   }
@@ -942,7 +962,9 @@
 
   pthread_attr_destroy(&attr);
 
-  if (ret != 0) {
+  if (ret == 0) {
+    // PPC port traceOsMisc(("Created New Thread : pthread-id %u", tid));
+  } else {
     if (PrintMiscellaneous && (Verbose || WizardMode)) {
       perror("pthread_create()");
     }
@@ -1103,8 +1125,7 @@
   if (os::Aix::on_pase()) {
     Unimplemented();
     return 0;
-  }
-  else {
+  } else {
     // On AIX use the precision of processors real time clock
     // or time base registers.
     timebasestruct_t time;
@@ -1152,7 +1173,6 @@
   }
 }
 
-
 char * os::local_time_string(char *buf, size_t buflen) {
   struct tm t;
   time_t long_time;
@@ -1190,7 +1210,6 @@
   if (abort_hook != NULL) {
     abort_hook();
   }
-
 }
 
 // Note: os::abort() might be called very early during initialization, or
@@ -1222,8 +1241,7 @@
 // from src/solaris/hpi/src/system_md.c
 
 size_t os::lasterror(char *buf, size_t len) {
-
-  if (errno == 0)  return 0;
+  if (errno == 0) return 0;
 
   const char *s = ::strerror(errno);
   size_t n = ::strlen(s);
@@ -1236,6 +1254,7 @@
 }
 
 intx os::current_thread_id() { return (intx)pthread_self(); }
+
 int os::current_process_id() {
 
   // This implementation returns a unique pid, the pid of the
@@ -1372,9 +1391,9 @@
   if (offset) {
     *offset = -1;
   }
-  if (buf) {
-    buf[0] = '\0';
-  }
+  // Buf is not optional, but offset is optional.
+  assert(buf != NULL, "sanity check");
+  buf[0] = '\0';
 
   // Resolve function ptr literals first.
   addr = resolve_function_descriptor_to_code_pointer(addr);
@@ -1407,12 +1426,9 @@
     return 0;
   }
 
-  if (Verbose) {
-    fprintf(stderr, "pc outside any module");
-  }
+  trcVerbose("pc outside any module");
 
   return -1;
-
 }
 
 bool os::dll_address_to_library_name(address addr, char* buf,
@@ -1420,9 +1436,9 @@
   if (offset) {
     *offset = -1;
   }
-  if (buf) {
-      buf[0] = '\0';
-  }
+  // Buf is not optional, but offset is optional.
+  assert(buf != NULL, "sanity check");
+  buf[0] = '\0';
 
   // Resolve function ptr literals first.
   addr = resolve_function_descriptor_to_code_pointer(addr);
@@ -1437,7 +1453,7 @@
 }
 
 // Loads .dll/.so and in case of error it checks if .dll/.so was built
-// for the same architecture as Hotspot is running on
+// for the same architecture as Hotspot is running on.
 void *os::dll_load(const char *filename, char *ebuf, int ebuflen) {
 
   if (ebuf && ebuflen > 0) {
@@ -1600,7 +1616,6 @@
   st->cr();
 }
 
-
 static void print_signal_handler(outputStream* st, int sig,
                                  char* buf, size_t buflen);
 
@@ -1624,7 +1639,7 @@
 
 static char saved_jvm_path[MAXPATHLEN] = {0};
 
-// Find the full path to the current module, libjvm.so or libjvm_g.so
+// Find the full path to the current module, libjvm.so.
 void os::jvm_path(char *buf, jint buflen) {
   // Error checking.
   if (buflen < MAXPATHLEN) {
@@ -1695,7 +1710,7 @@
   // Do not block out synchronous signals in the signal handler.
   // Blocking synchronous signals only makes sense if you can really
   // be sure that those signals won't happen during signal handling,
-  // when the blocking applies.  Normal signal handlers are lean and
+  // when the blocking applies. Normal signal handlers are lean and
   // do not cause signals. But our signal handlers tend to be "risky"
   // - secondary SIGSEGV, SIGILL, SIGBUS' may and do happen.
   // On AIX, PASE there was a case where a SIGSEGV happened, followed
@@ -2861,13 +2876,9 @@
   param.sched_priority = newpri;
   int ret = pthread_setschedparam(thr, policy, &param);
 
-  if (Verbose) {
-    if (ret == 0) {
-      fprintf(stderr, "changed priority of thread %d to %d\n", (int)thr, newpri);
-    } else {
-      fprintf(stderr, "Could not changed priority for thread %d to %d (error %d, %s)\n",
-              (int)thr, newpri, ret, strerror(ret));
-    }
+  if (ret != 0) {
+    trcVerbose("Could not change priority for thread %d to %d (error %d, %s)",
+        (int)thr, newpri, ret, strerror(ret));
   }
   return (ret == 0) ? OS_OK : OS_ERR;
 }
@@ -2988,7 +2999,6 @@
   errno = old_errno;
 }
 
-
 static int SR_initialize() {
   struct sigaction act;
   char *s;
@@ -3187,7 +3197,6 @@
   JVM_handle_aix_signal(sig, info, uc, true);
 }
 
-
 // This boolean allows users to forward their own non-matching signals
 // to JVM_handle_aix_signal, harmlessly.
 bool os::Aix::signal_handlers_are_installed = false;
@@ -3381,7 +3390,7 @@
     set_signal_handler(SIGDANGER, true);
 
     if (libjsig_is_loaded) {
-      // Tell libjsig jvm finishes setting signal handlers
+      // Tell libjsig jvm finishes setting signal handlers.
       (*end_signal_setting)();
     }
 
@@ -3397,7 +3406,7 @@
         tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
         check_signals = false;
       }
-      // need to initialize check_signal_done
+      // Need to initialize check_signal_done.
       ::sigemptyset(&check_signal_done);
     }
   }
@@ -3471,7 +3480,6 @@
   st->cr();
 }
 
-
 #define DO_SIGNAL_CHECK(sig) \
   if (!sigismember(&check_signal_done, sig)) \
     os::Aix::check_signal_handler(sig)
@@ -3532,7 +3540,6 @@
     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
     : CAST_FROM_FN_PTR(address, act.sa_handler);
 
-
   switch(sig) {
   case SIGSEGV:
   case SIGBUS:
@@ -3685,15 +3692,13 @@
   pthread_mutex_init(&dl_mutex, NULL);
 }
 
-// this is called _after_ the global arguments have been parsed
+// This is called _after_ the global arguments have been parsed.
 jint os::init_2(void) {
 
-  if (Verbose) {
-    fprintf(stderr, "processor count: %d\n", os::_processor_count);
-    fprintf(stderr, "physical memory: %lu\n", Aix::_physical_memory);
-  }
-
-  // initially build up the loaded dll map
+  trcVerbose("processor count: %d", os::_processor_count);
+  trcVerbose("physical memory: %lu", Aix::_physical_memory);
+
+  // Initially build up the loaded dll map.
   LoadedLibraries::reload();
 
   const int page_size = Aix::page_size();
@@ -3743,7 +3748,7 @@
       }
 
       if (map_address != (address) MAP_FAILED) {
-        // map succeeded, but polling_page is not at wished address, unmap and continue.
+        // Map succeeded, but polling_page is not at wished address, unmap and continue.
         ::munmap(map_address, map_size);
         map_address = (address) MAP_FAILED;
       }
@@ -3797,7 +3802,7 @@
 
   // Make the stack size a multiple of the page size so that
   // the yellow/red zones can be guarded.
-  // note that this can be 0, if no default stacksize was set
+  // Note that this can be 0, if no default stacksize was set.
   JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
 
   Aix::libpthread_init();
@@ -4088,7 +4093,6 @@
   return fd;
 }
 
-
 // create binary file, rewriting existing file if required
 int os::create_binary_file(const char* path, bool rewrite_existing) {
   int oflags = O_WRONLY | O_CREAT;
@@ -4169,7 +4173,6 @@
   return mapped_address;
 }
 
-
 // Remap a block of memory.
 char* os::pd_remap_memory(int fd, const char* file_name, size_t file_offset,
                           char *addr, size_t bytes, bool read_only,
@@ -4217,14 +4220,14 @@
   jlong sys_time = 0;
   jlong user_time = 0;
 
-  // reimplemented using getthrds64().
+  // Reimplemented using getthrds64().
   //
-  // goes like this:
+  // Works like this:
   // For the thread in question, get the kernel thread id. Then get the
   // kernel thread statistics using that id.
   //
   // This only works of course when no pthread scheduling is used,
-  // ie there is a 1:1 relationship to kernel threads.
+  // i.e. there is a 1:1 relationship to kernel threads.
   // On AIX, see AIXTHREAD_SCOPE variable.
 
   pthread_t pthtid = thread->osthread()->pthread_id();
@@ -4371,14 +4374,12 @@
   memset(&uts, 0, sizeof(uts));
   strcpy(uts.sysname, "?");
   if (::uname(&uts) == -1) {
-    fprintf(stderr, "uname failed (%d)\n", errno);
+    trc("uname failed (%d)", errno);
     guarantee(0, "Could not determine whether we run on AIX or PASE");
   } else {
-    if (Verbose) {
-      fprintf(stderr,"uname says: sysname \"%s\" version \"%s\" release \"%s\" "
-              "node \"%s\" machine \"%s\"\n",
-              uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
-    }
+    trcVerbose("uname says: sysname \"%s\" version \"%s\" release \"%s\" "
+               "node \"%s\" machine \"%s\"\n",
+               uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
     const int major = atoi(uts.version);
     assert(major > 0, "invalid OS version");
     const int minor = atoi(uts.release);
@@ -4390,12 +4391,10 @@
       // We run on AIX. We do not support versions older than AIX 5.3.
       _on_pase = 0;
       if (_os_version < 0x0503) {
-        fprintf(stderr, "AIX release older than AIX 5.3 not supported.\n");
+        trc("AIX release older than AIX 5.3 not supported.");
         assert(false, "AIX release too old.");
       } else {
-        if (Verbose) {
-          fprintf(stderr, "We run on AIX %d.%d\n", major, minor);
-        }
+        trcVerbose("We run on AIX %d.%d\n", major, minor);
       }
     } else {
       assert(false, "unknown OS");
@@ -4403,7 +4402,6 @@
   }
 
   guarantee(_on_pase != -1 && _os_version, "Could not determine AIX/OS400 release");
-
 } // end: os::Aix::initialize_os_info()
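
The version probe above folds uname's major and minor numbers into a single comparable value (0x0503 is AIX 5.3). A standalone sketch of that encoding, assuming the same 0xMMmm layout; the helper name is hypothetical:

  #include <sys/utsname.h>
  #include <stdlib.h>

  // Returns the AIX version as 0xMMmm, or 0 if uname fails.
  static int aix_os_version() {
    struct utsname uts;
    if (::uname(&uts) == -1) return 0;
    const int major = atoi(uts.version);  // on AIX, version holds the major
    const int minor = atoi(uts.release);  // and release holds the minor
    return (major << 8) | minor;          // e.g. AIX 5.3 -> 0x0503
  }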
 
 // Scan environment for important settings which might affect the VM.
@@ -4441,12 +4439,10 @@
   // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
   // exec() ? before loading the libjvm ? ....)
   p = ::getenv("XPG_SUS_ENV");
-  if (Verbose) {
-    fprintf(stderr, "XPG_SUS_ENV=%s.\n", p ? p : "<unset>");
-  }
+  trcVerbose("XPG_SUS_ENV=%s.", p ? p : "<unset>");
   if (p && strcmp(p, "ON") == 0) {
     _xpg_sus_mode = 1;
-    fprintf(stderr, "Unsupported setting: XPG_SUS_ENV=ON\n");
+    trc("Unsupported setting: XPG_SUS_ENV=ON");
     // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
     // clobber address ranges. If we ever want to support that, we have to do some
     // testing first.
@@ -4458,10 +4454,7 @@
   // Switch off AIX internal (pthread) guard pages. This has
   // immediate effect for any pthread_create calls which follow.
   p = ::getenv("AIXTHREAD_GUARDPAGES");
-  if (Verbose) {
-    fprintf(stderr, "AIXTHREAD_GUARDPAGES=%s.\n", p ? p : "<unset>");
-    fprintf(stderr, "setting AIXTHREAD_GUARDPAGES=0.\n");
-  }
+  trcVerbose("AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
   rc = ::putenv("AIXTHREAD_GUARDPAGES=0");
   guarantee(rc == 0, "");
 
@@ -4479,7 +4472,7 @@
   assert(os::Aix::on_aix(), "AIX only");
 
   if (!libperfstat::init()) {
-    fprintf(stderr, "libperfstat initialization failed.\n");
+    trc("libperfstat initialization failed.");
     assert(false, "libperfstat initialization failed");
   } else {
     if (Verbose) {
@@ -4651,7 +4644,6 @@
   return abstime;
 }
 
-
 // Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
 // Conceptually TryPark() should be equivalent to park(0).
 
@@ -4732,7 +4724,7 @@
   while (_Event < 0) {
     status = pthread_cond_timedwait(_cond, _mutex, &abst);
     assert_status(status == 0 || status == ETIMEDOUT,
-          status, "cond_timedwait");
+                  status, "cond_timedwait");
     if (!FilterSpuriousWakeups) break;         // previous semantics
     if (status == ETIMEDOUT) break;
     // We consume and ignore EINTR and spurious wakeups.
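
The loop above is the standard way to absorb spurious wakeups from pthread_cond_timedwait: re-test the predicate and only exit on a real state change or a timeout. A minimal sketch of the same loop outside the VM, with hypothetical names; the caller holds the mutex, as pthread_cond_timedwait requires:

  #include <pthread.h>
  #include <errno.h>

  // Wait until *event goes non-negative or the absolute deadline passes.
  static void timed_wait(pthread_mutex_t* m, pthread_cond_t* c,
                         volatile int* event, const struct timespec* abst) {
    while (*event < 0) {
      int status = pthread_cond_timedwait(c, m, abst);
      if (status == ETIMEDOUT) break;  // deadline reached
      // status == 0 may be a signal or a spurious wakeup; the loop re-tests.
    }
  }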
@@ -4866,9 +4858,9 @@
   // Optional fast-path check:
   // Return immediately if a permit is available.
   if (_counter > 0) {
-      _counter = 0;
-      OrderAccess::fence();
-      return;
+    _counter = 0;
+    OrderAccess::fence();
+    return;
   }
 
   Thread* thread = Thread::current();
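
The re-indented fast path above consumes a park permit without taking the mutex; the full fence orders the permit clear before the caller's subsequent reads. A sketch of the same idea using C++11 atomics in place of OrderAccess, with hypothetical standalone names:

  #include <atomic>

  static std::atomic<int> counter(0);  // stands in for Parker::_counter

  static bool try_consume_permit() {
    if (counter.load() > 0) {
      counter.store(0);
      std::atomic_thread_fence(std::memory_order_seq_cst);  // like OrderAccess::fence()
      return true;  // permit consumed, no blocking needed
    }
    return false;   // slow path: lock, then wait on the condition variable
  }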
@@ -4890,7 +4882,6 @@
     unpackTime(&absTime, isAbsolute, time);
   }
 
-
   // Enter safepoint region
   // Beware of deadlocks such as 6317397.
   // The per-thread Parker:: mutex is a classic leaf-lock.
@@ -4978,7 +4969,6 @@
   }
 }
 
-
 extern char** environ;
 
 // Run the specified command in a separate process. Return its exit value,
@@ -4997,44 +4987,43 @@
   } else if (pid == 0) {
     // child process
 
-    // try to be consistent with system(), which uses "/usr/bin/sh" on AIX
+    // Try to be consistent with system(), which uses "/usr/bin/sh" on AIX.
     execve("/usr/bin/sh", argv, environ);
 
     // execve failed
     _exit(-1);
 
-  } else  {
+  } else {
     // copied from J2SE ..._waitForProcessExit() in UNIXProcess_md.c; we don't
     // care about the actual exit code, for now.
 
     int status;
 
-    // Wait for the child process to exit.  This returns immediately if
+    // Wait for the child process to exit. This returns immediately if
     // the child has already exited.
     while (waitpid(pid, &status, 0) < 0) {
-        switch (errno) {
+      switch (errno) {
         case ECHILD: return 0;
         case EINTR: break;
         default: return -1;
-        }
+      }
     }
 
     if (WIFEXITED(status)) {
-       // The child exited normally; get its exit code.
-       return WEXITSTATUS(status);
+      // The child exited normally; get its exit code.
+      return WEXITSTATUS(status);
     } else if (WIFSIGNALED(status)) {
-       // The child exited because of a signal
-       // The best value to return is 0x80 + signal number,
-       // because that is what all Unix shells do, and because
-       // it allows callers to distinguish between process exit and
-       // process death by signal.
-       return 0x80 + WTERMSIG(status);
+      // The child exited because of a signal.
+      // The best value to return is 0x80 + signal number,
+      // because that is what all Unix shells do, and because
+      // it allows callers to distinguish between process exit and
+      // process death by signal.
+      return 0x80 + WTERMSIG(status);
     } else {
-       // Unknown exit code; pass it through
-       return status;
+      // Unknown exit code; pass it through.
+      return status;
     }
   }
-  // Remove warning.
   return -1;
 }
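
The waitpid status decoding above follows the conventional shell encoding: normal exits report WEXITSTATUS, deaths by signal report 0x80 plus the signal number. A hypothetical helper that isolates just that decoding:

  #include <sys/wait.h>

  // Returns the exit code for a normal exit, 0x80 + signal number for a
  // death by signal (the convention Unix shells use), else the raw status.
  static int decode_wait_status(int status) {
    if (WIFEXITED(status))   return WEXITSTATUS(status);
    if (WIFSIGNALED(status)) return 0x80 + WTERMSIG(status);
    return status;
  }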
 
@@ -5049,7 +5038,7 @@
   struct stat statbuf;
   char buf[MAXPATHLEN];
   char libmawtpath[MAXPATHLEN];
-  const char *xawtstr  = "/xawt/libmawt.so";
+  const char *xawtstr = "/xawt/libmawt.so";
   const char *new_xawtstr = "/libawt_xawt.so";
 
   char *p;
@@ -5090,6 +5079,9 @@
     return 0;
   }
 
+  jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
+               p, current_process_id());
+
   return strlen(buffer);
 }
 
--- a/hotspot/src/os/aix/vm/os_aix.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/os/aix/vm/os_aix.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -209,7 +209,7 @@
     return _can_use_16M_pages == 1 ? true : false;
   }
 
-  static address   ucontext_get_pc(ucontext_t* uc);
+  static address   ucontext_get_pc(const ucontext_t* uc);
   static intptr_t* ucontext_get_sp(ucontext_t* uc);
   static intptr_t* ucontext_get_fp(ucontext_t* uc);
   // Set PC into context. Needed for continuation after signal.
--- a/hotspot/src/os/bsd/vm/osThread_bsd.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/os/bsd/vm/osThread_bsd.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -43,7 +43,8 @@
 
   sigemptyset(&_caller_sigmask);
 
-  _startThread_lock = new Monitor(Mutex::event, "startThread_lock", true);
+  _startThread_lock = new Monitor(Mutex::event, "startThread_lock", true,
+                                  Monitor::_safepoint_check_never);
   assert(_startThread_lock !=NULL, "check");
 }
 
--- a/hotspot/src/os/bsd/vm/os_bsd.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/os/bsd/vm/os_bsd.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -4673,7 +4673,7 @@
 // Get the default path to the core file
 // Returns the length of the string
 int os::get_core_path(char* buffer, size_t bufferSize) {
-  int n = jio_snprintf(buffer, bufferSize, "/cores");
+  int n = jio_snprintf(buffer, bufferSize, "/cores/core.%d", current_process_id());
 
   // Truncate if theoretical string was longer than bufferSize
   n = MIN2(n, (int)bufferSize);
--- a/hotspot/src/os/linux/vm/osThread_linux.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/os/linux/vm/osThread_linux.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1999, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,7 +39,8 @@
 
   sigemptyset(&_caller_sigmask);
 
-  _startThread_lock = new Monitor(Mutex::event, "startThread_lock", true);
+  _startThread_lock = new Monitor(Mutex::event, "startThread_lock", true,
+                                  Monitor::_safepoint_check_never);
   assert(_startThread_lock !=NULL, "check");
 }
 
--- a/hotspot/src/os/linux/vm/os_linux.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/os/linux/vm/os_linux.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -5988,13 +5988,70 @@
 // Get the default path to the core file
 // Returns the length of the string
 int os::get_core_path(char* buffer, size_t bufferSize) {
-  const char* p = get_current_directory(buffer, bufferSize);
-
-  if (p == NULL) {
-    assert(p != NULL, "failed to get current directory");
+  /*
+   * Max length of /proc/sys/kernel/core_pattern is 128 characters.
+   * See https://www.kernel.org/doc/Documentation/sysctl/kernel.txt
+   */
+  const int core_pattern_len = 129;
+  char core_pattern[core_pattern_len] = {0};
+
+  int core_pattern_file = ::open("/proc/sys/kernel/core_pattern", O_RDONLY);
+  if (core_pattern_file != -1) {
+    ssize_t ret = ::read(core_pattern_file, core_pattern, core_pattern_len);
+    ::close(core_pattern_file);
+
+    if (ret > 0) {
+      char *last_char = core_pattern + strlen(core_pattern) - 1;
+
+      if (*last_char == '\n') {
+        *last_char = '\0';
+      }
+    }
+  }
+
+  if (strlen(core_pattern) == 0) {
     return 0;
   }
 
+  char *pid_pos = strstr(core_pattern, "%p");
+  size_t written;
+
+  if (core_pattern[0] == '/') {
+    written = jio_snprintf(buffer, bufferSize, "%s", core_pattern);
+  } else {
+    char cwd[PATH_MAX];
+
+    const char* p = get_current_directory(cwd, PATH_MAX);
+    if (p == NULL) {
+      assert(p != NULL, "failed to get current directory");
+      return 0;
+    }
+
+    if (core_pattern[0] == '|') {
+      written = jio_snprintf(buffer, bufferSize,
+                             "\"%s\" (or dumping to %s/core.%d)",
+                             &core_pattern[1], p, current_process_id());
+    } else {
+      written = jio_snprintf(buffer, bufferSize, "%s/%s", p, core_pattern);
+    }
+  }
+
+  if ((written >= 0) && (written < bufferSize)
+            && (pid_pos == NULL) && (core_pattern[0] != '|')) {
+    int core_uses_pid_file = ::open("/proc/sys/kernel/core_uses_pid", O_RDONLY);
+
+    if (core_uses_pid_file != -1) {
+      char core_uses_pid = 0;
+      ssize_t ret = ::read(core_uses_pid_file, &core_uses_pid, 1);
+      ::close(core_uses_pid_file);
+
+      if (core_uses_pid == '1') {
+        jio_snprintf(buffer + written, bufferSize - written,
+                     ".%d", current_process_id());
+      }
+    }
+  }
+
   return strlen(buffer);
 }
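
The new Linux implementation derives the core file location from /proc/sys/kernel/core_pattern, handling the %p substitution, the '|' pipe prefix, and the core_uses_pid fallback. The probe itself can be sketched in isolation (hypothetical helper, not the VM code):

  #include <fcntl.h>
  #include <unistd.h>

  // Read core_pattern into out (out_len > 1), stripping the trailing newline.
  static void read_core_pattern(char* out, size_t out_len) {
    out[0] = '\0';
    int fd = ::open("/proc/sys/kernel/core_pattern", O_RDONLY);
    if (fd == -1) return;
    ssize_t n = ::read(fd, out, out_len - 1);  // leave room for the terminator
    ::close(fd);
    if (n <= 0) return;
    out[n] = '\0';
    if (out[n - 1] == '\n') out[n - 1] = '\0'; // the kernel appends a newline
  }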
 
--- a/hotspot/src/os/posix/vm/os_posix.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/os/posix/vm/os_posix.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -51,15 +51,24 @@
   struct rlimit rlim;
   bool success;
 
-  n = get_core_path(buffer, bufferSize);
+  char core_path[PATH_MAX];
+  n = get_core_path(core_path, PATH_MAX);
 
-  if (getrlimit(RLIMIT_CORE, &rlim) != 0) {
-    jio_snprintf(buffer + n, bufferSize - n, "/core or core.%d (may not exist)", current_process_id());
+  if (n <= 0) {
+    jio_snprintf(buffer, bufferSize, "core.%d (may not exist)", current_process_id());
+    success = true;
+#ifdef LINUX
+  } else if (core_path[0] == '"') { // redirect to user process
+    jio_snprintf(buffer, bufferSize, "Core dumps may be processed with %s", core_path);
+    success = true;
+#endif
+  } else if (getrlimit(RLIMIT_CORE, &rlim) != 0) {
+    jio_snprintf(buffer, bufferSize, "%s (may not exist)", core_path);
     success = true;
   } else {
     switch(rlim.rlim_cur) {
       case RLIM_INFINITY:
-        jio_snprintf(buffer + n, bufferSize - n, "/core or core.%d", current_process_id());
+        jio_snprintf(buffer, bufferSize, "%s", core_path);
         success = true;
         break;
       case 0:
@@ -67,11 +76,12 @@
         success = false;
         break;
       default:
-        jio_snprintf(buffer + n, bufferSize - n, "/core or core.%d (max size %lu kB). To ensure a full core dump, try \"ulimit -c unlimited\" before starting Java again", current_process_id(), (unsigned long)(rlim.rlim_cur >> 10));
+        jio_snprintf(buffer, bufferSize, "%s (max size %lu kB). To ensure a full core dump, try \"ulimit -c unlimited\" before starting Java again", core_path, (unsigned long)(rlim.rlim_cur >> 10));
         success = true;
         break;
     }
   }
+
   VMError::report_coredump_status(buffer, success);
 }
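
The branches above turn the RLIMIT_CORE soft limit into a user-facing message: RLIM_INFINITY means full dumps, zero means dumps are disabled, and anything else is a size cap. A reduced sketch of that classification (hypothetical helper):

  #include <sys/resource.h>
  #include <stdio.h>

  static void print_core_limit() {
    struct rlimit rlim;
    if (getrlimit(RLIMIT_CORE, &rlim) != 0) {
      printf("core limit unknown\n");
    } else if (rlim.rlim_cur == RLIM_INFINITY) {
      printf("core dumps unlimited\n");
    } else if (rlim.rlim_cur == 0) {
      printf("core dumps disabled; try \"ulimit -c unlimited\"\n");
    } else {
      printf("core dumps capped at %lu kB\n",
             (unsigned long)(rlim.rlim_cur >> 10));
    }
  }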
 
@@ -89,8 +99,8 @@
     } else {
       stack[frame_idx ++] = fr.pc();
     }
-    if (fr.fp() == NULL || os::is_first_C_frame(&fr)
-        ||fr.sender_pc() == NULL || fr.cb() != NULL) break;
+    if (fr.fp() == NULL || fr.cb() != NULL ||
+        fr.sender_pc() == NULL || os::is_first_C_frame(&fr)) break;
 
     if (fr.sender_pc() && !os::is_first_C_frame(&fr)) {
       fr = os::get_sender_for_C_frame(&fr);
--- a/hotspot/src/os/solaris/vm/os_solaris.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/os/solaris/vm/os_solaris.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -5979,6 +5979,9 @@
     return 0;
   }
 
+  jio_snprintf(buffer, bufferSize, "%s/core or core.%d",
+               p, current_process_id());
+
   return strlen(buffer);
 }
 
--- a/hotspot/src/os/windows/vm/os_windows.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/os/windows/vm/os_windows.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -3074,7 +3074,7 @@
 char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
   assert((size_t)addr % os::vm_allocation_granularity() == 0,
          "reserve alignment");
-  assert(bytes % os::vm_allocation_granularity() == 0, "reserve block size");
+  assert(bytes % os::vm_page_size() == 0, "reserve page size");
   char* res;
   // note that if UseLargePages is on, all the areas that require interleaving
   // will go thru reserve_memory_special rather than thru here.
@@ -3768,7 +3768,6 @@
   return NULL;
 }
 
-#define MAX_EXIT_HANDLES PRODUCT_ONLY(32)   NOT_PRODUCT(128)
 #define EXIT_TIMEOUT     PRODUCT_ONLY(1000) NOT_PRODUCT(4000) /* 1 sec in product, 4 sec in debug */
 
 static BOOL CALLBACK init_crit_sect_call(PINIT_ONCE, PVOID pcrit_sect, PVOID*) {
@@ -3787,7 +3786,7 @@
     // _endthreadex().
     // Should be large enough to avoid blocking the exiting thread due to lack of
     // a free slot.
-    static HANDLE handles[MAX_EXIT_HANDLES];
+    static HANDLE handles[MAXIMUM_WAIT_OBJECTS];
     static int handle_count = 0;
 
     static INIT_ONCE init_once_crit_sect = INIT_ONCE_STATIC_INIT;
@@ -3809,32 +3808,34 @@
           if (res == WAIT_TIMEOUT) {
             handles[j++] = handles[i];
           } else {
-            if (res != WAIT_OBJECT_0) {
-              warning("WaitForSingleObject failed in %s: %d\n", __FILE__, __LINE__);
-              // Don't keep the handle, if we failed waiting for it.
+            if (res == WAIT_FAILED) {
+              warning("WaitForSingleObject failed (%u) in %s: %d\n",
+                      GetLastError(), __FILE__, __LINE__);
             }
+            // Don't keep the handle, if we failed waiting for it.
             CloseHandle(handles[i]);
           }
         }
 
         // If there's no free slot in the array of the kept handles, we'll have to
         // wait until at least one thread completes exiting.
-        if ((handle_count = j) == MAX_EXIT_HANDLES) {
+        if ((handle_count = j) == MAXIMUM_WAIT_OBJECTS) {
           // Raise the priority of the oldest exiting thread to increase its chances
           // to complete sooner.
           SetThreadPriority(handles[0], THREAD_PRIORITY_ABOVE_NORMAL);
-          res = WaitForMultipleObjects(MAX_EXIT_HANDLES, handles, FALSE, EXIT_TIMEOUT);
-          if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAX_EXIT_HANDLES)) {
+          res = WaitForMultipleObjects(MAXIMUM_WAIT_OBJECTS, handles, FALSE, EXIT_TIMEOUT);
+          if (res >= WAIT_OBJECT_0 && res < (WAIT_OBJECT_0 + MAXIMUM_WAIT_OBJECTS)) {
             i = (res - WAIT_OBJECT_0);
-            handle_count = MAX_EXIT_HANDLES - 1;
+            handle_count = MAXIMUM_WAIT_OBJECTS - 1;
             for (; i < handle_count; ++i) {
               handles[i] = handles[i + 1];
             }
           } else {
-            warning("WaitForMultipleObjects %s in %s: %d\n",
-                    (res == WAIT_FAILED ? "failed" : "timed out"), __FILE__, __LINE__);
+            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
+                    (res == WAIT_FAILED ? "failed" : "timed out"),
+                    GetLastError(), __FILE__, __LINE__);
             // Don't keep handles, if we failed waiting for them.
-            for (i = 0; i < MAX_EXIT_HANDLES; ++i) {
+            for (i = 0; i < MAXIMUM_WAIT_OBJECTS; ++i) {
               CloseHandle(handles[i]);
             }
             handle_count = 0;
@@ -3846,7 +3847,8 @@
         hthr = GetCurrentThread();
         if (!DuplicateHandle(hproc, hthr, hproc, &handles[handle_count],
                              0, FALSE, DUPLICATE_SAME_ACCESS)) {
-          warning("DuplicateHandle failed in %s: %d\n", __FILE__, __LINE__);
+          warning("DuplicateHandle failed (%u) in %s: %d\n",
+                  GetLastError(), __FILE__, __LINE__);
         } else {
           ++handle_count;
         }
@@ -3869,9 +3871,10 @@
             SetThreadPriority(handles[i], THREAD_PRIORITY_ABOVE_NORMAL);
           }
           res = WaitForMultipleObjects(handle_count, handles, TRUE, EXIT_TIMEOUT);
-          if (res < WAIT_OBJECT_0 || res >= (WAIT_OBJECT_0 + MAX_EXIT_HANDLES)) {
-            warning("WaitForMultipleObjects %s in %s: %d\n",
-                    (res == WAIT_FAILED ? "failed" : "timed out"), __FILE__, __LINE__);
+          if (res == WAIT_FAILED || res == WAIT_TIMEOUT) {
+            warning("WaitForMultipleObjects %s (%u) in %s: %d\n",
+                    (res == WAIT_FAILED ? "failed" : "timed out"),
+                    GetLastError(), __FILE__, __LINE__);
           }
           for (i = 0; i < handle_count; ++i) {
             CloseHandle(handles[i]);
@@ -3909,7 +3912,6 @@
   return exit_code;
 }
 
-#undef MAX_EXIT_HANDLES
 #undef EXIT_TIMEOUT
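
Replacing MAX_EXIT_HANDLES with MAXIMUM_WAIT_OBJECTS ties the exit-handle array to the hard Win32 limit: WaitForMultipleObjects accepts at most MAXIMUM_WAIT_OBJECTS (64) handles per call, so larger sets must be chunked. A sketch of a bounded wait under that limit (hypothetical helper):

  #include <windows.h>

  // Wait (all) on at most MAXIMUM_WAIT_OBJECTS handles; callers with more
  // handles must loop over 64-handle chunks.
  static DWORD wait_all_bounded(HANDLE* handles, DWORD count, DWORD timeout_ms) {
    DWORD n = (count < MAXIMUM_WAIT_OBJECTS) ? count : MAXIMUM_WAIT_OBJECTS;
    DWORD res = WaitForMultipleObjects(n, handles, TRUE, timeout_ms);
    if (res == WAIT_FAILED) {
      DWORD err = GetLastError();  // only meaningful after WAIT_FAILED
      (void)err;
    }
    return res;
  }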
 
 void os::win32::setmode_streams() {
--- a/hotspot/src/os_cpu/aix_ppc/vm/os_aix_ppc.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/os_cpu/aix_ppc/vm/os_aix_ppc.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -91,8 +91,9 @@
 
 // Frame information (pc, sp, fp) retrieved via ucontext
 // always looks like a C-frame according to the frame
-// conventions in frame_ppc64.hpp.
-address os::Aix::ucontext_get_pc(ucontext_t * uc) {
+// conventions in frame_ppc.hpp.
+
+address os::Aix::ucontext_get_pc(const ucontext_t * uc) {
   return (address)uc->uc_mcontext.jmp_context.iar;
 }
 
@@ -486,7 +487,7 @@
 ////////////////////////////////////////////////////////////////////////////////
 // thread stack
 
-size_t os::Aix::min_stack_allowed = 768*K;
+size_t os::Aix::min_stack_allowed = 128*K;
 
 // Aix is always in floating stack mode. The stack size for a new
 // thread can be set via pthread_attr_setstacksize().
@@ -499,7 +500,7 @@
   // because of the strange 'fallback logic' in os::create_thread().
   // Better set CompilerThreadStackSize in globals_<os_cpu>.hpp if you want to
   // specify a different stack size for compiler threads!
-  size_t s = (thr_type == os::compiler_thread ? 4 * M : 1024 * K);
+  size_t s = (thr_type == os::compiler_thread ? 4 * M : 1 * M);
   return s;
 }
 
--- a/hotspot/src/os_cpu/aix_ppc/vm/os_aix_ppc.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/os_cpu/aix_ppc/vm/os_aix_ppc.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -23,8 +23,8 @@
  *
  */
 
-#ifndef OS_CPU_AIX_OJDKPPC_VM_OS_AIX_PPC_HPP
-#define OS_CPU_AIX_OJDKPPC_VM_OS_AIX_PPC_HPP
+#ifndef OS_CPU_AIX_PPC_VM_OS_AIX_PPC_HPP
+#define OS_CPU_AIX_PPC_VM_OS_AIX_PPC_HPP
 
   static void setup_fpu() {}
 
@@ -32,4 +32,4 @@
   // Note: Currently only used in 64 bit Windows implementations
   static bool register_code_area(char *low, char *high) { return true; }
 
-#endif // OS_CPU_AIX_OJDKPPC_VM_OS_AIX_PPC_HPP
+#endif // OS_CPU_AIX_PPC_VM_OS_AIX_PPC_HPP
--- a/hotspot/src/os_cpu/aix_ppc/vm/prefetch_aix_ppc.inline.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/os_cpu/aix_ppc/vm/prefetch_aix_ppc.inline.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -23,8 +23,8 @@
  *
  */
 
-#ifndef OS_CPU_AIX_PPC_64_VM_PREFETCH_AIX_PPC_64_INLINE_HPP
-#define OS_CPU_AIX_PPC_64_VM_PREFETCH_AIX_PPC_64_INLINE_HPP
+#ifndef OS_CPU_AIX_PPC_VM_PREFETCH_AIX_PPC_INLINE_HPP
+#define OS_CPU_AIX_PPC_VM_PREFETCH_AIX_PPC_INLINE_HPP
 
 #include "runtime/prefetch.hpp"
 
@@ -55,4 +55,4 @@
 #endif
 }
 
-#endif // OS_CPU_AIX_PPC_64_VM_PREFETCH_AIX_PPC_64_INLINE_HPP
+#endif // OS_CPU_AIX_PPC_VM_PREFETCH_AIX_PPC_INLINE_HPP
--- a/hotspot/src/os_cpu/aix_ppc/vm/threadLS_aix_ppc.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/os_cpu/aix_ppc/vm/threadLS_aix_ppc.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -23,8 +23,8 @@
  *
  */
 
-#ifndef OS_CPU_AIX_OJDKPPC_VM_THREADLS_AIX_PPC_HPP
-#define OS_CPU_AIX_OJDKPPC_VM_THREADLS_AIX_PPC_HPP
+#ifndef OS_CPU_AIX_PPC_VM_THREADLS_AIX_PPC_HPP
+#define OS_CPU_AIX_PPC_VM_THREADLS_AIX_PPC_HPP
 
   // Processor dependent parts of ThreadLocalStorage
 
@@ -33,4 +33,4 @@
     return (Thread *) os::thread_local_storage_at(thread_index());
   }
 
-#endif // OS_CPU_AIX_OJDKPPC_VM_THREADLS_AIX_PPC_HPP
+#endif // OS_CPU_AIX_PPC_VM_THREADLS_AIX_PPC_HPP
--- a/hotspot/src/os_cpu/aix_ppc/vm/thread_aix_ppc.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/os_cpu/aix_ppc/vm/thread_aix_ppc.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -23,8 +23,8 @@
  *
  */
 
-#ifndef OS_CPU_AIX_OJDKPPC_VM_THREAD_AIX_PPC_HPP
-#define OS_CPU_AIX_OJDKPPC_VM_THREAD_AIX_PPC_HPP
+#ifndef OS_CPU_AIX_PPC_VM_THREAD_AIX_PPC_HPP
+#define OS_CPU_AIX_PPC_VM_THREAD_AIX_PPC_HPP
 
  private:
   void pd_initialize() {
@@ -76,4 +76,4 @@
 
   intptr_t* last_interpreter_fp() { return _last_interpreter_fp; }
 
-#endif // OS_CPU_AIX_OJDKPPC_VM_THREAD_AIX_PPC_HPP
+#endif // OS_CPU_AIX_PPC_VM_THREAD_AIX_PPC_HPP
--- a/hotspot/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/os_cpu/linux_ppc/vm/os_linux_ppc.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -453,7 +453,7 @@
 ////////////////////////////////////////////////////////////////////////////////
 // thread stack
 
-size_t os::Linux::min_stack_allowed = 768*K;
+size_t os::Linux::min_stack_allowed = 128*K;
 
 bool os::Linux::supports_variable_stack_size() { return true; }
 
--- a/hotspot/src/share/vm/classfile/classFileParser.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/classfile/classFileParser.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -3108,21 +3108,39 @@
   }
 }
 
-// Transfer ownership of metadata allocated to the InstanceKlass.
-void ClassFileParser::apply_parsed_class_metadata(
-                                            instanceKlassHandle this_klass,
-                                            int java_fields_count, TRAPS) {
-  // Assign annotations if needed
-  if (_annotations != NULL || _type_annotations != NULL ||
-      _fields_annotations != NULL || _fields_type_annotations != NULL) {
+// Create the Annotations object that will
+// hold the annotations array for the Klass.
+void ClassFileParser::create_combined_annotations(TRAPS) {
+    if (_annotations == NULL &&
+        _type_annotations == NULL &&
+        _fields_annotations == NULL &&
+        _fields_type_annotations == NULL) {
+      // Don't create the Annotations object unnecessarily.
+      return;
+    }
+
     Annotations* annotations = Annotations::allocate(_loader_data, CHECK);
     annotations->set_class_annotations(_annotations);
     annotations->set_class_type_annotations(_type_annotations);
     annotations->set_fields_annotations(_fields_annotations);
     annotations->set_fields_type_annotations(_fields_type_annotations);
-    this_klass->set_annotations(annotations);
-  }
-
+
+    // This is the Annotations object that will be
+    // assigned to the InstanceKlass being constructed.
+    _combined_annotations = annotations;
+
+    // The annotation arrays below have been transferred to
+    // _combined_annotations, so these fields can now be cleared.
+    _annotations             = NULL;
+    _type_annotations        = NULL;
+    _fields_annotations      = NULL;
+    _fields_type_annotations = NULL;
+}
+
+// Transfer ownership of metadata allocated to the InstanceKlass.
+void ClassFileParser::apply_parsed_class_metadata(
+                                            instanceKlassHandle this_klass,
+                                            int java_fields_count, TRAPS) {
   _cp->set_pool_holder(this_klass());
   this_klass->set_constants(_cp);
   this_klass->set_fields(_fields, java_fields_count);
@@ -3130,6 +3148,7 @@
   this_klass->set_inner_classes(_inner_classes);
   this_klass->set_local_interfaces(_local_interfaces);
   this_klass->set_transitive_interfaces(_transitive_interfaces);
+  this_klass->set_annotations(_combined_annotations);
 
   // Clear out these fields so they don't get deallocated by the destructor
   clear_class_metadata();
@@ -4002,6 +4021,10 @@
     ClassAnnotationCollector parsed_annotations;
     parse_classfile_attributes(&parsed_annotations, CHECK_(nullHandle));
 
+    // Finalize the Annotations metadata object,
+    // now that all annotation arrays have been created.
+    create_combined_annotations(CHECK_(nullHandle));
+
     // Make sure this is the end of class file stream
     guarantee_property(cfs->at_eos(), "Extra bytes at the end of class file %s", CHECK_(nullHandle));
 
@@ -4302,10 +4325,27 @@
   InstanceKlass::deallocate_interfaces(_loader_data, _super_klass(),
                                        _local_interfaces, _transitive_interfaces);
 
-  MetadataFactory::free_array<u1>(_loader_data, _annotations);
-  MetadataFactory::free_array<u1>(_loader_data, _type_annotations);
-  Annotations::free_contents(_loader_data, _fields_annotations);
-  Annotations::free_contents(_loader_data, _fields_type_annotations);
+  if (_combined_annotations != NULL) {
+    // The annotation arrays were installed into the Annotations object
+    // that would have been assigned to the InstanceKlass being created.
+
+    // Deallocate the Annotations object and the installed annotations arrays.
+    _combined_annotations->deallocate_contents(_loader_data);
+
+    // If the _combined_annotations pointer is non-NULL,
+    // then the other annotations fields should have been cleared.
+    assert(_annotations             == NULL, "Should have been cleared");
+    assert(_type_annotations        == NULL, "Should have been cleared");
+    assert(_fields_annotations      == NULL, "Should have been cleared");
+    assert(_fields_type_annotations == NULL, "Should have been cleared");
+  } else {
+    // If the annotations arrays were not installed into the Annotations object,
+    // then they have to be deallocated explicitly.
+    MetadataFactory::free_array<u1>(_loader_data, _annotations);
+    MetadataFactory::free_array<u1>(_loader_data, _type_annotations);
+    Annotations::free_contents(_loader_data, _fields_annotations);
+    Annotations::free_contents(_loader_data, _fields_type_annotations);
+  }
 
   clear_class_metadata();
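
The destructor change enforces an either/or ownership rule: once the annotation arrays are installed into _combined_annotations, only the combined object is deallocated; if combining never happened, the raw arrays are freed individually. A reduced model of that transfer, with simplified stand-in types:

  // Sketch only: _part stands in for an annotation array, _combined for the
  // Annotations object. Clearing _part on transfer makes cleanup free each
  // piece exactly once, matching the asserts in the destructor above.
  struct ParsedMeta {
    int* _part;
    int* _combined;

    ParsedMeta() : _part(new int(0)), _combined(NULL) {}

    void combine() {
      _combined = _part;   // ownership moves to the combined object
      _part = NULL;        // cleared, as the destructor asserts
    }

    ~ParsedMeta() {
      if (_combined != NULL) {
        delete _combined;  // parts are freed through the combined object
      } else {
        delete _part;      // never combined: free the raw part directly
      }
    }
  };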
 
--- a/hotspot/src/share/vm/classfile/classFileParser.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/classfile/classFileParser.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -75,6 +75,7 @@
   Array<u2>*       _inner_classes;
   Array<Klass*>*   _local_interfaces;
   Array<Klass*>*   _transitive_interfaces;
+  Annotations*     _combined_annotations;
   AnnotationArray* _annotations;
   AnnotationArray* _type_annotations;
   Array<AnnotationArray*>* _fields_annotations;
@@ -86,6 +87,8 @@
   void set_class_generic_signature_index(u2 x) { _generic_signature_index = x; }
   void set_class_sde_buffer(char* x, int len)  { _sde_buffer = x; _sde_length = len; }
 
+  void create_combined_annotations(TRAPS);
+
   void init_parsed_class_attributes(ClassLoaderData* loader_data) {
     _loader_data = loader_data;
     _synthetic_flag = false;
@@ -110,6 +113,7 @@
     _inner_classes = NULL;
     _local_interfaces = NULL;
     _transitive_interfaces = NULL;
+    _combined_annotations = NULL;
     _annotations = _type_annotations = NULL;
     _fields_annotations = _fields_type_annotations = NULL;
   }
--- a/hotspot/src/share/vm/classfile/classLoaderData.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/classfile/classLoaderData.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -81,7 +81,8 @@
   _metaspace(NULL), _unloading(false), _klasses(NULL),
   _claimed(0), _jmethod_ids(NULL), _handles(NULL), _deallocate_list(NULL),
   _next(NULL), _dependencies(dependencies),
-  _metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true)) {
+  _metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true,
+                            Monitor::_safepoint_check_never)) {
     // empty
 }
 
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -83,9 +83,11 @@
   // Note: this requires that CFLspace c'tors
   // are called serially in the order in which the locks
   // are acquired in the program text. This is true today.
-  _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
+  _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true,
+                Monitor::_safepoint_check_sometimes),
   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
-                          "CompactibleFreeListSpace._dict_par_lock", true),
+                          "CompactibleFreeListSpace._dict_par_lock", true,
+                          Monitor::_safepoint_check_never),
   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
                     CMSRescanMultiple),
   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
@@ -152,8 +154,7 @@
   // Initialize locks for parallel case.
   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
     _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
-                                            "a freelist par lock",
-                                            true);
+                                            "a freelist par lock", true, Mutex::_safepoint_check_sometimes);
     DEBUG_ONLY(
       _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
     )
@@ -2559,12 +2560,12 @@
      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
      x }
 
-// Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
-// OldPLABSize, whose static default is different; if overridden at the
+// Initialize with default setting for CMS, _not_
+// generic OldPLABSize, whose static default is different; if overridden at the
 // command-line, this will get reinitialized via a call to
 // modify_initialization() below.
 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
-  VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
+  VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CFLS_LAB::_default_dynamic_old_plab_size));
 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
 uint   CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
 
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -690,6 +690,9 @@
   void get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl);
 
 public:
+  static const int _default_dynamic_old_plab_size = 16;
+  static const int _default_static_old_plab_size  = 50;
+
   CFLS_LAB(CompactibleFreeListSpace* cfls);
 
   // Allocate and return a block of the given size, or else return NULL.
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -42,6 +42,7 @@
 #include "gc_implementation/shared/isGCActiveMark.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "memory/allocation.hpp"
+#include "memory/cardGeneration.inline.hpp"
 #include "memory/cardTableRS.hpp"
 #include "memory/collectorPolicy.hpp"
 #include "memory/gcLocker.inline.hpp"
@@ -479,7 +480,9 @@
   _restart_addr(NULL),
   _overflow_list(NULL),
   _stats(cmsGen),
-  _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true)),
+  _eden_chunk_lock(new Mutex(Mutex::leaf + 1, "CMS_eden_chunk_lock", true,
+                             // Verify that this lock should be acquired with a safepoint check.
+                             Monitor::_safepoint_check_sometimes)),
   _eden_chunk_array(NULL),     // may be set in ctor body
   _eden_chunk_capacity(0),     // -- ditto --
   _eden_chunk_index(0),        // -- ditto --
@@ -793,11 +796,6 @@
   }
 }
 
-CompactibleSpace*
-ConcurrentMarkSweepGeneration::first_compaction_space() const {
-  return _cmsSpace;
-}
-
 void ConcurrentMarkSweepGeneration::reset_after_compaction() {
   // Clear the promotion information.  These pointers can be adjusted
   // along with all the other pointers into the heap but
@@ -808,10 +806,6 @@
   }
 }
 
-void ConcurrentMarkSweepGeneration::space_iterate(SpaceClosure* blk, bool usedOnly) {
-  blk->do_space(_cmsSpace);
-}
-
 void ConcurrentMarkSweepGeneration::compute_new_size() {
   assert_locked_or_safepoint(Heap_lock);
 
@@ -882,7 +876,7 @@
         expand_bytes);
     }
     // safe if expansion fails
-    expand(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
+    expand_for_gc_cause(expand_bytes, 0, CMSExpansionCause::_satisfy_free_ratio);
     if (PrintGCDetails && Verbose) {
       gclog_or_tty->print_cr("  Expanded free fraction %f",
         ((double) free()) / capacity());
@@ -1048,8 +1042,7 @@
   if (res == NULL) {
     // expand and retry
     size_t s = _cmsSpace->expansionSpaceRequired(obj_size);  // HeapWords
-    expand(s*HeapWordSize, MinHeapDeltaBytes,
-      CMSExpansionCause::_satisfy_promotion);
+    expand_for_gc_cause(s*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_promotion);
     // Since there's currently no next generation, we don't try to promote
     // into a more senior generation.
     assert(next_gen() == NULL, "assumption, based upon which no attempt "
@@ -1618,14 +1611,15 @@
 
   // If the collection is being acquired from the background
   // collector, there may be references on the discovered
-  // references lists that have NULL referents (being those
-  // that were concurrently cleared by a mutator) or
-  // that are no longer active (having been enqueued concurrently
-  // by the mutator).
-  // Scrub the list of those references because Mark-Sweep-Compact
-  // code assumes referents are not NULL and that all discovered
-  // Reference objects are active.
-  ref_processor()->clean_up_discovered_references();
+  // references lists.  Abandon those references, since some
+  // of them may have become unreachable after concurrent
+  // discovery; the STW compacting collector will redo discovery
+  // more precisely, without being subject to floating garbage.
+  // Leaving otherwise unreachable references in the discovered
+  // lists would require special handling.
+  ref_processor()->disable_discovery();
+  ref_processor()->abandon_partial_discovery();
+  ref_processor()->verify_no_references_recorded();
 
   if (first_state > Idling) {
     save_heap_summary();
@@ -1691,7 +1685,7 @@
   ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
 
   ref_processor()->set_enqueuing_is_done(false);
-  ref_processor()->enable_discovery(false /*verify_disabled*/, false /*check_no_refs*/);
+  ref_processor()->enable_discovery();
   ref_processor()->setup_policy(clear_all_soft_refs);
   // If an asynchronous collection finishes, the _modUnionTable is
   // all clear.  If we are assuming the collection from an asynchronous
@@ -2625,13 +2619,6 @@
 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DEFN)
 
 void
-ConcurrentMarkSweepGeneration::younger_refs_iterate(OopsInGenClosure* cl) {
-  cl->set_generation(this);
-  younger_refs_in_space_iterate(_cmsSpace, cl);
-  cl->reset_generation();
-}
-
-void
 ConcurrentMarkSweepGeneration::oop_iterate(ExtendedOopClosure* cl) {
   if (freelistLock()->owned_by_self()) {
     Generation::oop_iterate(cl);
@@ -2803,23 +2790,17 @@
   CMSSynchronousYieldRequest yr;
   assert(!tlab, "Can't deal with TLAB allocation");
   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
-  expand(word_size*HeapWordSize, MinHeapDeltaBytes,
-    CMSExpansionCause::_satisfy_allocation);
+  expand_for_gc_cause(word_size*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_satisfy_allocation);
   if (GCExpandToAllocateDelayMillis > 0) {
     os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
   }
   return have_lock_and_allocate(word_size, tlab);
 }
 
-// YSR: All of this generation expansion/shrinking stuff is an exact copy of
-// TenuredGeneration, which makes me wonder if we should move this
-// to CardGeneration and share it...
-bool ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes) {
-  return CardGeneration::expand(bytes, expand_bytes);
-}
-
-void ConcurrentMarkSweepGeneration::expand(size_t bytes, size_t expand_bytes,
-  CMSExpansionCause::Cause cause)
+void ConcurrentMarkSweepGeneration::expand_for_gc_cause(
+    size_t bytes,
+    size_t expand_bytes,
+    CMSExpansionCause::Cause cause)
 {
 
   bool success = expand(bytes, expand_bytes);
@@ -2848,8 +2829,7 @@
       return NULL;
     }
     // Otherwise, we try expansion.
-    expand(word_sz*HeapWordSize, MinHeapDeltaBytes,
-      CMSExpansionCause::_allocate_par_lab);
+    expand_for_gc_cause(word_sz*HeapWordSize, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_lab);
     // Now go around the loop and try alloc again;
     // A competing par_promote might beat us to the expansion space,
     // so we may go around the loop again if promotion fails again.
@@ -2876,8 +2856,7 @@
       return false;
     }
     // Otherwise, we try expansion.
-    expand(refill_size_bytes, MinHeapDeltaBytes,
-      CMSExpansionCause::_allocate_par_spooling_space);
+    expand_for_gc_cause(refill_size_bytes, MinHeapDeltaBytes, CMSExpansionCause::_allocate_par_spooling_space);
     // Now go around the loop and try alloc again;
     // A competing allocation might beat us to the expansion space,
     // so we may go around the loop again if allocation fails again.
@@ -2887,77 +2866,16 @@
   }
 }
 
-
-void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
-  assert_locked_or_safepoint(ExpandHeap_lock);
-  // Shrink committed space
-  _virtual_space.shrink_by(bytes);
-  // Shrink space; this also shrinks the space's BOT
-  _cmsSpace->set_end((HeapWord*) _virtual_space.high());
-  size_t new_word_size = heap_word_size(_cmsSpace->capacity());
-  // Shrink the shared block offset array
-  _bts->resize(new_word_size);
-  MemRegion mr(_cmsSpace->bottom(), new_word_size);
-  // Shrink the card table
-  Universe::heap()->barrier_set()->resize_covered_region(mr);
-
-  if (Verbose && PrintGC) {
-    size_t new_mem_size = _virtual_space.committed_size();
-    size_t old_mem_size = new_mem_size + bytes;
-    gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
-                  name(), old_mem_size/K, new_mem_size/K);
-  }
-}
-
 void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
-  assert_locked_or_safepoint(Heap_lock);
-  size_t size = ReservedSpace::page_align_size_down(bytes);
   // Only shrink if a compaction was done so that all the free space
   // in the generation is in a contiguous block at the end.
-  if (size > 0 && did_compact()) {
-    shrink_by(size);
-  }
-}
-
-bool ConcurrentMarkSweepGeneration::grow_by(size_t bytes) {
+  if (did_compact()) {
+    CardGeneration::shrink(bytes);
+  }
+}
+
+void ConcurrentMarkSweepGeneration::assert_correct_size_change_locking() {
   assert_locked_or_safepoint(Heap_lock);
-  bool result = _virtual_space.expand_by(bytes);
-  if (result) {
-    size_t new_word_size =
-      heap_word_size(_virtual_space.committed_size());
-    MemRegion mr(_cmsSpace->bottom(), new_word_size);
-    _bts->resize(new_word_size);  // resize the block offset shared array
-    Universe::heap()->barrier_set()->resize_covered_region(mr);
-    // Hmmmm... why doesn't CFLS::set_end verify locking?
-    // This is quite ugly; FIX ME XXX
-    _cmsSpace->assert_locked(freelistLock());
-    _cmsSpace->set_end((HeapWord*)_virtual_space.high());
-
-    // update the space and generation capacity counters
-    if (UsePerfData) {
-      _space_counters->update_capacity();
-      _gen_counters->update_all();
-    }
-
-    if (Verbose && PrintGC) {
-      size_t new_mem_size = _virtual_space.committed_size();
-      size_t old_mem_size = new_mem_size - bytes;
-      gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by " SIZE_FORMAT "K to " SIZE_FORMAT "K",
-                    name(), old_mem_size/K, bytes/K, new_mem_size/K);
-    }
-  }
-  return result;
-}
-
-bool ConcurrentMarkSweepGeneration::grow_to_reserved() {
-  assert_locked_or_safepoint(Heap_lock);
-  bool success = true;
-  const size_t remaining_bytes = _virtual_space.uncommitted_size();
-  if (remaining_bytes > 0) {
-    success = grow_by(remaining_bytes);
-    DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
-  }
-  return success;
 }
 
 void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
@@ -3084,7 +3002,7 @@
                     Mutex::_no_safepoint_check_flag);
     checkpointRootsInitialWork();
     // enable ("weak") refs discovery
-    rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
+    rp->enable_discovery();
     _collectorState = Marking;
   }
   SpecializationStats::print();
@@ -6031,7 +5949,8 @@
 CMSBitMap::CMSBitMap(int shifter, int mutex_rank, const char* mutex_name):
   _bm(),
   _shifter(shifter),
-  _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true) : NULL)
+  _lock(mutex_rank >= 0 ? new Mutex(mutex_rank, mutex_name, true,
+                                    Monitor::_safepoint_check_sometimes) : NULL)
 {
   _bmStartWord = 0;
   _bmWordSize  = 0;
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,9 +30,10 @@
 #include "gc_implementation/shared/gcStats.hpp"
 #include "gc_implementation/shared/gcWhen.hpp"
 #include "gc_implementation/shared/generationCounters.hpp"
+#include "memory/cardGeneration.hpp"
 #include "memory/freeBlockDictionary.hpp"
-#include "memory/generation.hpp"
 #include "memory/iterator.hpp"
+#include "memory/space.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/virtualspace.hpp"
 #include "services/memoryService.hpp"
@@ -171,9 +172,7 @@
 // Represents a marking stack used by the CMS collector.
 // Ideally this should be GrowableArray<> just like MSC's marking stack(s).
 class CMSMarkStack: public CHeapObj<mtGC>  {
-  //
   friend class CMSCollector;   // To get at expansion stats further below.
-  //
 
   VirtualSpace _virtual_space;  // Space for the stack
   oop*   _base;      // Bottom of stack
@@ -188,7 +187,8 @@
 
  public:
   CMSMarkStack():
-    _par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
+    _par_lock(Mutex::event, "CMSMarkStack._par_lock", true,
+              Monitor::_safepoint_check_never),
     _hit_limit(0),
     _failed_double(0) {}
 
@@ -1031,6 +1031,9 @@
   void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
   CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }
 
+  // Accessing spaces
+  CompactibleSpace* space() const { return (CompactibleSpace*)_cmsSpace; }
+
  private:
   // For parallel young-gen GC support.
   CMSParGCThreadState** _par_gc_thread_states;
@@ -1064,6 +1067,10 @@
   double initiating_occupancy() const { return _initiating_occupancy; }
   void   init_initiating_occupancy(intx io, uintx tr);
 
+  void expand_for_gc_cause(size_t bytes, size_t expand_bytes, CMSExpansionCause::Cause cause);
+
+  void assert_correct_size_change_locking();
+
  public:
   ConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
                                 int level, CardTableRS* ct,
@@ -1100,23 +1107,14 @@
   // Override
   virtual void ref_processor_init();
 
-  // Grow generation by specified size (returns false if unable to grow)
-  bool grow_by(size_t bytes);
-  // Grow generation to reserved size.
-  bool grow_to_reserved();
-
   void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }
 
   // Space enquiries
-  size_t capacity() const;
-  size_t used() const;
-  size_t free() const;
   double occupancy() const { return ((double)used())/((double)capacity()); }
   size_t contiguous_available() const;
   size_t unsafe_max_alloc_nogc() const;
 
   // over-rides
-  MemRegion used_region() const;
   MemRegion used_region_at_save_marks() const;
 
   // Does a "full" (forced) collection invoked on this generation collect
@@ -1127,10 +1125,6 @@
     return !ScavengeBeforeFullGC;
   }
 
-  void space_iterate(SpaceClosure* blk, bool usedOnly = false);
-
-  // Support for compaction
-  CompactibleSpace* first_compaction_space() const;
   // Adjust quantities in the generation affected by
   // the compaction.
   void reset_after_compaction();
@@ -1190,18 +1184,13 @@
   }
 
   // Allocation failure
-  void expand(size_t bytes, size_t expand_bytes,
-    CMSExpansionCause::Cause cause);
-  virtual bool expand(size_t bytes, size_t expand_bytes);
   void shrink(size_t bytes);
-  void shrink_by(size_t bytes);
   HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
   bool expand_and_ensure_spooling_space(PromotionInfo* promo);
 
   // Iteration support and related enquiries
   void save_marks();
   bool no_allocs_since_save_marks();
-  void younger_refs_iterate(OopsInGenClosure* cl);
 
   // Iteration support specific to CMS generations
   void save_sweep_limit();
--- a/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -369,22 +369,6 @@
   cmsSpace()->save_sweep_limit();
 }
 
-inline size_t ConcurrentMarkSweepGeneration::capacity() const {
-  return _cmsSpace->capacity();
-}
-
-inline size_t ConcurrentMarkSweepGeneration::used() const {
-  return _cmsSpace->used();
-}
-
-inline size_t ConcurrentMarkSweepGeneration::free() const {
-  return _cmsSpace->free();
-}
-
-inline MemRegion ConcurrentMarkSweepGeneration::used_region() const {
-  return _cmsSpace->used_region();
-}
-
 inline MemRegion ConcurrentMarkSweepGeneration::used_region_at_save_marks() const {
   return _cmsSpace->used_region_at_save_marks();
 }
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentG1RefineThread.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -52,7 +52,8 @@
   // The 0th worker is notified by mutator threads and has a special monitor.
   // The last worker is used for young gen rset size sampling.
   if (worker_id > 0) {
-    _monitor = new Monitor(Mutex::nonleaf, "Refinement monitor", true);
+    _monitor = new Monitor(Mutex::nonleaf, "Refinement monitor", true,
+                           Monitor::_safepoint_check_never);
   } else {
     _monitor = DirtyCardQ_CBL_mon;
   }
--- a/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -971,7 +971,7 @@
   // Start Concurrent Marking weak-reference discovery.
   ReferenceProcessor* rp = g1h->ref_processor_cm();
   // enable ("weak") refs discovery
-  rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
+  rp->enable_discovery();
   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 
   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
--- a/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -254,25 +254,23 @@
 HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
                                                        bool force) {
   assert(!force, "not supported for GC alloc regions");
-  return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
+  return _g1h->new_gc_alloc_region(word_size, count(), InCSetState::Young);
 }
 
 void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
                                           size_t allocated_bytes) {
-  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
-                               GCAllocForSurvived);
+  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes, InCSetState::Young);
 }
 
 HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
                                                   bool force) {
   assert(!force, "not supported for GC alloc regions");
-  return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
+  return _g1h->new_gc_alloc_region(word_size, count(), InCSetState::Old);
 }
 
 void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
                                      size_t allocated_bytes) {
-  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
-                               GCAllocForTenured);
+  _g1h->retire_gc_alloc_region(alloc_region, allocated_bytes, InCSetState::Old);
 }
 
 HeapRegion* OldGCAllocRegion::release() {
--- a/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -113,15 +113,16 @@
 G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
   ParGCAllocBuffer(gclab_word_size), _retired(true) { }
 
-HeapWord* G1ParGCAllocator::allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
-  HeapWord* obj = NULL;
-  size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
+HeapWord* G1ParGCAllocator::allocate_direct_or_new_plab(InCSetState dest,
+                                                        size_t word_sz,
+                                                        AllocationContext_t context) {
+  size_t gclab_word_size = _g1h->desired_plab_sz(dest);
   if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
-    G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose, context);
+    G1ParGCAllocBuffer* alloc_buf = alloc_buffer(dest, context);
     add_to_alloc_buffer_waste(alloc_buf->words_remaining());
     alloc_buf->retire(false /* end_of_gc */, false /* retain */);
 
-    HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size, context);
+    HeapWord* buf = _g1h->par_allocate_during_gc(dest, gclab_word_size, context);
     if (buf == NULL) {
       return NULL; // Let caller handle allocation failure.
     }
@@ -129,30 +130,33 @@
     alloc_buf->set_word_size(gclab_word_size);
     alloc_buf->set_buf(buf);
 
-    obj = alloc_buf->allocate(word_sz);
+    HeapWord* const obj = alloc_buf->allocate(word_sz);
     assert(obj != NULL, "buffer was definitely big enough...");
+    return obj;
   } else {
-    obj = _g1h->par_allocate_during_gc(purpose, word_sz, context);
+    return _g1h->par_allocate_during_gc(dest, word_sz, context);
   }
-  return obj;
 }
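
allocate_direct_or_new_plab keeps the usual PLAB policy: a request small enough relative to the PLAB size (under ParallelGCBufferWastePct percent of it) retires the current buffer and refills a fresh PLAB, while anything larger is allocated directly in the region. The decision reduces to one comparison (schematic helper, not the VM code):

  // True if the request should go through a (possibly new) PLAB rather than
  // a direct allocation; word_sz * 100 < plab_size * waste_pct avoids division.
  static bool should_use_plab(size_t word_sz, size_t plab_size, size_t waste_pct) {
    return word_sz * 100 < plab_size * waste_pct;
  }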
 
 G1DefaultParGCAllocator::G1DefaultParGCAllocator(G1CollectedHeap* g1h) :
-            G1ParGCAllocator(g1h),
-            _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
-            _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)) {
-
-  _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
-  _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;
-
+  G1ParGCAllocator(g1h),
+  _surviving_alloc_buffer(g1h->desired_plab_sz(InCSetState::Young)),
+  _tenured_alloc_buffer(g1h->desired_plab_sz(InCSetState::Old)) {
+  for (uint state = 0; state < InCSetState::Num; state++) {
+    _alloc_buffers[state] = NULL;
+  }
+  _alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
+  _alloc_buffers[InCSetState::Old]  = &_tenured_alloc_buffer;
 }
 
 void G1DefaultParGCAllocator::retire_alloc_buffers() {
-  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
-    size_t waste = _alloc_buffers[ap]->words_remaining();
-    add_to_alloc_buffer_waste(waste);
-    _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
-                                               true /* end_of_gc */,
-                                               false /* retain */);
+  for (uint state = 0; state < InCSetState::Num; state++) {
+    G1ParGCAllocBuffer* const buf = _alloc_buffers[state];
+    if (buf != NULL) {
+      add_to_alloc_buffer_waste(buf->words_remaining());
+      buf->flush_stats_and_retire(_g1h->alloc_buffer_stats(state),
+                                  true /* end_of_gc */,
+                                  false /* retain */);
+    }
   }
 }
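
The rewritten allocate_direct_or_new_plab() keeps the existing refill policy: a copy only retires the current buffer and requests a fresh PLAB when the object is small relative to the desired PLAB size; larger objects are allocated directly in the destination regions. A minimal standalone sketch of that threshold check follows; the two constants are hypothetical stand-ins for desired_plab_sz() and the ParallelGCBufferWastePct flag.

#include <cstddef>

// Hypothetical stand-ins for _g1h->desired_plab_sz(dest) and the
// ParallelGCBufferWastePct flag; the real values are runtime-dependent.
static const size_t kGclabWordSize            = 4096;
static const size_t kParallelGCBufferWastePct = 10;

// True when word_sz is small enough (relative to the PLAB size) that retiring
// the current buffer and refilling is cheaper than the wasted buffer tail;
// larger objects are allocated directly in the destination regions instead.
static bool should_refill_plab(size_t word_sz) {
  return word_sz * 100 < kGclabWordSize * kParallelGCBufferWastePct;
}
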
--- a/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1Allocator.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -27,14 +27,9 @@
 
 #include "gc_implementation/g1/g1AllocationContext.hpp"
 #include "gc_implementation/g1/g1AllocRegion.hpp"
+#include "gc_implementation/g1/g1InCSetState.hpp"
 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
 
-enum GCAllocPurpose {
-  GCAllocForTenured,
-  GCAllocForSurvived,
-  GCAllocPurposeCount
-};
-
 // Base class for G1 allocators.
 class G1Allocator : public CHeapObj<mtGC> {
   friend class VMStructs;
@@ -178,20 +173,40 @@
 protected:
   G1CollectedHeap* _g1h;
 
+  // The survivor alignment in effect in bytes.
+  // == 0 : don't align survivors
+  // != 0 : align survivors to that alignment
+  // These values were chosen to favor the non-alignment case since some
+  // architectures have special compare-against-zero instructions.
+  const uint _survivor_alignment_bytes;
+
   size_t _alloc_buffer_waste;
   size_t _undo_waste;
 
   void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
   void add_to_undo_waste(size_t waste)         { _undo_waste += waste; }
 
-  HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context);
+  virtual void retire_alloc_buffers() = 0;
+  virtual G1ParGCAllocBuffer* alloc_buffer(InCSetState dest, AllocationContext_t context) = 0;
 
-  virtual void retire_alloc_buffers() = 0;
-  virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) = 0;
+  // Calculate the survivor space object alignment in bytes. Returns that
+  // alignment, or 0 if there are no restrictions on survivor alignment.
+  static uint calc_survivor_alignment_bytes() {
+    assert(SurvivorAlignmentInBytes >= ObjectAlignmentInBytes, "sanity");
+    if (SurvivorAlignmentInBytes == ObjectAlignmentInBytes) {
+      // No need to align objects in the survivors differently, return 0
+      // which means "survivor alignment is not used".
+      return 0;
+    } else {
+      assert(SurvivorAlignmentInBytes > 0, "sanity");
+      return SurvivorAlignmentInBytes;
+    }
+  }
 
 public:
   G1ParGCAllocator(G1CollectedHeap* g1h) :
-    _g1h(g1h), _alloc_buffer_waste(0), _undo_waste(0) {
+    _g1h(g1h), _survivor_alignment_bytes(calc_survivor_alignment_bytes()),
+    _alloc_buffer_waste(0), _undo_waste(0) {
   }
 
   static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);
@@ -199,24 +214,40 @@
   size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
   size_t undo_waste() {return _undo_waste; }
 
-  HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
-    HeapWord* obj = NULL;
-    if (purpose == GCAllocForSurvived) {
-      obj = alloc_buffer(purpose, context)->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
+  // Allocate word_sz words in dest, either directly into the regions or by
+  // allocating a new PLAB. Returns the address of the allocated memory, or NULL
+  // if not successful.
+  HeapWord* allocate_direct_or_new_plab(InCSetState dest,
+                                        size_t word_sz,
+                                        AllocationContext_t context);
+
+  // Allocate word_sz words in the PLAB of dest.  Returns the address of the
+  // allocated memory, or NULL if not successful.
+  HeapWord* plab_allocate(InCSetState dest,
+                          size_t word_sz,
+                          AllocationContext_t context) {
+    G1ParGCAllocBuffer* buffer = alloc_buffer(dest, context);
+    if (_survivor_alignment_bytes == 0) {
+      return buffer->allocate(word_sz);
     } else {
-      obj = alloc_buffer(purpose, context)->allocate(word_sz);
+      return buffer->allocate_aligned(word_sz, _survivor_alignment_bytes);
     }
+  }
+
+  HeapWord* allocate(InCSetState dest, size_t word_sz,
+                     AllocationContext_t context) {
+    HeapWord* const obj = plab_allocate(dest, word_sz, context);
     if (obj != NULL) {
       return obj;
     }
-    return allocate_slow(purpose, word_sz, context);
+    return allocate_direct_or_new_plab(dest, word_sz, context);
   }
 
-  void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
-    if (alloc_buffer(purpose, context)->contains(obj)) {
-      assert(alloc_buffer(purpose, context)->contains(obj + word_sz - 1),
+  void undo_allocation(InCSetState dest, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
+    if (alloc_buffer(dest, context)->contains(obj)) {
+      assert(alloc_buffer(dest, context)->contains(obj + word_sz - 1),
              "should contain whole object");
-      alloc_buffer(purpose, context)->undo_allocation(obj, word_sz);
+      alloc_buffer(dest, context)->undo_allocation(obj, word_sz);
     } else {
       CollectedHeap::fill_with_object(obj, word_sz);
       add_to_undo_waste(word_sz);
@@ -227,13 +258,17 @@
 class G1DefaultParGCAllocator : public G1ParGCAllocator {
   G1ParGCAllocBuffer  _surviving_alloc_buffer;
   G1ParGCAllocBuffer  _tenured_alloc_buffer;
-  G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
+  G1ParGCAllocBuffer* _alloc_buffers[InCSetState::Num];
 
 public:
   G1DefaultParGCAllocator(G1CollectedHeap* g1h);
 
-  virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) {
-    return _alloc_buffers[purpose];
+  virtual G1ParGCAllocBuffer* alloc_buffer(InCSetState dest, AllocationContext_t context) {
+    assert(dest.is_valid(),
+           err_msg("Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value()));
+    assert(_alloc_buffers[dest.value()] != NULL,
+           err_msg("Allocation buffer is NULL: " CSETSTATE_FORMAT, dest.value()));
+    return _alloc_buffers[dest.value()];
   }
 
   virtual void retire_alloc_buffers() ;
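
plab_allocate() above branches on the cached _survivor_alignment_bytes value instead of re-testing flags on every allocation; zero is deliberately the "no alignment" encoding so the common case is a single compare against zero. The sketch below shows what the two bump-pointer paths might look like; PlabSketch and its members are illustrative names only, not the ParGCAllocBuffer API.

#include <cstddef>
#include <cstdint>

// Illustrative bump-pointer buffer (not the real ParGCAllocBuffer).
struct PlabSketch {
  char* _top;
  char* _end;

  // Fast path: plain bump-pointer allocation, no alignment work.
  void* allocate(size_t bytes) {
    if (_end - _top < (ptrdiff_t)bytes) return NULL;
    void* obj = _top;
    _top += bytes;
    return obj;
  }

  // Aligned path: round _top up to a power-of-two alignment first. The real
  // code must also fill the alignment gap with a dummy object; omitted here.
  void* allocate_aligned(size_t bytes, size_t alignment) {
    uintptr_t aligned = (reinterpret_cast<uintptr_t>(_top) + alignment - 1) & ~(alignment - 1);
    char* obj = reinterpret_cast<char*>(aligned);
    if (obj + bytes > _end) return NULL;
    _top = obj + bytes;
    return obj;
  }
};
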
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -352,7 +352,7 @@
 }
 
 void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
-  OtherRegionsTable::invalidate(start_idx, num_regions);
+  HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
 }
 
 void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
@@ -1301,7 +1301,7 @@
       // Temporarily clear the STW ref processor's _is_alive_non_header field.
       ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
 
-      ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
+      ref_processor_stw()->enable_discovery();
       ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
 
       // Do collection work
@@ -1886,13 +1886,12 @@
 
   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*)(heap_rs.base() + heap_rs.size()));
 
-  // Create the gen rem set (and barrier set) for the entire reserved region.
-  _rem_set = collector_policy()->create_rem_set(reserved_region());
-  set_barrier_set(rem_set()->bs());
-  if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) {
-    vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS");
-    return JNI_ENOMEM;
-  }
+  // Create the barrier set for the entire reserved region.
+  G1SATBCardTableLoggingModRefBS* bs
+    = new G1SATBCardTableLoggingModRefBS(reserved_region());
+  bs->initialize();
+  assert(bs->is_a(BarrierSet::G1SATBCTLogging), "sanity");
+  set_barrier_set(bs);
 
   // Also create a G1 rem set.
   _g1_rem_set = new G1RemSet(this, g1_barrier_set());
@@ -3153,8 +3152,6 @@
         failures = true;
       }
     }
-    if (!silent) gclog_or_tty->print("RemSet ");
-    rem_set()->verify();
 
     if (G1StringDedup::is_enabled()) {
       if (!silent) gclog_or_tty->print("StrDedup ");
@@ -3750,8 +3747,7 @@
       // reference processing currently works in G1.
 
       // Enable discovery in the STW reference processor
-      ref_processor_stw()->enable_discovery(true /*verify_disabled*/,
-                                            true /*verify_no_refs*/);
+      ref_processor_stw()->enable_discovery();
 
       {
         // We want to temporarily turn off discovery by the
@@ -3819,6 +3815,8 @@
 
         register_humongous_regions_with_in_cset_fast_test();
 
+        assert(check_cset_fast_test(), "Inconsistency in the InCSetState table.");
+
         _cm->note_start_of_gc();
         // We should not verify the per-thread SATB buffers given that
         // we have not filtered them yet (we'll do so during the
@@ -4048,29 +4046,6 @@
   return true;
 }
 
-size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
-{
-  size_t gclab_word_size;
-  switch (purpose) {
-    case GCAllocForSurvived:
-      gclab_word_size = _survivor_plab_stats.desired_plab_sz();
-      break;
-    case GCAllocForTenured:
-      gclab_word_size = _old_plab_stats.desired_plab_sz();
-      break;
-    default:
-      assert(false, "unknown GCAllocPurpose");
-      gclab_word_size = _old_plab_stats.desired_plab_sz();
-      break;
-  }
-
-  // Prevent humongous PLAB sizes for two reasons:
-  // * PLABs are allocated using a similar paths as oops, but should
-  //   never be in a humongous region
-  // * Allowing humongous PLABs needlessly churns the region free lists
-  return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
-}
-
 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
   _drain_in_progress = false;
   set_evac_failure_closure(cl);
@@ -4196,35 +4171,6 @@
   }
 }
 
-HeapWord* G1CollectedHeap::par_allocate_during_gc(GCAllocPurpose purpose,
-                                                  size_t word_size,
-                                                  AllocationContext_t context) {
-  if (purpose == GCAllocForSurvived) {
-    HeapWord* result = survivor_attempt_allocation(word_size, context);
-    if (result != NULL) {
-      return result;
-    } else {
-      // Let's try to allocate in the old gen in case we can fit the
-      // object there.
-      return old_attempt_allocation(word_size, context);
-    }
-  } else {
-    assert(purpose ==  GCAllocForTenured, "sanity");
-    HeapWord* result = old_attempt_allocation(word_size, context);
-    if (result != NULL) {
-      return result;
-    } else {
-      // Let's try to allocate in the survivors in case we can fit the
-      // object there.
-      return survivor_attempt_allocation(word_size, context);
-    }
-  }
-
-  ShouldNotReachHere();
-  // Trying to keep some compilers happy.
-  return NULL;
-}
-
 void G1ParCopyHelper::mark_object(oop obj) {
   assert(!_g1->heap_region_containing(obj)->in_collection_set(), "should not mark objects in the CSet");
 
@@ -4267,15 +4213,14 @@
 
   assert(_worker_id == _par_scan_state->queue_num(), "sanity");
 
-  G1CollectedHeap::in_cset_state_t state = _g1->in_cset_state(obj);
-
-  if (state == G1CollectedHeap::InCSet) {
+  const InCSetState state = _g1->in_cset_state(obj);
+  if (state.is_in_cset()) {
     oop forwardee;
     markOop m = obj->mark();
     if (m->is_marked()) {
       forwardee = (oop) m->decode_pointer();
     } else {
-      forwardee = _par_scan_state->copy_to_survivor_space(obj, m);
+      forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
     }
     assert(forwardee != NULL, "forwardee should not be NULL");
     oopDesc::encode_store_heap_oop(p, forwardee);
@@ -4289,7 +4234,7 @@
       do_klass_barrier(p, forwardee);
     }
   } else {
-    if (state == G1CollectedHeap::IsHumongous) {
+    if (state.is_humongous()) {
       _g1->set_humongous_is_live(obj);
     }
     // The object is not in collection set. If we're a root scanning
@@ -4609,7 +4554,7 @@
 G1CollectedHeap::
 g1_process_roots(OopClosure* scan_non_heap_roots,
                  OopClosure* scan_non_heap_weak_roots,
-                 OopsInHeapRegionClosure* scan_rs,
+                 G1ParPushHeapRSClosure* scan_rs,
                  CLDClosure* scan_strong_clds,
                  CLDClosure* scan_weak_clds,
                  CodeBlobClosure* scan_strong_code,
@@ -4933,7 +4878,7 @@
   }
 };
 
-Monitor* G1CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock");
+Monitor* G1CodeCacheUnloadingTask::_lock = new Monitor(Mutex::leaf, "Code Cache Unload lock", false, Monitor::_safepoint_check_never);
 
 class G1KlassCleaningTask : public StackObj {
   BoolObjectClosure*                      _is_alive;
@@ -5145,17 +5090,17 @@
     oop obj = *p;
     assert(obj != NULL, "the caller should have filtered out NULL values");
 
-    G1CollectedHeap::in_cset_state_t cset_state = _g1->in_cset_state(obj);
-    if (cset_state == G1CollectedHeap::InNeither) {
+    const InCSetState cset_state = _g1->in_cset_state(obj);
+    if (!cset_state.is_in_cset_or_humongous()) {
       return;
     }
-    if (cset_state == G1CollectedHeap::InCSet) {
+    if (cset_state.is_in_cset()) {
       assert( obj->is_forwarded(), "invariant" );
       *p = obj->forwardee();
     } else {
       assert(!obj->is_forwarded(), "invariant" );
-      assert(cset_state == G1CollectedHeap::IsHumongous,
-             err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state));
+      assert(cset_state.is_humongous(),
+             err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state.value()));
       _g1->set_humongous_is_live(obj);
     }
   }
@@ -5640,8 +5585,6 @@
 
   init_for_evac_failure(NULL);
 
-  rem_set()->prepare_for_younger_refs_iterate(true);
-
   assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
   double start_par_time_sec = os::elapsedTime();
   double end_par_time_sec;
@@ -5951,6 +5894,70 @@
   heap_region_iterate(&cl);
   guarantee(!cl.failures(), "bitmap verification");
 }
+
+class G1CheckCSetFastTableClosure : public HeapRegionClosure {
+ private:
+  bool _failures;
+ public:
+  G1CheckCSetFastTableClosure() : HeapRegionClosure(), _failures(false) { }
+
+  virtual bool doHeapRegion(HeapRegion* hr) {
+    uint i = hr->hrm_index();
+    InCSetState cset_state = (InCSetState) G1CollectedHeap::heap()->_in_cset_fast_test.get_by_index(i);
+    if (hr->is_humongous()) {
+      if (hr->in_collection_set()) {
+        gclog_or_tty->print_cr("\n## humongous region %u in CSet", i);
+        _failures = true;
+        return true;
+      }
+      if (cset_state.is_in_cset()) {
+        gclog_or_tty->print_cr("\n## inconsistent cset state %d for humongous region %u", cset_state.value(), i);
+        _failures = true;
+        return true;
+      }
+      if (hr->is_continues_humongous() && cset_state.is_humongous()) {
+        gclog_or_tty->print_cr("\n## inconsistent cset state %d for continues humongous region %u", cset_state.value(), i);
+        _failures = true;
+        return true;
+      }
+    } else {
+      if (cset_state.is_humongous()) {
+        gclog_or_tty->print_cr("\n## inconsistent cset state %d for non-humongous region %u", cset_state.value(), i);
+        _failures = true;
+        return true;
+      }
+      if (hr->in_collection_set() != cset_state.is_in_cset()) {
+        gclog_or_tty->print_cr("\n## in CSet %d / cset state %d inconsistency for region %u",
+                               hr->in_collection_set(), cset_state.value(), i);
+        _failures = true;
+        return true;
+      }
+      if (cset_state.is_in_cset()) {
+        if (hr->is_young() != (cset_state.is_young())) {
+          gclog_or_tty->print_cr("\n## is_young %d / cset state %d inconsistency for region %u",
+                                 hr->is_young(), cset_state.value(), i);
+          _failures = true;
+          return true;
+        }
+        if (hr->is_old() != (cset_state.is_old())) {
+          gclog_or_tty->print_cr("\n## is_old %d / cset state %d inconsistency for region %u",
+                                 hr->is_old(), cset_state.value(), i);
+          _failures = true;
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+
+  bool failures() const { return _failures; }
+};
+
+bool G1CollectedHeap::check_cset_fast_test() {
+  G1CheckCSetFastTableClosure cl;
+  _hrm.iterate(&cl);
+  return !cl.failures();
+}
 #endif // PRODUCT
 
 void G1CollectedHeap::cleanUpCardTable() {
@@ -6519,20 +6526,20 @@
 
 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
                                                  uint count,
-                                                 GCAllocPurpose ap) {
+                                                 InCSetState dest) {
   assert(FreeList_lock->owned_by_self(), "pre-condition");
 
-  if (count < g1_policy()->max_regions(ap)) {
-    bool survivor = (ap == GCAllocForSurvived);
+  if (count < g1_policy()->max_regions(dest)) {
+    const bool is_survivor = (dest.is_young());
     HeapRegion* new_alloc_region = new_region(word_size,
-                                              !survivor,
+                                              !is_survivor,
                                               true /* do_expand */);
     if (new_alloc_region != NULL) {
       // We really only need to do this for old regions given that we
       // should never scan survivors. But it doesn't hurt to do it
       // for survivors too.
       new_alloc_region->record_timestamp();
-      if (survivor) {
+      if (is_survivor) {
         new_alloc_region->set_survivor();
         _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
         check_bitmaps("Survivor Region Allocation", new_alloc_region);
@@ -6544,8 +6551,6 @@
       bool during_im = g1_policy()->during_initial_mark_pause();
       new_alloc_region->note_start_of_copying(during_im);
       return new_alloc_region;
-    } else {
-      g1_policy()->note_alloc_region_limit_reached(ap);
     }
   }
   return NULL;
@@ -6553,11 +6558,11 @@
 
 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
                                              size_t allocated_bytes,
-                                             GCAllocPurpose ap) {
+                                             InCSetState dest) {
   bool during_im = g1_policy()->during_initial_mark_pause();
   alloc_region->note_end_of_copying(during_im);
   g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
-  if (ap == GCAllocForSurvived) {
+  if (dest.is_young()) {
     young_list()->add_survivor_region(alloc_region);
   } else {
     _old_set.add(alloc_region);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -32,6 +32,7 @@
 #include "gc_implementation/g1/g1AllocRegion.hpp"
 #include "gc_implementation/g1/g1BiasedArray.hpp"
 #include "gc_implementation/g1/g1HRPrinter.hpp"
+#include "gc_implementation/g1/g1InCSetState.hpp"
 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/g1YCTypes.hpp"
@@ -213,6 +214,9 @@
   friend class G1MarkSweep;
   friend class HeapRegionClaimer;
 
+  // Testing classes.
+  friend class G1CheckCSetFastTableClosure;
+
 private:
   // The one and only G1CollectedHeap, so static functions can find it.
   static G1CollectedHeap* _g1h;
@@ -547,15 +551,9 @@
   // allocation region, either by picking one or expanding the
   // heap, and then allocate a block of the given size. The block
   // may not be a humongous - it must fit into a single heap region.
-  HeapWord* par_allocate_during_gc(GCAllocPurpose purpose,
-                                   size_t word_size,
-                                   AllocationContext_t context);
-
-  HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
-                                    HeapRegion*    alloc_region,
-                                    bool           par,
-                                    size_t         word_size);
-
+  inline HeapWord* par_allocate_during_gc(InCSetState dest,
+                                          size_t word_size,
+                                          AllocationContext_t context);
   // Ensure that no further allocations can happen in "r", bearing in mind
   // that parallel threads might be attempting allocations.
   void par_allocate_remaining_space(HeapRegion* r);
@@ -577,9 +575,9 @@
 
   // For GC alloc regions.
   HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
-                                  GCAllocPurpose ap);
+                                  InCSetState dest);
   void retire_gc_alloc_region(HeapRegion* alloc_region,
-                              size_t allocated_bytes, GCAllocPurpose ap);
+                              size_t allocated_bytes, InCSetState dest);
 
   // - if explicit_gc is true, the GC is for a System.gc() or a heap
   //   inspection request and should collect the entire heap
@@ -640,26 +638,11 @@
   // (Rounds up to a HeapRegion boundary.)
   bool expand(size_t expand_bytes);
 
-  // Returns the PLAB statistics given a purpose.
-  PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
-    PLABStats* stats = NULL;
+  // Returns the PLAB statistics for a given destination.
+  inline PLABStats* alloc_buffer_stats(InCSetState dest);
 
-    switch (purpose) {
-    case GCAllocForSurvived:
-      stats = &_survivor_plab_stats;
-      break;
-    case GCAllocForTenured:
-      stats = &_old_plab_stats;
-      break;
-    default:
-      assert(false, "unrecognized GCAllocPurpose");
-    }
-
-    return stats;
-  }
-
-  // Determines PLAB size for a particular allocation purpose.
-  size_t desired_plab_sz(GCAllocPurpose purpose);
+  // Determines PLAB size for a given destination.
+  inline size_t desired_plab_sz(InCSetState dest);
 
   inline AllocationContextStats& allocation_context_stats();
 
@@ -683,8 +666,11 @@
   void register_humongous_regions_with_in_cset_fast_test();
   // We register a region with the fast "in collection set" test. We
   // simply set to true the array slot corresponding to this region.
-  void register_region_with_in_cset_fast_test(HeapRegion* r) {
-    _in_cset_fast_test.set_in_cset(r->hrm_index());
+  void register_young_region_with_in_cset_fast_test(HeapRegion* r) {
+    _in_cset_fast_test.set_in_young(r->hrm_index());
+  }
+  void register_old_region_with_in_cset_fast_test(HeapRegion* r) {
+    _in_cset_fast_test.set_in_old(r->hrm_index());
   }
 
   // This is a fast test on whether a reference points into the
@@ -821,7 +807,7 @@
   // In the sequential case this param will be ignored.
   void g1_process_roots(OopClosure* scan_non_heap_roots,
                         OopClosure* scan_non_heap_weak_roots,
-                        OopsInHeapRegionClosure* scan_rs,
+                        G1ParPushHeapRSClosure* scan_rs,
                         CLDClosure* scan_strong_clds,
                         CLDClosure* scan_weak_clds,
                         CodeBlobClosure* scan_strong_code,
@@ -1181,6 +1167,9 @@
   // appropriate error messages and crash.
   void check_bitmaps(const char* caller) PRODUCT_RETURN;
 
+  // Do sanity check on the contents of the in-cset fast test table.
+  bool check_cset_fast_test() PRODUCT_RETURN_( return true; );
+
   // verify_region_sets() performs verification over the region
   // lists. It will be compiled in the product code to be used when
   // necessary (i.e., during heap verification).
@@ -1276,53 +1265,15 @@
 
   inline bool is_in_cset_or_humongous(const oop obj);
 
-  enum in_cset_state_t {
-   InNeither,           // neither in collection set nor humongous
-   InCSet,              // region is in collection set only
-   IsHumongous          // region is a humongous start region
-  };
  private:
-  // Instances of this class are used for quick tests on whether a reference points
-  // into the collection set or is a humongous object (points into a humongous
-  // object).
-  // Each of the array's elements denotes whether the corresponding region is in
-  // the collection set or a humongous region.
-  // We use this to quickly reclaim humongous objects: by making a humongous region
-  // succeed this test, we sort-of add it to the collection set. During the reference
-  // iteration closures, when we see a humongous region, we simply mark it as
-  // referenced, i.e. live.
-  class G1FastCSetBiasedMappedArray : public G1BiasedMappedArray<char> {
-   protected:
-    char default_value() const { return G1CollectedHeap::InNeither; }
-   public:
-    void set_humongous(uintptr_t index) {
-      assert(get_by_index(index) != InCSet, "Should not overwrite InCSet values");
-      set_by_index(index, G1CollectedHeap::IsHumongous);
-    }
-
-    void clear_humongous(uintptr_t index) {
-      set_by_index(index, G1CollectedHeap::InNeither);
-    }
-
-    void set_in_cset(uintptr_t index) {
-      assert(get_by_index(index) != G1CollectedHeap::IsHumongous, "Should not overwrite IsHumongous value");
-      set_by_index(index, G1CollectedHeap::InCSet);
-    }
-
-    bool is_in_cset_or_humongous(HeapWord* addr) const { return get_by_address(addr) != G1CollectedHeap::InNeither; }
-    bool is_in_cset(HeapWord* addr) const { return get_by_address(addr) == G1CollectedHeap::InCSet; }
-    G1CollectedHeap::in_cset_state_t at(HeapWord* addr) const { return (G1CollectedHeap::in_cset_state_t)get_by_address(addr); }
-    void clear() { G1BiasedMappedArray<char>::clear(); }
-  };
-
   // This array is used for a quick test on whether a reference points into
   // the collection set or not. Each of the array's elements denotes whether the
   // corresponding region is in the collection set or not.
-  G1FastCSetBiasedMappedArray _in_cset_fast_test;
+  G1InCSetStateFastTestBiasedMappedArray _in_cset_fast_test;
 
  public:
 
-  inline in_cset_state_t in_cset_state(const oop obj);
+  inline InCSetState in_cset_state(const oop obj);
 
   // Return "TRUE" iff the given object address is in the reserved
   // region of g1.
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -35,6 +35,41 @@
 #include "runtime/orderAccess.inline.hpp"
 #include "utilities/taskqueue.hpp"
 
+PLABStats* G1CollectedHeap::alloc_buffer_stats(InCSetState dest) {
+  switch (dest.value()) {
+    case InCSetState::Young:
+      return &_survivor_plab_stats;
+    case InCSetState::Old:
+      return &_old_plab_stats;
+    default:
+      ShouldNotReachHere();
+      return NULL; // Keep some compilers happy
+  }
+}
+
+size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) {
+  size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz();
+  // Prevent humongous PLAB sizes for two reasons:
+  // * PLABs are allocated using a similar path as oops, but should
+  //   never be in a humongous region
+  // * Allowing humongous PLABs needlessly churns the region free lists
+  return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
+}
+
+HeapWord* G1CollectedHeap::par_allocate_during_gc(InCSetState dest,
+                                                  size_t word_size,
+                                                  AllocationContext_t context) {
+  switch (dest.value()) {
+    case InCSetState::Young:
+      return survivor_attempt_allocation(word_size, context);
+    case InCSetState::Old:
+      return old_attempt_allocation(word_size, context);
+    default:
+      ShouldNotReachHere();
+      return NULL; // Keep some compilers happy
+  }
+}
+
 // Inline functions for G1CollectedHeap
 
 inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() {
@@ -203,7 +238,7 @@
   return _in_cset_fast_test.is_in_cset_or_humongous((HeapWord*)obj);
 }
 
-G1CollectedHeap::in_cset_state_t G1CollectedHeap::in_cset_state(const oop obj) {
+InCSetState G1CollectedHeap::in_cset_state(const oop obj) {
   return _in_cset_fast_test.at((HeapWord*)obj);
 }
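
desired_plab_sz() above clamps the ergonomically computed PLAB size against _humongous_object_threshold_in_words so a PLAB itself can never become a humongous allocation. A tiny sketch of the clamp; the threshold parameter is illustrative (in G1 it is half a heap region, in words).

#include <algorithm>
#include <cstddef>

// Clamp the ergonomically computed PLAB size so that a PLAB can never become
// a humongous allocation; 'threshold_words' stands in for the G1 field.
static size_t clamp_plab_sz(size_t desired_words, size_t threshold_words) {
  return std::min(threshold_words, desired_words);
}
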
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -1437,18 +1437,6 @@
   return young_list_length < young_list_max_length;
 }
 
-uint G1CollectorPolicy::max_regions(int purpose) {
-  switch (purpose) {
-    case GCAllocForSurvived:
-      return _max_survivor_regions;
-    case GCAllocForTenured:
-      return REGIONS_UNLIMITED;
-    default:
-      ShouldNotReachHere();
-      return REGIONS_UNLIMITED;
-  };
-}
-
 void G1CollectorPolicy::update_max_gc_locker_expansion() {
   uint expansion_region_num = 0;
   if (GCLockerEdenExpansionPercent > 0) {
@@ -1634,7 +1622,7 @@
   hr->set_next_in_collection_set(_collection_set);
   _collection_set = hr;
   _collection_set_bytes_used_before += hr->used();
-  _g1->register_region_with_in_cset_fast_test(hr);
+  _g1->register_old_region_with_in_cset_fast_test(hr);
   size_t rs_length = hr->rem_set()->occupied();
   _recorded_rs_lengths += rs_length;
   _old_cset_region_length += 1;
@@ -1767,7 +1755,7 @@
   hr->set_in_collection_set(true);
   assert( hr->next_in_collection_set() == NULL, "invariant");
 
-  _g1->register_region_with_in_cset_fast_test(hr);
+  _g1->register_young_region_with_in_cset_fast_test(hr);
 }
 
 // Add the region at the RHS of the incremental cset
--- a/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -881,28 +881,20 @@
 public:
   uint tenuring_threshold() const { return _tenuring_threshold; }
 
-  inline GCAllocPurpose
-    evacuation_destination(HeapRegion* src_region, uint age, size_t word_sz) {
-      if (age < _tenuring_threshold && src_region->is_young()) {
-        return GCAllocForSurvived;
-      } else {
-        return GCAllocForTenured;
-      }
-  }
-
-  inline bool track_object_age(GCAllocPurpose purpose) {
-    return purpose == GCAllocForSurvived;
-  }
-
   static const uint REGIONS_UNLIMITED = (uint) -1;
 
-  uint max_regions(int purpose);
-
-  // The limit on regions for a particular purpose is reached.
-  void note_alloc_region_limit_reached(int purpose) {
-    if (purpose == GCAllocForSurvived) {
-      _tenuring_threshold = 0;
+  uint max_regions(InCSetState dest) {
+    switch (dest.value()) {
+      case InCSetState::Young:
+        return _max_survivor_regions;
+      case InCSetState::Old:
+        return REGIONS_UNLIMITED;
+      default:
+        assert(false, err_msg("Unknown dest state: " CSETSTATE_FORMAT, dest.value()));
+        break;
     }
+    // keep some compilers happy
+    return 0;
   }
 
   void note_start_adding_survivor_regions() {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1InCSetState.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1INCSETSTATE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_G1INCSETSTATE_HPP
+
+#include "gc_implementation/g1/g1BiasedArray.hpp"
+#include "memory/allocation.hpp"
+
+// Per-region state during garbage collection.
+struct InCSetState {
+ public:
+  // We use different types to represent the state value. In particular, SPARC
+  // puts values in structs from "left to right", i.e. MSB to LSB. This results
+  // in many unnecessary shift operations when loading and storing values of
+  // this type. This degrades performance significantly (>10%) on that platform.
+  // Other tested ABIs do not seem to have this problem, and actually tend to
+  // favor smaller types, so we use the smallest usable type there.
+#ifdef SPARC
+  #define CSETSTATE_FORMAT INTPTR_FORMAT
+  typedef intptr_t in_cset_state_t;
+#else
+  #define CSETSTATE_FORMAT "%d"
+  typedef int8_t in_cset_state_t;
+#endif
+ private:
+  in_cset_state_t _value;
+ public:
+  enum {
+    // The values were selected to micro-optimize the encoding and the
+    // frequency of the checks.
+    // The most common check is whether the region is in the collection set or not.
+    // This encoding allows us to use an != 0 check, which on some architectures
+    // (x86*) can be encoded slightly more efficiently than a normal comparison
+    // against zero.
+    // The same situation occurs when checking whether the region is humongous
+    // or not, which is encoded by values < 0.
+    // The other values are simply encoded in increasing generation order, which
+    // makes getting the next generation fast via a simple increment.
+    Humongous    = -1,    // The region is humongous - note that actually any value < 0 would be possible here.
+    NotInCSet    =  0,    // The region is not in the collection set.
+    Young        =  1,    // The region is in the collection set and a young region.
+    Old          =  2,    // The region is in the collection set and an old region.
+    Num
+  };
+
+  InCSetState(in_cset_state_t value = NotInCSet) : _value(value) {
+    assert(is_valid(), err_msg("Invalid state %d", _value));
+  }
+
+  in_cset_state_t value() const        { return _value; }
+
+  void set_old()                       { _value = Old; }
+
+  bool is_in_cset_or_humongous() const { return _value != NotInCSet; }
+  bool is_in_cset() const              { return _value > NotInCSet; }
+  bool is_humongous() const            { return _value < NotInCSet; }
+  bool is_young() const                { return _value == Young; }
+  bool is_old() const                  { return _value == Old; }
+
+#ifdef ASSERT
+  bool is_default() const              { return !is_in_cset_or_humongous(); }
+  bool is_valid() const                { return (_value >= Humongous) && (_value < Num); }
+  bool is_valid_gen() const            { return (_value >= Young && _value <= Old); }
+#endif
+};
+
+// Instances of this class are used for quick tests on whether a reference points
+// into the collection set (and if so, into which generation) or into a humongous
+// object.
+//
+// Each of the array's elements indicates whether the corresponding region is in
+// the collection set (and if so, in which generation) or is a humongous region.
+//
+// We use this to speed up reference processing during young collection and to
+// quickly reclaim humongous objects. For the latter, by making a humongous region
+// succeed this test, we sort-of add it to the collection set. In the reference
+// iteration closures, when we see a humongous region, we then simply mark it as
+// referenced, i.e. live.
+class G1InCSetStateFastTestBiasedMappedArray : public G1BiasedMappedArray<InCSetState> {
+ protected:
+  InCSetState default_value() const { return InCSetState::NotInCSet; }
+ public:
+  void set_humongous(uintptr_t index) {
+    assert(get_by_index(index).is_default(),
+           err_msg("State at index " INTPTR_FORMAT" should be default but is " CSETSTATE_FORMAT, index, get_by_index(index).value()));
+    set_by_index(index, InCSetState::Humongous);
+  }
+
+  void clear_humongous(uintptr_t index) {
+    set_by_index(index, InCSetState::NotInCSet);
+  }
+
+  void set_in_young(uintptr_t index) {
+    assert(get_by_index(index).is_default(),
+           err_msg("State at index " INTPTR_FORMAT" should be default but is " CSETSTATE_FORMAT, index, get_by_index(index).value()));
+    set_by_index(index, InCSetState::Young);
+  }
+
+  void set_in_old(uintptr_t index) {
+    assert(get_by_index(index).is_default(),
+           err_msg("State at index " INTPTR_FORMAT" should be default but is " CSETSTATE_FORMAT, index, get_by_index(index).value()));
+    set_by_index(index, InCSetState::Old);
+  }
+
+  bool is_in_cset_or_humongous(HeapWord* addr) const { return at(addr).is_in_cset_or_humongous(); }
+  bool is_in_cset(HeapWord* addr) const { return at(addr).is_in_cset(); }
+  InCSetState at(HeapWord* addr) const { return get_by_address(addr); }
+  void clear() { G1BiasedMappedArray<InCSetState>::clear(); }
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1INCSETSTATE_HPP
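
The encoding invariants that the comments above rely on can be demonstrated in a few lines: Humongous is the only negative value, NotInCSet the only zero, and the generations are ordered so that promotion is a simple increment. A self-contained check using plain int8_t values in place of the real class:

#include <cassert>
#include <cstdint>

int main() {
  const int8_t Humongous = -1, NotInCSet = 0, Young = 1, Old = 2;

  // is_in_cset_or_humongous() is a single != 0 test...
  assert(Humongous != NotInCSet && Young != NotInCSet && Old != NotInCSet);
  // ...while the sign alone separates humongous (< 0) from in-cset (> 0).
  assert(Humongous < NotInCSet && Young > NotInCSet && Old > NotInCSet);
  // Generations are encoded in increasing order, so the "next" generation
  // is reachable by a simple increment.
  assert(Young + 1 == Old);
  return 0;
}
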
--- a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -26,6 +26,7 @@
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1OOPCLOSURES_HPP
 
 #include "memory/iterator.hpp"
+#include "oops/markOop.hpp"
 
 class HeapRegion;
 class G1CollectedHeap;
@@ -239,14 +240,14 @@
   G1CollectedHeap* _g1;
   G1RemSet* _g1_rem_set;
   HeapRegion* _from;
-  OopsInHeapRegionClosure* _push_ref_cl;
+  G1ParPushHeapRSClosure* _push_ref_cl;
   bool _record_refs_into_cset;
   uint _worker_i;
 
 public:
   G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
                                 G1RemSet* rs,
-                                OopsInHeapRegionClosure* push_ref_cl,
+                                G1ParPushHeapRSClosure* push_ref_cl,
                                 bool record_refs_into_cset,
                                 uint worker_i = 0);
 
@@ -256,7 +257,8 @@
   }
 
   bool self_forwarded(oop obj) {
-    bool result = (obj->is_forwarded() && (obj->forwardee()== obj));
+    markOop m = obj->mark();
+    bool result = (m->is_marked() && ((oop)m->decode_pointer() == obj));
     return result;
   }
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -67,8 +67,8 @@
 
   if (!oopDesc::is_null(heap_oop)) {
     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    G1CollectedHeap::in_cset_state_t state = _g1->in_cset_state(obj);
-    if (state == G1CollectedHeap::InCSet) {
+    const InCSetState state = _g1->in_cset_state(obj);
+    if (state.is_in_cset()) {
       // We're not going to even bother checking whether the object is
       // already forwarded or not, as this usually causes an immediate
       // stall. We'll try to prefetch the object (for write, given that
@@ -87,7 +87,7 @@
 
       _par_scan_state->push_on_queue(p);
     } else {
-      if (state == G1CollectedHeap::IsHumongous) {
+      if (state.is_humongous()) {
         _g1->set_humongous_is_live(obj);
       }
       _par_scan_state->update_rs(_from, p, _worker_id);
--- a/hotspot/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1PageBasedVirtualSpace.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -131,6 +131,9 @@
   _committed.set_range(start, start + size_in_pages);
 
   MemRegion result((HeapWord*)page_start(start), byte_size_for_pages(size_in_pages) / HeapWordSize);
+  if (AlwaysPreTouch) {
+    os::pretouch_memory((char*)result.start(), (char*)result.end());
+  }
   return result;
 }
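
The new AlwaysPreTouch hook hands the freshly committed range to os::pretouch_memory(), which touches every page so the OS backs the range with physical memory up front rather than via page faults during a later GC pause. A simplified illustration of the idea; the 4 KiB page size is an assumption, and the real call uses the OS page size.

#include <cstddef>

// Touch one byte per page of a freshly committed, zero-filled range so the
// OS populates it with physical pages now. Writing 0 is safe only because
// the memory is known to be zeroed; this is a sketch, not os::pretouch_memory.
static void pretouch_sketch(char* start, char* end, size_t page_size = 4096) {
  for (volatile char* p = start; p < end; p += page_size) {
    *p = 0;
  }
}
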
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -38,6 +38,7 @@
     _g1_rem(g1h->g1_rem_set()),
     _hash_seed(17), _queue_num(queue_num),
     _term_attempts(0),
+    _tenuring_threshold(g1h->g1_policy()->tenuring_threshold()),
     _age_table(false), _scanner(g1h, rp),
     _strong_roots_time(0), _term_time(0) {
   _scanner.set_par_scan_thread_state(this);
@@ -59,6 +60,12 @@
 
   _g1_par_allocator = G1ParGCAllocator::create_allocator(_g1h);
 
+  _dest[InCSetState::NotInCSet]    = InCSetState::NotInCSet;
+  // The dest for Young is used when the objects are aged enough to
+  // need to be moved to the next space.
+  _dest[InCSetState::Young]        = InCSetState::Old;
+  _dest[InCSetState::Old]          = InCSetState::Old;
+
   _start = os::elapsedTime();
 }
 
@@ -150,52 +157,94 @@
   } while (!_refs->is_empty());
 }
 
-oop G1ParScanThreadState::copy_to_survivor_space(oop const old,
+HeapWord* G1ParScanThreadState::allocate_in_next_plab(InCSetState const state,
+                                                      InCSetState* dest,
+                                                      size_t word_sz,
+                                                      AllocationContext_t const context) {
+  assert(state.is_in_cset_or_humongous(), err_msg("Unexpected state: " CSETSTATE_FORMAT, state.value()));
+  assert(dest->is_in_cset_or_humongous(), err_msg("Unexpected dest: " CSETSTATE_FORMAT, dest->value()));
+
+  // Right now we only have two types of regions (young / old) so
+  // let's keep the logic here simple. We can generalize it when necessary.
+  if (dest->is_young()) {
+    HeapWord* const obj_ptr = _g1_par_allocator->allocate(InCSetState::Old,
+                                                          word_sz, context);
+    if (obj_ptr == NULL) {
+      return NULL;
+    }
+    // Make sure that we won't attempt to copy any other objects out
+    // of a survivor region (given that apparently we cannot allocate
+    // any new ones) to avoid coming into this slow path.
+    _tenuring_threshold = 0;
+    dest->set_old();
+    return obj_ptr;
+  } else {
+    assert(dest->is_old(), err_msg("Unexpected dest: " CSETSTATE_FORMAT, dest->value()));
+    // no other space to try.
+    return NULL;
+  }
+}
+
+InCSetState G1ParScanThreadState::next_state(InCSetState const state, markOop const m, uint& age) {
+  if (state.is_young()) {
+    age = !m->has_displaced_mark_helper() ? m->age()
+                                          : m->displaced_mark_helper()->age();
+    if (age < _tenuring_threshold) {
+      return state;
+    }
+  }
+  return dest(state);
+}
+
+oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
+                                                 oop const old,
                                                  markOop const old_mark) {
-  size_t word_sz = old->size();
-  HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
+  const size_t word_sz = old->size();
+  HeapRegion* const from_region = _g1h->heap_region_containing_raw(old);
   // +1 to make the -1 indexes valid...
-  int       young_index = from_region->young_index_in_cset()+1;
+  const int young_index = from_region->young_index_in_cset()+1;
   assert( (from_region->is_young() && young_index >  0) ||
          (!from_region->is_young() && young_index == 0), "invariant" );
-  G1CollectorPolicy* g1p = _g1h->g1_policy();
-  uint age = old_mark->has_displaced_mark_helper() ? old_mark->displaced_mark_helper()->age()
-                                                   : old_mark->age();
-  GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
-                                                             word_sz);
-  AllocationContext_t context = from_region->allocation_context();
-  HeapWord* obj_ptr = _g1_par_allocator->allocate(alloc_purpose, word_sz, context);
+  const AllocationContext_t context = from_region->allocation_context();
+
+  uint age = 0;
+  InCSetState dest_state = next_state(state, old_mark, age);
+  HeapWord* obj_ptr = _g1_par_allocator->plab_allocate(dest_state, word_sz, context);
+
+  // PLAB allocations should succeed most of the time, so we'll
+  // normally check against NULL once and that's it.
+  if (obj_ptr == NULL) {
+    obj_ptr = _g1_par_allocator->allocate_direct_or_new_plab(dest_state, word_sz, context);
+    if (obj_ptr == NULL) {
+      obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, context);
+      if (obj_ptr == NULL) {
+        // This will either forward-to-self, or detect that someone else has
+        // installed a forwarding pointer.
+        return _g1h->handle_evacuation_failure_par(this, old);
+      }
+    }
+  }
+
+  assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
 #ifndef PRODUCT
   // Should this evacuation fail?
   if (_g1h->evacuation_should_fail()) {
-    if (obj_ptr != NULL) {
-      _g1_par_allocator->undo_allocation(alloc_purpose, obj_ptr, word_sz, context);
-      obj_ptr = NULL;
-    }
+    // Doing this after all the allocation attempts also tests the
+    // undo_allocation() method too.
+    _g1_par_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
+    return _g1h->handle_evacuation_failure_par(this, old);
   }
 #endif // !PRODUCT
 
-  if (obj_ptr == NULL) {
-    // This will either forward-to-self, or detect that someone else has
-    // installed a forwarding pointer.
-    return _g1h->handle_evacuation_failure_par(this, old);
-  }
-
-  oop obj = oop(obj_ptr);
-
   // We're going to allocate linearly, so might as well prefetch ahead.
   Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);
 
-  oop forward_ptr = old->forward_to_atomic(obj);
+  const oop obj = oop(obj_ptr);
+  const oop forward_ptr = old->forward_to_atomic(obj);
   if (forward_ptr == NULL) {
     Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);
 
-    // alloc_purpose is just a hint to allocate() above, recheck the type of region
-    // we actually allocated from and update alloc_purpose accordingly
-    HeapRegion* to_region = _g1h->heap_region_containing_raw(obj_ptr);
-    alloc_purpose = to_region->is_young() ? GCAllocForSurvived : GCAllocForTenured;
-
-    if (g1p->track_object_age(alloc_purpose)) {
+    if (dest_state.is_young()) {
       if (age < markOopDesc::max_age) {
         age++;
       }
@@ -215,13 +264,19 @@
     }
 
     if (G1StringDedup::is_enabled()) {
-      G1StringDedup::enqueue_from_evacuation(from_region->is_young(),
-                                             to_region->is_young(),
+      const bool is_from_young = state.is_young();
+      const bool is_to_young = dest_state.is_young();
+      assert(is_from_young == _g1h->heap_region_containing_raw(old)->is_young(),
+             "sanity");
+      assert(is_to_young == _g1h->heap_region_containing_raw(obj)->is_young(),
+             "sanity");
+      G1StringDedup::enqueue_from_evacuation(is_from_young,
+                                             is_to_young,
                                              queue_num(),
                                              obj);
     }
 
-    size_t* surv_young_words = surviving_young_words();
+    size_t* const surv_young_words = surviving_young_words();
     surv_young_words[young_index] += word_sz;
 
     if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
@@ -232,14 +287,13 @@
       oop* old_p = set_partial_array_mask(old);
       push_on_queue(old_p);
     } else {
-      // No point in using the slower heap_region_containing() method,
-      // given that we know obj is in the heap.
-      _scanner.set_region(_g1h->heap_region_containing_raw(obj));
+      HeapRegion* const to_region = _g1h->heap_region_containing_raw(obj_ptr);
+      _scanner.set_region(to_region);
       obj->oop_iterate_backwards(&_scanner);
     }
+    return obj;
   } else {
-    _g1_par_allocator->undo_allocation(alloc_purpose, obj_ptr, word_sz, context);
-    obj = forward_ptr;
+    _g1_par_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
+    return forward_ptr;
   }
-  return obj;
 }
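
The restructured copy_to_survivor_space() turns allocation into a three-step ladder, where each step runs only if the previous one returned NULL, and a NULL at the end means evacuation failure. A hedged outline of that control flow, with the three allocators abstracted as function pointers (evac_allocate and alloc_fn are illustrative names, not patch API):

#include <cstddef>

// Stand-ins for the three allocation attempts chained above; each returns
// NULL on failure.
typedef void* (*alloc_fn)(size_t word_sz);

static void* evac_allocate(alloc_fn plab_allocate,
                           alloc_fn allocate_direct_or_new_plab,
                           alloc_fn allocate_in_next_plab,
                           size_t word_sz) {
  void* obj = plab_allocate(word_sz);             // usual case: inline PLAB bump
  if (obj == NULL) {
    obj = allocate_direct_or_new_plab(word_sz);   // refill the PLAB or go direct
    if (obj == NULL) {
      obj = allocate_in_next_plab(word_sz);       // e.g. retry a survivor copy in old
    }
  }
  return obj;  // NULL => handle_evacuation_failure_par()
}
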
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -46,14 +46,16 @@
   G1SATBCardTableModRefBS* _ct_bs;
   G1RemSet* _g1_rem;
 
-  G1ParGCAllocator*   _g1_par_allocator;
+  G1ParGCAllocator* _g1_par_allocator;
 
-  ageTable            _age_table;
+  ageTable          _age_table;
+  InCSetState       _dest[InCSetState::Num];
+  // Local tenuring threshold.
+  uint              _tenuring_threshold;
+  G1ParScanClosure  _scanner;
 
-  G1ParScanClosure    _scanner;
-
-  size_t           _alloc_buffer_waste;
-  size_t           _undo_waste;
+  size_t            _alloc_buffer_waste;
+  size_t            _undo_waste;
 
   OopsInHeapRegionClosure*      _evac_failure_cl;
 
@@ -82,6 +84,14 @@
   DirtyCardQueue& dirty_card_queue()             { return _dcq;  }
   G1SATBCardTableModRefBS* ctbs()                { return _ct_bs; }
 
+  InCSetState dest(InCSetState original) const {
+    assert(original.is_valid(),
+           err_msg("Original state invalid: " CSETSTATE_FORMAT, original.value()));
+    assert(_dest[original.value()].is_valid_gen(),
+           err_msg("Dest state is invalid: " CSETSTATE_FORMAT, _dest[original.value()].value()));
+    return _dest[original.value()];
+  }
+
  public:
   G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
   ~G1ParScanThreadState();
@@ -112,7 +122,6 @@
       }
     }
   }
- public:
 
   void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
     _evac_failure_cl = evac_failure_cl;
@@ -193,9 +202,20 @@
   template <class T> inline void deal_with_reference(T* ref_to_scan);
 
   inline void dispatch_reference(StarTask ref);
+
+  // Tries to allocate word_sz in the PLAB of the next "generation" after trying to
+  // allocate into dest. State is the original (source) cset state of the object
+  // being allocated.
+  // Returns a non-NULL pointer if successful, and updates dest if required.
+  HeapWord* allocate_in_next_plab(InCSetState const state,
+                                  InCSetState* dest,
+                                  size_t word_sz,
+                                  AllocationContext_t const context);
+
+  inline InCSetState next_state(InCSetState const state, markOop const m, uint& age);
  public:
 
-  oop copy_to_survivor_space(oop const obj, markOop const old_mark);
+  oop copy_to_survivor_space(InCSetState const state, oop const obj, markOop const old_mark);
 
   void trim_queue();
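
With the per-thread _dest[] table above, next_state() reduces to an age check plus an array lookup. A self-contained sketch of the table contents, using the same enum values as g1InCSetState.hpp:

#include <cassert>

int main() {
  enum { NotInCSet = 0, Young = 1, Old = 2, Num = 3 };

  // Mirrors the initialization in the G1ParScanThreadState constructor:
  // survivors that age out are promoted to old, old stays old.
  int dest[Num];
  dest[NotInCSet] = NotInCSet;
  dest[Young]     = Old;
  dest[Old]       = Old;

  assert(dest[Young] == Old && dest[Old] == Old);
  return 0;
}
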
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -38,21 +38,21 @@
   // set, due to (benign) races in the claim mechanism during RSet scanning more
   // than one thread might claim the same card. So the same card may be
   // processed multiple times. So redo this check.
-  G1CollectedHeap::in_cset_state_t in_cset_state = _g1h->in_cset_state(obj);
-  if (in_cset_state == G1CollectedHeap::InCSet) {
+  const InCSetState in_cset_state = _g1h->in_cset_state(obj);
+  if (in_cset_state.is_in_cset()) {
     oop forwardee;
     markOop m = obj->mark();
     if (m->is_marked()) {
       forwardee = (oop) m->decode_pointer();
     } else {
-      forwardee = copy_to_survivor_space(obj, m);
+      forwardee = copy_to_survivor_space(in_cset_state, obj, m);
     }
     oopDesc::encode_store_heap_oop(p, forwardee);
-  } else if (in_cset_state == G1CollectedHeap::IsHumongous) {
+  } else if (in_cset_state.is_humongous()) {
     _g1h->set_humongous_is_live(obj);
   } else {
-    assert(in_cset_state == G1CollectedHeap::InNeither,
-           err_msg("In_cset_state must be InNeither here, but is %d", in_cset_state));
+    assert(!in_cset_state.is_in_cset_or_humongous(),
+           err_msg("In_cset_state must be NotInCSet here, but is " CSETSTATE_FORMAT, in_cset_state.value()));
   }
 
   assert(obj != NULL, "Must be");
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -80,7 +80,7 @@
     _prev_period_summary()
 {
   _seq_task = new SubTasksDone(NumSeqTasks);
-  _cset_rs_update_cl = NEW_C_HEAP_ARRAY(OopsInHeapRegionClosure*, n_workers(), mtGC);
+  _cset_rs_update_cl = NEW_C_HEAP_ARRAY(G1ParPushHeapRSClosure*, n_workers(), mtGC);
   for (uint i = 0; i < n_workers(); i++) {
     _cset_rs_update_cl[i] = NULL;
   }
@@ -94,14 +94,14 @@
   for (uint i = 0; i < n_workers(); i++) {
     assert(_cset_rs_update_cl[i] == NULL, "it should be");
   }
-  FREE_C_HEAP_ARRAY(OopsInHeapRegionClosure*, _cset_rs_update_cl);
+  FREE_C_HEAP_ARRAY(G1ParPushHeapRSClosure*, _cset_rs_update_cl);
 }
 
 class ScanRSClosure : public HeapRegionClosure {
   size_t _cards_done, _cards;
   G1CollectedHeap* _g1h;
 
-  OopsInHeapRegionClosure* _oc;
+  G1ParPushHeapRSClosure* _oc;
   CodeBlobClosure* _code_root_cl;
 
   G1BlockOffsetSharedArray* _bot_shared;
@@ -113,7 +113,7 @@
   bool   _try_claimed;
 
 public:
-  ScanRSClosure(OopsInHeapRegionClosure* oc,
+  ScanRSClosure(G1ParPushHeapRSClosure* oc,
                 CodeBlobClosure* code_root_cl,
                 uint worker_i) :
     _oc(oc),
@@ -135,8 +135,7 @@
   void scanCard(size_t index, HeapRegion *r) {
     // Stack allocate the DirtyCardToOopClosure instance
     HeapRegionDCTOC cl(_g1h, r, _oc,
-                       CardTableModRefBS::Precise,
-                       HeapRegionDCTOC::IntoCSFilterKind);
+                       CardTableModRefBS::Precise);
 
     // Set the "from" region in the closure.
     _oc->set_region(r);
@@ -231,7 +230,7 @@
   size_t cards_looked_up() { return _cards;}
 };
 
-void G1RemSet::scanRS(OopsInHeapRegionClosure* oc,
+void G1RemSet::scanRS(G1ParPushHeapRSClosure* oc,
                       CodeBlobClosure* code_root_cl,
                       uint worker_i) {
   double rs_time_start = os::elapsedTime();
@@ -301,7 +300,7 @@
   HeapRegionRemSet::cleanup();
 }
 
-void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
+void G1RemSet::oops_into_collection_set_do(G1ParPushHeapRSClosure* oc,
                                            CodeBlobClosure* code_root_cl,
                                            uint worker_i) {
 #if CARD_REPEAT_HISTO
@@ -417,7 +416,7 @@
 G1UpdateRSOrPushRefOopClosure::
 G1UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
                               G1RemSet* rs,
-                              OopsInHeapRegionClosure* push_ref_cl,
+                              G1ParPushHeapRSClosure* push_ref_cl,
                               bool record_refs_into_cset,
                               uint worker_i) :
   _g1(g1h), _g1_rem_set(rs), _from(NULL),
@@ -518,7 +517,7 @@
   ct_freq_note_card(_ct_bs->index_for(start));
 #endif
 
-  OopsInHeapRegionClosure* oops_in_heap_closure = NULL;
+  G1ParPushHeapRSClosure* oops_in_heap_closure = NULL;
   if (check_for_refs_into_cset) {
     // ConcurrentG1RefineThreads have worker numbers larger than what
     // _cset_rs_update_cl[] is set up to handle. But those threads should
--- a/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -33,6 +33,7 @@
 class G1CollectedHeap;
 class CardTableModRefBarrierSet;
 class ConcurrentG1Refine;
+class G1ParPushHeapRSClosure;
 
 // A G1RemSet in which each heap region has a rem set that records the
 // external heap references into it.  Uses a mod ref bs to track updates,
@@ -68,7 +69,7 @@
 
   // Used for caching the closure that is responsible for scanning
   // references into the collection set.
-  OopsInHeapRegionClosure** _cset_rs_update_cl;
+  G1ParPushHeapRSClosure** _cset_rs_update_cl;
 
   // Print the given summary info
   virtual void print_summary_info(G1RemSetSummary * summary, const char * header = NULL);
@@ -95,7 +96,7 @@
   // partitioning the work to be done. It should be the same as
   // the "i" passed to the calling thread's work(i) function.
   // In the sequential case this param will be ignored.
-  void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
+  void oops_into_collection_set_do(G1ParPushHeapRSClosure* blk,
                                    CodeBlobClosure* code_root_cl,
                                    uint worker_i);
 
@@ -107,7 +108,7 @@
   void prepare_for_oops_into_collection_set_do();
   void cleanup_after_oops_into_collection_set_do();
 
-  void scanRS(OopsInHeapRegionClosure* oc,
+  void scanRS(G1ParPushHeapRSClosure* oc,
               CodeBlobClosure* code_root_cl,
               uint worker_i);
 
--- a/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,7 +33,7 @@
 #include "runtime/thread.inline.hpp"
 
 G1SATBCardTableModRefBS::G1SATBCardTableModRefBS(MemRegion whole_heap) :
-    CardTableModRefBSForCTRS(whole_heap)
+    CardTableModRefBS(whole_heap)
 {
   _kind = G1SATBCT;
 }
--- a/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -37,7 +37,7 @@
 // This barrier is specialized to use a logging barrier to support
 // snapshot-at-the-beginning marking.
 
-class G1SATBCardTableModRefBS: public CardTableModRefBSForCTRS {
+class G1SATBCardTableModRefBS: public CardTableModRefBS {
 protected:
   enum G1CardValues {
     g1_young_gen = CT_MR_BS_last_reserved << 1
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -48,93 +48,55 @@
 size_t HeapRegion::CardsPerRegion    = 0;
 
 HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
-                                 HeapRegion* hr, ExtendedOopClosure* cl,
-                                 CardTableModRefBS::PrecisionStyle precision,
-                                 FilterKind fk) :
+                                 HeapRegion* hr,
+                                 G1ParPushHeapRSClosure* cl,
+                                 CardTableModRefBS::PrecisionStyle precision) :
   DirtyCardToOopClosure(hr, cl, precision, NULL),
-  _hr(hr), _fk(fk), _g1(g1) { }
+  _hr(hr), _rs_scan(cl), _g1(g1) { }
 
 FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
                                                    OopClosure* oc) :
   _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }
 
-template<class ClosureType>
-HeapWord* walk_mem_region_loop(ClosureType* cl, G1CollectedHeap* g1h,
-                               HeapRegion* hr,
-                               HeapWord* cur, HeapWord* top) {
-  oop cur_oop = oop(cur);
-  size_t oop_size = hr->block_size(cur);
-  HeapWord* next_obj = cur + oop_size;
-  while (next_obj < top) {
-    // Keep filtering the remembered set.
-    if (!g1h->is_obj_dead(cur_oop, hr)) {
-      // Bottom lies entirely below top, so we can call the
-      // non-memRegion version of oop_iterate below.
-      cur_oop->oop_iterate(cl);
-    }
-    cur = next_obj;
-    cur_oop = oop(cur);
-    oop_size = hr->block_size(cur);
-    next_obj = cur + oop_size;
-  }
-  return cur;
-}
-
 void HeapRegionDCTOC::walk_mem_region(MemRegion mr,
                                       HeapWord* bottom,
                                       HeapWord* top) {
   G1CollectedHeap* g1h = _g1;
   size_t oop_size;
-  ExtendedOopClosure* cl2 = NULL;
-
-  FilterIntoCSClosure intoCSFilt(this, g1h, _cl);
-  FilterOutOfRegionClosure outOfRegionFilt(_hr, _cl);
-
-  switch (_fk) {
-  case NoFilterKind:          cl2 = _cl; break;
-  case IntoCSFilterKind:      cl2 = &intoCSFilt; break;
-  case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
-  default:                    ShouldNotReachHere();
-  }
+  HeapWord* cur = bottom;
 
   // Start filtering what we add to the remembered set. If the object is
   // not considered dead, either because it is marked (in the mark bitmap)
   // or it was allocated after marking finished, then we add it. Otherwise
   // we can safely ignore the object.
-  if (!g1h->is_obj_dead(oop(bottom), _hr)) {
-    oop_size = oop(bottom)->oop_iterate(cl2, mr);
+  if (!g1h->is_obj_dead(oop(cur), _hr)) {
+    oop_size = oop(cur)->oop_iterate(_rs_scan, mr);
   } else {
-    oop_size = _hr->block_size(bottom);
+    oop_size = _hr->block_size(cur);
   }
 
-  bottom += oop_size;
+  cur += oop_size;
 
-  if (bottom < top) {
-    // We replicate the loop below for several kinds of possible filters.
-    switch (_fk) {
-    case NoFilterKind:
-      bottom = walk_mem_region_loop(_cl, g1h, _hr, bottom, top);
-      break;
-
-    case IntoCSFilterKind: {
-      FilterIntoCSClosure filt(this, g1h, _cl);
-      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
-      break;
-    }
-
-    case OutOfRegionFilterKind: {
-      FilterOutOfRegionClosure filt(_hr, _cl);
-      bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
-      break;
-    }
-
-    default:
-      ShouldNotReachHere();
+  if (cur < top) {
+    oop cur_oop = oop(cur);
+    oop_size = _hr->block_size(cur);
+    HeapWord* next_obj = cur + oop_size;
+    while (next_obj < top) {
+      // Keep filtering the remembered set.
+      if (!g1h->is_obj_dead(cur_oop, _hr)) {
+        // Bottom lies entirely below top, so we can call the
+        // non-memRegion version of oop_iterate below.
+        cur_oop->oop_iterate(_rs_scan);
+      }
+      cur = next_obj;
+      cur_oop = oop(cur);
+      oop_size = _hr->block_size(cur);
+      next_obj = cur + oop_size;
     }
 
     // Last object. Need to do dead-obj filtering here too.
-    if (!g1h->is_obj_dead(oop(bottom), _hr)) {
-      oop(bottom)->oop_iterate(cl2, mr);
+    if (!g1h->is_obj_dead(oop(cur), _hr)) {
+      oop(cur)->oop_iterate(_rs_scan, mr);
     }
   }
 }
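
The rewrite above folds the templated walk_mem_region_loop and the three FilterKind variants into one loop over the single closure type the code actually uses. The shape that remains: the first and last objects are visited with the MemRegion-bounded oop_iterate because they may straddle the dirty card, interior objects get the cheaper unbounded call, and dead objects are skipped throughout. A runnable toy of the dead-object filtering, with a hypothetical object layout in place of HotSpot oops:

#include <cstdio>
#include <vector>

struct ToyObj { bool dead; int value; };

// Visit every live object; dead objects are filtered out, mirroring the
// is_obj_dead() checks in walk_mem_region above.
void walk(const std::vector<ToyObj>& heap, void (*visit)(const ToyObj&)) {
  for (size_t i = 0; i < heap.size(); i++) {
    if (!heap[i].dead) {
      visit(heap[i]);
    }
  }
}

int main() {
  std::vector<ToyObj> heap = {{false, 1}, {true, 2}, {false, 3}};
  walk(heap, [](const ToyObj& o) { printf("live obj %d\n", o.value); });
}
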
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegion.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -67,17 +67,9 @@
 // sets.
 
 class HeapRegionDCTOC : public DirtyCardToOopClosure {
-public:
-  // Specification of possible DirtyCardToOopClosure filtering.
-  enum FilterKind {
-    NoFilterKind,
-    IntoCSFilterKind,
-    OutOfRegionFilterKind
-  };
-
-protected:
+private:
   HeapRegion* _hr;
-  FilterKind _fk;
+  G1ParPushHeapRSClosure* _rs_scan;
   G1CollectedHeap* _g1;
 
   // Walk the given memory region from bottom to (actual) top
@@ -90,9 +82,9 @@
 
 public:
   HeapRegionDCTOC(G1CollectedHeap* g1,
-                  HeapRegion* hr, ExtendedOopClosure* cl,
-                  CardTableModRefBS::PrecisionStyle precision,
-                  FilterKind fk);
+                  HeapRegion* hr,
+                  G1ParPushHeapRSClosure* cl,
+                  CardTableModRefBS::PrecisionStyle precision);
 };
 
 // The complicating factor is that BlockOffsetTable diverged
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -407,20 +407,8 @@
   }
 }
 
-void OtherRegionsTable::initialize(uint max_regions) {
-  FromCardCache::initialize(HeapRegionRemSet::num_par_rem_sets(), max_regions);
-}
-
-void OtherRegionsTable::invalidate(uint start_idx, size_t num_regions) {
-  FromCardCache::invalidate(start_idx, num_regions);
-}
-
-void OtherRegionsTable::print_from_card_cache() {
-  FromCardCache::print();
-}
-
 void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, uint tid) {
-  uint cur_hrm_ind = hr()->hrm_index();
+  uint cur_hrm_ind = _hr->hrm_index();
 
   if (G1TraceHeapRegionRememberedSet) {
     gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
@@ -434,7 +422,7 @@
 
   if (G1TraceHeapRegionRememberedSet) {
     gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = %d)",
-                  hr()->bottom(), from_card,
+                  _hr->bottom(), from_card,
                   FromCardCache::at(tid, cur_hrm_ind));
   }
 
@@ -477,13 +465,13 @@
       if (G1HRRSUseSparseTable &&
           _sparse_table.add_card(from_hrm_ind, card_index)) {
         if (G1RecordHRRSOops) {
-          HeapRegionRemSet::record(hr(), from);
+          HeapRegionRemSet::record(_hr, from);
           if (G1TraceHeapRegionRememberedSet) {
             gclog_or_tty->print("   Added card " PTR_FORMAT " to region "
                                 "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
                                 align_size_down(uintptr_t(from),
                                                 CardTableModRefBS::card_size),
-                                hr()->bottom(), from);
+                                _hr->bottom(), from);
           }
         }
         if (G1TraceHeapRegionRememberedSet) {
@@ -539,13 +527,13 @@
   prt->add_reference(from);
 
   if (G1RecordHRRSOops) {
-    HeapRegionRemSet::record(hr(), from);
+    HeapRegionRemSet::record(_hr, from);
     if (G1TraceHeapRegionRememberedSet) {
       gclog_or_tty->print("Added card " PTR_FORMAT " to region "
                           "[" PTR_FORMAT "...) for ref " PTR_FORMAT ".\n",
                           align_size_down(uintptr_t(from),
                                           CardTableModRefBS::card_size),
-                          hr()->bottom(), from);
+                          _hr->bottom(), from);
     }
   }
   assert(contains_reference(from), "We just added it!");
@@ -614,7 +602,7 @@
     if (G1TraceHeapRegionRememberedSet) {
       gclog_or_tty->print("Coarsened entry in region [" PTR_FORMAT "...] "
                  "for region [" PTR_FORMAT "...] (" SIZE_FORMAT " coarse entries).\n",
-                 hr()->bottom(),
+                 _hr->bottom(),
                  max->hr()->bottom(),
                  _n_coarse_entries);
     }
@@ -627,13 +615,11 @@
   return max;
 }
 
-
-// At present, this must be called stop-world single-threaded.
 void OtherRegionsTable::scrub(CardTableModRefBS* ctbs,
                               BitMap* region_bm, BitMap* card_bm) {
   // First eliminate garbage regions from the coarse map.
   if (G1RSScrubVerbose) {
-    gclog_or_tty->print_cr("Scrubbing region %u:", hr()->hrm_index());
+    gclog_or_tty->print_cr("Scrubbing region %u:", _hr->hrm_index());
   }
 
   assert(_coarse_map.size() == region_bm->size(), "Precondition");
@@ -752,7 +738,7 @@
 }
 
 void OtherRegionsTable::clear_fcc() {
-  FromCardCache::clear(hr()->hrm_index());
+  FromCardCache::clear(_hr->hrm_index());
 }
 
 void OtherRegionsTable::clear() {
@@ -774,27 +760,6 @@
   clear_fcc();
 }
 
-bool OtherRegionsTable::del_single_region_table(size_t ind,
-                                                HeapRegion* hr) {
-  assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
-  PerRegionTable** prev_addr = &_fine_grain_regions[ind];
-  PerRegionTable* prt = *prev_addr;
-  while (prt != NULL && prt->hr() != hr) {
-    prev_addr = prt->collision_list_next_addr();
-    prt = prt->collision_list_next();
-  }
-  if (prt != NULL) {
-    assert(prt->hr() == hr, "Loop postcondition.");
-    *prev_addr = prt->collision_list_next();
-    unlink_from_all(prt);
-    PerRegionTable::free(prt);
-    _n_fine_entries--;
-    return true;
-  } else {
-    return false;
-  }
-}
-
 bool OtherRegionsTable::contains_reference(OopOrNarrowOopStar from) const {
   // Cast away const in this case.
   MutexLockerEx x((Mutex*)_m, Mutex::_no_safepoint_check_flag);
@@ -840,7 +805,7 @@
 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa,
                                    HeapRegion* hr)
   : _bosa(bosa),
-    _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true),
+    _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true, Monitor::_safepoint_check_never),
     _code_roots(), _other_regions(hr, &_m), _iter_state(Unclaimed), _iter_claimed(0) {
   reset_for_par_iteration();
 }
@@ -975,7 +940,7 @@
   _hrrs(hrrs),
   _g1h(G1CollectedHeap::heap()),
   _coarse_map(&hrrs->_other_regions._coarse_map),
-  _bosa(hrrs->bosa()),
+  _bosa(hrrs->_bosa),
   _is(Sparse),
   // Set these values so that we increment to the first region.
   _coarse_cur_region_index(-1),
--- a/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -162,32 +162,36 @@
   // to hold _m, and the fine-grain table to be full.
   PerRegionTable* delete_region_table();
 
-  // If a PRT for "hr" is in the bucket list indicated by "ind" (which must
-  // be the correct index for "hr"), delete it and return true; else return
-  // false.
-  bool del_single_region_table(size_t ind, HeapRegion* hr);
-
   // link/add the given fine grain remembered set into the "all" list
   void link_to_all(PerRegionTable * prt);
   // unlink/remove the given fine grain remembered set from the "all" list
   void unlink_from_all(PerRegionTable * prt);
 
+  bool contains_reference_locked(OopOrNarrowOopStar from) const;
+
+  // Clear the from_card_cache entries for this region.
+  void clear_fcc();
 public:
+  // Create a new remembered set for the given heap region. The given mutex should
+  // be used to ensure consistency.
   OtherRegionsTable(HeapRegion* hr, Mutex* m);
 
-  HeapRegion* hr() const { return _hr; }
-
   // For now.  Could "expand" some tables in the future, so that this made
   // sense.
   void add_reference(OopOrNarrowOopStar from, uint tid);
 
+  // Returns whether the remembered set contains the given reference.
+  bool contains_reference(OopOrNarrowOopStar from) const;
+
   // Removes any entries shown by the given bitmaps to contain only dead
-  // objects.
+  // objects. Not thread safe.
+  // Set bits in the bitmaps indicate that the given region or card is live.
   void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);
 
-  // Returns whether this remembered set (and all sub-sets) contain no entries.
+  // Returns whether this remembered set (and all sub-sets) contains no entries.
   bool is_empty() const;
 
+  // Returns the number of cards contained in this remembered set.
   size_t occupied() const;
   size_t occ_fine() const;
   size_t occ_coarse() const;
@@ -195,31 +199,17 @@
 
   static jint n_coarsenings() { return _n_coarsenings; }
 
-  // Returns size in bytes.
-  // Not const because it takes a lock.
+  // Returns size of the actual remembered set containers in bytes.
   size_t mem_size() const;
+  // Returns the size of static data in bytes.
   static size_t static_mem_size();
+  // Returns the size of the free list content in bytes.
   static size_t fl_mem_size();
 
-  bool contains_reference(OopOrNarrowOopStar from) const;
-  bool contains_reference_locked(OopOrNarrowOopStar from) const;
-
+  // Clear the entire contents of this remembered set.
   void clear();
 
-  // Specifically clear the from_card_cache.
-  void clear_fcc();
-
   void do_cleanup_work(HRRSCleanupTask* hrrs_cleanup_task);
-
-  // Declare the heap size (in # of regions) to the OtherRegionsTable.
-  // (Uses it to initialize from_card_cache).
-  static void initialize(uint max_regions);
-
-  // Declares that regions between start_idx <= i < start_idx + num_regions are
-  // not in use. Make sure that any entries for these regions are invalid.
-  static void invalidate(uint start_idx, size_t num_regions);
-
-  static void print_from_card_cache();
 };
 
 class HeapRegionRemSet : public CHeapObj<mtGC> {
@@ -233,7 +223,6 @@
 
 private:
   G1BlockOffsetSharedArray* _bosa;
-  G1BlockOffsetSharedArray* bosa() const { return _bosa; }
 
   // A set of code blobs (nmethods) whose code contains pointers into
   // the region that owns this RSet.
@@ -268,10 +257,6 @@
   static uint num_par_rem_sets();
   static void setup_remset_size();
 
-  HeapRegion* hr() const {
-    return _other_regions.hr();
-  }
-
   bool is_empty() const {
     return (strong_code_roots_list_length() == 0) && _other_regions.is_empty();
   }
@@ -305,8 +290,9 @@
     _other_regions.add_reference(from, tid);
   }
 
-  // Removes any entries shown by the given bitmaps to contain only dead
-  // objects.
+  // Removes any entries in the remembered set shown by the given bitmaps to
+  // contain only dead objects. Not thread safe.
+  // Set bits in the bitmaps indicate that the given region or card is live.
   void scrub(CardTableModRefBS* ctbs, BitMap* region_bm, BitMap* card_bm);
 
   // The region is being reclaimed; clear its remset, and any mention of
@@ -397,16 +383,16 @@
   // Declare the heap size (in # of regions) to the HeapRegionRemSet(s).
   // (Uses it to initialize from_card_cache).
   static void init_heap(uint max_regions) {
-    OtherRegionsTable::initialize(max_regions);
+    FromCardCache::initialize(num_par_rem_sets(), max_regions);
   }
 
-  static void invalidate(uint start_idx, uint num_regions) {
-    OtherRegionsTable::invalidate(start_idx, num_regions);
+  static void invalidate_from_card_cache(uint start_idx, size_t num_regions) {
+    FromCardCache::invalidate(start_idx, num_regions);
   }
 
 #ifndef PRODUCT
   static void print_from_card_cache() {
-    OtherRegionsTable::print_from_card_cache();
+    FromCardCache::print();
   }
 #endif
 
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/gcTaskManager.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -400,7 +400,8 @@
   assert(workers() != 0, "no workers");
   _monitor = new Monitor(Mutex::barrier,                // rank
                          "GCTaskManager monitor",       // name
-                         Mutex::_allow_vm_block_flag);  // allow_vm_block
+                         Mutex::_allow_vm_block_flag,   // allow_vm_block
+                         Monitor::_safepoint_check_never);
   // The queue for the GCTaskManager must be a CHeapObj.
   GCTaskQueue* unsynchronized_queue = GCTaskQueue::create_on_c_heap();
   _queue = SynchronizedGCTaskQueue::create(unsynchronized_queue, lock());
@@ -1125,7 +1126,8 @@
     } else {
       result = new Monitor(Mutex::barrier,                  // rank
                            "MonitorSupply monitor",         // name
-                           Mutex::_allow_vm_block_flag);    // allow_vm_block
+                           Mutex::_allow_vm_block_flag,     // allow_vm_block
+                           Monitor::_safepoint_check_never);
     }
     guarantee(result != NULL, "shouldn't return NULL");
     assert(!result->is_locked(), "shouldn't be locked");
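
Both constructions above gain a fourth argument, Monitor::_safepoint_check_never, making the safepoint-check policy part of the lock's identity at construction time rather than a property of each lock/unlock site. A toy model of the new constructor shape, with hypothetical stand-in types (the real enumerators visible in this changeset are Monitor::_safepoint_check_never and Monitor::_safepoint_check_sometimes):

#include <cstdio>
#include <string>

enum SafepointCheckRequired {     // stand-in for Monitor's policy enum
  safepoint_check_never,
  safepoint_check_sometimes,
  safepoint_check_always
};

struct ToyMonitor {
  std::string name;
  bool allow_vm_block;
  SafepointCheckRequired policy;  // fixed for the lock's lifetime
  ToyMonitor(const char* n, bool avb, SafepointCheckRequired p)
    : name(n), allow_vm_block(avb), policy(p) {}
};

int main() {
  ToyMonitor m("GCTaskManager monitor", /*allow_vm_block=*/true,
               safepoint_check_never);
  printf("%s: policy=%d\n", m.name.c_str(), (int)m.policy);
}
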
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -195,7 +195,7 @@
 
     COMPILER2_PRESENT(DerivedPointerTable::clear());
 
-    ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
+    ref_processor()->enable_discovery();
     ref_processor()->setup_policy(clear_all_softrefs);
 
     mark_sweep_phase1(clear_all_softrefs);
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -41,31 +41,24 @@
 
   // Closure accessors
   static OopClosure* mark_and_push_closure()   { return &MarkSweep::mark_and_push_closure; }
-  static VoidClosure* follow_stack_closure()   { return (VoidClosure*)&MarkSweep::follow_stack_closure; }
+  static VoidClosure* follow_stack_closure()   { return &MarkSweep::follow_stack_closure; }
   static CLDClosure* follow_cld_closure()      { return &MarkSweep::follow_cld_closure; }
-  static OopClosure* adjust_pointer_closure()  { return (OopClosure*)&MarkSweep::adjust_pointer_closure; }
+  static OopClosure* adjust_pointer_closure()  { return &MarkSweep::adjust_pointer_closure; }
   static CLDClosure* adjust_cld_closure()      { return &MarkSweep::adjust_cld_closure; }
-  static BoolObjectClosure* is_alive_closure() { return (BoolObjectClosure*)&MarkSweep::is_alive; }
+  static BoolObjectClosure* is_alive_closure() { return &MarkSweep::is_alive; }
 
- debug_only(public:)  // Used for PSParallelCompact debugging
   // Mark live objects
   static void mark_sweep_phase1(bool clear_all_softrefs);
   // Calculate new addresses
   static void mark_sweep_phase2();
- debug_only(private:) // End used for PSParallelCompact debugging
   // Update pointers
   static void mark_sweep_phase3();
   // Move objects to new positions
   static void mark_sweep_phase4();
 
- debug_only(public:)  // Used for PSParallelCompact debugging
   // Temporary data structures for traversal and storing/restoring marks
   static void allocate_stacks();
   static void deallocate_stacks();
-  static void set_ref_processor(ReferenceProcessor* rp) {  // delete this method
-    _ref_processor = rp;
-  }
- debug_only(private:) // End used for PSParallelCompact debugging
 
   // If objects are left in eden after a collection, try to move the boundary
   // and absorb them into the old gen.  Returns true if eden was emptied.
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -2069,7 +2069,7 @@
 
     COMPILER2_PRESENT(DerivedPointerTable::clear());
 
-    ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
+    ref_processor()->enable_discovery();
     ref_processor()->setup_policy(maximum_heap_compaction);
 
     bool marked_for_unloading = false;
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -147,6 +147,10 @@
     claimed_stack_depth()->push(p);
   }
 
+  inline void promotion_trace_event(oop new_obj, oop old_obj, size_t obj_size,
+                                    uint age, bool tenured,
+                                    const PSPromotionLAB* lab);
+
  protected:
   static OopStarTaskQueueSet* stack_array_depth()   { return _stack_array_depth; }
  public:
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -64,6 +64,33 @@
   claim_or_forward_internal_depth(p);
 }
 
+inline void PSPromotionManager::promotion_trace_event(oop new_obj, oop old_obj,
+                                                      size_t obj_size,
+                                                      uint age, bool tenured,
+                                                      const PSPromotionLAB* lab) {
+  // Skip if memory allocation failed
+  if (new_obj != NULL) {
+    const ParallelScavengeTracer* gc_tracer = PSScavenge::gc_tracer();
+
+    if (lab != NULL) {
+      // Promotion of object through newly allocated PLAB
+      if (gc_tracer->should_report_promotion_in_new_plab_event()) {
+        size_t obj_bytes = obj_size * HeapWordSize;
+        size_t lab_size = lab->capacity();
+        gc_tracer->report_promotion_in_new_plab_event(old_obj->klass(), obj_bytes,
+                                                      age, tenured, lab_size);
+      }
+    } else {
+      // Promotion of object directly to heap
+      if (gc_tracer->should_report_promotion_outside_plab_event()) {
+        size_t obj_bytes = obj_size * HeapWordSize;
+        gc_tracer->report_promotion_outside_plab_event(old_obj->klass(), obj_bytes,
+                                                       age, tenured);
+      }
+    }
+  }
+}
+
 //
 // This method is pretty bulky. It would be nice to split it up
 // into smaller submethods, but we need to be careful not to hurt
@@ -85,11 +112,11 @@
     bool new_obj_is_tenured = false;
     size_t new_obj_size = o->size();
 
+    // Find the objects age, MT safe.
+    uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
+      test_mark->displaced_mark_helper()->age() : test_mark->age();
+
     if (!promote_immediately) {
-      // Find the objects age, MT safe.
-      uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
-        test_mark->displaced_mark_helper()->age() : test_mark->age();
-
       // Try allocating obj in to-space (unless too old)
       if (age < PSScavenge::tenuring_threshold()) {
         new_obj = (oop) _young_lab.allocate(new_obj_size);
@@ -98,6 +125,7 @@
           if (new_obj_size > (YoungPLABSize / 2)) {
             // Allocate this object directly
             new_obj = (oop)young_space()->cas_allocate(new_obj_size);
+            promotion_trace_event(new_obj, o, new_obj_size, age, false, NULL);
           } else {
             // Flush and fill
             _young_lab.flush();
@@ -107,6 +135,7 @@
               _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
               // Try the young lab allocation again.
               new_obj = (oop) _young_lab.allocate(new_obj_size);
+              promotion_trace_event(new_obj, o, new_obj_size, age, false, &_young_lab);
             } else {
               _young_gen_is_full = true;
             }
@@ -132,6 +161,7 @@
           if (new_obj_size > (OldPLABSize / 2)) {
             // Allocate this object directly
             new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
+            promotion_trace_event(new_obj, o, new_obj_size, age, true, NULL);
           } else {
             // Flush and fill
             _old_lab.flush();
@@ -148,6 +178,7 @@
               _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
               // Try the old lab allocation again.
               new_obj = (oop) _old_lab.allocate(new_obj_size);
+              promotion_trace_event(new_obj, o, new_obj_size, age, true, &_old_lab);
             }
           }
         }
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -362,7 +362,7 @@
 
     COMPILER2_PRESENT(DerivedPointerTable::clear());
 
-    reference_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
+    reference_processor()->enable_discovery();
     reference_processor()->setup_policy(false);
 
     // We track how much was promoted to the next generation for
--- a/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -92,6 +92,7 @@
 
   // Private accessors
   static CardTableExtension* const card_table()       { assert(_card_table != NULL, "Sanity"); return _card_table; }
+  static const ParallelScavengeTracer* gc_tracer() { return &_gc_tracer; }
 
  public:
   // Accessors
--- a/hotspot/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/shared/concurrentGCThread.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -88,7 +88,8 @@
 
 SurrogateLockerThread::SurrogateLockerThread() :
   JavaThread(&_sltLoop),
-  _monitor(Mutex::nonleaf, "SLTMonitor"),
+  _monitor(Mutex::nonleaf, "SLTMonitor", false,
+           Monitor::_safepoint_check_sometimes),
   _buffer(empty)
 {}
 
--- a/hotspot/src/share/vm/gc_implementation/shared/gcTrace.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/shared/gcTrace.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -172,6 +172,27 @@
   _tenuring_threshold = tenuring_threshold;
 }
 
+bool YoungGCTracer::should_report_promotion_in_new_plab_event() const {
+  return should_send_promotion_in_new_plab_event();
+}
+
+bool YoungGCTracer::should_report_promotion_outside_plab_event() const {
+  return should_send_promotion_outside_plab_event();
+}
+
+void YoungGCTracer::report_promotion_in_new_plab_event(Klass* klass, size_t obj_size,
+                                                       uint age, bool tenured,
+                                                       size_t plab_size) const {
+  assert_set_gc_id();
+  send_promotion_in_new_plab_event(klass, obj_size, age, tenured, plab_size);
+}
+
+void YoungGCTracer::report_promotion_outside_plab_event(Klass* klass, size_t obj_size,
+                                                        uint age, bool tenured) const {
+  assert_set_gc_id();
+  send_promotion_outside_plab_event(klass, obj_size, age, tenured);
+}
+
 void OldGCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions) {
   assert_set_gc_id();
 
--- a/hotspot/src/share/vm/gc_implementation/shared/gcTrace.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/shared/gcTrace.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -156,9 +156,38 @@
   void report_promotion_failed(const PromotionFailedInfo& pf_info);
   void report_tenuring_threshold(const uint tenuring_threshold);
 
+  /*
+   * Methods for reporting Promotion in new or outside PLAB Events.
+   *
+   * The object age is always required as it is not certain that the mark word
+   * of the oop can be trusted at this stage.
+   *
+   * obj_size is the size of the promoted object in bytes.
+   *
+ * tenured should be true if the object has been promoted to the old
+ * space during this GC. If the object is copied to survivor space
+ * from young space or survivor space (aging), tenured should be false.
+   *
+   * plab_size is the size of the newly allocated PLAB in bytes.
+   */
+  bool should_report_promotion_in_new_plab_event() const;
+  bool should_report_promotion_outside_plab_event() const;
+  void report_promotion_in_new_plab_event(Klass* klass, size_t obj_size,
+                                          uint age, bool tenured,
+                                          size_t plab_size) const;
+  void report_promotion_outside_plab_event(Klass* klass, size_t obj_size,
+                                           uint age, bool tenured) const;
+
  private:
   void send_young_gc_event() const;
   void send_promotion_failed_event(const PromotionFailedInfo& pf_info) const;
+  bool should_send_promotion_in_new_plab_event() const;
+  bool should_send_promotion_outside_plab_event() const;
+  void send_promotion_in_new_plab_event(Klass* klass, size_t obj_size,
+                                        uint age, bool tenured,
+                                        size_t plab_size) const;
+  void send_promotion_outside_plab_event(Klass* klass, size_t obj_size,
+                                         uint age, bool tenured) const;
 };
 
 class OldGCTracer : public GCTracer {
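
The new should_report_*/report_* pairs above split the cheap enablement test from the event payload construction, so callers (see psPromotionManager.inline.hpp earlier in this changeset) only compute object and PLAB sizes when the event is actually enabled. A self-contained sketch of that call pattern, with hypothetical stand-ins for the tracer and for HeapWordSize:

#include <cstddef>
#include <cstdio>

// Hypothetical stand-in for YoungGCTracer: the guard is cheap, the
// report call builds and sends the payload.
struct ToyTracer {
  bool enabled;
  bool should_report_promotion_in_new_plab_event() const { return enabled; }
  void report_promotion_in_new_plab_event(size_t obj_bytes, unsigned age,
                                          bool tenured, size_t plab_size) const {
    printf("promoted %zu bytes, age %u, tenured %d, plab %zu bytes\n",
           obj_bytes, age, (int)tenured, plab_size);
  }
};

int main() {
  const size_t kHeapWordSize = 8;   // assumed word size for the example
  ToyTracer tracer{true};
  size_t obj_size_words = 6;
  if (tracer.should_report_promotion_in_new_plab_event()) {  // guard first
    tracer.report_promotion_in_new_plab_event(obj_size_words * kHeapWordSize,
                                              /*age=*/2, /*tenured=*/false,
                                              /*plab_size=*/4096);
  }
}
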
--- a/hotspot/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -111,6 +111,44 @@
   }
 }
 
+bool YoungGCTracer::should_send_promotion_in_new_plab_event() const {
+  return EventPromoteObjectInNewPLAB::is_enabled();
+}
+
+bool YoungGCTracer::should_send_promotion_outside_plab_event() const {
+  return EventPromoteObjectOutsidePLAB::is_enabled();
+}
+
+void YoungGCTracer::send_promotion_in_new_plab_event(Klass* klass, size_t obj_size,
+                                                     uint age, bool tenured,
+                                                     size_t plab_size) const {
+
+  EventPromoteObjectInNewPLAB event;
+  if (event.should_commit()) {
+    event.set_gcId(_shared_gc_info.gc_id().id());
+    event.set_class(klass);
+    event.set_objectSize(obj_size);
+    event.set_tenured(tenured);
+    event.set_tenuringAge(age);
+    event.set_plabSize(plab_size);
+    event.commit();
+  }
+}
+
+void YoungGCTracer::send_promotion_outside_plab_event(Klass* klass, size_t obj_size,
+                                                      uint age, bool tenured) const {
+
+  EventPromoteObjectOutsidePLAB event;
+  if (event.should_commit()) {
+    event.set_gcId(_shared_gc_info.gc_id().id());
+    event.set_class(klass);
+    event.set_objectSize(obj_size);
+    event.set_tenured(tenured);
+    event.set_tenuringAge(age);
+    event.commit();
+  }
+}
+
 void OldGCTracer::send_old_gc_event() const {
   EventGCOldGarbageCollection e(UNTIMED);
   if (e.should_commit()) {
--- a/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/gc_implementation/shared/mutableSpace.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -63,9 +63,7 @@
 }
 
 void MutableSpace::pretouch_pages(MemRegion mr) {
-  for (volatile char *p = (char*)mr.start(); p < (char*)mr.end(); p += os::vm_page_size()) {
-    char t = *p; *p = t;
-  }
+  os::pretouch_memory((char*)mr.start(), (char*)mr.end());
 }
 
 void MutableSpace::initialize(MemRegion mr,
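
The hunk above replaces an open-coded page-touching loop with os::pretouch_memory(). The removed lines show exactly what pretouching means: read and write back one byte per page so the OS commits physical pages before the mutator first needs them. A standalone reconstruction of that loop, with page_size standing in for os::vm_page_size():

#include <cstddef>
#include <cstdlib>

// Touch one byte per page in [start, end); the volatile accesses keep the
// compiler from eliding the read-modify-write that forces each page in.
static void pretouch(char* start, char* end, size_t page_size) {
  for (volatile char* p = start; p < end; p += page_size) {
    char t = *p;
    *p = t;
  }
}

int main() {
  const size_t page = 4096, len = 16 * page;
  char* buf = static_cast<char*>(std::calloc(len, 1));
  if (buf == NULL) return 1;
  pretouch(buf, buf + len, page);
  std::free(buf);
  return 0;
}
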
--- a/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/interpreter/interpreterRuntime.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -385,6 +385,22 @@
   int                handler_bci;
   int                current_bci = bci(thread);
 
+  if (thread->frames_to_pop_failed_realloc() > 0) {
+    // Allocation of scalar replaced object used in this frame
+    // failed. Unconditionally pop the frame.
+    thread->dec_frames_to_pop_failed_realloc();
+    thread->set_vm_result(h_exception());
+    // If the method is synchronized we already unlocked the monitor
+    // during deoptimization so the interpreter needs to skip it when
+    // the frame is popped.
+    thread->set_do_not_unlock_if_synchronized(true);
+#ifdef CC_INTERP
+    return (address) -1;
+#else
+    return Interpreter::remove_activation_entry();
+#endif
+  }
+
   // Need to do this check first since when _do_not_unlock_if_synchronized
   // is set, we don't want to trigger any classloading which may make calls
   // into java, or surprisingly find a matching exception handler for bci 0
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/memory/cardGeneration.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -0,0 +1,360 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+
+#include "memory/blockOffsetTable.inline.hpp"
+#include "memory/cardGeneration.inline.hpp"
+#include "memory/gcLocker.hpp"
+#include "memory/generationSpec.hpp"
+#include "memory/genOopClosures.inline.hpp"
+#include "memory/genRemSet.hpp"
+#include "memory/iterator.hpp"
+#include "memory/memRegion.hpp"
+#include "memory/space.inline.hpp"
+#include "runtime/java.hpp"
+
+CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
+                               int level,
+                               GenRemSet* remset) :
+  Generation(rs, initial_byte_size, level), _rs(remset),
+  _shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(),
+  _used_at_prologue()
+{
+  HeapWord* start = (HeapWord*)rs.base();
+  size_t reserved_byte_size = rs.size();
+  assert((uintptr_t(start) & 3) == 0, "bad alignment");
+  assert((reserved_byte_size & 3) == 0, "bad alignment");
+  MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
+  _bts = new BlockOffsetSharedArray(reserved_mr,
+                                    heap_word_size(initial_byte_size));
+  MemRegion committed_mr(start, heap_word_size(initial_byte_size));
+  _rs->resize_covered_region(committed_mr);
+  if (_bts == NULL) {
+    vm_exit_during_initialization("Could not allocate a BlockOffsetArray");
+  }
+
+  // Verify that the start and end of this generation are each the start of a card.
+  // If this wasn't true, a single card could span more than one generation,
+  // which would cause problems when we commit/uncommit memory, and when we
+  // clear and dirty cards.
+  guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
+  if (reserved_mr.end() != Universe::heap()->reserved_region().end()) {
+    // Don't check at the very end of the heap as we'll assert that we're probing off
+    // the end if we try.
+    guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
+  }
+  _min_heap_delta_bytes = MinHeapDeltaBytes;
+  _capacity_at_prologue = initial_byte_size;
+  _used_at_prologue = 0;
+}
+
+bool CardGeneration::grow_by(size_t bytes) {
+  assert_correct_size_change_locking();
+  bool result = _virtual_space.expand_by(bytes);
+  if (result) {
+    size_t new_word_size =
+       heap_word_size(_virtual_space.committed_size());
+    MemRegion mr(space()->bottom(), new_word_size);
+    // Expand card table
+    Universe::heap()->barrier_set()->resize_covered_region(mr);
+    // Expand shared block offset array
+    _bts->resize(new_word_size);
+
+    // Fix for bug #4668531
+    if (ZapUnusedHeapArea) {
+      MemRegion mangle_region(space()->end(),
+                              (HeapWord*)_virtual_space.high());
+      SpaceMangler::mangle_region(mangle_region);
+    }
+
+    // Expand space -- also expands space's BOT
+    // (which uses (part of) shared array above)
+    space()->set_end((HeapWord*)_virtual_space.high());
+
+    // update the space and generation capacity counters
+    update_counters();
+
+    if (Verbose && PrintGC) {
+      size_t new_mem_size = _virtual_space.committed_size();
+      size_t old_mem_size = new_mem_size - bytes;
+      gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
+                      SIZE_FORMAT "K to " SIZE_FORMAT "K",
+                      name(), old_mem_size/K, bytes/K, new_mem_size/K);
+    }
+  }
+  return result;
+}
+
+bool CardGeneration::expand(size_t bytes, size_t expand_bytes) {
+  assert_locked_or_safepoint(Heap_lock);
+  if (bytes == 0) {
+    return true;  // That's what grow_by(0) would return
+  }
+  size_t aligned_bytes  = ReservedSpace::page_align_size_up(bytes);
+  if (aligned_bytes == 0) {
+    // The alignment caused the number of bytes to wrap.  An expand_by(0) will
+    // return true with the implication that an expansion was done when it
+    // was not.  A call to expand implies a best effort to expand by "bytes"
+    // but not a guarantee.  Align down to give a best effort.  This is likely
+    // the most that the generation can expand since it has some capacity to
+    // start with.
+    aligned_bytes = ReservedSpace::page_align_size_down(bytes);
+  }
+  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
+  bool success = false;
+  if (aligned_expand_bytes > aligned_bytes) {
+    success = grow_by(aligned_expand_bytes);
+  }
+  if (!success) {
+    success = grow_by(aligned_bytes);
+  }
+  if (!success) {
+    success = grow_to_reserved();
+  }
+  if (PrintGC && Verbose) {
+    if (success && GC_locker::is_active_and_needs_gc()) {
+      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
+    }
+  }
+
+  return success;
+}
+
+bool CardGeneration::grow_to_reserved() {
+  assert_correct_size_change_locking();
+  bool success = true;
+  const size_t remaining_bytes = _virtual_space.uncommitted_size();
+  if (remaining_bytes > 0) {
+    success = grow_by(remaining_bytes);
+    DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
+  }
+  return success;
+}
+
+void CardGeneration::shrink(size_t bytes) {
+  assert_correct_size_change_locking();
+
+  size_t size = ReservedSpace::page_align_size_down(bytes);
+  if (size == 0) {
+    return;
+  }
+
+  // Shrink committed space
+  _virtual_space.shrink_by(size);
+  // Shrink space; this also shrinks the space's BOT
+  space()->set_end((HeapWord*) _virtual_space.high());
+  size_t new_word_size = heap_word_size(space()->capacity());
+  // Shrink the shared block offset array
+  _bts->resize(new_word_size);
+  MemRegion mr(space()->bottom(), new_word_size);
+  // Shrink the card table
+  Universe::heap()->barrier_set()->resize_covered_region(mr);
+
+  if (Verbose && PrintGC) {
+    size_t new_mem_size = _virtual_space.committed_size();
+    size_t old_mem_size = new_mem_size + size;
+    gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
+                  name(), old_mem_size/K, new_mem_size/K);
+  }
+}
+
+// No young generation references, clear this generation's cards.
+void CardGeneration::clear_remembered_set() {
+  _rs->clear(reserved());
+}
+
+// Objects in this generation may have moved, invalidate this
+// generation's cards.
+void CardGeneration::invalidate_remembered_set() {
+  _rs->invalidate(used_region());
+}
+
+void CardGeneration::compute_new_size() {
+  assert(_shrink_factor <= 100, "invalid shrink factor");
+  size_t current_shrink_factor = _shrink_factor;
+  _shrink_factor = 0;
+
+  // We don't have floating point command-line arguments
+  // Note:  argument processing ensures that MinHeapFreeRatio < 100.
+  const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
+  const double maximum_used_percentage = 1.0 - minimum_free_percentage;
+
+  // Compute some numbers about the state of the heap.
+  const size_t used_after_gc = used();
+  const size_t capacity_after_gc = capacity();
+
+  const double min_tmp = used_after_gc / maximum_used_percentage;
+  size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
+  // Don't shrink less than the initial generation size
+  minimum_desired_capacity = MAX2(minimum_desired_capacity,
+                                  spec()->init_size());
+  assert(used_after_gc <= minimum_desired_capacity, "sanity check");
+
+  if (PrintGC && Verbose) {
+    const size_t free_after_gc = free();
+    const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
+    gclog_or_tty->print_cr("CardGeneration::compute_new_size: ");
+    gclog_or_tty->print_cr("  "
+                  "  minimum_free_percentage: %6.2f"
+                  "  maximum_used_percentage: %6.2f",
+                  minimum_free_percentage,
+                  maximum_used_percentage);
+    gclog_or_tty->print_cr("  "
+                  "   free_after_gc   : %6.1fK"
+                  "   used_after_gc   : %6.1fK"
+                  "   capacity_after_gc   : %6.1fK",
+                  free_after_gc / (double) K,
+                  used_after_gc / (double) K,
+                  capacity_after_gc / (double) K);
+    gclog_or_tty->print_cr("  "
+                  "   free_percentage: %6.2f",
+                  free_percentage);
+  }
+
+  if (capacity_after_gc < minimum_desired_capacity) {
+    // If we have less free space than we want then expand
+    size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
+    // Don't expand unless it's significant
+    if (expand_bytes >= _min_heap_delta_bytes) {
+      expand(expand_bytes, 0); // safe if expansion fails
+    }
+    if (PrintGC && Verbose) {
+      gclog_or_tty->print_cr("    expanding:"
+                    "  minimum_desired_capacity: %6.1fK"
+                    "  expand_bytes: %6.1fK"
+                    "  _min_heap_delta_bytes: %6.1fK",
+                    minimum_desired_capacity / (double) K,
+                    expand_bytes / (double) K,
+                    _min_heap_delta_bytes / (double) K);
+    }
+    return;
+  }
+
+  // No expansion, now see if we want to shrink
+  size_t shrink_bytes = 0;
+  // We would never want to shrink more than this
+  size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;
+
+  if (MaxHeapFreeRatio < 100) {
+    const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
+    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
+    const double max_tmp = used_after_gc / minimum_used_percentage;
+    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
+    maximum_desired_capacity = MAX2(maximum_desired_capacity,
+                                    spec()->init_size());
+    if (PrintGC && Verbose) {
+      gclog_or_tty->print_cr("  "
+                             "  maximum_free_percentage: %6.2f"
+                             "  minimum_used_percentage: %6.2f",
+                             maximum_free_percentage,
+                             minimum_used_percentage);
+      gclog_or_tty->print_cr("  "
+                             "  _capacity_at_prologue: %6.1fK"
+                             "  minimum_desired_capacity: %6.1fK"
+                             "  maximum_desired_capacity: %6.1fK",
+                             _capacity_at_prologue / (double) K,
+                             minimum_desired_capacity / (double) K,
+                             maximum_desired_capacity / (double) K);
+    }
+    assert(minimum_desired_capacity <= maximum_desired_capacity,
+           "sanity check");
+
+    if (capacity_after_gc > maximum_desired_capacity) {
+      // Capacity too large, compute shrinking size
+      shrink_bytes = capacity_after_gc - maximum_desired_capacity;
+      // We don't want to shrink all the way back to initSize if people call
+      // System.gc(), because some programs do that between "phases" and then
+      // we'd just have to grow the heap up again for the next phase.  So we
+      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
+      // on the third call, and 100% by the fourth call.  But if we recompute
+      // size without shrinking, it goes back to 0%.
+      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
+      assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
+      if (current_shrink_factor == 0) {
+        _shrink_factor = 10;
+      } else {
+        _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
+      }
+      if (PrintGC && Verbose) {
+        gclog_or_tty->print_cr("  "
+                      "  shrinking:"
+                      "  initSize: %.1fK"
+                      "  maximum_desired_capacity: %.1fK",
+                      spec()->init_size() / (double) K,
+                      maximum_desired_capacity / (double) K);
+        gclog_or_tty->print_cr("  "
+                      "  shrink_bytes: %.1fK"
+                      "  current_shrink_factor: " SIZE_FORMAT
+                      "  new shrink factor: " SIZE_FORMAT
+                      "  _min_heap_delta_bytes: %.1fK",
+                      shrink_bytes / (double) K,
+                      current_shrink_factor,
+                      _shrink_factor,
+                      _min_heap_delta_bytes / (double) K);
+      }
+    }
+  }
+
+  if (capacity_after_gc > _capacity_at_prologue) {
+    // We might have expanded for promotions, in which case we might want to
+    // take back that expansion if there's room after GC.  That keeps us from
+    // stretching the heap with promotions when there's plenty of room.
+    size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
+    expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
+    // We have two shrinking computations; take the largest.
+    shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
+    assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
+    if (PrintGC && Verbose) {
+      gclog_or_tty->print_cr("  "
+                             "  aggressive shrinking:"
+                             "  _capacity_at_prologue: %.1fK"
+                             "  capacity_after_gc: %.1fK"
+                             "  expansion_for_promotion: %.1fK"
+                             "  shrink_bytes: %.1fK",
+                             capacity_after_gc / (double) K,
+                             _capacity_at_prologue / (double) K,
+                             expansion_for_promotion / (double) K,
+                             shrink_bytes / (double) K);
+    }
+  }
+  // Don't shrink unless it's significant
+  if (shrink_bytes >= _min_heap_delta_bytes) {
+    shrink(shrink_bytes);
+  }
+}
+
+// Currently nothing to do.
+void CardGeneration::prepare_for_verify() {}
+
+void CardGeneration::space_iterate(SpaceClosure* blk,
+                                   bool usedOnly) {
+  blk->do_space(space());
+}
+
+void CardGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
+  blk->set_generation(this);
+  younger_refs_in_space_iterate(space(), blk);
+  blk->reset_generation();
+}
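
The damping logic in compute_new_size() above is easiest to see with numbers. The factor starts at 0, becomes 10 after the first shrink request, then quadruples (capped at 100), so back-to-back System.gc() calls shrink 0% of the computed excess on the first call, 10% on the second, 40% on the third, and 100% by the fourth. A runnable arithmetic sketch, assuming a constant 100M of excess capacity:

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  size_t shrink_factor = 0;                       // _shrink_factor
  const size_t excess_bytes = 100 * 1024 * 1024;  // hypothetical excess
  for (int call = 1; call <= 4; call++) {
    size_t current = shrink_factor;               // factor used this GC
    size_t shrink_bytes = excess_bytes / 100 * current;
    shrink_factor = (current == 0)                // damping update, as above
        ? 10
        : std::min(current * 4, (size_t)100);
    printf("call %d: shrink %zuM (factor %zu%%)\n",
           call, shrink_bytes / (1024 * 1024), current);
  }
  return 0;
}
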
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/memory/cardGeneration.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -0,0 +1,99 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_MEMORY_CARDGENERATION_HPP
+#define SHARE_VM_MEMORY_CARDGENERATION_HPP
+
+// Class CardGeneration is a generation that is covered by a card table,
+// and uses a card-size block-offset array to implement block_start.
+
+#include "memory/generation.hpp"
+
+class BlockOffsetSharedArray;
+class CompactibleSpace;
+
+class CardGeneration: public Generation {
+  friend class VMStructs;
+ protected:
+  // This is shared with other generations.
+  GenRemSet* _rs;
+  // This is local to this generation.
+  BlockOffsetSharedArray* _bts;
+
+  // Current shrinking effect: this damps shrinking when the heap gets empty.
+  size_t _shrink_factor;
+
+  size_t _min_heap_delta_bytes;   // Minimum amount to expand.
+
+  // Some statistics from before gc started.
+  // These are gathered in the gc_prologue (and should_collect)
+  // to control growing/shrinking policy in spite of promotions.
+  size_t _capacity_at_prologue;
+  size_t _used_at_prologue;
+
+  CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
+                 GenRemSet* remset);
+
+  virtual void assert_correct_size_change_locking() = 0;
+
+  virtual CompactibleSpace* space() const = 0;
+
+ public:
+
+  // Attempt to expand the generation by "bytes".  Expand by at
+  // least "expand_bytes".  Return true if some amount (not
+  // necessarily the full "bytes") was done.
+  virtual bool expand(size_t bytes, size_t expand_bytes);
+
+  // Shrink the generation by the specified number of bytes.
+  virtual void shrink(size_t bytes);
+
+  virtual void compute_new_size();
+
+  virtual void clear_remembered_set();
+
+  virtual void invalidate_remembered_set();
+
+  virtual void prepare_for_verify();
+
+  // Grow the generation by the specified number of bytes (returns false if unable to grow).
+  bool grow_by(size_t bytes);
+  // Grow generation to reserved size.
+  bool grow_to_reserved();
+
+  size_t capacity() const;
+  size_t used() const;
+  size_t free() const;
+  MemRegion used_region() const;
+
+  void space_iterate(SpaceClosure* blk, bool usedOnly = false);
+
+  void younger_refs_iterate(OopsInGenClosure* blk);
+
+  bool is_in(const void* p) const;
+
+  CompactibleSpace* first_compaction_space() const;
+};
+
+#endif // SHARE_VM_MEMORY_CARDGENERATION_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/hotspot/src/share/vm/memory/cardGeneration.inline.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_MEMORY_CARDGENERATION_INLINE_HPP
+#define SHARE_VM_MEMORY_CARDGENERATION_INLINE_HPP
+
+#include "memory/cardGeneration.hpp"
+#include "memory/space.hpp"
+
+inline size_t CardGeneration::capacity() const {
+  return space()->capacity();
+}
+
+inline size_t CardGeneration::used() const {
+  return space()->used();
+}
+
+inline size_t CardGeneration::free() const {
+  return space()->free();
+}
+
+inline MemRegion CardGeneration::used_region() const {
+  return space()->used_region();
+}
+
+inline bool CardGeneration::is_in(const void* p) const {
+  return space()->is_in(p);
+}
+
+inline CompactibleSpace* CardGeneration::first_compaction_space() const {
+  return space();
+}
+
+#endif // SHARE_VM_MEMORY_CARDGENERATION_INLINE_HPP
--- a/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/memory/cardTableModRefBS.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -275,29 +275,26 @@
     // the new_end_aligned does not intrude onto the committed
     // space of another region.
     int ri = 0;
-    for (ri = 0; ri < _cur_covered_regions; ri++) {
-      if (ri != ind) {
-        if (_committed[ri].contains(new_end_aligned)) {
-          // The prior check included in the assert
-          // (new_end_aligned >= _committed[ri].start())
-          // is redundant with the "contains" test.
-          // Any region containing the new end
-          // should start at or beyond the region found (ind)
-          // for the new end (committed regions are not expected to
-          // be proper subsets of other committed regions).
-          assert(_committed[ri].start() >= _committed[ind].start(),
-                 "New end of committed region is inconsistent");
-          new_end_aligned = _committed[ri].start();
-          // new_end_aligned can be equal to the start of its
-          // committed region (i.e., of "ind") if a second
-          // region following "ind" also start at the same location
-          // as "ind".
-          assert(new_end_aligned >= _committed[ind].start(),
-            "New end of committed region is before start");
-          debug_only(collided = true;)
-          // Should only collide with 1 region
-          break;
-        }
+    for (ri = ind + 1; ri < _cur_covered_regions; ri++) {
+      if (new_end_aligned > _committed[ri].start()) {
+        assert(new_end_aligned <= _committed[ri].end(),
+               "An earlier committed region can't cover a later committed region");
+        // Any region containing the new end
+        // should start at or beyond the region found (ind)
+        // for the new end (committed regions are not expected to
+        // be proper subsets of other committed regions).
+        assert(_committed[ri].start() >= _committed[ind].start(),
+               "New end of committed region is inconsistent");
+        new_end_aligned = _committed[ri].start();
+        // new_end_aligned can be equal to the start of its
+        // committed region (i.e., of "ind") if a second
+        // region following "ind" also starts at the same location
+        // as "ind".
+        assert(new_end_aligned >= _committed[ind].start(),
+          "New end of committed region is before start");
+        debug_only(collided = true;)
+        // Should only collide with 1 region
+        break;
       }
     }
 #ifdef ASSERT
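The rewritten loop bakes in two invariants the old version re-derived on every iteration: regions at or before ind cannot contain new_end_aligned, and committed regions are never proper subsets of one another, so only later regions need a start-point comparison. A standalone sketch of the resulting clamp, assuming regions ordered by start (illustrative names, not the HotSpot types):

    #include <cstddef>

    struct Region { char* start; char* end; };  // half-open [start, end)

    // Clamp a proposed end for regions[ind] so it does not intrude on the
    // committed space of any later region. At most one region can collide.
    char* clamp_end(const Region* regions, int n, int ind, char* proposed_end) {
      for (int ri = ind + 1; ri < n; ri++) {
        if (proposed_end > regions[ri].start) {
          proposed_end = regions[ri].start;  // back off to the collision point
          break;
        }
      }
      return proposed_end;
    }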
--- a/hotspot/src/share/vm/memory/cardTableRS.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/memory/cardTableRS.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -33,24 +33,13 @@
 #include "runtime/java.hpp"
 #include "runtime/os.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc_implementation/g1/concurrentMark.hpp"
-#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
-#endif // INCLUDE_ALL_GCS
 
 CardTableRS::CardTableRS(MemRegion whole_heap) :
   GenRemSet(),
   _cur_youngergen_card_val(youngergenP1_card)
 {
-#if INCLUDE_ALL_GCS
-  if (UseG1GC) {
-      _ct_bs = new G1SATBCardTableLoggingModRefBS(whole_heap);
-  } else {
-    _ct_bs = new CardTableModRefBSForCTRS(whole_heap);
-  }
-#else
+  guarantee(Universe::heap()->kind() == CollectedHeap::GenCollectedHeap, "sanity");
   _ct_bs = new CardTableModRefBSForCTRS(whole_heap);
-#endif
   _ct_bs->initialize();
   set_bs(_ct_bs);
   _last_cur_val_in_gen = NEW_C_HEAP_ARRAY3(jbyte, GenCollectedHeap::max_gens + 1,
--- a/hotspot/src/share/vm/memory/filemap.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/memory/filemap.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -98,11 +98,11 @@
         tty->print_cr("UseSharedSpaces: %s", msg);
       }
     }
+    UseSharedSpaces = false;
+    assert(current_info() != NULL, "singleton must be registered");
+    current_info()->close();
   }
   va_end(ap);
-  UseSharedSpaces = false;
-  assert(current_info() != NULL, "singleton must be registered");
-  current_info()->close();
 }
 
 // Fill in the fileMapInfo structure with data about this VM instance.
--- a/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -70,6 +70,7 @@
 
 GenCollectedHeap::GenCollectedHeap(GenCollectorPolicy *policy) :
   SharedHeap(policy),
+  _rem_set(NULL),
   _gen_policy(policy),
   _gen_process_roots_tasks(new SubTasksDone(GCH_PS_NumElements)),
   _full_collections_completed(0)
@@ -465,7 +466,7 @@
           // atomic wrt other collectors in this configuration, we
           // are guaranteed to have empty discovered ref lists.
           if (rp->discovery_is_atomic()) {
-            rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
+            rp->enable_discovery();
             rp->setup_policy(do_clear_all_soft_refs);
           } else {
             // collect() below will enable discovery as appropriate
--- a/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/memory/genCollectedHeap.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -66,6 +66,9 @@
   Generation* _gens[max_gens];
   GenerationSpec** _gen_specs;
 
+  // The singleton Gen Remembered Set.
+  GenRemSet* _rem_set;
+
   // The generational collector policy.
   GenCollectorPolicy* _gen_policy;
 
@@ -383,6 +386,10 @@
     return _n_gens;
   }
 
+  // This function returns the "GenRemSet" object that allows us to scan
+  // generations in a fully generational heap.
+  GenRemSet* rem_set() { return _rem_set; }
+
   // Convenience function to be used in situations where the heap type can be
   // asserted to be this type.
   static GenCollectedHeap* heap();
--- a/hotspot/src/share/vm/memory/genOopClosures.inline.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/memory/genOopClosures.inline.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,7 +44,7 @@
   _gen_boundary = _gen->reserved().start();
   // Barrier set for the heap, must be set after heap is initialized
   if (_rs == NULL) {
-    GenRemSet* rs = SharedHeap::heap()->rem_set();
+    GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
     _rs = (CardTableRS*)rs;
   }
 }
--- a/hotspot/src/share/vm/memory/generation.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/memory/generation.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -297,7 +297,7 @@
 
 void Generation::younger_refs_in_space_iterate(Space* sp,
                                                OopsInGenClosure* cl) {
-  GenRemSet* rs = SharedHeap::heap()->rem_set();
+  GenRemSet* rs = GenCollectedHeap::heap()->rem_set();
   rs->younger_refs_in_space_iterate(sp, cl);
 }
 
@@ -361,244 +361,3 @@
     sp = sp->next_compaction_space();
   }
 }
-
-CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
-                               int level,
-                               GenRemSet* remset) :
-  Generation(rs, initial_byte_size, level), _rs(remset),
-  _shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(),
-  _used_at_prologue()
-{
-  HeapWord* start = (HeapWord*)rs.base();
-  size_t reserved_byte_size = rs.size();
-  assert((uintptr_t(start) & 3) == 0, "bad alignment");
-  assert((reserved_byte_size & 3) == 0, "bad alignment");
-  MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
-  _bts = new BlockOffsetSharedArray(reserved_mr,
-                                    heap_word_size(initial_byte_size));
-  MemRegion committed_mr(start, heap_word_size(initial_byte_size));
-  _rs->resize_covered_region(committed_mr);
-  if (_bts == NULL)
-    vm_exit_during_initialization("Could not allocate a BlockOffsetArray");
-
-  // Verify that the start and end of this generation is the start of a card.
-  // If this wasn't true, a single card could span more than on generation,
-  // which would cause problems when we commit/uncommit memory, and when we
-  // clear and dirty cards.
-  guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
-  if (reserved_mr.end() != Universe::heap()->reserved_region().end()) {
-    // Don't check at the very end of the heap as we'll assert that we're probing off
-    // the end if we try.
-    guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
-  }
-  _min_heap_delta_bytes = MinHeapDeltaBytes;
-  _capacity_at_prologue = initial_byte_size;
-  _used_at_prologue = 0;
-}
-
-bool CardGeneration::expand(size_t bytes, size_t expand_bytes) {
-  assert_locked_or_safepoint(Heap_lock);
-  if (bytes == 0) {
-    return true;  // That's what grow_by(0) would return
-  }
-  size_t aligned_bytes  = ReservedSpace::page_align_size_up(bytes);
-  if (aligned_bytes == 0){
-    // The alignment caused the number of bytes to wrap.  An expand_by(0) will
-    // return true with the implication that an expansion was done when it
-    // was not.  A call to expand implies a best effort to expand by "bytes"
-    // but not a guarantee.  Align down to give a best effort.  This is likely
-    // the most that the generation can expand since it has some capacity to
-    // start with.
-    aligned_bytes = ReservedSpace::page_align_size_down(bytes);
-  }
-  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
-  bool success = false;
-  if (aligned_expand_bytes > aligned_bytes) {
-    success = grow_by(aligned_expand_bytes);
-  }
-  if (!success) {
-    success = grow_by(aligned_bytes);
-  }
-  if (!success) {
-    success = grow_to_reserved();
-  }
-  if (PrintGC && Verbose) {
-    if (success && GC_locker::is_active_and_needs_gc()) {
-      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
-    }
-  }
-
-  return success;
-}
-
-
-// No young generation references, clear this generation's cards.
-void CardGeneration::clear_remembered_set() {
-  _rs->clear(reserved());
-}
-
-
-// Objects in this generation may have moved, invalidate this
-// generation's cards.
-void CardGeneration::invalidate_remembered_set() {
-  _rs->invalidate(used_region());
-}
-
-
-void CardGeneration::compute_new_size() {
-  assert(_shrink_factor <= 100, "invalid shrink factor");
-  size_t current_shrink_factor = _shrink_factor;
-  _shrink_factor = 0;
-
-  // We don't have floating point command-line arguments
-  // Note:  argument processing ensures that MinHeapFreeRatio < 100.
-  const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
-  const double maximum_used_percentage = 1.0 - minimum_free_percentage;
-
-  // Compute some numbers about the state of the heap.
-  const size_t used_after_gc = used();
-  const size_t capacity_after_gc = capacity();
-
-  const double min_tmp = used_after_gc / maximum_used_percentage;
-  size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
-  // Don't shrink less than the initial generation size
-  minimum_desired_capacity = MAX2(minimum_desired_capacity,
-                                  spec()->init_size());
-  assert(used_after_gc <= minimum_desired_capacity, "sanity check");
-
-  if (PrintGC && Verbose) {
-    const size_t free_after_gc = free();
-    const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
-    gclog_or_tty->print_cr("TenuredGeneration::compute_new_size: ");
-    gclog_or_tty->print_cr("  "
-                  "  minimum_free_percentage: %6.2f"
-                  "  maximum_used_percentage: %6.2f",
-                  minimum_free_percentage,
-                  maximum_used_percentage);
-    gclog_or_tty->print_cr("  "
-                  "   free_after_gc   : %6.1fK"
-                  "   used_after_gc   : %6.1fK"
-                  "   capacity_after_gc   : %6.1fK",
-                  free_after_gc / (double) K,
-                  used_after_gc / (double) K,
-                  capacity_after_gc / (double) K);
-    gclog_or_tty->print_cr("  "
-                  "   free_percentage: %6.2f",
-                  free_percentage);
-  }
-
-  if (capacity_after_gc < minimum_desired_capacity) {
-    // If we have less free space than we want then expand
-    size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
-    // Don't expand unless it's significant
-    if (expand_bytes >= _min_heap_delta_bytes) {
-      expand(expand_bytes, 0); // safe if expansion fails
-    }
-    if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr("    expanding:"
-                    "  minimum_desired_capacity: %6.1fK"
-                    "  expand_bytes: %6.1fK"
-                    "  _min_heap_delta_bytes: %6.1fK",
-                    minimum_desired_capacity / (double) K,
-                    expand_bytes / (double) K,
-                    _min_heap_delta_bytes / (double) K);
-    }
-    return;
-  }
-
-  // No expansion, now see if we want to shrink
-  size_t shrink_bytes = 0;
-  // We would never want to shrink more than this
-  size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;
-
-  if (MaxHeapFreeRatio < 100) {
-    const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
-    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
-    const double max_tmp = used_after_gc / minimum_used_percentage;
-    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
-    maximum_desired_capacity = MAX2(maximum_desired_capacity,
-                                    spec()->init_size());
-    if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr("  "
-                             "  maximum_free_percentage: %6.2f"
-                             "  minimum_used_percentage: %6.2f",
-                             maximum_free_percentage,
-                             minimum_used_percentage);
-      gclog_or_tty->print_cr("  "
-                             "  _capacity_at_prologue: %6.1fK"
-                             "  minimum_desired_capacity: %6.1fK"
-                             "  maximum_desired_capacity: %6.1fK",
-                             _capacity_at_prologue / (double) K,
-                             minimum_desired_capacity / (double) K,
-                             maximum_desired_capacity / (double) K);
-    }
-    assert(minimum_desired_capacity <= maximum_desired_capacity,
-           "sanity check");
-
-    if (capacity_after_gc > maximum_desired_capacity) {
-      // Capacity too large, compute shrinking size
-      shrink_bytes = capacity_after_gc - maximum_desired_capacity;
-      // We don't want shrink all the way back to initSize if people call
-      // System.gc(), because some programs do that between "phases" and then
-      // we'd just have to grow the heap up again for the next phase.  So we
-      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
-      // on the third call, and 100% by the fourth call.  But if we recompute
-      // size without shrinking, it goes back to 0%.
-      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
-      assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
-      if (current_shrink_factor == 0) {
-        _shrink_factor = 10;
-      } else {
-        _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
-      }
-      if (PrintGC && Verbose) {
-        gclog_or_tty->print_cr("  "
-                      "  shrinking:"
-                      "  initSize: %.1fK"
-                      "  maximum_desired_capacity: %.1fK",
-                      spec()->init_size() / (double) K,
-                      maximum_desired_capacity / (double) K);
-        gclog_or_tty->print_cr("  "
-                      "  shrink_bytes: %.1fK"
-                      "  current_shrink_factor: " SIZE_FORMAT
-                      "  new shrink factor: " SIZE_FORMAT
-                      "  _min_heap_delta_bytes: %.1fK",
-                      shrink_bytes / (double) K,
-                      current_shrink_factor,
-                      _shrink_factor,
-                      _min_heap_delta_bytes / (double) K);
-      }
-    }
-  }
-
-  if (capacity_after_gc > _capacity_at_prologue) {
-    // We might have expanded for promotions, in which case we might want to
-    // take back that expansion if there's room after GC.  That keeps us from
-    // stretching the heap with promotions when there's plenty of room.
-    size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
-    expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
-    // We have two shrinking computations, take the largest
-    shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
-    assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
-    if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr("  "
-                             "  aggressive shrinking:"
-                             "  _capacity_at_prologue: %.1fK"
-                             "  capacity_after_gc: %.1fK"
-                             "  expansion_for_promotion: %.1fK"
-                             "  shrink_bytes: %.1fK",
-                             capacity_after_gc / (double) K,
-                             _capacity_at_prologue / (double) K,
-                             expansion_for_promotion / (double) K,
-                             shrink_bytes / (double) K);
-    }
-  }
-  // Don't shrink unless it's significant
-  if (shrink_bytes >= _min_heap_delta_bytes) {
-    shrink(shrink_bytes);
-  }
-}
-
-// Currently nothing to do.
-void CardGeneration::prepare_for_verify() {}
-
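The shrink damping in the removed compute_new_size (the CardGeneration code moves into its own cardGeneration files in this change) scales each shrink request by a factor that steps 0% -> 10% -> 40% -> 100% across consecutive opportunities, resetting once a resize happens without shrinking. A minimal sketch of that progression, keeping the original's integer-division order (divide by 100 before multiplying):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      size_t factor = 0;                        // plays _shrink_factor
      size_t excess = 100u * 1024 * 1024;       // bytes over desired capacity
      for (int gc = 1; gc <= 4; gc++) {
        size_t shrink = excess / 100 * factor;  // damped shrink request
        std::printf("GC %d: factor %3zu%% -> shrink %zu bytes\n",
                    gc, factor, shrink);
        factor = (factor == 0) ? 10 : std::min(factor * 4, (size_t)100);
      }
      return 0;  // factors printed: 0, 10, 40, 100
    }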
--- a/hotspot/src/share/vm/memory/generation.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/memory/generation.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -584,57 +584,4 @@
   virtual CollectorCounters* counters() { return _gc_counters; }
 };
 
-// Class CardGeneration is a generation that is covered by a card table,
-// and uses a card-size block-offset array to implement block_start.
-
-// class BlockOffsetArray;
-// class BlockOffsetArrayContigSpace;
-class BlockOffsetSharedArray;
-
-class CardGeneration: public Generation {
-  friend class VMStructs;
- protected:
-  // This is shared with other generations.
-  GenRemSet* _rs;
-  // This is local to this generation.
-  BlockOffsetSharedArray* _bts;
-
-  // current shrinking effect: this damps shrinking when the heap gets empty.
-  size_t _shrink_factor;
-
-  size_t _min_heap_delta_bytes;   // Minimum amount to expand.
-
-  // Some statistics from before gc started.
-  // These are gathered in the gc_prologue (and should_collect)
-  // to control growing/shrinking policy in spite of promotions.
-  size_t _capacity_at_prologue;
-  size_t _used_at_prologue;
-
-  CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
-                 GenRemSet* remset);
-
- public:
-
-  // Attempt to expand the generation by "bytes".  Expand by at a
-  // minimum "expand_bytes".  Return true if some amount (not
-  // necessarily the full "bytes") was done.
-  virtual bool expand(size_t bytes, size_t expand_bytes);
-
-  // Shrink generation with specified size (returns false if unable to shrink)
-  virtual void shrink(size_t bytes) = 0;
-
-  virtual void compute_new_size();
-
-  virtual void clear_remembered_set();
-
-  virtual void invalidate_remembered_set();
-
-  virtual void prepare_for_verify();
-
-  // Grow generation with specified size (returns false if unable to grow)
-  virtual bool grow_by(size_t bytes) = 0;
-  // Grow generation to reserved size.
-  virtual bool grow_to_reserved() = 0;
-};
-
 #endif // SHARE_VM_MEMORY_GENERATION_HPP
--- a/hotspot/src/share/vm/memory/metaspace.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/memory/metaspace.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -792,7 +792,8 @@
 Mutex* const SpaceManager::_expand_lock =
   new Mutex(SpaceManager::_expand_lock_rank,
             SpaceManager::_expand_lock_name,
-            Mutex::_allow_vm_block_flag);
+            Mutex::_allow_vm_block_flag,
+            Monitor::_safepoint_check_never);
 
 void VirtualSpaceNode::inc_container_count() {
   assert_lock_strong(SpaceManager::expand_lock());
@@ -3158,7 +3159,25 @@
     SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
     SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);
 
-    // the min_misc_code_size estimate is based on MetaspaceShared::generate_vtable_methods()
+    // make sure SharedReadOnlySize and SharedReadWriteSize are not less than
+    // the minimum values.
+    if (SharedReadOnlySize < MetaspaceShared::min_ro_size) {
+      report_out_of_shared_space(SharedReadOnly);
+    }
+
+    if (SharedReadWriteSize < MetaspaceShared::min_rw_size) {
+      report_out_of_shared_space(SharedReadWrite);
+    }
+
+    // the min_misc_data_size and min_misc_code_size estimates are based on
+    // MetaspaceShared::generate_vtable_methods()
+    uintx min_misc_data_size = align_size_up(
+      MetaspaceShared::num_virtuals * MetaspaceShared::vtbl_list_size * sizeof(void*), max_alignment);
+
+    if (SharedMiscDataSize < min_misc_data_size) {
+      report_out_of_shared_space(SharedMiscData);
+    }
+
     uintx min_misc_code_size = align_size_up(
       (MetaspaceShared::num_virtuals * MetaspaceShared::vtbl_list_size) *
         (sizeof(void*) + MetaspaceShared::vtbl_method_size) + MetaspaceShared::vtbl_common_code_size,
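For scale, min_misc_data_size is num_virtuals * vtbl_list_size * sizeof(void*), rounded up to max_alignment. With purely illustrative values (assumptions for the arithmetic, not the real constants): 200 virtuals, a 17-entry vtbl list, 8-byte pointers, and 32-byte alignment:

    200 * 17 * 8 = 27200 bytes
    align_size_up(27200, 32) = 27200   // already a multiple of 32 (32 * 850)

Any SharedMiscDataSize below that threshold now fails up front with report_out_of_shared_space instead of running out of space later during the dump.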
--- a/hotspot/src/share/vm/memory/metaspaceShared.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/memory/metaspaceShared.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -969,7 +969,7 @@
 #endif
     // If -Xshare:on is specified, print out the error message and exit VM,
     // otherwise, set UseSharedSpaces to false and continue.
-    if (RequireSharedSpaces) {
+    if (RequireSharedSpaces || PrintSharedArchiveAndExit) {
       vm_exit_during_initialization("Unable to use shared archive.", "Failed map_region for using -Xshare:on.");
     } else {
       FLAG_SET_DEFAULT(UseSharedSpaces, false);
--- a/hotspot/src/share/vm/memory/metaspaceShared.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/memory/metaspaceShared.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -70,6 +70,11 @@
   };
 
   enum {
+    min_ro_size = NOT_LP64(8*M) LP64_ONLY(9*M), // minimum ro and rw region sizes based on dumping
+    min_rw_size = NOT_LP64(7*M) LP64_ONLY(12*M) // of a shared archive using the default classlist
+  };
+
+  enum {
     ro = 0,  // read-only shared space in the heap
     rw = 1,  // read-write shared space in the heap
     md = 2,  // miscellaneous data for initializing tables, etc.
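NOT_LP64(x) keeps x only on 32-bit builds and LP64_ONLY(x) only on 64-bit builds, so each enumerator ends up with exactly one value per platform. Roughly (a sketch of the convention, not the verbatim HotSpot definitions):

    #ifdef _LP64
    #define LP64_ONLY(code) code
    #define NOT_LP64(code)
    #else
    #define LP64_ONLY(code)
    #define NOT_LP64(code) code
    #endif

    // min_ro_size thus reads as 8*M on 32-bit builds and 9*M on 64-bit ones.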
--- a/hotspot/src/share/vm/memory/referenceProcessor.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/memory/referenceProcessor.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -68,10 +68,10 @@
   _pending_list_uses_discovered_field = JDK_Version::current().pending_list_uses_discovered_field();
 }
 
-void ReferenceProcessor::enable_discovery(bool verify_disabled, bool check_no_refs) {
+void ReferenceProcessor::enable_discovery(bool check_no_refs) {
 #ifdef ASSERT
   // Verify that we're not currently discovering refs
-  assert(!verify_disabled || !_discovering_refs, "nested call?");
+  assert(!_discovering_refs, "nested call?");
 
   if (check_no_refs) {
     // Verify that the discovered lists are empty
@@ -963,52 +963,6 @@
   return total_list_count;
 }
 
-void ReferenceProcessor::clean_up_discovered_references() {
-  // loop over the lists
-  for (uint i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
-    if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
-      gclog_or_tty->print_cr(
-        "\nScrubbing %s discovered list of Null referents",
-        list_name(i));
-    }
-    clean_up_discovered_reflist(_discovered_refs[i]);
-  }
-}
-
-void ReferenceProcessor::clean_up_discovered_reflist(DiscoveredList& refs_list) {
-  assert(!discovery_is_atomic(), "Else why call this method?");
-  DiscoveredListIterator iter(refs_list, NULL, NULL);
-  while (iter.has_next()) {
-    iter.load_ptrs(DEBUG_ONLY(true /* allow_null_referent */));
-    oop next = java_lang_ref_Reference::next(iter.obj());
-    assert(next->is_oop_or_null(), err_msg("Expected an oop or NULL for next field at " PTR_FORMAT, p2i(next)));
-    // If referent has been cleared or Reference is not active,
-    // drop it.
-    if (iter.referent() == NULL || next != NULL) {
-      debug_only(
-        if (PrintGCDetails && TraceReferenceGC) {
-          gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
-            INTPTR_FORMAT " with next field: " INTPTR_FORMAT
-            " and referent: " INTPTR_FORMAT,
-            (void *)iter.obj(), (void *)next, (void *)iter.referent());
-        }
-      )
-      // Remove Reference object from list
-      iter.remove();
-      iter.move_to_next();
-    } else {
-      iter.next();
-    }
-  }
-  NOT_PRODUCT(
-    if (PrintGCDetails && TraceReferenceGC) {
-      gclog_or_tty->print(
-        " Removed %d Refs with NULL referents out of %d discovered Refs",
-        iter.removed(), iter.processed());
-    }
-  )
-}
-
 inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt) {
   uint id = 0;
   // Determine the queue index to use for this object.
--- a/hotspot/src/share/vm/memory/referenceProcessor.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/memory/referenceProcessor.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -353,19 +353,6 @@
                                       GCTimer*           gc_timer,
                                       GCId               gc_id);
 
-  // Delete entries in the discovered lists that have
-  // either a null referent or are not active. Such
-  // Reference objects can result from the clearing
-  // or enqueueing of Reference objects concurrent
-  // with their discovery by a (concurrent) collector.
-  // For a definition of "active" see java.lang.ref.Reference;
-  // Refs are born active, become inactive when enqueued,
-  // and never become active again. The state of being
-  // active is encoded as follows: A Ref is active
-  // if and only if its "next" field is NULL.
-  void clean_up_discovered_references();
-  void clean_up_discovered_reflist(DiscoveredList& refs_list);
-
   // Returns the name of the discovered reference list
   // occupying the i / _num_q slot.
   const char* list_name(uint i);
@@ -439,7 +426,7 @@
   void      set_span(MemRegion span) { _span = span; }
 
   // start and stop weak ref discovery
-  void enable_discovery(bool verify_disabled, bool check_no_refs);
+  void enable_discovery(bool check_no_refs = true);
   void disable_discovery()  { _discovering_refs = false; }
   bool discovery_enabled()  { return _discovering_refs;  }
 
@@ -517,7 +504,7 @@
 
   ~NoRefDiscovery() {
     if (_was_discovering_refs) {
-      _rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
+      _rp->enable_discovery(false /*check_no_refs*/);
     }
   }
 };
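With verify_disabled gone and check_no_refs defaulted to true, the call sites in this section collapse to a bare enable_discovery(); only NoRefDiscovery still passes an argument. The call-site shapes, as changed elsewhere in this patch:

    // before: rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
    // after:  rp->enable_discovery();
    //
    // before: _rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
    // after:  _rp->enable_discovery(false /*check_no_refs*/);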
--- a/hotspot/src/share/vm/memory/sharedHeap.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/memory/sharedHeap.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -58,7 +58,6 @@
 SharedHeap::SharedHeap(CollectorPolicy* policy_) :
   CollectedHeap(),
   _collector_policy(policy_),
-  _rem_set(NULL),
   _strong_roots_scope(NULL),
   _strong_roots_parity(0),
   _process_strong_tasks(new SubTasksDone(SH_PS_NumElements)),
@@ -152,7 +151,7 @@
   }
 }
 
-Monitor* SharedHeap::StrongRootsScope::_lock = new Monitor(Mutex::leaf, "StrongRootsScope lock", false);
+Monitor* SharedHeap::StrongRootsScope::_lock = new Monitor(Mutex::leaf, "StrongRootsScope lock", false, Monitor::_safepoint_check_never);
 
 void SharedHeap::StrongRootsScope::mark_worker_done_with_threads(uint n_workers) {
   // The Thread work barrier is only needed by G1 Class Unloading.
--- a/hotspot/src/share/vm/memory/sharedHeap.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/memory/sharedHeap.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -114,10 +114,6 @@
   // set the static pointer "_sh" to that instance.
   static SharedHeap* _sh;
 
-  // and the Gen Remembered Set, at least one good enough to scan the perm
-  // gen.
-  GenRemSet* _rem_set;
-
   // A gc policy, controls global gc resource issues
   CollectorPolicy *_collector_policy;
 
@@ -152,10 +148,6 @@
   // Initialization of ("weak") reference processing support
   virtual void ref_processing_init();
 
-  // This function returns the "GenRemSet" object that allows us to scan
-  // generations in a fully generational heap.
-  GenRemSet* rem_set() { return _rem_set; }
-
   // Iteration functions.
   void oop_iterate(ExtendedOopClosure* cl) = 0;
 
--- a/hotspot/src/share/vm/memory/tenuredGeneration.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/memory/tenuredGeneration.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,7 @@
 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/blockOffsetTable.inline.hpp"
+#include "memory/cardGeneration.inline.hpp"
 #include "memory/generationSpec.hpp"
 #include "memory/genMarkSweep.hpp"
 #include "memory/genOopClosures.inline.hpp"
@@ -235,34 +236,6 @@
   return CardGeneration::expand(bytes, expand_bytes);
 }
 
-
-void TenuredGeneration::shrink(size_t bytes) {
-  assert_locked_or_safepoint(ExpandHeap_lock);
-  size_t size = ReservedSpace::page_align_size_down(bytes);
-  if (size > 0) {
-    shrink_by(size);
-  }
-}
-
-
-size_t TenuredGeneration::capacity() const {
-  return _the_space->capacity();
-}
-
-
-size_t TenuredGeneration::used() const {
-  return _the_space->used();
-}
-
-
-size_t TenuredGeneration::free() const {
-  return _the_space->free();
-}
-
-MemRegion TenuredGeneration::used_region() const {
-  return the_space()->used_region();
-}
-
 size_t TenuredGeneration::unsafe_max_alloc_nogc() const {
   return _the_space->free();
 }
@@ -271,74 +244,8 @@
   return _the_space->free() + _virtual_space.uncommitted_size();
 }
 
-bool TenuredGeneration::grow_by(size_t bytes) {
+void TenuredGeneration::assert_correct_size_change_locking() {
   assert_locked_or_safepoint(ExpandHeap_lock);
-  bool result = _virtual_space.expand_by(bytes);
-  if (result) {
-    size_t new_word_size =
-       heap_word_size(_virtual_space.committed_size());
-    MemRegion mr(_the_space->bottom(), new_word_size);
-    // Expand card table
-    Universe::heap()->barrier_set()->resize_covered_region(mr);
-    // Expand shared block offset array
-    _bts->resize(new_word_size);
-
-    // Fix for bug #4668531
-    if (ZapUnusedHeapArea) {
-      MemRegion mangle_region(_the_space->end(),
-      (HeapWord*)_virtual_space.high());
-      SpaceMangler::mangle_region(mangle_region);
-    }
-
-    // Expand space -- also expands space's BOT
-    // (which uses (part of) shared array above)
-    _the_space->set_end((HeapWord*)_virtual_space.high());
-
-    // update the space and generation capacity counters
-    update_counters();
-
-    if (Verbose && PrintGC) {
-      size_t new_mem_size = _virtual_space.committed_size();
-      size_t old_mem_size = new_mem_size - bytes;
-      gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
-                      SIZE_FORMAT "K to " SIZE_FORMAT "K",
-                      name(), old_mem_size/K, bytes/K, new_mem_size/K);
-    }
-  }
-  return result;
-}
-
-
-bool TenuredGeneration::grow_to_reserved() {
-  assert_locked_or_safepoint(ExpandHeap_lock);
-  bool success = true;
-  const size_t remaining_bytes = _virtual_space.uncommitted_size();
-  if (remaining_bytes > 0) {
-    success = grow_by(remaining_bytes);
-    DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
-  }
-  return success;
-}
-
-void TenuredGeneration::shrink_by(size_t bytes) {
-  assert_locked_or_safepoint(ExpandHeap_lock);
-  // Shrink committed space
-  _virtual_space.shrink_by(bytes);
-  // Shrink space; this also shrinks the space's BOT
-  _the_space->set_end((HeapWord*) _virtual_space.high());
-  size_t new_word_size = heap_word_size(_the_space->capacity());
-  // Shrink the shared block offset array
-  _bts->resize(new_word_size);
-  MemRegion mr(_the_space->bottom(), new_word_size);
-  // Shrink the card table
-  Universe::heap()->barrier_set()->resize_covered_region(mr);
-
-  if (Verbose && PrintGC) {
-    size_t new_mem_size = _virtual_space.committed_size();
-    size_t old_mem_size = new_mem_size + bytes;
-    gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
-                  name(), old_mem_size/K, new_mem_size/K);
-  }
 }
 
 // Currently nothing to do.
@@ -348,27 +255,14 @@
   _the_space->object_iterate(blk);
 }
 
-void TenuredGeneration::space_iterate(SpaceClosure* blk,
-                                                 bool usedOnly) {
-  blk->do_space(_the_space);
-}
-
-void TenuredGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
-  blk->set_generation(this);
-  younger_refs_in_space_iterate(_the_space, blk);
-  blk->reset_generation();
-}
-
 void TenuredGeneration::save_marks() {
   _the_space->set_saved_mark();
 }
 
-
 void TenuredGeneration::reset_saved_marks() {
   _the_space->reset_saved_mark();
 }
 
-
 bool TenuredGeneration::no_allocs_since_save_marks() {
   return _the_space->saved_mark_at_top();
 }
@@ -387,28 +281,25 @@
 
 #undef TenuredGen_SINCE_SAVE_MARKS_ITERATE_DEFN
 
-
 void TenuredGeneration::gc_epilogue(bool full) {
-  _last_gc = WaterMark(the_space(), the_space()->top());
-
   // update the generation and space performance counters
   update_counters();
   if (ZapUnusedHeapArea) {
-    the_space()->check_mangled_unused_area_complete();
+    _the_space->check_mangled_unused_area_complete();
   }
 }
 
 void TenuredGeneration::record_spaces_top() {
   assert(ZapUnusedHeapArea, "Not mangling unused space");
-  the_space()->set_top_for_allocations();
+  _the_space->set_top_for_allocations();
 }
 
 void TenuredGeneration::verify() {
-  the_space()->verify();
+  _the_space->verify();
 }
 
 void TenuredGeneration::print_on(outputStream* st)  const {
   Generation::print_on(st);
   st->print("   the");
-  the_space()->print_on(st);
+  _the_space->print_on(st);
 }
--- a/hotspot/src/share/vm/memory/tenuredGeneration.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/memory/tenuredGeneration.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +28,7 @@
 #include "gc_implementation/shared/cSpaceCounters.hpp"
 #include "gc_implementation/shared/gcStats.hpp"
 #include "gc_implementation/shared/generationCounters.hpp"
-#include "memory/generation.hpp"
+#include "memory/cardGeneration.hpp"
 #include "utilities/macros.hpp"
 
 // TenuredGeneration models the heap containing old (promoted/tenured) objects
@@ -42,27 +42,18 @@
   friend class VM_PopulateDumpSharedSpace;
 
  protected:
-  ContiguousSpace*  _the_space;       // actual space holding objects
-  WaterMark  _last_gc;                // watermark between objects allocated before
-                                      // and after last GC.
+  ContiguousSpace*  _the_space;       // Actual space holding objects
 
   GenerationCounters*   _gen_counters;
   CSpaceCounters*       _space_counters;
 
-  // Grow generation with specified size (returns false if unable to grow)
-  virtual bool grow_by(size_t bytes);
-  // Grow generation to reserved size.
-  virtual bool grow_to_reserved();
-  // Shrink generation with specified size (returns false if unable to shrink)
-  void shrink_by(size_t bytes);
-
   // Allocation failure
   virtual bool expand(size_t bytes, size_t expand_bytes);
-  void shrink(size_t bytes);
 
   // Accessing spaces
-  ContiguousSpace* the_space() const { return _the_space; }
+  ContiguousSpace* space() const { return _the_space; }
 
+  void assert_correct_size_change_locking();
  public:
   TenuredGeneration(ReservedSpace rs, size_t initial_byte_size,
                                int level, GenRemSet* remset);
@@ -81,33 +72,15 @@
     return !ScavengeBeforeFullGC;
   }
 
-  inline bool is_in(const void* p) const;
-
-  // Space enquiries
-  size_t capacity() const;
-  size_t used() const;
-  size_t free() const;
-
-  MemRegion used_region() const;
-
   size_t unsafe_max_alloc_nogc() const;
   size_t contiguous_available() const;
 
   // Iteration
   void object_iterate(ObjectClosure* blk);
-  void space_iterate(SpaceClosure* blk, bool usedOnly = false);
-
-  void younger_refs_iterate(OopsInGenClosure* blk);
-
-  inline CompactibleSpace* first_compaction_space() const;
 
   virtual inline HeapWord* allocate(size_t word_size, bool is_tlab);
   virtual inline HeapWord* par_allocate(size_t word_size, bool is_tlab);
 
-  // Accessing marks
-  inline WaterMark top_mark();
-  inline WaterMark bottom_mark();
-
 #define TenuredGen_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)     \
   void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
   TenuredGen_SINCE_SAVE_MARKS_DECL(OopsInGenClosure,_v)
--- a/hotspot/src/share/vm/memory/tenuredGeneration.inline.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/memory/tenuredGeneration.inline.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -22,53 +22,35 @@
  *
  */
 
-#ifndef SHARE_VM_MEMORY_GENERATION_INLINE_HPP
-#define SHARE_VM_MEMORY_GENERATION_INLINE_HPP
+#ifndef SHARE_VM_MEMORY_TENUREDGENERATION_INLINE_HPP
+#define SHARE_VM_MEMORY_TENUREDGENERATION_INLINE_HPP
 
-#include "memory/genCollectedHeap.hpp"
 #include "memory/space.hpp"
 #include "memory/tenuredGeneration.hpp"
 
-bool TenuredGeneration::is_in(const void* p) const {
-  return the_space()->is_in(p);
-}
-
-
-WaterMark TenuredGeneration::top_mark() {
-  return the_space()->top_mark();
-}
-
-CompactibleSpace*
-TenuredGeneration::first_compaction_space() const {
-  return the_space();
-}
-
 HeapWord* TenuredGeneration::allocate(size_t word_size,
                                                  bool is_tlab) {
   assert(!is_tlab, "TenuredGeneration does not support TLAB allocation");
-  return the_space()->allocate(word_size);
+  return _the_space->allocate(word_size);
 }
 
 HeapWord* TenuredGeneration::par_allocate(size_t word_size,
                                                      bool is_tlab) {
   assert(!is_tlab, "TenuredGeneration does not support TLAB allocation");
-  return the_space()->par_allocate(word_size);
-}
-
-WaterMark TenuredGeneration::bottom_mark() {
-  return the_space()->bottom_mark();
+  return _the_space->par_allocate(word_size);
 }
 
 size_t TenuredGeneration::block_size(const HeapWord* addr) const {
-  if (addr < the_space()->top()) return oop(addr)->size();
-  else {
-    assert(addr == the_space()->top(), "non-block head arg to block_size");
-    return the_space()->end() - the_space()->top();
+  if (addr < _the_space->top()) {
+    return oop(addr)->size();
+  } else {
+    assert(addr == _the_space->top(), "non-block head arg to block_size");
+    return _the_space->end() - _the_space->top();
   }
 }
 
 bool TenuredGeneration::block_is_obj(const HeapWord* addr) const {
-  return addr < the_space()->top();
+  return addr < _the_space->top();
 }
 
-#endif // SHARE_VM_MEMORY_GENERATION_INLINE_HPP
+#endif // SHARE_VM_MEMORY_TENUREDGENERATION_INLINE_HPP
--- a/hotspot/src/share/vm/memory/universe.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/memory/universe.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -120,6 +120,7 @@
 oop Universe::_out_of_memory_error_class_metaspace    = NULL;
 oop Universe::_out_of_memory_error_array_size         = NULL;
 oop Universe::_out_of_memory_error_gc_overhead_limit  = NULL;
+oop Universe::_out_of_memory_error_realloc_objects    = NULL;
 objArrayOop Universe::_preallocated_out_of_memory_error_array = NULL;
 volatile jint Universe::_preallocated_out_of_memory_error_avail_count = 0;
 bool Universe::_verify_in_progress                    = false;
@@ -191,6 +192,7 @@
   f->do_oop((oop*)&_out_of_memory_error_class_metaspace);
   f->do_oop((oop*)&_out_of_memory_error_array_size);
   f->do_oop((oop*)&_out_of_memory_error_gc_overhead_limit);
+  f->do_oop((oop*)&_out_of_memory_error_realloc_objects);
     f->do_oop((oop*)&_preallocated_out_of_memory_error_array);
   f->do_oop((oop*)&_null_ptr_exception_instance);
   f->do_oop((oop*)&_arithmetic_exception_instance);
@@ -575,7 +577,8 @@
           (throwable() != Universe::_out_of_memory_error_metaspace)  &&
           (throwable() != Universe::_out_of_memory_error_class_metaspace)  &&
           (throwable() != Universe::_out_of_memory_error_array_size) &&
-          (throwable() != Universe::_out_of_memory_error_gc_overhead_limit));
+          (throwable() != Universe::_out_of_memory_error_gc_overhead_limit) &&
+          (throwable() != Universe::_out_of_memory_error_realloc_objects));
 }
 
 
@@ -1039,6 +1042,7 @@
     Universe::_out_of_memory_error_array_size = k_h->allocate_instance(CHECK_false);
     Universe::_out_of_memory_error_gc_overhead_limit =
       k_h->allocate_instance(CHECK_false);
+    Universe::_out_of_memory_error_realloc_objects = k_h->allocate_instance(CHECK_false);
 
     // Setup preallocated NullPointerException
     // (this is currently used for a cheap & dirty solution in compiler exception handling)
@@ -1078,6 +1082,9 @@
     msg = java_lang_String::create_from_str("GC overhead limit exceeded", CHECK_false);
     java_lang_Throwable::set_message(Universe::_out_of_memory_error_gc_overhead_limit, msg());
 
+    msg = java_lang_String::create_from_str("Java heap space: failed reallocation of scalar replaced objects", CHECK_false);
+    java_lang_Throwable::set_message(Universe::_out_of_memory_error_realloc_objects, msg());
+
     msg = java_lang_String::create_from_str("/ by zero", CHECK_false);
     java_lang_Throwable::set_message(Universe::_arithmetic_exception_instance, msg());
 
--- a/hotspot/src/share/vm/memory/universe.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/memory/universe.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -157,6 +157,7 @@
   static oop          _out_of_memory_error_class_metaspace;
   static oop          _out_of_memory_error_array_size;
   static oop          _out_of_memory_error_gc_overhead_limit;
+  static oop          _out_of_memory_error_realloc_objects;
 
   static Array<int>*       _the_empty_int_array;    // Canonicalized int array
   static Array<u2>*        _the_empty_short_array;  // Canonicalized short array
@@ -328,6 +329,7 @@
   static oop out_of_memory_error_class_metaspace()    { return gen_out_of_memory_error(_out_of_memory_error_class_metaspace);   }
   static oop out_of_memory_error_array_size()         { return gen_out_of_memory_error(_out_of_memory_error_array_size); }
   static oop out_of_memory_error_gc_overhead_limit()  { return gen_out_of_memory_error(_out_of_memory_error_gc_overhead_limit);  }
+  static oop out_of_memory_error_realloc_objects()    { return gen_out_of_memory_error(_out_of_memory_error_realloc_objects);  }
 
   // Accessors needed for fast allocation
   static Klass** boolArrayKlassObj_addr()           { return &_boolArrayKlassObj;   }
--- a/hotspot/src/share/vm/oops/cpCache.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/oops/cpCache.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -287,9 +287,13 @@
   // the lock, so that when the losing writer returns, he can use the linked
   // cache entry.
 
-  // Use the lock from the metaspace for this, which cannot stop for safepoint.
-  Mutex* metaspace_lock = cpool->pool_holder()->class_loader_data()->metaspace_lock();
-  MutexLockerEx ml(metaspace_lock,  Mutex::_no_safepoint_check_flag);
+  objArrayHandle resolved_references = cpool->resolved_references();
+  // Use the resolved_references() lock for this cpCache entry.
+  // resolved_references are created for all classes with Invokedynamic, MethodHandle
+  // or MethodType constant pool cache entries.
+  assert(resolved_references() != NULL,
+         "a resolved_references array should have been created for this class");
+  ObjectLocker ol(resolved_references, Thread::current());
   if (!is_f1_null()) {
     return;
   }
@@ -336,7 +340,6 @@
   // This allows us to create fewer Methods, while keeping type safety.
   //
 
-  objArrayHandle resolved_references = cpool->resolved_references();
   // Store appendix, if any.
   if (has_appendix) {
     const int appendix_index = f2_as_index() + _indy_resolved_references_appendix_offset;
--- a/hotspot/src/share/vm/opto/callnode.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/opto/callnode.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -658,7 +658,7 @@
 
 void CallNode::dump_spec(outputStream *st) const {
   st->print(" ");
-  tf()->dump_on(st);
+  if (tf() != NULL)  tf()->dump_on(st);
   if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
   if (jvms() != NULL)  jvms()->dump_spec(st);
 }
--- a/hotspot/src/share/vm/opto/castnode.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/opto/castnode.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -104,7 +104,8 @@
   // Try to improve the type of the CastII if we recognize a CmpI/If
   // pattern.
   if (_carry_dependency) {
-    if (in(0) != NULL && (in(0)->is_IfFalse() || in(0)->is_IfTrue())) {
+    if (in(0) != NULL && in(0)->in(0) != NULL && in(0)->in(0)->is_If()) {
+      assert(in(0)->is_IfFalse() || in(0)->is_IfTrue(), "should be If proj");
       Node* proj = in(0);
       if (proj->in(0)->in(1)->is_Bool()) {
         Node* b = proj->in(0)->in(1);
--- a/hotspot/src/share/vm/opto/ifnode.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/opto/ifnode.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -820,6 +820,11 @@
 
 static IfNode* idealize_test(PhaseGVN* phase, IfNode* iff);
 
+struct RangeCheck {
+  Node* ctl;
+  jint off;
+};
+
 //------------------------------Ideal------------------------------------------
 // Return a node which is more "ideal" than the current node.  Strip out
 // control copies
@@ -861,83 +866,141 @@
   jint offset1;
   int flip1 = is_range_check(range1, index1, offset1);
   if( flip1 ) {
-    Node *first_prev_dom = NULL;
-
     // Try to remove extra range checks.  All 'up_one_dom' gives up at merges
     // so all checks we inspect post-dominate the top-most check we find.
     // If we are going to fail the current check and we reach the top check
     // then we are guaranteed to fail, so just start interpreting there.
-    // We 'expand' the top 2 range checks to include all post-dominating
+    // We 'expand' the top 3 range checks to include all post-dominating
     // checks.
 
-    // The top 2 range checks seen
-    Node *prev_chk1 = NULL;
-    Node *prev_chk2 = NULL;
+    // The top 3 range checks seen
+    const int NRC = 3;
+    RangeCheck prev_checks[NRC];
+    int nb_checks = 0;
+
     // Low and high offsets seen so far
     jint off_lo = offset1;
     jint off_hi = offset1;
 
-    // Scan for the top 2 checks and collect range of offsets
-    for( int dist = 0; dist < 999; dist++ ) { // Range-Check scan limit
-      if( dom->Opcode() == Op_If &&  // Not same opcode?
-          prev_dom->in(0) == dom ) { // One path of test does dominate?
-        if( dom == this ) return NULL; // dead loop
+    bool found_immediate_dominator = false;
+
+    // Scan for the top checks and collect range of offsets
+    for (int dist = 0; dist < 999; dist++) { // Range-Check scan limit
+      if (dom->Opcode() == Op_If &&  // Not same opcode?
+          prev_dom->in(0) == dom) { // One path of test does dominate?
+        if (dom == this) return NULL; // dead loop
         // See if this is a range check
         Node *index2, *range2;
         jint offset2;
         int flip2 = dom->as_If()->is_range_check(range2, index2, offset2);
         // See if this is a _matching_ range check, checking against
         // the same array bounds.
-        if( flip2 == flip1 && range2 == range1 && index2 == index1 &&
-            dom->outcnt() == 2 ) {
+        if (flip2 == flip1 && range2 == range1 && index2 == index1 &&
+            dom->outcnt() == 2) {
+          if (nb_checks == 0 && dom->in(1) == in(1)) {
+            // Found an immediately dominating test at the same offset.
+            // This kind of back-to-back test can be eliminated locally,
+            // and there is no need to search further for dominating tests.
+            assert(offset2 == offset1, "Same test but different offsets");
+            found_immediate_dominator = true;
+            break;
+          }
           // Gather expanded bounds
           off_lo = MIN2(off_lo,offset2);
           off_hi = MAX2(off_hi,offset2);
-          // Record top 2 range checks
-          prev_chk2 = prev_chk1;
-          prev_chk1 = prev_dom;
-          // If we match the test exactly, then the top test covers
-          // both our lower and upper bounds.
-          if( dom->in(1) == in(1) )
-            prev_chk2 = prev_chk1;
+          // Record top NRC range checks
+          prev_checks[nb_checks % NRC].ctl = prev_dom;
+          prev_checks[nb_checks % NRC].off = offset2;
+          nb_checks++;
         }
       }
       prev_dom = dom;
-      dom = up_one_dom( dom );
-      if( !dom ) break;
+      dom = up_one_dom(dom);
+      if (!dom) break;
     }
 
+    if (!found_immediate_dominator) {
+      // Attempt to widen the dominating range check to cover some later
+      // ones.  Since range checks "fail" by uncommon-trapping to the
+      // interpreter, widening a check can make us speculatively enter
+      // the interpreter.  If we see range-check deopt's, do not widen!
+      if (!phase->C->allow_range_check_smearing())  return NULL;
 
-    // Attempt to widen the dominating range check to cover some later
-    // ones.  Since range checks "fail" by uncommon-trapping to the
-    // interpreter, widening a check can make us speculative enter the
-    // interpreter.  If we see range-check deopt's, do not widen!
-    if (!phase->C->allow_range_check_smearing())  return NULL;
-
-    // Constant indices only need to check the upper bound.
-    // Non-constance indices must check both low and high.
-    if( index1 ) {
-      // Didn't find 2 prior covering checks, so cannot remove anything.
-      if( !prev_chk2 ) return NULL;
-      // 'Widen' the offsets of the 1st and 2nd covering check
-      adjust_check( prev_chk1, range1, index1, flip1, off_lo, igvn );
-      // Do not call adjust_check twice on the same projection
-      // as the first call may have transformed the BoolNode to a ConI
-      if( prev_chk1 != prev_chk2 ) {
-        adjust_check( prev_chk2, range1, index1, flip1, off_hi, igvn );
+      // Didn't find prior covering check, so cannot remove anything.
+      if (nb_checks == 0) {
+        return NULL;
       }
-      // Test is now covered by prior checks, dominate it out
-      prev_dom = prev_chk2;
-    } else {
-      // Didn't find prior covering check, so cannot remove anything.
-      if( !prev_chk1 ) return NULL;
-      // 'Widen' the offset of the 1st and only covering check
-      adjust_check( prev_chk1, range1, index1, flip1, off_hi, igvn );
-      // Test is now covered by prior checks, dominate it out
-      prev_dom = prev_chk1;
+      // Constant indices only need to check the upper bound.
+      // Non-constant indices must check both low and high.
+      int chk0 = (nb_checks - 1) % NRC;
+      if (index1) {
+        if (nb_checks == 1) {
+          return NULL;
+        } else {
+          // If the top range check's constant is the min or max of
+          // all constants we widen the next one to cover the whole
+          // range of constants.
+          RangeCheck rc0 = prev_checks[chk0];
+          int chk1 = (nb_checks - 2) % NRC;
+          RangeCheck rc1 = prev_checks[chk1];
+          if (rc0.off == off_lo) {
+            adjust_check(rc1.ctl, range1, index1, flip1, off_hi, igvn);
+            prev_dom = rc1.ctl;
+          } else if (rc0.off == off_hi) {
+            adjust_check(rc1.ctl, range1, index1, flip1, off_lo, igvn);
+            prev_dom = rc1.ctl;
+          } else {
+            // If the top test's constant is not the min or max of all
+            // constants, we need 3 range checks. We must leave the
+            // top test unchanged because widening it would allow the
+            // accesses it protects to successfully read/write out of
+            // bounds.
+            if (nb_checks == 2) {
+              return NULL;
+            }
+            int chk2 = (nb_checks - 3) % NRC;
+            RangeCheck rc2 = prev_checks[chk2];
+            // The top range check a+i covers interval: -a <= i < length-a
+            // The second range check b+i covers interval: -b <= i < length-b
+            if (rc1.off <= rc0.off) {
+              // if b <= a, we change the second range check to:
+              // -min_of_all_constants <= i < length-min_of_all_constants
+              // Together top and second range checks now cover:
+              // -min_of_all_constants <= i < length-a
+              // which is more restrictive than -b <= i < length-b:
+              // -b <= -min_of_all_constants <= i < length-a <= length-b
+              // The third check is then changed to:
+              // -max_of_all_constants <= i < length-max_of_all_constants
+              // so 2nd and 3rd checks restrict allowed values of i to:
+              // -min_of_all_constants <= i < length-max_of_all_constants
+              adjust_check(rc1.ctl, range1, index1, flip1, off_lo, igvn);
+              adjust_check(rc2.ctl, range1, index1, flip1, off_hi, igvn);
+            } else {
+              // if b > a, we change the second range check to:
+              // -max_of_all_constants <= i < length-max_of_all_constants
+              // Together top and second range checks now cover:
+              // -a <= i < length-max_of_all_constants
+              // which is more restrictive than -b <= i < length-b:
+              // -b < -a <= i < length-max_of_all_constants <= length-b
+              // The third check is then changed to:
+              // -max_of_all_constants <= i < length-max_of_all_constants
+              // so 2nd and 3rd checks restrict allowed values of i to:
+              // -min_of_all_constants <= i < length-max_of_all_constants
+              adjust_check(rc1.ctl, range1, index1, flip1, off_hi, igvn);
+              adjust_check(rc2.ctl, range1, index1, flip1, off_lo, igvn);
+            }
+            prev_dom = rc2.ctl;
+          }
+        }
+      } else {
+        RangeCheck rc0 = prev_checks[chk0];
+        // 'Widen' the offset of the 1st and only covering check
+        adjust_check(rc0.ctl, range1, index1, flip1, off_hi, igvn);
+        // Test is now covered by prior checks, dominate it out
+        prev_dom = rc0.ctl;
+      }
     }
 
-
   } else {                      // Scan for an equivalent test
 
     Node *cmp;
@@ -1019,7 +1082,7 @@
   // for lower and upper bounds.
   ProjNode* unc_proj = proj_out(1 - prev_dom->as_Proj()->_con)->as_Proj();
   if (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate))
-    prev_dom = idom;
+    prev_dom = idom;
 
   // Now walk the current IfNode's projections.
   // Loop ends when 'this' has no more uses.
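A worked example of the widening above, with hypothetical offsets: suppose the scan walks up from this check and finds matching checks at offsets 9, 0, 7, 1, 4 (bottom to top), so off_lo = 0, off_hi = 9, and the three topmost recorded checks are rc0.off = 4, rc1.off = 1, rc2.off = 7. rc0's constant is neither the min nor the max, so the top check stays untouched; since rc1.off (1) <= rc0.off (4), rc1 is widened to off_lo = 0 and rc2 to off_hi = 9. The surviving checks then pin i to 0 <= i < length - 9, which subsumes every offset in [0, 9], and everything below rc2 is dominated out.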
--- a/hotspot/src/share/vm/opto/lcm.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/opto/lcm.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -417,8 +417,15 @@
   for (DUIterator_Last i2min, i2 = old_tst->last_outs(i2min); i2 >= i2min; --i2)
     old_tst->last_out(i2)->set_req(0, nul_chk);
   // Clean-up any dead code
-  for (uint i3 = 0; i3 < old_tst->req(); i3++)
+  for (uint i3 = 0; i3 < old_tst->req(); i3++) {
+    Node* in = old_tst->in(i3);
     old_tst->set_req(i3, NULL);
+    if (in->outcnt() == 0) {
+      // Remove dead input node
+      in->disconnect_inputs(NULL, C);
+      block->find_remove(in);
+    }
+  }
 
   latency_from_uses(nul_chk);
   latency_from_uses(best);
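The added cleanup follows a standard graph idiom: after detaching a node's inputs, any producer whose use count drops to zero is itself unlinked so it does not linger in the block. A self-contained sketch of the idiom with a toy node type (not the C2 classes):

    #include <algorithm>
    #include <vector>

    struct Node {
      std::vector<Node*> inputs;
      int outcnt = 0;  // number of remaining users
    };

    // Detach every input of 'dead'; fully remove producers left without users.
    void detach_inputs(Node* dead, std::vector<Node*>& block) {
      for (Node*& in : dead->inputs) {
        if (in == nullptr) continue;
        Node* producer = in;
        in = nullptr;                       // drop the use edge
        if (--producer->outcnt == 0) {      // producer is now dead too
          auto it = std::find(block.begin(), block.end(), producer);
          if (it != block.end()) block.erase(it);
        }
      }
    }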
--- a/hotspot/src/share/vm/opto/loopnode.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/opto/loopnode.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -1147,7 +1147,7 @@
 // Dump special per-node info
 #ifndef PRODUCT
 void CountedLoopEndNode::dump_spec(outputStream *st) const {
-  if( in(TestValue)->is_Bool() ) {
+  if( in(TestValue) != NULL && in(TestValue)->is_Bool() ) {
     BoolTest bt( test_trip()); // Added this for g++.
 
     st->print("[");
--- a/hotspot/src/share/vm/opto/loopopts.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/opto/loopopts.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -241,8 +241,13 @@
   ProjNode* dp_proj  = dp->as_Proj();
   ProjNode* unc_proj = iff->as_If()->proj_out(1 - dp_proj->_con)->as_Proj();
   if (exclude_loop_predicate &&
-      unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate))
+      (unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_predicate) ||
+       unc_proj->is_uncommon_trap_proj(Deoptimization::Reason_range_check))) {
+    // If this is a range check (IfNode::is_range_check), do not
+    // reorder because Compile::allow_range_check_smearing might have
+    // changed the check.
     return; // Let IGVN transformation change control dependence.
+  }
 
   IdealLoopTree *old_loop = get_loop(dp);
 
@@ -898,23 +903,23 @@
   int n_op = n->Opcode();
 
   // Check for an IF being dominated by another IF same test
-  if( n_op == Op_If ) {
+  if (n_op == Op_If) {
     Node *bol = n->in(1);
     uint max = bol->outcnt();
     // Check for same test used more than once?
-    if( n_op == Op_If && max > 1 && bol->is_Bool() ) {
+    if (max > 1 && bol->is_Bool()) {
       // Search up IDOMs to see if this IF is dominated.
       Node *cutoff = get_ctrl(bol);
 
       // Now search up IDOMs till cutoff, looking for a dominating test
       Node *prevdom = n;
       Node *dom = idom(prevdom);
-      while( dom != cutoff ) {
-        if( dom->req() > 1 && dom->in(1) == bol && prevdom->in(0) == dom ) {
+      while (dom != cutoff) {
+        if (dom->req() > 1 && dom->in(1) == bol && prevdom->in(0) == dom) {
           // Replace the dominated test with an obvious true or false.
           // Place it on the IGVN worklist for later cleanup.
           C->set_major_progress();
-          dominated_by( prevdom, n, false, true );
+          dominated_by(prevdom, n, false, true);
 #ifndef PRODUCT
           if( VerifyLoopOptimizations ) verify();
 #endif
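
The rewritten loop climbs the immediate-dominator chain from the If node up to where its Bool is defined, looking for an earlier If that tests the same Bool; the dominated copy can then be folded to a constant. A toy standalone sketch of that search (plain structs, not C2's IR):

    #include <cassert>
    #include <cstddef>

    struct TestNode {
      TestNode* idom = nullptr;     // immediate dominator
      const void* cond = nullptr;   // identity of the Bool being tested
    };

    // Walk up the dominator tree from 'n' to 'cutoff', looking for an
    // earlier test of the same condition; the dominated copy can then
    // be replaced by an obvious true or false.
    static TestNode* find_dominating_same_test(TestNode* n, TestNode* cutoff) {
      for (TestNode* dom = n->idom; dom != nullptr && dom != cutoff; dom = dom->idom) {
        if (dom->cond == n->cond) {
          return dom;
        }
      }
      return nullptr;
    }

    int main() {
      int flag;                     // stands in for a shared Bool node
      TestNode root, earlier, later;
      earlier.idom = &root;  earlier.cond = &flag;
      later.idom = &earlier; later.cond = &flag;
      assert(find_dominating_same_test(&later, &root) == &earlier);
      return 0;
    }
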
--- a/hotspot/src/share/vm/opto/machnode.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/opto/machnode.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -473,8 +473,13 @@
 // Print any per-operand special info
 void MachNode::dump_spec(outputStream *st) const {
   uint cnt = num_opnds();
-  for( uint i=0; i<cnt; i++ )
-    _opnds[i]->dump_spec(st);
+  for( uint i=0; i<cnt; i++ ) {
+    if (_opnds[i] != NULL) {
+      _opnds[i]->dump_spec(st);
+    } else {
+      st->print(" _");
+    }
+  }
   const TypePtr *t = adr_type();
   if( t ) {
     Compile* C = Compile::current();
@@ -493,7 +498,11 @@
 //=============================================================================
 #ifndef PRODUCT
 void MachTypeNode::dump_spec(outputStream *st) const {
-  _bottom_type->dump_on(st);
+  if (_bottom_type != NULL) {
+    _bottom_type->dump_on(st);
+  } else {
+    st->print(" NULL");
+  }
 }
 #endif
 
@@ -635,7 +644,7 @@
 #ifndef PRODUCT
 void MachCallNode::dump_spec(outputStream *st) const {
   st->print("# ");
-  tf()->dump_on(st);
+  if (tf() != NULL)  tf()->dump_on(st);
   if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
   if (jvms() != NULL)  jvms()->dump_spec(st);
 }
--- a/hotspot/src/share/vm/opto/macro.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/opto/macro.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -971,7 +971,11 @@
 }
 
 bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) {
-  if (!EliminateAllocations || !alloc->_is_non_escaping) {
+  // Don't do scalar replacement if the frame can be popped by JVMTI:
+  // if reallocation fails during deoptimization we'll pop all
+  // interpreter frames for this compiled frame and that won't play
+  // nice with JVMTI popframe.
+  if (!EliminateAllocations || JvmtiExport::can_pop_frame() || !alloc->_is_non_escaping) {
     return false;
   }
   Node* klass = alloc->in(AllocateNode::KlassNode);
--- a/hotspot/src/share/vm/opto/memnode.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/opto/memnode.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -4370,7 +4370,7 @@
   st->print(" {");
   Node* base_mem = base_memory();
   for( uint i = Compile::AliasIdxRaw; i < req(); i++ ) {
-    Node* mem = memory_at(i);
+    Node* mem = (in(i) != NULL) ? memory_at(i) : base_mem;
     if (mem == base_mem) { st->print(" -"); continue; }
     st->print( " N%d:", mem->_idx );
     Compile::current()->get_adr_type(i)->dump_on(st);
--- a/hotspot/src/share/vm/prims/jvm.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/prims/jvm.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -700,23 +700,7 @@
 
 // Returns a class loaded by the bootstrap class loader; or null
 // if not found.  ClassNotFoundException is not thrown.
-//
-// Rationale behind JVM_FindClassFromBootLoader
-// a> JVM_FindClassFromClassLoader was never exported in the export tables.
-// b> because of (a) java.dll has a direct dependecy on the  unexported
-//    private symbol "_JVM_FindClassFromClassLoader@20".
-// c> the launcher cannot use the private symbol as it dynamically opens
-//    the entry point, so if something changes, the launcher will fail
-//    unexpectedly at runtime, it is safest for the launcher to dlopen a
-//    stable exported interface.
-// d> re-exporting JVM_FindClassFromClassLoader as public, will cause its
-//    signature to change from _JVM_FindClassFromClassLoader@20 to
-//    JVM_FindClassFromClassLoader and will not be backward compatible
-//    with older JDKs.
-// Thus a public/stable exported entry point is the right solution,
-// public here means public in linker semantics, and is exported only
-// to the JDK, and is not intended to be a public API.
-
+// FindClassFromBootLoader is exported to the launcher for Windows.
 JVM_ENTRY(jclass, JVM_FindClassFromBootLoader(JNIEnv* env,
                                               const char* name))
   JVMWrapper2("JVM_FindClassFromBootLoader %s", name);
@@ -740,33 +724,6 @@
   return (jclass) JNIHandles::make_local(env, k->java_mirror());
 JVM_END
 
-// Not used; JVM_FindClassFromCaller replaces this.
-JVM_ENTRY(jclass, JVM_FindClassFromClassLoader(JNIEnv* env, const char* name,
-                                               jboolean init, jobject loader,
-                                               jboolean throwError))
-  JVMWrapper3("JVM_FindClassFromClassLoader %s throw %s", name,
-               throwError ? "error" : "exception");
-  // Java libraries should ensure that name is never null...
-  if (name == NULL || (int)strlen(name) > Symbol::max_length()) {
-    // It's impossible to create this class;  the name cannot fit
-    // into the constant pool.
-    if (throwError) {
-      THROW_MSG_0(vmSymbols::java_lang_NoClassDefFoundError(), name);
-    } else {
-      THROW_MSG_0(vmSymbols::java_lang_ClassNotFoundException(), name);
-    }
-  }
-  TempNewSymbol h_name = SymbolTable::new_symbol(name, CHECK_NULL);
-  Handle h_loader(THREAD, JNIHandles::resolve(loader));
-  jclass result = find_class_from_class_loader(env, h_name, init, h_loader,
-                                               Handle(), throwError, THREAD);
-
-  if (TraceClassResolution && result != NULL) {
-    trace_class_resolution(java_lang_Class::as_Klass(JNIHandles::resolve_non_null(result)));
-  }
-  return result;
-JVM_END
-
 // Find a class with this name in this loader, using the caller's protection domain.
 JVM_ENTRY(jclass, JVM_FindClassFromCaller(JNIEnv* env, const char* name,
                                           jboolean init, jobject loader,
--- a/hotspot/src/share/vm/prims/jvm.h	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/prims/jvm.h	Mon Jan 05 13:30:23 2015 -0800
@@ -335,15 +335,6 @@
 JVM_FindPrimitiveClass(JNIEnv *env, const char *utf);
 
 /*
- * Find a class from a given class loader. Throw ClassNotFoundException
- * or NoClassDefFoundError depending on the value of the last
- * argument.
- */
-JNIEXPORT jclass JNICALL
-JVM_FindClassFromClassLoader(JNIEnv *env, const char *name, jboolean init,
-                             jobject loader, jboolean throwError);
-
-/*
  * Find a class from a boot class loader. Returns NULL if class not found.
  */
 JNIEXPORT jclass JNICALL
--- a/hotspot/src/share/vm/prims/whitebox.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/prims/whitebox.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -69,6 +69,14 @@
 bool WhiteBox::_used = false;
 volatile bool WhiteBox::compilation_locked = false;
 
+class VM_WhiteBoxOperation : public VM_Operation {
+ public:
+  VM_WhiteBoxOperation()                         { }
+  VMOp_Type type()                  const        { return VMOp_WhiteBoxOperation; }
+  bool allow_nested_vm_operations() const        { return true; }
+};
+
+
 WB_ENTRY(jlong, WB_GetObjectAddress(JNIEnv* env, jobject o, jobject obj))
   return (jlong)(void*)JNIHandles::resolve(obj);
 WB_END
@@ -404,6 +412,43 @@
   return env->FromReflectedMethod(method);
 }
 
+// Deoptimizes all compiled frames and makes nmethods not entrant if requested
+class VM_WhiteBoxDeoptimizeFrames : public VM_WhiteBoxOperation {
+ private:
+  int _result;
+  const bool _make_not_entrant;
+ public:
+  VM_WhiteBoxDeoptimizeFrames(bool make_not_entrant) :
+        _result(0), _make_not_entrant(make_not_entrant) { }
+  int  result() const { return _result; }
+
+  void doit() {
+    for (JavaThread* t = Threads::first(); t != NULL; t = t->next()) {
+      if (t->has_last_Java_frame()) {
+        for (StackFrameStream fst(t, UseBiasedLocking); !fst.is_done(); fst.next()) {
+          frame* f = fst.current();
+          if (f->can_be_deoptimized() && !f->is_deoptimized_frame()) {
+            RegisterMap* reg_map = fst.register_map();
+            Deoptimization::deoptimize(t, *f, reg_map);
+            if (_make_not_entrant) {
+                nmethod* nm = CodeCache::find_nmethod(f->pc());
+                assert(nm != NULL, "sanity check");
+                nm->make_not_entrant();
+            }
+            ++_result;
+          }
+        }
+      }
+    }
+  }
+};
+
+WB_ENTRY(jint, WB_DeoptimizeFrames(JNIEnv* env, jobject o, jboolean make_not_entrant))
+  VM_WhiteBoxDeoptimizeFrames op(make_not_entrant == JNI_TRUE);
+  VMThread::execute(&op);
+  return op.result();
+WB_END
+
 WB_ENTRY(void, WB_DeoptimizeAll(JNIEnv* env, jobject o))
   MutexLockerEx mu(Compile_lock);
   CodeCache::mark_all_nmethods_for_deoptimization();
@@ -526,13 +571,6 @@
   return (mh->queued_for_compilation() || nm != NULL);
 WB_END
 
-class VM_WhiteBoxOperation : public VM_Operation {
- public:
-  VM_WhiteBoxOperation()                         { }
-  VMOp_Type type()                  const        { return VMOp_WhiteBoxOperation; }
-  bool allow_nested_vm_operations() const        { return true; }
-};
-
 class AlwaysFalseClosure : public BoolObjectClosure {
  public:
   bool do_object_b(oop p) { return false; }
@@ -761,7 +799,6 @@
   }
 WB_END
 
-
 WB_ENTRY(void, WB_LockCompilation(JNIEnv* env, jobject o, jlong timeout))
   WhiteBox::compilation_locked = true;
 WB_END
@@ -1078,6 +1115,14 @@
   return (jlong) MetaspaceGC::capacity_until_GC();
 WB_END
 
+WB_ENTRY(void, WB_AssertMatchingSafepointCalls(JNIEnv* env, jobject o, jboolean mutexSafepointValue, jboolean attemptedNoSafepointValue))
+  Monitor::SafepointCheckRequired sfpt_check_required = mutexSafepointValue ?
+                                           Monitor::_safepoint_check_always :
+                                           Monitor::_safepoint_check_never;
+  MutexLockerEx ml(new Mutex(Mutex::leaf, "SFPT_Test_lock", true, sfpt_check_required),
+                   attemptedNoSafepointValue == JNI_TRUE);
+WB_END
+
 //Some convenience methods to deal with objects from java
 int WhiteBox::offset_for_field(const char* field_name, oop object,
     Symbol* signature_symbol) {
@@ -1201,6 +1246,7 @@
   {CC"NMTChangeTrackingLevel", CC"()Z",               (void*)&WB_NMTChangeTrackingLevel},
   {CC"NMTGetHashSize",      CC"()I",                  (void*)&WB_NMTGetHashSize     },
 #endif // INCLUDE_NMT
+  {CC"deoptimizeFrames",   CC"(Z)I",                  (void*)&WB_DeoptimizeFrames  },
   {CC"deoptimizeAll",      CC"()V",                   (void*)&WB_DeoptimizeAll     },
   {CC"deoptimizeMethod",   CC"(Ljava/lang/reflect/Executable;Z)I",
                                                       (void*)&WB_DeoptimizeMethod  },
@@ -1274,6 +1320,7 @@
   {CC"getCodeBlob",        CC"(J)[Ljava/lang/Object;",(void*)&WB_GetCodeBlob        },
   {CC"getThreadStackSize", CC"()J",                   (void*)&WB_GetThreadStackSize },
   {CC"getThreadRemainingStackSize", CC"()J",          (void*)&WB_GetThreadRemainingStackSize },
+  {CC"assertMatchingSafepointCalls", CC"(ZZ)V",       (void*)&WB_AssertMatchingSafepointCalls },
 };
 
 #undef CC
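
WB_DeoptimizeFrames packages the stack walk into a VM operation, hands it to the VM thread, and reads the frame count back from the operation object once execute() returns. A standalone sketch of that synchronous hand-off shape, with std::thread standing in for the VMThread:

    #include <cassert>
    #include <thread>

    class Operation {
      int _result = 0;
     public:
      void doit() {                 // runs on the service thread
        for (int i = 0; i < 3; i++) ++_result;   // e.g. one per deoptimized frame
      }
      int result() const { return _result; }
    };

    // Synchronous hand-off: the caller blocks until the operation has run,
    // so reading result() afterwards is race-free.
    static void execute(Operation* op) {
      std::thread t([op] { op->doit(); });
      t.join();
    }

    int main() {
      Operation op;
      execute(&op);
      assert(op.result() == 3);
      return 0;
    }
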
--- a/hotspot/src/share/vm/prims/whitebox.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/prims/whitebox.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -74,7 +74,7 @@
   static JavaThread* create_sweeper_thread(TRAPS);
   static int get_blob_type(const CodeBlob* code);
   static CodeHeap* get_code_heap(int blob_type);
-  static CodeBlob* allocate_code_blob(int blob_type, int size);
+  static CodeBlob* allocate_code_blob(int size, int blob_type);
   static int array_bytes_to_length(size_t bytes);
   static void register_methods(JNIEnv* env, jclass wbclass, JavaThread* thread,
     JNINativeMethod* method_array, int method_count);
--- a/hotspot/src/share/vm/runtime/arguments.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/runtime/arguments.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -125,8 +125,8 @@
 char* Arguments::_meta_index_dir = NULL;
 char* Arguments::_ext_dirs = NULL;
 
-// Check if head of 'option' matches 'name', and sets 'tail' remaining part of option string
-
+// Check if the head of 'option' matches 'name', and set 'tail' to the
+// remaining part of the option string.
 static bool match_option(const JavaVMOption *option, const char* name,
                          const char** tail) {
   int len = (int)strlen(name);
@@ -138,6 +138,32 @@
   }
 }
 
+// Check if 'option' matches 'name'. No "tail" is allowed.
+static bool match_option(const JavaVMOption *option, const char* name) {
+  const char* tail = NULL;
+  bool result = match_option(option, name, &tail);
+  if (tail != NULL && *tail == '\0') {
+    return result;
+  } else {
+    return false;
+  }
+}
+
+// Return true if any of the strings in null-terminated array 'names' matches.
+// If tail_allowed is true, then the tail must begin with a colon; otherwise,
+// the option must match exactly.
+static bool match_option(const JavaVMOption* option, const char** names, const char** tail,
+  bool tail_allowed) {
+  for (/* empty */; *names != NULL; ++names) {
+    if (match_option(option, *names, tail)) {
+      if (**tail == '\0' || (tail_allowed && **tail == ':')) {
+        return true;
+      }
+    }
+  }
+  return false;
+}
+
 static void logOption(const char* opt) {
   if (PrintVMOptions) {
     jio_fprintf(defaultStream::output_stream(), "VM option '%s'\n", opt);
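
The two overloads added above layer on the original tail-matching helper: one accepts only an exact match (empty tail), the other tries a null-terminated list of names. A standalone sketch of the exact-match semantics, using a minimal stand-in for JavaVMOption:

    #include <cassert>
    #include <cstring>

    struct Option { const char* optionString; };   // stand-in for JavaVMOption

    static bool match_option(const Option* option, const char* name, const char** tail) {
      size_t len = std::strlen(name);
      if (std::strncmp(option->optionString, name, len) == 0) {
        *tail = option->optionString + len;
        return true;
      }
      return false;
    }

    // Exact match only: the tail must be empty.
    static bool match_option(const Option* option, const char* name) {
      const char* tail = nullptr;
      bool result = match_option(option, name, &tail);
      return result && tail != nullptr && *tail == '\0';
    }

    int main() {
      Option xint    = {"-Xint"};
      Option xintfoo = {"-Xintfoo"};
      const char* tail;
      assert(match_option(&xint, "-Xint"));        // exact match accepted
      assert(!match_option(&xintfoo, "-Xint"));    // trailing text rejected
      assert(match_option(&xintfoo, "-Xint", &tail) && std::strcmp(tail, "foo") == 0);
      return 0;
    }
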
@@ -899,9 +925,9 @@
                     "Warning: support for %s was removed in %s\n",
                     fuzzy_matched->_name,
                     version);
-      }
     }
   }
+  }
 
   // allow for commandline "commenting out" options like -XX:#+Verbose
   return arg[0] == '#';
@@ -1356,41 +1382,24 @@
   if (FLAG_IS_DEFAULT(SurvivorRatio) && MaxTenuringThreshold == 0) {
     FLAG_SET_ERGO(uintx, SurvivorRatio, MAX2((uintx)1024, SurvivorRatio));
   }
-  // If OldPLABSize is set and CMSParPromoteBlocksToClaim is not,
-  // set CMSParPromoteBlocksToClaim equal to OldPLABSize.
-  // This is done in order to make ParNew+CMS configuration to work
-  // with YoungPLABSize and OldPLABSize options.
-  // See CR 6362902.
-  if (!FLAG_IS_DEFAULT(OldPLABSize)) {
-    if (FLAG_IS_DEFAULT(CMSParPromoteBlocksToClaim)) {
-      // OldPLABSize is not the default value but CMSParPromoteBlocksToClaim
-      // is.  In this situation let CMSParPromoteBlocksToClaim follow
-      // the value (either from the command line or ergonomics) of
-      // OldPLABSize.  Following OldPLABSize is an ergonomics decision.
-      FLAG_SET_ERGO(uintx, CMSParPromoteBlocksToClaim, OldPLABSize);
+
+  // In CMS, OldPLABSize is interpreted not as the size of the PLAB in words,
+  // but rather as the number of free blocks of a given size that are used
+  // when replenishing the local per-worker free list caches.
+  if (FLAG_IS_DEFAULT(OldPLABSize)) {
+    if (!FLAG_IS_DEFAULT(ResizeOldPLAB) && !ResizeOldPLAB) {
+      // OldPLAB sizing manually turned off: Use a larger default setting,
+      // unless it was manually specified. This is because a too-low value
+      // will slow down scavenges.
+      FLAG_SET_ERGO(uintx, OldPLABSize, CFLS_LAB::_default_static_old_plab_size); // default value before 6631166
     } else {
-      // OldPLABSize and CMSParPromoteBlocksToClaim are both set.
-      // CMSParPromoteBlocksToClaim is a collector-specific flag, so
-      // we'll let it to take precedence.
-      jio_fprintf(defaultStream::error_stream(),
-                  "Both OldPLABSize and CMSParPromoteBlocksToClaim"
-                  " options are specified for the CMS collector."
-                  " CMSParPromoteBlocksToClaim will take precedence.\n");
+      FLAG_SET_DEFAULT(OldPLABSize, CFLS_LAB::_default_dynamic_old_plab_size); // old CMSParPromoteBlocksToClaim default
     }
   }
-  if (!FLAG_IS_DEFAULT(ResizeOldPLAB) && !ResizeOldPLAB) {
-    // OldPLAB sizing manually turned off: Use a larger default setting,
-    // unless it was manually specified. This is because a too-low value
-    // will slow down scavenges.
-    if (FLAG_IS_DEFAULT(CMSParPromoteBlocksToClaim)) {
-      FLAG_SET_ERGO(uintx, CMSParPromoteBlocksToClaim, 50); // default value before 6631166
-    }
-  }
-  // Overwrite OldPLABSize which is the variable we will internally use everywhere.
-  FLAG_SET_ERGO(uintx, OldPLABSize, CMSParPromoteBlocksToClaim);
+
   // If either of the static initialization defaults have changed, note this
   // modification.
-  if (!FLAG_IS_DEFAULT(CMSParPromoteBlocksToClaim) || !FLAG_IS_DEFAULT(OldPLABWeight)) {
+  if (!FLAG_IS_DEFAULT(OldPLABSize) || !FLAG_IS_DEFAULT(OldPLABWeight)) {
     CFLS_LAB::modify_initialization(OldPLABSize, OldPLABWeight);
   }
   if (PrintGCDetails && Verbose) {
@@ -2526,21 +2535,6 @@
   "-dsa", "-esa", "-disablesystemassertions", "-enablesystemassertions", 0
 };
 
-// Return true if any of the strings in null-terminated array 'names' matches.
-// If tail_allowed is true, then the tail must begin with a colon; otherwise,
-// the option must match exactly.
-static bool match_option(const JavaVMOption* option, const char** names, const char** tail,
-  bool tail_allowed) {
-  for (/* empty */; *names != NULL; ++names) {
-    if (match_option(option, *names, tail)) {
-      if (**tail == '\0' || tail_allowed && **tail == ':') {
-        return true;
-      }
-    }
-  }
-  return false;
-}
-
 bool Arguments::parse_uintx(const char* value,
                             uintx* uintx_arg,
                             uintx min_size) {
@@ -2782,16 +2776,16 @@
       }
 #endif // !INCLUDE_JVMTI
     // -Xnoclassgc
-    } else if (match_option(option, "-Xnoclassgc", &tail)) {
+    } else if (match_option(option, "-Xnoclassgc")) {
       FLAG_SET_CMDLINE(bool, ClassUnloading, false);
     // -Xconcgc
-    } else if (match_option(option, "-Xconcgc", &tail)) {
+    } else if (match_option(option, "-Xconcgc")) {
       FLAG_SET_CMDLINE(bool, UseConcMarkSweepGC, true);
     // -Xnoconcgc
-    } else if (match_option(option, "-Xnoconcgc", &tail)) {
+    } else if (match_option(option, "-Xnoconcgc")) {
       FLAG_SET_CMDLINE(bool, UseConcMarkSweepGC, false);
     // -Xbatch
-    } else if (match_option(option, "-Xbatch", &tail)) {
+    } else if (match_option(option, "-Xbatch")) {
       FLAG_SET_CMDLINE(bool, BackgroundCompilation, false);
     // -Xmn for compatibility with other JVM vendors
     } else if (match_option(option, "-Xmn", &tail)) {
@@ -2936,28 +2930,28 @@
         }
         FLAG_SET_CMDLINE(uintx, IncreaseFirstTierCompileThresholdAt, (uintx)uint_IncreaseFirstTierCompileThresholdAt);
     // -green
-    } else if (match_option(option, "-green", &tail)) {
+    } else if (match_option(option, "-green")) {
       jio_fprintf(defaultStream::error_stream(),
                   "Green threads support not available\n");
           return JNI_EINVAL;
     // -native
-    } else if (match_option(option, "-native", &tail)) {
+    } else if (match_option(option, "-native")) {
           // HotSpot always uses native threads, ignore silently for compatibility
     // -Xsqnopause
-    } else if (match_option(option, "-Xsqnopause", &tail)) {
+    } else if (match_option(option, "-Xsqnopause")) {
           // EVM option, ignore silently for compatibility
     // -Xrs
-    } else if (match_option(option, "-Xrs", &tail)) {
+    } else if (match_option(option, "-Xrs")) {
           // Classic/EVM option, new functionality
       FLAG_SET_CMDLINE(bool, ReduceSignalUsage, true);
-    } else if (match_option(option, "-Xusealtsigs", &tail)) {
+    } else if (match_option(option, "-Xusealtsigs")) {
           // change default internal VM signals used - lower case for back compat
       FLAG_SET_CMDLINE(bool, UseAltSigs, true);
     // -Xoptimize
-    } else if (match_option(option, "-Xoptimize", &tail)) {
+    } else if (match_option(option, "-Xoptimize")) {
           // EVM option, ignore silently for compatibility
     // -Xprof
-    } else if (match_option(option, "-Xprof", &tail)) {
+    } else if (match_option(option, "-Xprof")) {
 #if INCLUDE_FPROF
       _has_profile = true;
 #else // INCLUDE_FPROF
@@ -2966,7 +2960,7 @@
       return JNI_ERR;
 #endif // INCLUDE_FPROF
     // -Xconcurrentio
-    } else if (match_option(option, "-Xconcurrentio", &tail)) {
+    } else if (match_option(option, "-Xconcurrentio")) {
       FLAG_SET_CMDLINE(bool, UseLWPSynchronization, true);
       FLAG_SET_CMDLINE(bool, BackgroundCompilation, false);
       FLAG_SET_CMDLINE(intx, DeferThrSuspendLoopCount, 1);
@@ -2974,29 +2968,32 @@
       FLAG_SET_CMDLINE(uintx, NewSizeThreadIncrease, 16 * K);  // 20Kb per thread added to new generation
 
       // -Xinternalversion
-    } else if (match_option(option, "-Xinternalversion", &tail)) {
+    } else if (match_option(option, "-Xinternalversion")) {
       jio_fprintf(defaultStream::output_stream(), "%s\n",
                   VM_Version::internal_vm_info_string());
       vm_exit(0);
 #ifndef PRODUCT
     // -Xprintflags
-    } else if (match_option(option, "-Xprintflags", &tail)) {
+    } else if (match_option(option, "-Xprintflags")) {
       CommandLineFlags::printFlags(tty, false);
       vm_exit(0);
 #endif
     // -D
     } else if (match_option(option, "-D", &tail)) {
-      if (match_option(option, "-Djava.endorsed.dirs=", &tail)) {
+      const char* value;
+      if (match_option(option, "-Djava.endorsed.dirs=", &value) &&
+            *value != '\0' && strcmp(value, "\"\"") != 0) {
         // abort if -Djava.endorsed.dirs is set
         jio_fprintf(defaultStream::output_stream(),
-          "-Djava.endorsed.dirs is not supported. Endorsed standards and standalone APIs\n"
-          "in modular form will be supported via the concept of upgradeable modules.\n");
+          "-Djava.endorsed.dirs=%s is not supported. Endorsed standards and standalone APIs\n"
+          "in modular form will be supported via the concept of upgradeable modules.\n", value);
         return JNI_EINVAL;
       }
-      if (match_option(option, "-Djava.ext.dirs=", &tail)) {
+      if (match_option(option, "-Djava.ext.dirs=", &value) &&
+            *value != '\0' && strcmp(value, "\"\"") != 0) {
         // abort if -Djava.ext.dirs is set
         jio_fprintf(defaultStream::output_stream(),
-          "-Djava.ext.dirs is not supported.  Use -classpath instead.\n");
+          "-Djava.ext.dirs=%s is not supported.  Use -classpath instead.\n", value);
         return JNI_EINVAL;
       }
 
@@ -3014,29 +3011,29 @@
 #endif
       }
     // -Xint
-    } else if (match_option(option, "-Xint", &tail)) {
+    } else if (match_option(option, "-Xint")) {
           set_mode_flags(_int);
     // -Xmixed
-    } else if (match_option(option, "-Xmixed", &tail)) {
+    } else if (match_option(option, "-Xmixed")) {
           set_mode_flags(_mixed);
     // -Xcomp
-    } else if (match_option(option, "-Xcomp", &tail)) {
+    } else if (match_option(option, "-Xcomp")) {
       // for testing the compiler; turn off all flags that inhibit compilation
           set_mode_flags(_comp);
     // -Xshare:dump
-    } else if (match_option(option, "-Xshare:dump", &tail)) {
+    } else if (match_option(option, "-Xshare:dump")) {
       FLAG_SET_CMDLINE(bool, DumpSharedSpaces, true);
       set_mode_flags(_int);     // Prevent compilation, which creates objects
     // -Xshare:on
-    } else if (match_option(option, "-Xshare:on", &tail)) {
+    } else if (match_option(option, "-Xshare:on")) {
       FLAG_SET_CMDLINE(bool, UseSharedSpaces, true);
       FLAG_SET_CMDLINE(bool, RequireSharedSpaces, true);
     // -Xshare:auto
-    } else if (match_option(option, "-Xshare:auto", &tail)) {
+    } else if (match_option(option, "-Xshare:auto")) {
       FLAG_SET_CMDLINE(bool, UseSharedSpaces, true);
       FLAG_SET_CMDLINE(bool, RequireSharedSpaces, false);
     // -Xshare:off
-    } else if (match_option(option, "-Xshare:off", &tail)) {
+    } else if (match_option(option, "-Xshare:off")) {
       FLAG_SET_CMDLINE(bool, UseSharedSpaces, false);
       FLAG_SET_CMDLINE(bool, RequireSharedSpaces, false);
     // -Xverify
@@ -3054,13 +3051,13 @@
         return JNI_EINVAL;
       }
     // -Xdebug
-    } else if (match_option(option, "-Xdebug", &tail)) {
+    } else if (match_option(option, "-Xdebug")) {
       // note this flag has been used, then ignore
       set_xdebug_mode(true);
     // -Xnoagent
-    } else if (match_option(option, "-Xnoagent", &tail)) {
+    } else if (match_option(option, "-Xnoagent")) {
       // For compatibility with classic. HotSpot refuses to load the old style agent.dll.
-    } else if (match_option(option, "-Xboundthreads", &tail)) {
+    } else if (match_option(option, "-Xboundthreads")) {
       // Bind user level threads to kernel threads (Solaris only)
       FLAG_SET_CMDLINE(bool, UseBoundThreads, true);
     } else if (match_option(option, "-Xloggc:", &tail)) {
@@ -3090,14 +3087,14 @@
                                      "check")) {
         return JNI_EINVAL;
       }
-    } else if (match_option(option, "vfprintf", &tail)) {
+    } else if (match_option(option, "vfprintf")) {
       _vfprintf_hook = CAST_TO_FN_PTR(vfprintf_hook_t, option->extraInfo);
-    } else if (match_option(option, "exit", &tail)) {
+    } else if (match_option(option, "exit")) {
       _exit_hook = CAST_TO_FN_PTR(exit_hook_t, option->extraInfo);
-    } else if (match_option(option, "abort", &tail)) {
+    } else if (match_option(option, "abort")) {
       _abort_hook = CAST_TO_FN_PTR(abort_hook_t, option->extraInfo);
     // -XX:+AggressiveHeap
-    } else if (match_option(option, "-XX:+AggressiveHeap", &tail)) {
+    } else if (match_option(option, "-XX:+AggressiveHeap")) {
 
       // This option inspects the machine and attempts to set various
       // parameters to be optimal for long-running, memory allocation
@@ -3188,11 +3185,11 @@
 
     // Need to keep consistency of MaxTenuringThreshold and AlwaysTenure/NeverTenure;
     // and the last option wins.
-    } else if (match_option(option, "-XX:+NeverTenure", &tail)) {
+    } else if (match_option(option, "-XX:+NeverTenure")) {
       FLAG_SET_CMDLINE(bool, NeverTenure, true);
       FLAG_SET_CMDLINE(bool, AlwaysTenure, false);
       FLAG_SET_CMDLINE(uintx, MaxTenuringThreshold, markOopDesc::max_age + 1);
-    } else if (match_option(option, "-XX:+AlwaysTenure", &tail)) {
+    } else if (match_option(option, "-XX:+AlwaysTenure")) {
       FLAG_SET_CMDLINE(bool, NeverTenure, false);
       FLAG_SET_CMDLINE(bool, AlwaysTenure, true);
       FLAG_SET_CMDLINE(uintx, MaxTenuringThreshold, 0);
@@ -3211,59 +3208,13 @@
         FLAG_SET_CMDLINE(bool, NeverTenure, false);
         FLAG_SET_CMDLINE(bool, AlwaysTenure, false);
       }
-    } else if (match_option(option, "-XX:+CMSPermGenSweepingEnabled", &tail) ||
-               match_option(option, "-XX:-CMSPermGenSweepingEnabled", &tail)) {
-      jio_fprintf(defaultStream::error_stream(),
-        "Please use CMSClassUnloadingEnabled in place of "
-        "CMSPermGenSweepingEnabled in the future\n");
-    } else if (match_option(option, "-XX:+UseGCTimeLimit", &tail)) {
-      FLAG_SET_CMDLINE(bool, UseGCOverheadLimit, true);
-      jio_fprintf(defaultStream::error_stream(),
-        "Please use -XX:+UseGCOverheadLimit in place of "
-        "-XX:+UseGCTimeLimit in the future\n");
-    } else if (match_option(option, "-XX:-UseGCTimeLimit", &tail)) {
-      FLAG_SET_CMDLINE(bool, UseGCOverheadLimit, false);
-      jio_fprintf(defaultStream::error_stream(),
-        "Please use -XX:-UseGCOverheadLimit in place of "
-        "-XX:-UseGCTimeLimit in the future\n");
-    // The TLE options are for compatibility with 1.3 and will be
-    // removed without notice in a future release.  These options
-    // are not to be documented.
-    } else if (match_option(option, "-XX:MaxTLERatio=", &tail)) {
-      // No longer used.
-    } else if (match_option(option, "-XX:+ResizeTLE", &tail)) {
-      FLAG_SET_CMDLINE(bool, ResizeTLAB, true);
-    } else if (match_option(option, "-XX:-ResizeTLE", &tail)) {
-      FLAG_SET_CMDLINE(bool, ResizeTLAB, false);
-    } else if (match_option(option, "-XX:+PrintTLE", &tail)) {
-      FLAG_SET_CMDLINE(bool, PrintTLAB, true);
-    } else if (match_option(option, "-XX:-PrintTLE", &tail)) {
-      FLAG_SET_CMDLINE(bool, PrintTLAB, false);
-    } else if (match_option(option, "-XX:TLEFragmentationRatio=", &tail)) {
-      // No longer used.
-    } else if (match_option(option, "-XX:TLESize=", &tail)) {
-      julong long_tlab_size = 0;
-      ArgsRange errcode = parse_memory_size(tail, &long_tlab_size, 1);
-      if (errcode != arg_in_range) {
-        jio_fprintf(defaultStream::error_stream(),
-                    "Invalid TLAB size: %s\n", option->optionString);
-        describe_range_error(errcode);
-        return JNI_EINVAL;
-      }
-      FLAG_SET_CMDLINE(uintx, TLABSize, long_tlab_size);
-    } else if (match_option(option, "-XX:TLEThreadRatio=", &tail)) {
-      // No longer used.
-    } else if (match_option(option, "-XX:+UseTLE", &tail)) {
-      FLAG_SET_CMDLINE(bool, UseTLAB, true);
-    } else if (match_option(option, "-XX:-UseTLE", &tail)) {
-      FLAG_SET_CMDLINE(bool, UseTLAB, false);
-    } else if (match_option(option, "-XX:+DisplayVMOutputToStderr", &tail)) {
+    } else if (match_option(option, "-XX:+DisplayVMOutputToStderr")) {
       FLAG_SET_CMDLINE(bool, DisplayVMOutputToStdout, false);
       FLAG_SET_CMDLINE(bool, DisplayVMOutputToStderr, true);
-    } else if (match_option(option, "-XX:+DisplayVMOutputToStdout", &tail)) {
+    } else if (match_option(option, "-XX:+DisplayVMOutputToStdout")) {
       FLAG_SET_CMDLINE(bool, DisplayVMOutputToStderr, false);
       FLAG_SET_CMDLINE(bool, DisplayVMOutputToStdout, true);
-    } else if (match_option(option, "-XX:+ExtendedDTraceProbes", &tail)) {
+    } else if (match_option(option, "-XX:+ExtendedDTraceProbes")) {
 #if defined(DTRACE_ENABLED)
       FLAG_SET_CMDLINE(bool, ExtendedDTraceProbes, true);
       FLAG_SET_CMDLINE(bool, DTraceMethodProbes, true);
@@ -3275,49 +3226,11 @@
       return JNI_EINVAL;
 #endif // defined(DTRACE_ENABLED)
 #ifdef ASSERT
-    } else if (match_option(option, "-XX:+FullGCALot", &tail)) {
+    } else if (match_option(option, "-XX:+FullGCALot")) {
       FLAG_SET_CMDLINE(bool, FullGCALot, true);
       // disable scavenge before parallel mark-compact
       FLAG_SET_CMDLINE(bool, ScavengeBeforeFullGC, false);
 #endif
-    } else if (match_option(option, "-XX:CMSParPromoteBlocksToClaim=", &tail)) {
-      julong cms_blocks_to_claim = (julong)atol(tail);
-      FLAG_SET_CMDLINE(uintx, CMSParPromoteBlocksToClaim, cms_blocks_to_claim);
-      jio_fprintf(defaultStream::error_stream(),
-        "Please use -XX:OldPLABSize in place of "
-        "-XX:CMSParPromoteBlocksToClaim in the future\n");
-    } else if (match_option(option, "-XX:ParCMSPromoteBlocksToClaim=", &tail)) {
-      julong cms_blocks_to_claim = (julong)atol(tail);
-      FLAG_SET_CMDLINE(uintx, CMSParPromoteBlocksToClaim, cms_blocks_to_claim);
-      jio_fprintf(defaultStream::error_stream(),
-        "Please use -XX:OldPLABSize in place of "
-        "-XX:ParCMSPromoteBlocksToClaim in the future\n");
-    } else if (match_option(option, "-XX:ParallelGCOldGenAllocBufferSize=", &tail)) {
-      julong old_plab_size = 0;
-      ArgsRange errcode = parse_memory_size(tail, &old_plab_size, 1);
-      if (errcode != arg_in_range) {
-        jio_fprintf(defaultStream::error_stream(),
-                    "Invalid old PLAB size: %s\n", option->optionString);
-        describe_range_error(errcode);
-        return JNI_EINVAL;
-      }
-      FLAG_SET_CMDLINE(uintx, OldPLABSize, old_plab_size);
-      jio_fprintf(defaultStream::error_stream(),
-                  "Please use -XX:OldPLABSize in place of "
-                  "-XX:ParallelGCOldGenAllocBufferSize in the future\n");
-    } else if (match_option(option, "-XX:ParallelGCToSpaceAllocBufferSize=", &tail)) {
-      julong young_plab_size = 0;
-      ArgsRange errcode = parse_memory_size(tail, &young_plab_size, 1);
-      if (errcode != arg_in_range) {
-        jio_fprintf(defaultStream::error_stream(),
-                    "Invalid young PLAB size: %s\n", option->optionString);
-        describe_range_error(errcode);
-        return JNI_EINVAL;
-      }
-      FLAG_SET_CMDLINE(uintx, YoungPLABSize, young_plab_size);
-      jio_fprintf(defaultStream::error_stream(),
-                  "Please use -XX:YoungPLABSize in place of "
-                  "-XX:ParallelGCToSpaceAllocBufferSize in the future\n");
     } else if (match_option(option, "-XX:CMSMarkStackSize=", &tail) ||
                match_option(option, "-XX:G1MarkStackSize=", &tail)) {
       julong stack_size = 0;
@@ -3328,6 +3241,9 @@
         describe_range_error(errcode);
         return JNI_EINVAL;
       }
+      jio_fprintf(defaultStream::error_stream(),
+        "Please use -XX:MarkStackSize in place of "
+        "-XX:CMSMarkStackSize or -XX:G1MarkStackSize in the future\n");
       FLAG_SET_CMDLINE(uintx, MarkStackSize, stack_size);
     } else if (match_option(option, "-XX:CMSMarkStackSizeMax=", &tail)) {
       julong max_stack_size = 0;
@@ -3339,6 +3255,9 @@
         describe_range_error(errcode);
         return JNI_EINVAL;
       }
+      jio_fprintf(defaultStream::error_stream(),
+         "Please use -XX:MarkStackSizeMax in place of "
+         "-XX:CMSMarkStackSizeMax in the future\n");
       FLAG_SET_CMDLINE(uintx, MarkStackSizeMax, max_stack_size);
     } else if (match_option(option, "-XX:ParallelMarkingThreads=", &tail) ||
                match_option(option, "-XX:ParallelCMSThreads=", &tail)) {
@@ -3348,6 +3267,9 @@
                     "Invalid concurrent threads: %s\n", option->optionString);
         return JNI_EINVAL;
       }
+      jio_fprintf(defaultStream::error_stream(),
+        "Please use -XX:ConcGCThreads in place of "
+        "-XX:ParallelMarkingThreads or -XX:ParallelCMSThreads in the future\n");
       FLAG_SET_CMDLINE(uintx, ConcGCThreads, conc_threads);
     } else if (match_option(option, "-XX:MaxDirectMemorySize=", &tail)) {
       julong max_direct_memory_size = 0;
@@ -3361,7 +3283,7 @@
       }
       FLAG_SET_CMDLINE(uintx, MaxDirectMemorySize, max_direct_memory_size);
 #if !INCLUDE_MANAGEMENT
-    } else if (match_option(option, "-XX:+ManagementServer", &tail)) {
+    } else if (match_option(option, "-XX:+ManagementServer")) {
         jio_fprintf(defaultStream::error_stream(),
           "ManagementServer is not supported in this VM.\n");
         return JNI_ERR;
@@ -3796,23 +3718,23 @@
       settings_file_specified = true;
       continue;
     }
-    if (match_option(option, "-XX:+PrintVMOptions", &tail)) {
+    if (match_option(option, "-XX:+PrintVMOptions")) {
       PrintVMOptions = true;
       continue;
     }
-    if (match_option(option, "-XX:-PrintVMOptions", &tail)) {
+    if (match_option(option, "-XX:-PrintVMOptions")) {
       PrintVMOptions = false;
       continue;
     }
-    if (match_option(option, "-XX:+IgnoreUnrecognizedVMOptions", &tail)) {
+    if (match_option(option, "-XX:+IgnoreUnrecognizedVMOptions")) {
       IgnoreUnrecognizedVMOptions = true;
       continue;
     }
-    if (match_option(option, "-XX:-IgnoreUnrecognizedVMOptions", &tail)) {
+    if (match_option(option, "-XX:-IgnoreUnrecognizedVMOptions")) {
       IgnoreUnrecognizedVMOptions = false;
       continue;
     }
-    if (match_option(option, "-XX:+PrintFlagsInitial", &tail)) {
+    if (match_option(option, "-XX:+PrintFlagsInitial")) {
       CommandLineFlags::printFlags(tty, false);
       vm_exit(0);
     }
@@ -3838,7 +3760,7 @@
 
 
 #ifndef PRODUCT
-    if (match_option(option, "-XX:+PrintFlagsWithComments", &tail)) {
+    if (match_option(option, "-XX:+PrintFlagsWithComments")) {
       CommandLineFlags::printFlags(tty, true);
       vm_exit(0);
     }
--- a/hotspot/src/share/vm/runtime/deoptimization.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/runtime/deoptimization.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -176,6 +176,8 @@
   assert(vf->is_compiled_frame(), "Wrong frame type");
   chunk->push(compiledVFrame::cast(vf));
 
+  bool realloc_failures = false;
+
 #ifdef COMPILER2
   // Reallocate the non-escaping objects and restore their fields. Then
   // relock objects if synchronization on them was eliminated.
@@ -206,19 +208,16 @@
           tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, (void *)result, thread);
         }
       }
-      bool reallocated = false;
       if (objects != NULL) {
         JRT_BLOCK
-          reallocated = realloc_objects(thread, &deoptee, objects, THREAD);
+          realloc_failures = realloc_objects(thread, &deoptee, objects, THREAD);
         JRT_END
-      }
-      if (reallocated) {
-        reassign_fields(&deoptee, &map, objects);
+        reassign_fields(&deoptee, &map, objects, realloc_failures);
 #ifndef PRODUCT
         if (TraceDeoptimization) {
           ttyLocker ttyl;
           tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, thread);
-          print_objects(objects);
+          print_objects(objects, realloc_failures);
         }
 #endif
       }
@@ -236,7 +235,7 @@
         assert (cvf->scope() != NULL,"expect only compiled java frames");
         GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
         if (monitors->is_nonempty()) {
-          relock_objects(monitors, thread);
+          relock_objects(monitors, thread, realloc_failures);
 #ifndef PRODUCT
           if (TraceDeoptimization) {
             ttyLocker ttyl;
@@ -247,7 +246,12 @@
                   first = false;
                   tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, thread);
                 }
-                tty->print_cr("     object <" INTPTR_FORMAT "> locked", (void *)mi->owner());
+                if (mi->owner_is_scalar_replaced()) {
+                  Klass* k = java_lang_Class::as_Klass(mi->owner_klass());
+                  tty->print_cr("     failed reallocation for klass %s", k->external_name());
+                } else {
+                  tty->print_cr("     object <" INTPTR_FORMAT "> locked", (void *)mi->owner());
+                }
               }
             }
           }
@@ -262,9 +266,14 @@
   // out the java state residing in the vframeArray will be missed.
   No_Safepoint_Verifier no_safepoint;
 
-  vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk);
+  vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk, realloc_failures);
+#ifdef COMPILER2
+  if (realloc_failures) {
+    pop_frames_failed_reallocs(thread, array);
+  }
+#endif
 
-  assert(thread->vframe_array_head() == NULL, "Pending deopt!");;
+  assert(thread->vframe_array_head() == NULL, "Pending deopt!");
   thread->set_vframe_array_head(array);
 
   // Now that the vframeArray has been created if we have any deferred local writes
@@ -718,6 +727,8 @@
   int exception_line = thread->exception_line();
   thread->clear_pending_exception();
 
+  bool failures = false;
+
   for (int i = 0; i < objects->length(); i++) {
     assert(objects->at(i)->is_object(), "invalid debug information");
     ObjectValue* sv = (ObjectValue*) objects->at(i);
@@ -727,27 +738,34 @@
 
     if (k->oop_is_instance()) {
       InstanceKlass* ik = InstanceKlass::cast(k());
-      obj = ik->allocate_instance(CHECK_(false));
+      obj = ik->allocate_instance(THREAD);
     } else if (k->oop_is_typeArray()) {
       TypeArrayKlass* ak = TypeArrayKlass::cast(k());
       assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
       int len = sv->field_size() / type2size[ak->element_type()];
-      obj = ak->allocate(len, CHECK_(false));
+      obj = ak->allocate(len, THREAD);
     } else if (k->oop_is_objArray()) {
       ObjArrayKlass* ak = ObjArrayKlass::cast(k());
-      obj = ak->allocate(sv->field_size(), CHECK_(false));
+      obj = ak->allocate(sv->field_size(), THREAD);
     }
 
-    assert(obj != NULL, "allocation failed");
+    if (obj == NULL) {
+      failures = true;
+    }
+
     assert(sv->value().is_null(), "redundant reallocation");
+    assert(obj != NULL || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception");
+    CLEAR_PENDING_EXCEPTION;
     sv->set_value(obj);
   }
 
-  if (pending_exception.not_null()) {
+  if (failures) {
+    THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
+  } else if (pending_exception.not_null()) {
     thread->set_pending_exception(pending_exception(), exception_file, exception_line);
   }
 
-  return true;
+  return failures;
 }
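
With this change realloc_objects no longer aborts on the first failed allocation: it records the failure, clears any pending exception, keeps reallocating the remaining objects, and only afterwards raises a single OutOfMemoryError. A standalone sketch of that attempt-all-then-report pattern (toy allocator, not the TRAPS machinery):

    #include <cassert>
    #include <vector>

    // Toy allocator: returns nullptr instead of raising, like the THREAD
    // (non-CHECK_) allocation paths used in the patch.
    static int* try_allocate(bool succeed) {
      return succeed ? new int(0) : nullptr;
    }

    // Allocate every slot, remembering whether anything failed; the caller
    // decides afterwards whether to raise one OutOfMemoryError for the lot.
    static bool realloc_all(const std::vector<bool>& plan, std::vector<int*>& out) {
      bool failures = false;
      for (bool p : plan) {
        int* obj = try_allocate(p);
        if (obj == nullptr) {
          failures = true;     // keep going; later objects may still succeed
        }
        out.push_back(obj);    // a null entry marks a failed reallocation
      }
      return failures;
    }

    int main() {
      std::vector<int*> objs;
      bool failed = realloc_all({true, false, true}, objs);
      assert(failed && objs.size() == 3 && objs[1] == nullptr && objs[2] != nullptr);
      for (int* p : objs) delete p;
      return 0;
    }
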
 
 // This assumes that the fields are stored in ObjectValue in the same order
@@ -885,12 +903,15 @@
 
 
 // restore fields of all eliminated objects and arrays
-void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects) {
+void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
   for (int i = 0; i < objects->length(); i++) {
     ObjectValue* sv = (ObjectValue*) objects->at(i);
     KlassHandle k(java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()()));
     Handle obj = sv->value();
-    assert(obj.not_null(), "reallocation was missed");
+    assert(obj.not_null() || realloc_failures, "reallocation was missed");
+    if (obj.is_null()) {
+      continue;
+    }
 
     if (k->oop_is_instance()) {
       InstanceKlass* ik = InstanceKlass::cast(k());
@@ -907,34 +928,36 @@
 
 
 // relock objects for which synchronization was eliminated
-void Deoptimization::relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread) {
+void Deoptimization::relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread, bool realloc_failures) {
   for (int i = 0; i < monitors->length(); i++) {
     MonitorInfo* mon_info = monitors->at(i);
     if (mon_info->eliminated()) {
-      assert(mon_info->owner() != NULL, "reallocation was missed");
-      Handle obj = Handle(mon_info->owner());
-      markOop mark = obj->mark();
-      if (UseBiasedLocking && mark->has_bias_pattern()) {
-        // New allocated objects may have the mark set to anonymously biased.
-        // Also the deoptimized method may called methods with synchronization
-        // where the thread-local object is bias locked to the current thread.
-        assert(mark->is_biased_anonymously() ||
-               mark->biased_locker() == thread, "should be locked to current thread");
-        // Reset mark word to unbiased prototype.
-        markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
-        obj->set_mark(unbiased_prototype);
+      assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
+      if (!mon_info->owner_is_scalar_replaced()) {
+        Handle obj = Handle(mon_info->owner());
+        markOop mark = obj->mark();
+        if (UseBiasedLocking && mark->has_bias_pattern()) {
+          // Newly allocated objects may have the mark set to anonymously biased.
+          // Also, the deoptimized method may have called methods with synchronization
+          // where the thread-local object is bias-locked to the current thread.
+          assert(mark->is_biased_anonymously() ||
+                 mark->biased_locker() == thread, "should be locked to current thread");
+          // Reset mark word to unbiased prototype.
+          markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
+          obj->set_mark(unbiased_prototype);
+        }
+        BasicLock* lock = mon_info->lock();
+        ObjectSynchronizer::slow_enter(obj, lock, thread);
+        assert(mon_info->owner()->is_locked(), "object must be locked now");
       }
-      BasicLock* lock = mon_info->lock();
-      ObjectSynchronizer::slow_enter(obj, lock, thread);
     }
-    assert(mon_info->owner()->is_locked(), "object must be locked now");
   }
 }
 
 
 #ifndef PRODUCT
 // print information about reallocated objects
-void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects) {
+void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
   fieldDescriptor fd;
 
   for (int i = 0; i < objects->length(); i++) {
@@ -944,10 +967,15 @@
 
     tty->print("     object <" INTPTR_FORMAT "> of type ", (void *)sv->value()());
     k->print_value();
-    tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize);
+    assert(obj.not_null() || realloc_failures, "reallocation was missed");
+    if (obj.is_null()) {
+      tty->print(" allocation failed");
+    } else {
+      tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize);
+    }
     tty->cr();
 
-    if (Verbose) {
+    if (Verbose && !obj.is_null()) {
       k->oop_print_on(obj(), tty);
     }
   }
@@ -955,7 +983,7 @@
 #endif
 #endif // COMPILER2
 
-vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk) {
+vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures) {
   Events::log(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, fr.pc(), fr.sp());
 
 #ifndef PRODUCT
@@ -998,7 +1026,7 @@
   // Since the Java thread being deoptimized will eventually adjust it's own stack,
   // the vframeArray containing the unpacking information is allocated in the C heap.
   // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames().
-  vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr);
+  vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr, realloc_failures);
 
   // Compare the vframeArray to the collected vframes
   assert(array->structural_compare(thread, chunk), "just checking");
@@ -1013,6 +1041,33 @@
   return array;
 }
 
+#ifdef COMPILER2
+void Deoptimization::pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array) {
+  // Reallocation of some scalar replaced objects failed. Record
+  // that we need to pop all the interpreter frames for the
+  // deoptimized compiled frame.
+  assert(thread->frames_to_pop_failed_realloc() == 0, "missed frames to pop?");
+  thread->set_frames_to_pop_failed_realloc(array->frames());
+  // Unlock all monitors here, otherwise the interpreter will see a
+  // mix of locked and unlocked monitors (because of failed
+  // reallocations of synchronized objects) and be confused.
+  for (int i = 0; i < array->frames(); i++) {
+    MonitorChunk* monitors = array->element(i)->monitors();
+    if (monitors != NULL) {
+      for (int j = 0; j < monitors->number_of_monitors(); j++) {
+        BasicObjectLock* src = monitors->at(j);
+        if (src->obj() != NULL) {
+          ObjectSynchronizer::fast_exit(src->obj(), src->lock(), thread);
+        }
+      }
+      array->element(i)->free_monitors(thread);
+#ifdef ASSERT
+      array->element(i)->set_removed_monitors();
+#endif
+    }
+  }
+}
+#endif
 
 static void collect_monitors(compiledVFrame* cvf, GrowableArray<Handle>* objects_to_revoke) {
   GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
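
pop_frames_failed_reallocs records how many interpreter frames must be popped and then releases every monitor those frames still hold, so the interpreter never sees a mix of locked and unlocked monitors after a failed reallocation. A toy standalone sketch of that two-step cleanup:

    #include <cassert>
    #include <vector>

    struct Monitor { bool locked = false; };
    struct Frame   { std::vector<Monitor*> monitors; };

    struct ThreadState {
      int frames_to_pop = 0;
    };

    // Record the pop count, then unlock everything the doomed frames hold.
    static void pop_frames_failed_reallocs(ThreadState& t, std::vector<Frame>& frames) {
      assert(t.frames_to_pop == 0 && "missed frames to pop?");
      t.frames_to_pop = (int)frames.size();
      for (Frame& f : frames) {
        for (Monitor* m : f.monitors) {
          if (m != nullptr && m->locked) {
            m->locked = false;       // fast_exit() in the real code
          }
        }
        f.monitors.clear();          // free_monitors() in the real code
      }
    }

    int main() {
      Monitor m1{true}, m2{true};
      std::vector<Frame> frames(2);
      frames[0].monitors = {&m1};
      frames[1].monitors = {&m2};
      ThreadState t;
      pop_frames_failed_reallocs(t, frames);
      assert(t.frames_to_pop == 2 && !m1.locked && !m2.locked);
      return 0;
    }
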
--- a/hotspot/src/share/vm/runtime/deoptimization.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/runtime/deoptimization.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -125,13 +125,14 @@
   static bool realloc_objects(JavaThread* thread, frame* fr, GrowableArray<ScopeValue*>* objects, TRAPS);
   static void reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type);
   static void reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj);
-  static void reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects);
-  static void relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread);
-  NOT_PRODUCT(static void print_objects(GrowableArray<ScopeValue*>* objects);)
+  static void reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures);
+  static void relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread, bool realloc_failures);
+  static void pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array);
+  NOT_PRODUCT(static void print_objects(GrowableArray<ScopeValue*>* objects, bool realloc_failures);)
 #endif // COMPILER2
 
   public:
-  static vframeArray* create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk);
+  static vframeArray* create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures);
 
   // Interface used for unpacking deoptimized frames
 
--- a/hotspot/src/share/vm/runtime/globals.hpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/runtime/globals.hpp	Mon Jan 05 13:30:23 2015 -0800
@@ -1341,7 +1341,7 @@
   develop(bool, TraceClassInitialization, false,                            \
           "Trace class initialization")                                     \
                                                                             \
-  develop(bool, TraceExceptions, false,                                     \
+  product(bool, TraceExceptions, false,                                     \
           "Trace exceptions")                                               \
                                                                             \
   develop(bool, TraceICs, false,                                            \
@@ -1472,7 +1472,8 @@
           "Size of young gen promotion LAB's (in HeapWords)")               \
                                                                             \
   product(uintx, OldPLABSize, 1024,                                         \
-          "Size of old gen promotion LAB's (in HeapWords)")                 \
+          "Size of old gen promotion LAB's (in HeapWords), or Number        \
+          of blocks to attempt to claim when refilling CMS LAB's")          \
                                                                             \
   product(uintx, GCTaskTimeStampEntries, 200,                               \
           "Number of time stamp entries per gc worker thread")              \
@@ -1583,14 +1584,10 @@
           "The number of cards in each chunk of the parallel chunks used "  \
           "during card table scanning")                                     \
                                                                             \
-  product(uintx, CMSParPromoteBlocksToClaim, 16,                            \
-          "Number of blocks to attempt to claim when refilling CMS LAB's "  \
-          "for parallel GC")                                                \
-                                                                            \
   product(uintx, OldPLABWeight, 50,                                         \
           "Percentage (0-100) used to weight the current sample when "      \
           "computing exponentially decaying average for resizing "          \
-          "CMSParPromoteBlocksToClaim")                                     \
+          "OldPLABSize")                                                    \
                                                                             \
   product(bool, ResizeOldPLAB, true,                                        \
           "Dynamically resize (old gen) promotion LAB's")                   \
--- a/hotspot/src/share/vm/runtime/mutex.cpp	Tue Dec 30 22:13:53 2014 +0300
+++ b/hotspot/src/share/vm/runtime/mutex.cpp	Mon Jan 05 13:30:23 2015 -0800
@@ -895,6 +895,11 @@
 // of Mutex-Monitor and instead directly address the underlying design flaw.
 
 void Monitor::lock(Thread * Self) {
+  // Ensure that the Monitor requires/allows safepoint checks.
+  assert(_safepoint_check_required != Monitor::_safepoint_check_never,
+         err_msg("This lock should never have a safepoint check: %s",
+                 name()));
+
 #ifdef CHECK_UNHANDLED_OOPS
   // Clear unhandled oops so we get a crash right away.  Only clear for non-vm
   // or GC threads.
@@ -953,6 +958,10 @@
 // thread state set to be in VM, the safepoint synchronization code will deadlock!
 
 void Monitor::lock_without_safepoint_check(Thread * Self) {
+  // Ensure that the Monitor does not require or allow safepoint checks.
+  assert(_safepoint_check_required != Monitor::_safepoint_check_always,
+         err_msg("This lock should always have a safepoint check: %s",
+                 name()));
   assert(_owner != Self, "invariant");
   ILock(Self);
   assert(_owner == NULL, "invariant");
@@ -1082,6 +1091,12 @@
 
 bool Monitor::wait(bool no_safepoint_check, long timeout,
                    bool as_suspend_equivalent) {
+  // Make sure safepoint checking is used properly.
+  assert(!(_safepoint_check_required == Monitor::_safepoint_check_never && no_safepoint_check == false),
+         err_msg("This lock should never have a safepoint check: %s", name()));
+  assert(!(_safepoint_check_required == Monitor::_safepoint_check_always && no_safepoint_check == true),
+         err_msg("This lock should always have a safepoint check: %s", name()));
+
   Thread * const Self = Thread::current();
   assert(_owner == Self, "invariant");
   assert(ILocked(), "invariant");
@@ -1168,11 +1183,13 @@
 
 Monitor::Monitor() { ClearMonitor(this); }
 
-Monitor::Monitor(int Rank, const char * name, bool allow_vm_block) {
+Monitor::Monitor(int Rank, const char * name, bool allow_vm_block,
+                 SafepointCheckRequired safepoint_check_required) {
   ClearMonitor(this, name);
 #ifdef ASSERT
   _allow_vm_block  = allow_vm_block;
   _rank            = Rank;
+  NOT_PRODUCT(_safepoint_check_required = safepoint_check_required;)
 #endif
 }
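
The Monitor constructor now records a per-lock SafepointCheckRequired policy, and lock(), lock_without_safepoint_check() and wait() assert the caller's choice against it. A standalone sketch of the policy idea with a toy lock (plain assert instead of err_msg):

    #include <cassert>
    #include <mutex>

    enum SafepointCheckRequired {
      safepoint_check_never,
      safepoint_check_sometimes,
      safepoint_check_always
    };

    class ToyMonitor {
      std::mutex _mu;
      SafepointCheckRequired _policy;
     public:
      explicit ToyMonitor(SafepointCheckRequired policy) : _policy(policy) {}

      void lock() {                         // path that checks for safepoints
        assert(_policy != safepoint_check_never &&
               "this lock should never have a safepoint check");
        _mu.lock();
      }
      void lock_without_safepoint_check() { // path that must not block on a safepoint
        assert(_policy != safepoint_check_always &&
               "this lock should always have a safepoint check");
        _mu.lock();
      }
      void unlock() { _mu.unlock(); }
    };

    int main() {
      ToyMonitor leaf(safepoint_check_never);
      leaf.lock_without_safepoint_check();  // allowed by the policy
      leaf.unlock();
      // leaf.lock();                       // would trip the assertion
      return 0;
    }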