changeset 5778:6795fcebbf42

Merge
author chegar
date Mon, 21 Oct 2013 14:08:09 +0100
parents 6fa574bfd32a e39b138b2518
children c31f0cbe6d9e
files src/share/vm/classfile/classFileParser.cpp test/testlibrary/AssertsTest.java test/testlibrary/OutputAnalyzerReportingTest.java test/testlibrary/OutputAnalyzerTest.java
diffstat 245 files changed, 7761 insertions(+), 4395 deletions(-)
--- a/.hgtags	Thu Oct 03 19:13:12 2013 +0100
+++ b/.hgtags	Mon Oct 21 14:08:09 2013 +0100
@@ -382,3 +382,7 @@
 c81dd5393a5e333df7cb1f6621f5897ada6522b5 jdk8-b109
 58043478c26d4e8bf48700acea5f97aba8b417d4 hs25-b52
 6209b0ed51c086d4127bac0e086c8f326d1764d7 jdk8-b110
+562a3d356de67670b4172b82aca2d30743449e04 hs25-b53
+f6962730bbde82f279a0ae3a1c14bc5e58096c6e jdk8-b111
+4a845c7a463844cead9e1e1641d6bcfb8a77f1c7 hs25-b54
+0ed9a90f45e1b392c671005f9ee22ce1acf02984 jdk8-b112
--- a/agent/src/os/bsd/ps_core.c	Thu Oct 03 19:13:12 2013 +0100
+++ b/agent/src/os/bsd/ps_core.c	Mon Oct 21 14:08:09 2013 +0100
@@ -44,6 +44,7 @@
 // close all file descriptors
 static void close_files(struct ps_prochandle* ph) {
   lib_info* lib = NULL;
+
   // close core file descriptor
   if (ph->core->core_fd >= 0)
     close(ph->core->core_fd);
@@ -149,8 +150,7 @@
 
 // Return the map_info for the given virtual address.  We keep a sorted
 // array of pointers in ph->map_array, so we can binary search.
-static map_info* core_lookup(struct ps_prochandle *ph, uintptr_t addr)
-{
+static map_info* core_lookup(struct ps_prochandle *ph, uintptr_t addr) {
   int mid, lo = 0, hi = ph->core->num_maps - 1;
   map_info *mp;
 
@@ -230,9 +230,9 @@
     size_t _used;            // for setting space top on read
 
     // 4991491 NOTICE These are C++ bool's in filemap.hpp and must match up with
-    // the C type matching the C++ bool type on any given platform. For
-    // Hotspot on BSD we assume the corresponding C type is char but
-    // licensees on BSD versions may need to adjust the type of these fields.
+    // the C type matching the C++ bool type on any given platform.
+    // We assume the corresponding C type is char but licensees
+    // may need to adjust the type of these fields.
     char   _read_only;       // read only space?
     char   _allow_exec;      // executable code in space?
 
@@ -286,10 +286,12 @@
 #define USE_SHARED_SPACES_SYM "_UseSharedSpaces"
 // mangled name of Arguments::SharedArchivePath
 #define SHARED_ARCHIVE_PATH_SYM "_ZN9Arguments17SharedArchivePathE"
+#define LIBJVM_NAME "/libjvm.dylib"
 #else
 #define USE_SHARED_SPACES_SYM "UseSharedSpaces"
 // mangled name of Arguments::SharedArchivePath
 #define SHARED_ARCHIVE_PATH_SYM "__ZN9Arguments17SharedArchivePathE"
+#define LIBJVM_NAME "/libjvm.so"
 #endif // __APPLE__
 
 static bool init_classsharing_workaround(struct ps_prochandle* ph) {
@@ -300,12 +302,7 @@
     // we are iterating over shared objects from the core dump. look for
     // libjvm.so.
     const char *jvm_name = 0;
-#ifdef __APPLE__
-    if ((jvm_name = strstr(lib->name, "/libjvm.dylib")) != 0)
-#else
-    if ((jvm_name = strstr(lib->name, "/libjvm.so")) != 0)
-#endif // __APPLE__
-    {
+    if ((jvm_name = strstr(lib->name, LIBJVM_NAME)) != 0) {
       char classes_jsa[PATH_MAX];
       struct FileMapHeader header;
       int fd = -1;
@@ -399,8 +396,8 @@
         }
       }
       return true;
-    }
-    lib = lib->next;
+   }
+   lib = lib->next;
   }
   return true;
 }
@@ -432,8 +429,8 @@
   // allocate map_array
   map_info** array;
   if ( (array = (map_info**) malloc(sizeof(map_info*) * num_maps)) == NULL) {
-     print_debug("can't allocate memory for map array\n");
-     return false;
+    print_debug("can't allocate memory for map array\n");
+    return false;
   }
 
   // add maps to array
@@ -450,7 +447,7 @@
   ph->core->map_array = array;
   // sort the map_info array by base virtual address.
   qsort(ph->core->map_array, ph->core->num_maps, sizeof (map_info*),
-           core_cmp_mapping);
+        core_cmp_mapping);
 
   // print map
   if (is_debug()) {
@@ -458,7 +455,7 @@
     print_debug("---- sorted virtual address map ----\n");
     for (j = 0; j < ph->core->num_maps; j++) {
       print_debug("base = 0x%lx\tsize = %d\n", ph->core->map_array[j]->vaddr,
-                                       ph->core->map_array[j]->memsz);
+                  ph->core->map_array[j]->memsz);
     }
   }
 
@@ -1091,9 +1088,9 @@
                                    notep->n_type, notep->n_descsz);
 
       if (notep->n_type == NT_PRSTATUS) {
-         if (core_handle_prstatus(ph, descdata, notep->n_descsz) != true) {
-            return false;
-         }
+        if (core_handle_prstatus(ph, descdata, notep->n_descsz) != true) {
+          return false;
+        }
       }
       p = descdata + ROUNDUP(notep->n_descsz, 4);
    }
@@ -1121,7 +1118,7 @@
     * contains a set of saved /proc structures), and PT_LOAD (which
     * represents a memory mapping from the process's address space).
     *
-    * Difference b/w Solaris PT_NOTE and BSD PT_NOTE:
+    * Difference b/w Solaris PT_NOTE and Linux/BSD PT_NOTE:
     *
     *     In Solaris there are two PT_NOTE segments the first PT_NOTE (if present)
     *     contains /proc structs in the pre-2.6 unstructured /proc format. the last
@@ -1167,32 +1164,61 @@
 
 // read segments of a shared object
 static bool read_lib_segments(struct ps_prochandle* ph, int lib_fd, ELF_EHDR* lib_ehdr, uintptr_t lib_base) {
-   int i = 0;
-   ELF_PHDR* phbuf;
-   ELF_PHDR* lib_php = NULL;
+  int i = 0;
+  ELF_PHDR* phbuf;
+  ELF_PHDR* lib_php = NULL;
 
-   if ((phbuf = read_program_header_table(lib_fd, lib_ehdr)) == NULL)
-      return false;
+  int page_size = sysconf(_SC_PAGE_SIZE);
 
-   // we want to process only PT_LOAD segments that are not writable.
-   // i.e., text segments. The read/write/exec (data) segments would
-   // have been already added from core file segments.
-   for (lib_php = phbuf, i = 0; i < lib_ehdr->e_phnum; i++) {
-      if ((lib_php->p_type == PT_LOAD) && !(lib_php->p_flags & PF_W) && (lib_php->p_filesz != 0)) {
-         if (add_map_info(ph, lib_fd, lib_php->p_offset, lib_php->p_vaddr + lib_base, lib_php->p_filesz) == NULL)
-            goto err;
+  if ((phbuf = read_program_header_table(lib_fd, lib_ehdr)) == NULL) {
+    return false;
+  }
+
+  // we want to process only PT_LOAD segments that are not writable.
+  // i.e., text segments. The read/write/exec (data) segments would
+  // have been already added from core file segments.
+  for (lib_php = phbuf, i = 0; i < lib_ehdr->e_phnum; i++) {
+    if ((lib_php->p_type == PT_LOAD) && !(lib_php->p_flags & PF_W) && (lib_php->p_filesz != 0)) {
+
+      uintptr_t target_vaddr = lib_php->p_vaddr + lib_base;
+      map_info *existing_map = core_lookup(ph, target_vaddr);
+
+      if (existing_map == NULL) {
+        if (add_map_info(ph, lib_fd, lib_php->p_offset,
+                          target_vaddr, lib_php->p_filesz) == NULL) {
+          goto err;
+        }
+      } else {
+        if ((existing_map->memsz != page_size) &&
+            (existing_map->fd != lib_fd) &&
+            (existing_map->memsz != lib_php->p_filesz)) {
+
+          print_debug("address conflict @ 0x%lx (size = %ld, flags = %d)\n",
+                      target_vaddr, lib_php->p_filesz, lib_php->p_flags);
+          goto err;
+        }
+
+        /* replace PT_LOAD segment with library segment */
+        print_debug("overwrote with new address mapping (memsz %ld -> %ld)\n",
+                     existing_map->memsz, lib_php->p_filesz);
+
+        existing_map->fd = lib_fd;
+        existing_map->offset = lib_php->p_offset;
+        existing_map->memsz = lib_php->p_filesz;
       }
-      lib_php++;
-   }
+    }
 
-   free(phbuf);
-   return true;
+    lib_php++;
+  }
+
+  free(phbuf);
+  return true;
 err:
-   free(phbuf);
-   return false;
+  free(phbuf);
+  return false;
 }
 
-// process segments from interpreter (ld-elf.so.1)
+// process segments from interpreter (ld.so or ld-linux.so or ld-elf.so)
 static bool read_interp_segments(struct ps_prochandle* ph) {
    ELF_EHDR interp_ehdr;
 
@@ -1303,32 +1329,34 @@
   debug_base = dyn.d_un.d_ptr;
   // at debug_base we have struct r_debug. This has first link map in r_map field
   if (ps_pread(ph, (psaddr_t) debug_base + FIRST_LINK_MAP_OFFSET,
-                  &first_link_map_addr, sizeof(uintptr_t)) != PS_OK) {
+                 &first_link_map_addr, sizeof(uintptr_t)) != PS_OK) {
     print_debug("can't read first link map address\n");
     return false;
   }
 
   // read ld_base address from struct r_debug
-  // XXX: There is no r_ldbase member on BSD
-  /*
+#if 0  // There is no r_ldbase member on BSD
   if (ps_pread(ph, (psaddr_t) debug_base + LD_BASE_OFFSET, &ld_base_addr,
                   sizeof(uintptr_t)) != PS_OK) {
     print_debug("can't read ld base address\n");
     return false;
   }
   ph->core->ld_base_addr = ld_base_addr;
-  */
+#else
   ph->core->ld_base_addr = 0;
+#endif
 
   print_debug("interpreter base address is 0x%lx\n", ld_base_addr);
 
-  // now read segments from interp (i.e ld-elf.so.1)
-  if (read_interp_segments(ph) != true)
+  // now read segments from interp (i.e ld.so or ld-linux.so or ld-elf.so)
+  if (read_interp_segments(ph) != true) {
     return false;
+  }
 
   // after adding interpreter (ld.so) mappings sort again
-  if (sort_map_array(ph) != true)
+  if (sort_map_array(ph) != true) {
     return false;
+  }
 
   print_debug("first link map is at 0x%lx\n", first_link_map_addr);
 
@@ -1380,8 +1408,9 @@
           add_lib_info_fd(ph, lib_name, lib_fd, lib_base);
           // Map info is added for the library (lib_name) so
           // we need to re-sort it before calling the p_pdread.
-          if (sort_map_array(ph) != true)
+          if (sort_map_array(ph) != true) {
             return false;
+          }
         } else {
           print_debug("can't read ELF header for shared object %s\n", lib_name);
           close(lib_fd);
@@ -1392,7 +1421,7 @@
 
     // read next link_map address
     if (ps_pread(ph, (psaddr_t) link_map_addr + LINK_MAP_NEXT_OFFSET,
-                 &link_map_addr, sizeof(uintptr_t)) != PS_OK) {
+                  &link_map_addr, sizeof(uintptr_t)) != PS_OK) {
       print_debug("can't read next link in link_map\n");
       return false;
     }
@@ -1408,7 +1437,7 @@
 
   struct ps_prochandle* ph = (struct ps_prochandle*) calloc(1, sizeof(struct ps_prochandle));
   if (ph == NULL) {
-    print_debug("cant allocate ps_prochandle\n");
+    print_debug("can't allocate ps_prochandle\n");
     return NULL;
   }
 
@@ -1444,38 +1473,45 @@
   }
 
   if (read_elf_header(ph->core->exec_fd, &exec_ehdr) != true || exec_ehdr.e_type != ET_EXEC) {
-     print_debug("executable file is not a valid ELF ET_EXEC file\n");
-     goto err;
+    print_debug("executable file is not a valid ELF ET_EXEC file\n");
+    goto err;
   }
 
   // process core file segments
-  if (read_core_segments(ph, &core_ehdr) != true)
-     goto err;
+  if (read_core_segments(ph, &core_ehdr) != true) {
+    goto err;
+  }
 
   // process exec file segments
-  if (read_exec_segments(ph, &exec_ehdr) != true)
-     goto err;
+  if (read_exec_segments(ph, &exec_ehdr) != true) {
+    goto err;
+  }
 
   // exec file is also treated like a shared object for symbol search
   if (add_lib_info_fd(ph, exec_file, ph->core->exec_fd,
-                      (uintptr_t)0 + find_base_address(ph->core->exec_fd, &exec_ehdr)) == NULL)
-     goto err;
+                      (uintptr_t)0 + find_base_address(ph->core->exec_fd, &exec_ehdr)) == NULL) {
+    goto err;
+  }
 
   // allocate and sort maps into map_array, we need to do this
   // here because read_shared_lib_info needs to read from debuggee
   // address space
-  if (sort_map_array(ph) != true)
+  if (sort_map_array(ph) != true) {
     goto err;
+  }
 
-  if (read_shared_lib_info(ph) != true)
+  if (read_shared_lib_info(ph) != true) {
     goto err;
+  }
 
   // sort again because we have added more mappings from shared objects
-  if (sort_map_array(ph) != true)
+  if (sort_map_array(ph) != true) {
     goto err;
+  }
 
-  if (init_classsharing_workaround(ph) != true)
+  if (init_classsharing_workaround(ph) != true) {
     goto err;
+  }
 
   print_debug("Leave Pgrab_core\n");
   return ph;
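
Both copies of ps_core.c rely on the same lookup scheme the comments above describe: sort_map_array builds an array of map_info pointers, qsort orders it by base virtual address, and core_lookup binary-searches it. A minimal standalone sketch of that scheme, with map_info reduced to the two fields the search needs and the class-share fallback omitted:

    #include <stdint.h>
    #include <stdlib.h>

    /* Simplified map_info: just the range the address search consults. */
    typedef struct { uintptr_t vaddr; size_t memsz; } map_info;

    /* qsort callback ordering map_info pointers by base virtual address,
       mirroring core_cmp_mapping above. */
    static int cmp_mapping(const void *lhsp, const void *rhsp) {
      const map_info *lhs = *((const map_info **)lhsp);
      const map_info *rhs = *((const map_info **)rhsp);
      if (lhs->vaddr == rhs->vaddr) return 0;
      return lhs->vaddr < rhs->vaddr ? -1 : 1;
    }

    /* Binary search for the mapping covering addr, as in core_lookup. */
    static map_info* lookup(map_info **array, int num_maps, uintptr_t addr) {
      int lo = 0, hi = num_maps - 1;
      while (hi - lo > 1) {               // narrow down to a candidate pair
        int mid = (lo + hi) / 2;
        if (addr >= array[mid]->vaddr) lo = mid; else hi = mid;
      }
      map_info *mp = (addr < array[hi]->vaddr) ? array[lo] : array[hi];
      return (addr >= mp->vaddr && addr < mp->vaddr + mp->memsz) ? mp : NULL;
    }

sort_map_array's role in this sketch is just qsort(array, (size_t)num_maps, sizeof(map_info*), cmp_mapping), which is why the real code re-sorts after every batch of add_map_info calls.
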
--- a/agent/src/os/linux/LinuxDebuggerLocal.c	Thu Oct 03 19:13:12 2013 +0100
+++ b/agent/src/os/linux/LinuxDebuggerLocal.c	Mon Oct 21 14:08:09 2013 +0100
@@ -29,6 +29,7 @@
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <fcntl.h>
+#include <stdlib.h>
 #include <string.h>
 #include <limits.h>
 
@@ -80,7 +81,7 @@
   (JNIEnv *env, jclass cls) {
   jclass listClass;
 
-  if (init_libproc(getenv("LIBSAPROC_DEBUG")) != true) {
+  if (init_libproc(getenv("LIBSAPROC_DEBUG") != NULL) != true) {
      THROW_NEW_DEBUGGER_EXCEPTION("can't initialize libproc");
   }
 
--- a/agent/src/os/linux/ps_core.c	Thu Oct 03 19:13:12 2013 +0100
+++ b/agent/src/os/linux/ps_core.c	Mon Oct 21 14:08:09 2013 +0100
@@ -41,155 +41,158 @@
 // ps_prochandle cleanup helper functions
 
 // close all file descriptors
-static void close_elf_files(struct ps_prochandle* ph) {
-   lib_info* lib = NULL;
+static void close_files(struct ps_prochandle* ph) {
+  lib_info* lib = NULL;
 
-   // close core file descriptor
-   if (ph->core->core_fd >= 0)
-     close(ph->core->core_fd);
+  // close core file descriptor
+  if (ph->core->core_fd >= 0)
+    close(ph->core->core_fd);
 
-   // close exec file descriptor
-   if (ph->core->exec_fd >= 0)
-     close(ph->core->exec_fd);
+  // close exec file descriptor
+  if (ph->core->exec_fd >= 0)
+    close(ph->core->exec_fd);
 
-   // close interp file descriptor
-   if (ph->core->interp_fd >= 0)
-     close(ph->core->interp_fd);
+  // close interp file descriptor
+  if (ph->core->interp_fd >= 0)
+    close(ph->core->interp_fd);
 
-   // close class share archive file
-   if (ph->core->classes_jsa_fd >= 0)
-     close(ph->core->classes_jsa_fd);
+  // close class share archive file
+  if (ph->core->classes_jsa_fd >= 0)
+    close(ph->core->classes_jsa_fd);
 
-   // close all library file descriptors
-   lib = ph->libs;
-   while (lib) {
-      int fd = lib->fd;
-      if (fd >= 0 && fd != ph->core->exec_fd) close(fd);
-      lib = lib->next;
-   }
+  // close all library file descriptors
+  lib = ph->libs;
+  while (lib) {
+    int fd = lib->fd;
+    if (fd >= 0 && fd != ph->core->exec_fd) {
+      close(fd);
+    }
+    lib = lib->next;
+  }
 }
 
 // clean all map_info stuff
 static void destroy_map_info(struct ps_prochandle* ph) {
   map_info* map = ph->core->maps;
   while (map) {
-     map_info* next = map->next;
-     free(map);
-     map = next;
+    map_info* next = map->next;
+    free(map);
+    map = next;
   }
 
   if (ph->core->map_array) {
-     free(ph->core->map_array);
+    free(ph->core->map_array);
   }
 
   // Part of the class sharing workaround
   map = ph->core->class_share_maps;
   while (map) {
-     map_info* next = map->next;
-     free(map);
-     map = next;
+    map_info* next = map->next;
+    free(map);
+    map = next;
   }
 }
 
 // ps_prochandle operations
 static void core_release(struct ps_prochandle* ph) {
-   if (ph->core) {
-      close_elf_files(ph);
-      destroy_map_info(ph);
-      free(ph->core);
-   }
+  if (ph->core) {
+    close_files(ph);
+    destroy_map_info(ph);
+    free(ph->core);
+  }
 }
 
 static map_info* allocate_init_map(int fd, off_t offset, uintptr_t vaddr, size_t memsz) {
-   map_info* map;
-   if ( (map = (map_info*) calloc(1, sizeof(map_info))) == NULL) {
-      print_debug("can't allocate memory for map_info\n");
-      return NULL;
-   }
+  map_info* map;
+  if ( (map = (map_info*) calloc(1, sizeof(map_info))) == NULL) {
+    print_debug("can't allocate memory for map_info\n");
+    return NULL;
+  }
 
-   // initialize map
-   map->fd     = fd;
-   map->offset = offset;
-   map->vaddr  = vaddr;
-   map->memsz  = memsz;
-   return map;
+  // initialize map
+  map->fd     = fd;
+  map->offset = offset;
+  map->vaddr  = vaddr;
+  map->memsz  = memsz;
+  return map;
 }
 
 // add map info with given fd, offset, vaddr and memsz
 static map_info* add_map_info(struct ps_prochandle* ph, int fd, off_t offset,
                              uintptr_t vaddr, size_t memsz) {
-   map_info* map;
-   if ((map = allocate_init_map(fd, offset, vaddr, memsz)) == NULL) {
-      return NULL;
-   }
+  map_info* map;
+  if ((map = allocate_init_map(fd, offset, vaddr, memsz)) == NULL) {
+    return NULL;
+  }
 
-   // add this to map list
-   map->next  = ph->core->maps;
-   ph->core->maps   = map;
-   ph->core->num_maps++;
+  // add this to map list
+  map->next  = ph->core->maps;
+  ph->core->maps   = map;
+  ph->core->num_maps++;
 
-   return map;
+  return map;
 }
 
 // Part of the class sharing workaround
-static void add_class_share_map_info(struct ps_prochandle* ph, off_t offset,
+static map_info* add_class_share_map_info(struct ps_prochandle* ph, off_t offset,
                              uintptr_t vaddr, size_t memsz) {
-   map_info* map;
-   if ((map = allocate_init_map(ph->core->classes_jsa_fd,
-                                offset, vaddr, memsz)) == NULL) {
-      return;
-   }
+  map_info* map;
+  if ((map = allocate_init_map(ph->core->classes_jsa_fd,
+                               offset, vaddr, memsz)) == NULL) {
+    return NULL;
+  }
 
-   map->next = ph->core->class_share_maps;
-   ph->core->class_share_maps = map;
+  map->next = ph->core->class_share_maps;
+  ph->core->class_share_maps = map;
+  return map;
 }
 
 // Return the map_info for the given virtual address.  We keep a sorted
 // array of pointers in ph->map_array, so we can binary search.
-static map_info* core_lookup(struct ps_prochandle *ph, uintptr_t addr)
-{
-   int mid, lo = 0, hi = ph->core->num_maps - 1;
-   map_info *mp;
+static map_info* core_lookup(struct ps_prochandle *ph, uintptr_t addr) {
+  int mid, lo = 0, hi = ph->core->num_maps - 1;
+  map_info *mp;
 
-   while (hi - lo > 1) {
-     mid = (lo + hi) / 2;
-      if (addr >= ph->core->map_array[mid]->vaddr)
-         lo = mid;
-      else
-         hi = mid;
-   }
+  while (hi - lo > 1) {
+    mid = (lo + hi) / 2;
+    if (addr >= ph->core->map_array[mid]->vaddr) {
+      lo = mid;
+    } else {
+      hi = mid;
+    }
+  }
 
-   if (addr < ph->core->map_array[hi]->vaddr)
-      mp = ph->core->map_array[lo];
-   else
-      mp = ph->core->map_array[hi];
+  if (addr < ph->core->map_array[hi]->vaddr) {
+    mp = ph->core->map_array[lo];
+  } else {
+    mp = ph->core->map_array[hi];
+  }
 
-   if (addr >= mp->vaddr && addr < mp->vaddr + mp->memsz)
+  if (addr >= mp->vaddr && addr < mp->vaddr + mp->memsz) {
+    return (mp);
+  }
+
+
+  // Part of the class sharing workaround
+  // Unfortunately, we have no way of detecting -Xshare state.
+  // Check the share maps last, if the address is not found anywhere else.
+  // This is done to avoid reading share pages
+  // ahead of other normal maps. For example, with -Xshare:off we don't
+  // want to prefer class sharing data to data from core.
+  mp = ph->core->class_share_maps;
+  if (mp) {
+    print_debug("can't locate map_info at 0x%lx, trying class share maps\n", addr);
+  }
+  while (mp) {
+    if (addr >= mp->vaddr && addr < mp->vaddr + mp->memsz) {
+      print_debug("located map_info at 0x%lx from class share maps\n", addr);
       return (mp);
+    }
+    mp = mp->next;
+  }
 
-
-   // Part of the class sharing workaround
-   // Unfortunately, we have no way of detecting -Xshare state.
-   // Check out the share maps atlast, if we don't find anywhere.
-   // This is done this way so to avoid reading share pages
-   // ahead of other normal maps. For eg. with -Xshare:off we don't
-   // want to prefer class sharing data to data from core.
-   mp = ph->core->class_share_maps;
-   if (mp) {
-      print_debug("can't locate map_info at 0x%lx, trying class share maps\n",
-             addr);
-   }
-   while (mp) {
-      if (addr >= mp->vaddr && addr < mp->vaddr + mp->memsz) {
-         print_debug("located map_info at 0x%lx from class share maps\n",
-                  addr);
-         return (mp);
-      }
-      mp = mp->next;
-   }
-
-   print_debug("can't locate map_info at 0x%lx\n", addr);
-   return (NULL);
+  print_debug("can't locate map_info at 0x%lx\n", addr);
+  return (NULL);
 }
 
 //---------------------------------------------------------------
@@ -226,9 +229,9 @@
     size_t _used;            // for setting space top on read
 
     // 4991491 NOTICE These are C++ bool's in filemap.hpp and must match up with
-    // the C type matching the C++ bool type on any given platform. For
-    // Hotspot on Linux we assume the corresponding C type is char but
-    // licensees on Linux versions may need to adjust the type of these fields.
+    // the C type matching the C++ bool type on any given platform.
+    // We assume the corresponding C type is char but licensees
+    // may need to adjust the type of these fields.
     char   _read_only;       // read only space?
     char   _allow_exec;      // executable code in space?
 
@@ -238,154 +241,159 @@
 };
 
 static bool read_jboolean(struct ps_prochandle* ph, uintptr_t addr, jboolean* pvalue) {
-   jboolean i;
-   if (ps_pdread(ph, (psaddr_t) addr, &i, sizeof(i)) == PS_OK) {
-      *pvalue = i;
-      return true;
-   } else {
-      return false;
-   }
+  jboolean i;
+  if (ps_pdread(ph, (psaddr_t) addr, &i, sizeof(i)) == PS_OK) {
+    *pvalue = i;
+    return true;
+  } else {
+    return false;
+  }
 }
 
 static bool read_pointer(struct ps_prochandle* ph, uintptr_t addr, uintptr_t* pvalue) {
-   uintptr_t uip;
-   if (ps_pdread(ph, (psaddr_t) addr, &uip, sizeof(uip)) == PS_OK) {
-      *pvalue = uip;
-      return true;
-   } else {
-      return false;
-   }
+  uintptr_t uip;
+  if (ps_pdread(ph, (psaddr_t) addr, (char *)&uip, sizeof(uip)) == PS_OK) {
+    *pvalue = uip;
+    return true;
+  } else {
+    return false;
+  }
 }
 
 // used to read strings from debuggee
 static bool read_string(struct ps_prochandle* ph, uintptr_t addr, char* buf, size_t size) {
-   size_t i = 0;
-   char  c = ' ';
+  size_t i = 0;
+  char  c = ' ';
 
-   while (c != '\0') {
-     if (ps_pdread(ph, (psaddr_t) addr, &c, sizeof(char)) != PS_OK)
-         return false;
-      if (i < size - 1)
-         buf[i] = c;
-      else // smaller buffer
-         return false;
-      i++; addr++;
-   }
+  while (c != '\0') {
+    if (ps_pdread(ph, (psaddr_t) addr, &c, sizeof(char)) != PS_OK) {
+      return false;
+    }
+    if (i < size - 1) {
+      buf[i] = c;
+    } else {
+      // smaller buffer
+      return false;
+    }
+    i++; addr++;
+  }
 
-   buf[i] = '\0';
-   return true;
+  buf[i] = '\0';
+  return true;
 }
 
 #define USE_SHARED_SPACES_SYM "UseSharedSpaces"
 // mangled name of Arguments::SharedArchivePath
 #define SHARED_ARCHIVE_PATH_SYM "_ZN9Arguments17SharedArchivePathE"
+#define LIBJVM_NAME "/libjvm.so"
 
 static bool init_classsharing_workaround(struct ps_prochandle* ph) {
-   lib_info* lib = ph->libs;
-   while (lib != NULL) {
-      // we are iterating over shared objects from the core dump. look for
-      // libjvm.so.
-      const char *jvm_name = 0;
-      if ((jvm_name = strstr(lib->name, "/libjvm.so")) != 0) {
-         char classes_jsa[PATH_MAX];
-         struct FileMapHeader header;
-         size_t n = 0;
-         int fd = -1, m = 0;
-         uintptr_t base = 0, useSharedSpacesAddr = 0;
-         uintptr_t sharedArchivePathAddrAddr = 0, sharedArchivePathAddr = 0;
-         jboolean useSharedSpaces = 0;
-         map_info* mi = 0;
+  lib_info* lib = ph->libs;
+  while (lib != NULL) {
+    // we are iterating over shared objects from the core dump. look for
+    // libjvm.so.
+    const char *jvm_name = 0;
+    if ((jvm_name = strstr(lib->name, LIBJVM_NAME)) != 0) {
+      char classes_jsa[PATH_MAX];
+      struct FileMapHeader header;
+      int fd = -1;
+      int m = 0;
+      size_t n = 0;
+      uintptr_t base = 0, useSharedSpacesAddr = 0;
+      uintptr_t sharedArchivePathAddrAddr = 0, sharedArchivePathAddr = 0;
+      jboolean useSharedSpaces = 0;
+      map_info* mi = 0;
 
-         memset(classes_jsa, 0, sizeof(classes_jsa));
-         jvm_name = lib->name;
-         useSharedSpacesAddr = lookup_symbol(ph, jvm_name, USE_SHARED_SPACES_SYM);
-         if (useSharedSpacesAddr == 0) {
-            print_debug("can't lookup 'UseSharedSpaces' flag\n");
-            return false;
-         }
+      memset(classes_jsa, 0, sizeof(classes_jsa));
+      jvm_name = lib->name;
+      useSharedSpacesAddr = lookup_symbol(ph, jvm_name, USE_SHARED_SPACES_SYM);
+      if (useSharedSpacesAddr == 0) {
+        print_debug("can't lookup 'UseSharedSpaces' flag\n");
+        return false;
+      }
 
-         // Hotspot vm types are not exported to build this library. So
-         // using equivalent type jboolean to read the value of
-         // UseSharedSpaces which is same as hotspot type "bool".
-         if (read_jboolean(ph, useSharedSpacesAddr, &useSharedSpaces) != true) {
-            print_debug("can't read the value of 'UseSharedSpaces' flag\n");
-            return false;
-         }
+      // Hotspot vm types are not exported to build this library. So
+      // using equivalent type jboolean to read the value of
+      // UseSharedSpaces which is same as hotspot type "bool".
+      if (read_jboolean(ph, useSharedSpacesAddr, &useSharedSpaces) != true) {
+        print_debug("can't read the value of 'UseSharedSpaces' flag\n");
+        return false;
+      }
 
-         if ((int)useSharedSpaces == 0) {
-            print_debug("UseSharedSpaces is false, assuming -Xshare:off!\n");
-            return true;
-         }
+      if ((int)useSharedSpaces == 0) {
+        print_debug("UseSharedSpaces is false, assuming -Xshare:off!\n");
+        return true;
+      }
 
-         sharedArchivePathAddrAddr = lookup_symbol(ph, jvm_name, SHARED_ARCHIVE_PATH_SYM);
-         if (sharedArchivePathAddrAddr == 0) {
-            print_debug("can't lookup shared archive path symbol\n");
-            return false;
-         }
+      sharedArchivePathAddrAddr = lookup_symbol(ph, jvm_name, SHARED_ARCHIVE_PATH_SYM);
+      if (sharedArchivePathAddrAddr == 0) {
+        print_debug("can't lookup shared archive path symbol\n");
+        return false;
+      }
 
-         if (read_pointer(ph, sharedArchivePathAddrAddr, &sharedArchivePathAddr) != true) {
-            print_debug("can't read shared archive path pointer\n");
-            return false;
-         }
+      if (read_pointer(ph, sharedArchivePathAddrAddr, &sharedArchivePathAddr) != true) {
+        print_debug("can't read shared archive path pointer\n");
+        return false;
+      }
 
-         if (read_string(ph, sharedArchivePathAddr, classes_jsa, sizeof(classes_jsa)) != true) {
-            print_debug("can't read shared archive path value\n");
-            return false;
-         }
+      if (read_string(ph, sharedArchivePathAddr, classes_jsa, sizeof(classes_jsa)) != true) {
+        print_debug("can't read shared archive path value\n");
+        return false;
+      }
 
-         print_debug("looking for %s\n", classes_jsa);
-         // open the class sharing archive file
-         fd = pathmap_open(classes_jsa);
-         if (fd < 0) {
-            print_debug("can't open %s!\n", classes_jsa);
-            ph->core->classes_jsa_fd = -1;
-            return false;
-         } else {
-            print_debug("opened %s\n", classes_jsa);
-         }
+      print_debug("looking for %s\n", classes_jsa);
+      // open the class sharing archive file
+      fd = pathmap_open(classes_jsa);
+      if (fd < 0) {
+        print_debug("can't open %s!\n", classes_jsa);
+        ph->core->classes_jsa_fd = -1;
+        return false;
+      } else {
+        print_debug("opened %s\n", classes_jsa);
+      }
 
-         // read FileMapHeader from the file
-         memset(&header, 0, sizeof(struct FileMapHeader));
-         if ((n = read(fd, &header, sizeof(struct FileMapHeader)))
-              != sizeof(struct FileMapHeader)) {
-            print_debug("can't read shared archive file map header from %s\n", classes_jsa);
-            close(fd);
-            return false;
-         }
+      // read FileMapHeader from the file
+      memset(&header, 0, sizeof(struct FileMapHeader));
+      if ((n = read(fd, &header, sizeof(struct FileMapHeader)))
+           != sizeof(struct FileMapHeader)) {
+        print_debug("can't read shared archive file map header from %s\n", classes_jsa);
+        close(fd);
+        return false;
+      }
 
-         // check file magic
-         if (header._magic != 0xf00baba2) {
-            print_debug("%s has bad shared archive file magic number 0x%x, expecing 0xf00baba2\n",
-                        classes_jsa, header._magic);
-            close(fd);
-            return false;
-         }
+      // check file magic
+      if (header._magic != 0xf00baba2) {
+        print_debug("%s has bad shared archive file magic number 0x%x, expecing 0xf00baba2\n",
+                     classes_jsa, header._magic);
+        close(fd);
+        return false;
+      }
 
-         // check version
-         if (header._version != CURRENT_ARCHIVE_VERSION) {
-            print_debug("%s has wrong shared archive file version %d, expecting %d\n",
-                        classes_jsa, header._version, CURRENT_ARCHIVE_VERSION);
-            close(fd);
-            return false;
-         }
+      // check version
+      if (header._version != CURRENT_ARCHIVE_VERSION) {
+        print_debug("%s has wrong shared archive file version %d, expecting %d\n",
+                     classes_jsa, header._version, CURRENT_ARCHIVE_VERSION);
+        close(fd);
+        return false;
+      }
 
-         ph->core->classes_jsa_fd = fd;
-         // add read-only maps from classes.jsa to the list of maps
-         for (m = 0; m < NUM_SHARED_MAPS; m++) {
-            if (header._space[m]._read_only) {
-               base = (uintptr_t) header._space[m]._base;
-               // no need to worry about the fractional pages at-the-end.
-               // possible fractional pages are handled by core_read_data.
-               add_class_share_map_info(ph, (off_t) header._space[m]._file_offset,
-                         base, (size_t) header._space[m]._used);
-               print_debug("added a share archive map at 0x%lx\n", base);
-            }
-         }
-         return true;
+      ph->core->classes_jsa_fd = fd;
+      // add read-only maps from classes.jsa to the list of maps
+      for (m = 0; m < NUM_SHARED_MAPS; m++) {
+        if (header._space[m]._read_only) {
+          base = (uintptr_t) header._space[m]._base;
+          // no need to worry about the fractional pages at-the-end.
+          // possible fractional pages are handled by core_read_data.
+          add_class_share_map_info(ph, (off_t) header._space[m]._file_offset,
+                                   base, (size_t) header._space[m]._used);
+          print_debug("added a share archive map at 0x%lx\n", base);
+        }
       }
-      lib = lib->next;
+      return true;
    }
-   return true;
+   lib = lib->next;
+  }
+  return true;
 }
 
 
@@ -396,54 +404,58 @@
 // callback for sorting the array of map_info pointers.
 static int core_cmp_mapping(const void *lhsp, const void *rhsp)
 {
-   const map_info *lhs = *((const map_info **)lhsp);
-   const map_info *rhs = *((const map_info **)rhsp);
+  const map_info *lhs = *((const map_info **)lhsp);
+  const map_info *rhs = *((const map_info **)rhsp);
 
-   if (lhs->vaddr == rhs->vaddr)
-      return (0);
+  if (lhs->vaddr == rhs->vaddr) {
+    return (0);
+  }
 
-   return (lhs->vaddr < rhs->vaddr ? -1 : 1);
+  return (lhs->vaddr < rhs->vaddr ? -1 : 1);
 }
 
 // we sort map_info by starting virtual address so that we can do
 // binary search to read from an address.
 static bool sort_map_array(struct ps_prochandle* ph) {
-   size_t num_maps = ph->core->num_maps;
-   map_info* map = ph->core->maps;
-   int i = 0;
+  size_t num_maps = ph->core->num_maps;
+  map_info* map = ph->core->maps;
+  int i = 0;
 
-   // allocate map_array
-   map_info** array;
-   if ( (array = (map_info**) malloc(sizeof(map_info*) * num_maps)) == NULL) {
-      print_debug("can't allocate memory for map array\n");
-      return false;
-   }
+  // allocate map_array
+  map_info** array;
+  if ( (array = (map_info**) malloc(sizeof(map_info*) * num_maps)) == NULL) {
+    print_debug("can't allocate memory for map array\n");
+    return false;
+  }
 
-   // add maps to array
-   while (map) {
-      array[i] = map;
-      i++;
-      map = map->next;
-   }
+  // add maps to array
+  while (map) {
+    array[i] = map;
+    i++;
+    map = map->next;
+  }
 
-   // sort is called twice. If this is second time, clear map array
-   if (ph->core->map_array) free(ph->core->map_array);
-   ph->core->map_array = array;
-   // sort the map_info array by base virtual address.
-   qsort(ph->core->map_array, ph->core->num_maps, sizeof (map_info*),
-            core_cmp_mapping);
+  // sort is called twice. If this is second time, clear map array
+  if (ph->core->map_array) {
+    free(ph->core->map_array);
+  }
 
-   // print map
-   if (is_debug()) {
-      int j = 0;
-      print_debug("---- sorted virtual address map ----\n");
-      for (j = 0; j < ph->core->num_maps; j++) {
-        print_debug("base = 0x%lx\tsize = %zu\n", ph->core->map_array[j]->vaddr,
-                                         ph->core->map_array[j]->memsz);
-      }
-   }
+  ph->core->map_array = array;
+  // sort the map_info array by base virtual address.
+  qsort(ph->core->map_array, ph->core->num_maps, sizeof (map_info*),
+        core_cmp_mapping);
 
-   return true;
+  // print map
+  if (is_debug()) {
+    int j = 0;
+    print_debug("---- sorted virtual address map ----\n");
+    for (j = 0; j < ph->core->num_maps; j++) {
+      print_debug("base = 0x%lx\tsize = %zu\n", ph->core->map_array[j]->vaddr,
+                  ph->core->map_array[j]->memsz);
+    }
+  }
+
+  return true;
 }
 
 #ifndef MIN
@@ -460,16 +472,18 @@
       off_t off;
       int fd;
 
-      if (mp == NULL)
+      if (mp == NULL) {
          break;  /* No mapping for this address */
+      }
 
       fd = mp->fd;
       mapoff = addr - mp->vaddr;
       len = MIN(resid, mp->memsz - mapoff);
       off = mp->offset + mapoff;
 
-      if ((len = pread(fd, buf, len, off)) <= 0)
+      if ((len = pread(fd, buf, len, off)) <= 0) {
          break;
+      }
 
       resid -= len;
       addr += len;
@@ -625,8 +639,9 @@
                                    notep->n_type, notep->n_descsz);
 
       if (notep->n_type == NT_PRSTATUS) {
-         if (core_handle_prstatus(ph, descdata, notep->n_descsz) != true)
-            return false;
+        if (core_handle_prstatus(ph, descdata, notep->n_descsz) != true) {
+          return false;
+        }
       }
       p = descdata + ROUNDUP(notep->n_descsz, 4);
    }
@@ -654,7 +669,7 @@
     * contains a set of saved /proc structures), and PT_LOAD (which
     * represents a memory mapping from the process's address space).
     *
-    * Difference b/w Solaris PT_NOTE and Linux PT_NOTE:
+    * Difference b/w Solaris PT_NOTE and Linux/BSD PT_NOTE:
     *
     *     In Solaris there are two PT_NOTE segments the first PT_NOTE (if present)
     *     contains /proc structs in the pre-2.6 unstructured /proc format. the last
@@ -674,7 +689,9 @@
     for (core_php = phbuf, i = 0; i < core_ehdr->e_phnum; i++) {
       switch (core_php->p_type) {
          case PT_NOTE:
-            if (core_handle_note(ph, core_php) != true) goto err;
+            if (core_handle_note(ph, core_php) != true) {
+              goto err;
+            }
             break;
 
          case PT_LOAD: {
@@ -832,60 +849,62 @@
 // read shared library info from runtime linker's data structures.
 // This work is done by librtlb_db in Solaris
 static bool read_shared_lib_info(struct ps_prochandle* ph) {
-   uintptr_t addr = ph->core->dynamic_addr;
-   uintptr_t debug_base;
-   uintptr_t first_link_map_addr;
-   uintptr_t ld_base_addr;
-   uintptr_t link_map_addr;
-   uintptr_t lib_base_diff;
-   uintptr_t lib_base;
-   uintptr_t lib_name_addr;
-   char lib_name[BUF_SIZE];
-   ELF_DYN dyn;
-   ELF_EHDR elf_ehdr;
-   int lib_fd;
+  uintptr_t addr = ph->core->dynamic_addr;
+  uintptr_t debug_base;
+  uintptr_t first_link_map_addr;
+  uintptr_t ld_base_addr;
+  uintptr_t link_map_addr;
+  uintptr_t lib_base_diff;
+  uintptr_t lib_base;
+  uintptr_t lib_name_addr;
+  char lib_name[BUF_SIZE];
+  ELF_DYN dyn;
+  ELF_EHDR elf_ehdr;
+  int lib_fd;
 
-   // _DYNAMIC has information of the form
-   //         [tag] [data] [tag] [data] .....
-   // Both tag and data are pointer sized.
-   // We look for dynamic info with DT_DEBUG. This has shared object info.
-   // refer to struct r_debug in link.h
+  // _DYNAMIC has information of the form
+  //         [tag] [data] [tag] [data] .....
+  // Both tag and data are pointer sized.
+  // We look for dynamic info with DT_DEBUG. This has shared object info.
+  // refer to struct r_debug in link.h
 
-   dyn.d_tag = DT_NULL;
-   while (dyn.d_tag != DT_DEBUG) {
-      if (ps_pdread(ph, (psaddr_t) addr, &dyn, sizeof(ELF_DYN)) != PS_OK) {
-         print_debug("can't read debug info from _DYNAMIC\n");
-         return false;
-      }
-      addr += sizeof(ELF_DYN);
-   }
+  dyn.d_tag = DT_NULL;
+  while (dyn.d_tag != DT_DEBUG) {
+    if (ps_pdread(ph, (psaddr_t) addr, &dyn, sizeof(ELF_DYN)) != PS_OK) {
+      print_debug("can't read debug info from _DYNAMIC\n");
+      return false;
+    }
+    addr += sizeof(ELF_DYN);
+  }
 
-   // we have got Dyn entry with DT_DEBUG
-   debug_base = dyn.d_un.d_ptr;
-   // at debug_base we have struct r_debug. This has first link map in r_map field
-   if (ps_pdread(ph, (psaddr_t) debug_base + FIRST_LINK_MAP_OFFSET,
+  // we have got Dyn entry with DT_DEBUG
+  debug_base = dyn.d_un.d_ptr;
+  // at debug_base we have struct r_debug. This has first link map in r_map field
+  if (ps_pdread(ph, (psaddr_t) debug_base + FIRST_LINK_MAP_OFFSET,
                  &first_link_map_addr, sizeof(uintptr_t)) != PS_OK) {
-      print_debug("can't read first link map address\n");
+    print_debug("can't read first link map address\n");
+    return false;
+  }
+
+  // read ld_base address from struct r_debug
+  if (ps_pdread(ph, (psaddr_t) debug_base + LD_BASE_OFFSET, &ld_base_addr,
+                 sizeof(uintptr_t)) != PS_OK) {
+    print_debug("can't read ld base address\n");
+    return false;
+  }
+  ph->core->ld_base_addr = ld_base_addr;
+
+  print_debug("interpreter base address is 0x%lx\n", ld_base_addr);
+
+  // now read segments from interp (i.e ld.so or ld-linux.so or ld-elf.so)
+  if (read_interp_segments(ph) != true) {
       return false;
-   }
+  }
 
-   // read ld_base address from struct r_debug
-   if (ps_pdread(ph, (psaddr_t) debug_base + LD_BASE_OFFSET, &ld_base_addr,
-                 sizeof(uintptr_t)) != PS_OK) {
-      print_debug("can't read ld base address\n");
-      return false;
-   }
-   ph->core->ld_base_addr = ld_base_addr;
-
-   print_debug("interpreter base address is 0x%lx\n", ld_base_addr);
-
-   // now read segments from interp (i.e ld.so or ld-linux.so)
-   if (read_interp_segments(ph) != true)
-      return false;
-
-   // after adding interpreter (ld.so) mappings sort again
-   if (sort_map_array(ph) != true)
-      return false;
+  // after adding interpreter (ld.so) mappings sort again
+  if (sort_map_array(ph) != true) {
+    return false;
+  }
 
    print_debug("first link map is at 0x%lx\n", first_link_map_addr);
 
@@ -950,95 +969,102 @@
          }
       }
 
-      // read next link_map address
-      if (ps_pdread(ph, (psaddr_t) link_map_addr + LINK_MAP_NEXT_OFFSET,
-                        &link_map_addr, sizeof(uintptr_t)) != PS_OK) {
-         print_debug("can't read next link in link_map\n");
-         return false;
-      }
-   }
+    // read next link_map address
+    if (ps_pdread(ph, (psaddr_t) link_map_addr + LINK_MAP_NEXT_OFFSET,
+                   &link_map_addr, sizeof(uintptr_t)) != PS_OK) {
+      print_debug("can't read next link in link_map\n");
+      return false;
+    }
+  }
 
-   return true;
+  return true;
 }
 
 // the one and only one exposed stuff from this file
 struct ps_prochandle* Pgrab_core(const char* exec_file, const char* core_file) {
-   ELF_EHDR core_ehdr;
-   ELF_EHDR exec_ehdr;
-   ELF_EHDR lib_ehdr;
+  ELF_EHDR core_ehdr;
+  ELF_EHDR exec_ehdr;
+  ELF_EHDR lib_ehdr;
 
-   struct ps_prochandle* ph = (struct ps_prochandle*) calloc(1, sizeof(struct ps_prochandle));
-   if (ph == NULL) {
-      print_debug("can't allocate ps_prochandle\n");
-      return NULL;
-   }
+  struct ps_prochandle* ph = (struct ps_prochandle*) calloc(1, sizeof(struct ps_prochandle));
+  if (ph == NULL) {
+    print_debug("can't allocate ps_prochandle\n");
+    return NULL;
+  }
 
-   if ((ph->core = (struct core_data*) calloc(1, sizeof(struct core_data))) == NULL) {
-      free(ph);
-      print_debug("can't allocate ps_prochandle\n");
-      return NULL;
-   }
+  if ((ph->core = (struct core_data*) calloc(1, sizeof(struct core_data))) == NULL) {
+    free(ph);
+    print_debug("can't allocate ps_prochandle\n");
+    return NULL;
+  }
 
-   // initialize ph
-   ph->ops = &core_ops;
-   ph->core->core_fd   = -1;
-   ph->core->exec_fd   = -1;
-   ph->core->interp_fd = -1;
+  // initialize ph
+  ph->ops = &core_ops;
+  ph->core->core_fd   = -1;
+  ph->core->exec_fd   = -1;
+  ph->core->interp_fd = -1;
 
-   // open the core file
-   if ((ph->core->core_fd = open(core_file, O_RDONLY)) < 0) {
-      print_debug("can't open core file\n");
-      goto err;
-   }
+  // open the core file
+  if ((ph->core->core_fd = open(core_file, O_RDONLY)) < 0) {
+    print_debug("can't open core file\n");
+    goto err;
+  }
 
-   // read core file ELF header
-   if (read_elf_header(ph->core->core_fd, &core_ehdr) != true || core_ehdr.e_type != ET_CORE) {
-      print_debug("core file is not a valid ELF ET_CORE file\n");
-      goto err;
-   }
+  // read core file ELF header
+  if (read_elf_header(ph->core->core_fd, &core_ehdr) != true || core_ehdr.e_type != ET_CORE) {
+    print_debug("core file is not a valid ELF ET_CORE file\n");
+    goto err;
+  }
 
-   if ((ph->core->exec_fd = open(exec_file, O_RDONLY)) < 0) {
-      print_debug("can't open executable file\n");
-      goto err;
-   }
+  if ((ph->core->exec_fd = open(exec_file, O_RDONLY)) < 0) {
+    print_debug("can't open executable file\n");
+    goto err;
+  }
 
-   if (read_elf_header(ph->core->exec_fd, &exec_ehdr) != true || exec_ehdr.e_type != ET_EXEC) {
-      print_debug("executable file is not a valid ELF ET_EXEC file\n");
-      goto err;
-   }
+  if (read_elf_header(ph->core->exec_fd, &exec_ehdr) != true || exec_ehdr.e_type != ET_EXEC) {
+    print_debug("executable file is not a valid ELF ET_EXEC file\n");
+    goto err;
+  }
 
-   // process core file segments
-   if (read_core_segments(ph, &core_ehdr) != true)
-      goto err;
+  // process core file segments
+  if (read_core_segments(ph, &core_ehdr) != true) {
+    goto err;
+  }
 
-   // process exec file segments
-   if (read_exec_segments(ph, &exec_ehdr) != true)
-      goto err;
+  // process exec file segments
+  if (read_exec_segments(ph, &exec_ehdr) != true) {
+    goto err;
+  }
 
-   // exec file is also treated like a shared object for symbol search
-   if (add_lib_info_fd(ph, exec_file, ph->core->exec_fd,
-                       (uintptr_t)0 + find_base_address(ph->core->exec_fd, &exec_ehdr)) == NULL)
-      goto err;
+  // exec file is also treated like a shared object for symbol search
+  if (add_lib_info_fd(ph, exec_file, ph->core->exec_fd,
+                      (uintptr_t)0 + find_base_address(ph->core->exec_fd, &exec_ehdr)) == NULL) {
+    goto err;
+  }
 
-   // allocate and sort maps into map_array, we need to do this
-   // here because read_shared_lib_info needs to read from debuggee
-   // address space
-   if (sort_map_array(ph) != true)
-      goto err;
+  // allocate and sort maps into map_array, we need to do this
+  // here because read_shared_lib_info needs to read from debuggee
+  // address space
+  if (sort_map_array(ph) != true) {
+    goto err;
+  }
 
-   if (read_shared_lib_info(ph) != true)
-      goto err;
+  if (read_shared_lib_info(ph) != true) {
+    goto err;
+  }
 
-   // sort again because we have added more mappings from shared objects
-   if (sort_map_array(ph) != true)
-      goto err;
+  // sort again because we have added more mappings from shared objects
+  if (sort_map_array(ph) != true) {
+    goto err;
+  }
 
-   if (init_classsharing_workaround(ph) != true)
-      goto err;
+  if (init_classsharing_workaround(ph) != true) {
+    goto err;
+  }
 
-   return ph;
+  return ph;
 
 err:
-   Prelease(ph);
-   return NULL;
+  Prelease(ph);
+  return NULL;
 }
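
read_shared_lib_info above walks _DYNAMIC as (tag, data) pairs until it hits DT_DEBUG, whose d_ptr is the address of the runtime linker's struct r_debug; the FIRST_LINK_MAP_OFFSET and LD_BASE_OFFSET reads then pick the r_map and ld base fields out of that struct. A minimal in-process sketch of the same scan, using the 64-bit types from Linux's <elf.h> for brevity; the real code reads each entry out of the core image with ps_pdread instead of dereferencing directly:

    #include <elf.h>

    /* Scan the _DYNAMIC array: entries are (d_tag, d_un) pairs terminated
       by DT_NULL, and DT_DEBUG's d_ptr holds the address of the runtime
       linker's struct r_debug. */
    static Elf64_Addr find_debug_base(const Elf64_Dyn *dyn) {
      for (; dyn->d_tag != DT_NULL; dyn++) {
        if (dyn->d_tag == DT_DEBUG) {
          return dyn->d_un.d_ptr;   /* start of struct r_debug */
        }
      }
      return 0;                     /* no DT_DEBUG entry present */
    }

Unlike this sketch, the loop in read_shared_lib_info keeps reading until it sees DT_DEBUG, so a core whose _DYNAMIC lacks that tag is reported as a read failure rather than a clean miss.
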
--- a/agent/src/os/linux/ps_proc.c	Thu Oct 03 19:13:12 2013 +0100
+++ b/agent/src/os/linux/ps_proc.c	Mon Oct 21 14:08:09 2013 +0100
@@ -27,6 +27,8 @@
 #include <string.h>
 #include <signal.h>
 #include <errno.h>
+#include <sys/types.h>
+#include <sys/wait.h>
 #include <sys/ptrace.h>
 #include "libproc_impl.h"
 
--- a/agent/src/os/linux/salibelf.c	Thu Oct 03 19:13:12 2013 +0100
+++ b/agent/src/os/linux/salibelf.c	Mon Oct 21 14:08:09 2013 +0100
@@ -25,6 +25,7 @@
 #include "salibelf.h"
 #include <stdlib.h>
 #include <unistd.h>
+#include <string.h>
 
 extern void print_debug(const char*,...);
 
--- a/agent/src/os/linux/symtab.c	Thu Oct 03 19:13:12 2013 +0100
+++ b/agent/src/os/linux/symtab.c	Mon Oct 21 14:08:09 2013 +0100
@@ -305,7 +305,7 @@
 
   unsigned char *bytes
     = (unsigned char*)(note+1) + note->n_namesz;
-  unsigned char *filename
+  char *filename
     = (build_id_to_debug_filename (note->n_descsz, bytes));
 
   fd = pathmap_open(filename);
--- a/agent/src/share/classes/sun/jvm/hotspot/asm/Disassembler.java	Thu Oct 03 19:13:12 2013 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/asm/Disassembler.java	Mon Oct 21 14:08:09 2013 +0100
@@ -67,6 +67,13 @@
          String libname = "hsdis";
          String arch = System.getProperty("os.arch");
          if (os.lastIndexOf("Windows", 0) != -1) {
+            if (arch.equals("x86")) {
+               libname +=  "-i386";
+            } else if (arch.equals("amd64")) {
+               libname +=  "-amd64";
+            } else {
+               libname +=  "-" + arch;
+            }
             path.append(sep + "bin" + sep);
             libname += ".dll";
          } else if (os.lastIndexOf("SunOS", 0) != -1) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/ProtectionDomainCacheEntry.java	Mon Oct 21 14:08:09 2013 +0100
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+package sun.jvm.hotspot.memory;
+
+import java.util.*;
+import sun.jvm.hotspot.debugger.*;
+import sun.jvm.hotspot.oops.*;
+import sun.jvm.hotspot.runtime.*;
+import sun.jvm.hotspot.types.*;
+
+public class ProtectionDomainCacheEntry extends VMObject {
+  private static sun.jvm.hotspot.types.OopField protectionDomainField;
+
+  static {
+    VM.registerVMInitializedObserver(new Observer() {
+        public void update(Observable o, Object data) {
+          initialize(VM.getVM().getTypeDataBase());
+        }
+      });
+  }
+
+  private static synchronized void initialize(TypeDataBase db) {
+    Type type = db.lookupType("ProtectionDomainCacheEntry");
+    protectionDomainField = type.getOopField("_literal");
+  }
+
+  public ProtectionDomainCacheEntry(Address addr) {
+    super(addr);
+  }
+
+  public Oop protectionDomain() {
+    return VM.getVM().getObjectHeap().newOop(protectionDomainField.getValue(addr));
+  }
+}
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/ProtectionDomainEntry.java	Thu Oct 03 19:13:12 2013 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/ProtectionDomainEntry.java	Mon Oct 21 14:08:09 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,7 @@
 
 public class ProtectionDomainEntry extends VMObject {
   private static AddressField nextField;
-  private static sun.jvm.hotspot.types.OopField protectionDomainField;
+  private static AddressField pdCacheField;
 
   static {
     VM.registerVMInitializedObserver(new Observer() {
@@ -46,7 +46,7 @@
     Type type = db.lookupType("ProtectionDomainEntry");
 
     nextField = type.getAddressField("_next");
-    protectionDomainField = type.getOopField("_protection_domain");
+    pdCacheField = type.getAddressField("_pd_cache");
   }
 
   public ProtectionDomainEntry(Address addr) {
@@ -54,10 +54,12 @@
   }
 
   public ProtectionDomainEntry next() {
-    return (ProtectionDomainEntry) VMObjectFactory.newObject(ProtectionDomainEntry.class, addr);
+    return (ProtectionDomainEntry) VMObjectFactory.newObject(ProtectionDomainEntry.class, nextField.getValue(addr));
   }
 
   public Oop protectionDomain() {
-    return VM.getVM().getObjectHeap().newOop(protectionDomainField.getValue(addr));
+    ProtectionDomainCacheEntry pd_cache = (ProtectionDomainCacheEntry)
+      VMObjectFactory.newObject(ProtectionDomainCacheEntry.class, pdCacheField.getValue(addr));
+    return pd_cache.protectionDomain();
   }
 }
--- a/agent/src/share/classes/sun/jvm/hotspot/memory/SymbolTable.java	Thu Oct 03 19:13:12 2013 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/memory/SymbolTable.java	Mon Oct 21 14:08:09 2013 +0100
@@ -44,12 +44,10 @@
   private static synchronized void initialize(TypeDataBase db) {
     Type type = db.lookupType("SymbolTable");
     theTableField  = type.getAddressField("_the_table");
-    symbolTableSize = db.lookupIntConstant("SymbolTable::symbol_table_size").intValue();
   }
 
   // Fields
   private static AddressField theTableField;
-  private static int symbolTableSize;
 
   // Accessors
   public static SymbolTable getTheTable() {
@@ -57,10 +55,6 @@
     return (SymbolTable) VMObjectFactory.newObject(SymbolTable.class, tmp);
   }
 
-  public static int getSymbolTableSize() {
-    return symbolTableSize;
-  }
-
   public SymbolTable(Address addr) {
     super(addr);
   }
--- a/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Thu Oct 03 19:13:12 2013 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java	Mon Oct 21 14:08:09 2013 +0100
@@ -134,15 +134,13 @@
      private String type;
      private String name;
      private Address addr;
-     private String kind;
-     private int origin;
+     private int flags;
 
-     private Flag(String type, String name, Address addr, String kind, int origin) {
+     private Flag(String type, String name, Address addr, int flags) {
         this.type = type;
         this.name = name;
         this.addr = addr;
-        this.kind = kind;
-        this.origin = origin;
+        this.flags = flags;
      }
 
      public String getType() {
@@ -157,12 +155,8 @@
         return addr;
      }
 
-     public String getKind() {
-        return kind;
-     }
-
      public int getOrigin() {
-        return origin;
+        return flags & 0xF;  // XXX can we get the mask bits from somewhere?
      }
 
      public boolean isBool() {
@@ -173,8 +167,7 @@
         if (Assert.ASSERTS_ENABLED) {
            Assert.that(isBool(), "not a bool flag!");
         }
-        return addr.getCIntegerAt(0, boolType.getSize(), boolType.isUnsigned())
-               != 0;
+        return addr.getCIntegerAt(0, boolType.getSize(), boolType.isUnsigned()) != 0;
      }
 
      public boolean isIntx() {
@@ -843,11 +836,10 @@
 
     Address flagAddr = flagType.getAddressField("flags").getValue();
 
-    AddressField typeFld = flagType.getAddressField("type");
-    AddressField nameFld = flagType.getAddressField("name");
-    AddressField addrFld = flagType.getAddressField("addr");
-    AddressField kindFld = flagType.getAddressField("kind");
-    CIntField originFld = new CIntField(flagType.getCIntegerField("origin"), 0);
+    AddressField typeFld = flagType.getAddressField("_type");
+    AddressField nameFld = flagType.getAddressField("_name");
+    AddressField addrFld = flagType.getAddressField("_addr");
+    CIntField flagsFld = new CIntField(flagType.getCIntegerField("_flags"), 0);
 
     long flagSize = flagType.getSize(); // sizeof(Flag)
 
@@ -856,9 +848,8 @@
       String type = CStringUtilities.getString(typeFld.getValue(flagAddr));
       String name = CStringUtilities.getString(nameFld.getValue(flagAddr));
       Address addr = addrFld.getValue(flagAddr);
-      String kind = CStringUtilities.getString(kindFld.getValue(flagAddr));
-      int origin = (int)originFld.getValue(flagAddr);
-      commandLineFlags[f] = new Flag(type, name, addr, kind, origin);
+      int flags = (int)flagsFld.getValue(flagAddr);
+      commandLineFlags[f] = new Flag(type, name, addr, flags);
       flagAddr = flagAddr.addOffsetTo(flagSize);
     }
 
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/AbstractHeapGraphWriter.java	Thu Oct 03 19:13:12 2013 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/AbstractHeapGraphWriter.java	Mon Oct 21 14:08:09 2013 +0100
@@ -59,6 +59,7 @@
 
                     public boolean doObj(Oop oop) {
                         try {
+                            writeHeapRecordPrologue();
                             if (oop instanceof TypeArray) {
                                 writePrimitiveArray((TypeArray)oop);
                             } else if (oop instanceof ObjArray) {
@@ -97,6 +98,7 @@
                                 // not-a-Java-visible oop
                                 writeInternalObject(oop);
                             }
+                            writeHeapRecordEpilogue();
                         } catch (IOException exp) {
                             throw new RuntimeException(exp);
                         }
@@ -416,6 +418,12 @@
     protected void writeHeapFooter() throws IOException {
     }
 
+    protected void writeHeapRecordPrologue() throws IOException {
+    }
+
+    protected void writeHeapRecordEpilogue() throws IOException {
+    }
+
     // HeapVisitor, OopVisitor methods can't throw any non-runtime
     // exception. But, derived class write methods (which are called
     // from visitor callbacks) may throw IOException. Hence, we throw
--- a/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java	Thu Oct 03 19:13:12 2013 +0100
+++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java	Mon Oct 21 14:08:09 2013 +0100
@@ -44,7 +44,7 @@
  * WARNING: This format is still under development, and is subject to
  * change without notice.
  *
- * header    "JAVA PROFILE 1.0.1" (0-terminated)
+ * header    "JAVA PROFILE 1.0.1" or "JAVA PROFILE 1.0.2" (0-terminated)
  * u4        size of identifiers. Identifiers are used to represent
  *            UTF8 strings, objects, stack traces, etc. They usually
  *            have the same size as host pointers. For example, on
@@ -292,11 +292,34 @@
  *                          0x00000002: cpu sampling on/off
  *                u2        stack trace depth
  *
+ *
+ * When the header is "JAVA PROFILE 1.0.2" a heap dump can optionally
+ * be generated as a sequence of heap dump segments. This sequence is
+ * terminated by an end record. The additional tags allowed by format
+ * "JAVA PROFILE 1.0.2" are:
+ *
+ * HPROF_HEAP_DUMP_SEGMENT  denote a heap dump segment
+ *
+ *               [heap dump sub-records]*
+ *               The same sub-record types allowed by HPROF_HEAP_DUMP
+ *
+ * HPROF_HEAP_DUMP_END      denotes the end of a heap dump
+ *
  */
 
 public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
+
+    // The heap size threshold used to determine if segmented format
+    // ("JAVA PROFILE 1.0.2") should be used.
+    private static final long HPROF_SEGMENTED_HEAP_DUMP_THRESHOLD = 2L * 0x40000000;
+
+    // The approximate size of a heap segment. Used to calculate when to create
+    // a new segment.
+    private static final long HPROF_SEGMENTED_HEAP_DUMP_SEGMENT_SIZE = 1L * 0x40000000;
+
     // hprof binary file header
-    private static final String HPROF_HEADER = "JAVA PROFILE 1.0.1";
+    private static final String HPROF_HEADER_1_0_1 = "JAVA PROFILE 1.0.1";
+    private static final String HPROF_HEADER_1_0_2 = "JAVA PROFILE 1.0.2";
 
     // constants in enum HprofTag
     private static final int HPROF_UTF8             = 0x01;
@@ -312,6 +335,10 @@
     private static final int HPROF_CPU_SAMPLES      = 0x0D;
     private static final int HPROF_CONTROL_SETTINGS = 0x0E;
 
+    // 1.0.2 record types
+    private static final int HPROF_HEAP_DUMP_SEGMENT = 0x1C;
+    private static final int HPROF_HEAP_DUMP_END     = 0x2C;
+
     // Heap dump constants
     // constants in enum HprofGcTag
     private static final int HPROF_GC_ROOT_UNKNOWN       = 0xFF;
@@ -352,11 +379,9 @@
     private static final int JVM_SIGNATURE_ARRAY   = '[';
     private static final int JVM_SIGNATURE_CLASS   = 'L';
 
-
     public synchronized void write(String fileName) throws IOException {
         // open file stream and create buffered data output stream
-        FileOutputStream fos = new FileOutputStream(fileName);
-        FileChannel chn = fos.getChannel();
+        fos = new FileOutputStream(fileName);
         out = new DataOutputStream(new BufferedOutputStream(fos));
 
         VM vm = VM.getVM();
@@ -385,6 +410,9 @@
         FLOAT_SIZE = objectHeap.getFloatSize();
         DOUBLE_SIZE = objectHeap.getDoubleSize();
 
+        // Check whether we should dump the heap as segments
+        useSegmentedHeapDump = vm.getUniverse().heap().used() > HPROF_SEGMENTED_HEAP_DUMP_THRESHOLD;
+
         // hprof bin format header
         writeFileHeader();
 
@@ -394,38 +422,87 @@
 
         // hprof UTF-8 symbols section
         writeSymbols();
+
         // HPROF_LOAD_CLASS records for all classes
         writeClasses();
 
-        // write heap data now
-        out.writeByte((byte)HPROF_HEAP_DUMP);
-        out.writeInt(0); // relative timestamp
-
-        // remember position of dump length, we will fixup
-        // length later - hprof format requires length.
-        out.flush();
-        long dumpStart = chn.position();
-
-        // write dummy length of 0 and we'll fix it later.
-        out.writeInt(0);
-
         // write CLASS_DUMP records
         writeClassDumpRecords();
 
         // this will write heap data into the buffer stream
         super.write();
 
+        // flush buffer stream.
+        out.flush();
+
+        // Fill in final length
+        fillInHeapRecordLength();
+
+        if (useSegmentedHeapDump) {
+            // Write heap segment-end record
+            out.writeByte((byte) HPROF_HEAP_DUMP_END);
+            out.writeInt(0);
+            out.writeInt(0);
+        }
+
         // flush buffer stream and discard it.
         out.flush();
         out = null;
 
+        // close the file stream
+        fos.close();
+    }
+
+    @Override
+    protected void writeHeapRecordPrologue() throws IOException {
+        if (currentSegmentStart == 0) {
+            // write the heap data header; depending on heap size, use the
+            // segmented heap format
+            out.writeByte((byte) (useSegmentedHeapDump ? HPROF_HEAP_DUMP_SEGMENT
+                    : HPROF_HEAP_DUMP));
+            out.writeInt(0);
+
+            // remember the position of the dump length; we will fix up
+            // the length later - the hprof format requires it.
+            out.flush();
+            currentSegmentStart = fos.getChannel().position();
+
+            // write dummy length of 0 and we'll fix it later.
+            out.writeInt(0);
+        }
+    }
+
+    @Override
+    protected void writeHeapRecordEpilogue() throws IOException {
+        if (useSegmentedHeapDump) {
+            out.flush();
+            if ((fos.getChannel().position() - currentSegmentStart - 4) >= HPROF_SEGMENTED_HEAP_DUMP_SEGMENT_SIZE) {
+                fillInHeapRecordLength();
+                currentSegmentStart = 0;
+            }
+        }
+    }
+
+    private void fillInHeapRecordLength() throws IOException {
+
         // now get current position to calculate length
-        long dumpEnd = chn.position();
+        long dumpEnd = fos.getChannel().position();
+
         // calculate length of heap data
-        int dumpLen = (int) (dumpEnd - dumpStart - 4);
+        long dumpLenLong = (dumpEnd - currentSegmentStart - 4L);
+
+        // Check length boundary, overflow could happen but is _very_ unlikely
+        if (dumpLenLong >= (4L * 0x40000000)) {
+            throw new RuntimeException("Heap segment size overflow.");
+        }
+
+        // Save the current position
+        long currentPosition = fos.getChannel().position();
 
         // seek the position to write length
-        chn.position(dumpStart);
+        fos.getChannel().position(currentSegmentStart);
+
+        int dumpLen = (int) dumpLenLong;
 
         // write length as integer
         fos.write((dumpLen >>> 24) & 0xFF);
@@ -433,8 +510,8 @@
         fos.write((dumpLen >>> 8) & 0xFF);
         fos.write((dumpLen >>> 0) & 0xFF);
 
-        // close the file stream
-        fos.close();
+        // Reset to the previously saved position
+        fos.getChannel().position(currentPosition);
     }
 
     private void writeClassDumpRecords() throws IOException {
@@ -443,7 +520,9 @@
             sysDict.allClassesDo(new SystemDictionary.ClassVisitor() {
                             public void visit(Klass k) {
                                 try {
+                                    writeHeapRecordPrologue();
                                     writeClassDumpRecord(k);
+                                    writeHeapRecordEpilogue();
                                 } catch (IOException e) {
                                     throw new RuntimeException(e);
                                 }
@@ -884,7 +963,12 @@
     // writes hprof binary file header
     private void writeFileHeader() throws IOException {
         // version string
-        out.writeBytes(HPROF_HEADER);
+        if (useSegmentedHeapDump) {
+            out.writeBytes(HPROF_HEADER_1_0_2);
+        }
+        else {
+            out.writeBytes(HPROF_HEADER_1_0_1);
+        }
         out.writeByte((byte)'\0');
 
         // write identifier size. we use pointers as identifiers.
@@ -976,6 +1060,7 @@
     private static final int EMPTY_FRAME_DEPTH = -1;
 
     private DataOutputStream out;
+    private FileOutputStream fos;
     private Debugger dbg;
     private ObjectHeap objectHeap;
     private SymbolTable symTbl;
@@ -983,6 +1068,10 @@
     // oopSize of the debuggee
     private int OBJ_ID_SIZE;
 
+    // Added for hprof file format 1.0.2 support
+    private boolean useSegmentedHeapDump;
+    private long currentSegmentStart;
+
     private long BOOLEAN_BASE_OFFSET;
     private long BYTE_BASE_OFFSET;
     private long CHAR_BASE_OFFSET;
@@ -1005,6 +1094,7 @@
     private static class ClassData {
         int instSize;
         List fields;
+
         ClassData(int instSize, List fields) {
             this.instSize = instSize;
             this.fields = fields;
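
Taken together, the changes above frame heap data as records of the form tag(u1), timestamp(u4), length(u4), body, where the length is only known after the body has been written. A compact sketch of that write-seek-patch framing, independent of the SA classes (RandomAccessFile stands in for the FileOutputStream/FileChannel pair; constants as defined above):

    import java.io.IOException;
    import java.io.RandomAccessFile;

    // Sketch of the HPROF 1.0.2 segment framing: write the record header
    // with a dummy length, emit the body, then seek back and patch in the
    // real length, which must stay below 4GB per segment.
    class SegmentFramer {
        private static final int HPROF_HEAP_DUMP_SEGMENT = 0x1C;
        private final RandomAccessFile file;
        private long lengthFieldPos = -1;

        SegmentFramer(RandomAccessFile file) { this.file = file; }

        void beginSegment() throws IOException {
            file.writeByte(HPROF_HEAP_DUMP_SEGMENT);
            file.writeInt(0);                        // relative timestamp
            lengthFieldPos = file.getFilePointer();  // remember where the length goes
            file.writeInt(0);                        // dummy length, patched later
        }

        void endSegment() throws IOException {
            long end = file.getFilePointer();
            long len = end - lengthFieldPos - 4;     // body size, excluding the length field
            if (len >= 4L * 0x40000000L) {
                throw new IOException("heap segment size overflow");
            }
            file.seek(lengthFieldPos);
            file.writeInt((int) len);                // fill in the real length
            file.seek(end);                          // restore the write position
        }
    }
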
--- a/make/bsd/makefiles/fastdebug.make	Thu Oct 03 19:13:12 2013 +0100
+++ b/make/bsd/makefiles/fastdebug.make	Mon Oct 21 14:08:09 2013 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -59,5 +59,5 @@
 MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-debug
 
 VERSION = fastdebug
-SYSDEFS += -DASSERT
+SYSDEFS += -DASSERT -DCHECK_UNHANDLED_OOPS
 PICFLAGS = DEFAULT
--- a/make/bsd/makefiles/gcc.make	Thu Oct 03 19:13:12 2013 +0100
+++ b/make/bsd/makefiles/gcc.make	Mon Oct 21 14:08:09 2013 +0100
@@ -247,7 +247,7 @@
 
 ifeq ($(USE_CLANG), true)
   # However we need to clean the code up before we can unrestrictedly enable this option with Clang
-  WARNINGS_ARE_ERRORS += -Wno-unused-value -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses
+  WARNINGS_ARE_ERRORS += -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses
   WARNINGS_ARE_ERRORS += -Wno-switch -Wno-tautological-compare
 # Not yet supported by clang in Xcode 4.6.2
 #  WARNINGS_ARE_ERRORS += -Wno-tautological-constant-out-of-range-compare
@@ -262,7 +262,7 @@
   # conversions which might affect the values. Only enable it in earlier versions.
   WARNING_FLAGS = -Wunused-function
   ifeq ($(USE_CLANG),)
-    WARNINGS_FLAGS += -Wconversion
+    WARNING_FLAGS += -Wconversion
   endif
 endif
 
--- a/make/hotspot_version	Thu Oct 03 19:13:12 2013 +0100
+++ b/make/hotspot_version	Mon Oct 21 14:08:09 2013 +0100
@@ -35,7 +35,7 @@
 
 HS_MAJOR_VER=25
 HS_MINOR_VER=0
-HS_BUILD_NUMBER=52
+HS_BUILD_NUMBER=54
 
 JDK_MAJOR_VER=1
 JDK_MINOR_VER=8
--- a/make/linux/makefiles/fastdebug.make	Thu Oct 03 19:13:12 2013 +0100
+++ b/make/linux/makefiles/fastdebug.make	Mon Oct 21 14:08:09 2013 +0100
@@ -59,5 +59,5 @@
 MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug
 
 VERSION = optimized
-SYSDEFS += -DASSERT
+SYSDEFS += -DASSERT -DCHECK_UNHANDLED_OOPS
 PICFLAGS = DEFAULT
--- a/make/linux/makefiles/gcc.make	Thu Oct 03 19:13:12 2013 +0100
+++ b/make/linux/makefiles/gcc.make	Mon Oct 21 14:08:09 2013 +0100
@@ -208,7 +208,7 @@
 
 ifeq ($(USE_CLANG), true)
   # However we need to clean the code up before we can unrestrictedly enable this option with Clang
-  WARNINGS_ARE_ERRORS += -Wno-unused-value -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses
+  WARNINGS_ARE_ERRORS += -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses
   WARNINGS_ARE_ERRORS += -Wno-switch -Wno-tautological-constant-out-of-range-compare -Wno-tautological-compare
   WARNINGS_ARE_ERRORS += -Wno-delete-non-virtual-dtor -Wno-deprecated -Wno-format -Wno-dynamic-class-memaccess
   WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body
--- a/make/windows/makefiles/compile.make	Thu Oct 03 19:13:12 2013 +0100
+++ b/make/windows/makefiles/compile.make	Mon Oct 21 14:08:09 2013 +0100
@@ -44,6 +44,7 @@
 #   /GS       Inserts security stack checks in some functions (VS2005 default)
 #   /Oi       Use intrinsics (in /O2)
 #   /Od       Disable all optimizations
+#   /MP       Use multiple cores for compilation
 #
 # NOTE: Normally following any of the above with a '-' will turn off that flag
 #
@@ -208,6 +209,7 @@
 DEBUG_OPT_OPTION     = /Od
 GX_OPTION = /EHsc
 LD_FLAGS = /manifest $(LD_FLAGS)
+MP_FLAG = /MP
 # Manifest Tool - used in VS2005 and later to adjust manifests stored
 # as resources inside build artifacts.
 !if "x$(MT)" == "x"
@@ -222,6 +224,7 @@
 DEBUG_OPT_OPTION     = /Od
 GX_OPTION = /EHsc
 LD_FLAGS = /manifest $(LD_FLAGS)
+MP_FLAG = /MP
 # Manifest Tool - used in VS2005 and later to adjust manifests stored
 # as resources inside build artifacts.
 !if "x$(MT)" == "x"
@@ -238,6 +241,7 @@
 DEBUG_OPT_OPTION     = /Od
 GX_OPTION = /EHsc
 LD_FLAGS = /manifest $(LD_FLAGS)
+MP_FLAG = /MP
 # Manifest Tool - used in VS2005 and later to adjust manifests stored
 # as resources inside build artifacts.
 !if "x$(MT)" == "x"
@@ -250,6 +254,8 @@
 LD_FLAGS = $(SAFESEH_FLAG) $(LD_FLAGS)
 !endif
 
+CXX_FLAGS = $(CXX_FLAGS) $(MP_FLAG)
+
 # If NO_OPTIMIZATIONS is defined in the environment, turn everything off
 !ifdef NO_OPTIMIZATIONS
 PRODUCT_OPT_OPTION   = $(DEBUG_OPT_OPTION)
--- a/make/windows/makefiles/fastdebug.make	Thu Oct 03 19:13:12 2013 +0100
+++ b/make/windows/makefiles/fastdebug.make	Mon Oct 21 14:08:09 2013 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
--- a/make/windows/makefiles/sa.make	Thu Oct 03 19:13:12 2013 +0100
+++ b/make/windows/makefiles/sa.make	Mon Oct 21 14:08:09 2013 +0100
@@ -102,7 +102,10 @@
 !if "$(MT)" != ""
 SA_LD_FLAGS = -manifest $(SA_LD_FLAGS)
 !endif
-SASRCFILE = $(AGENT_DIR)/src/os/win32/windbg/sawindbg.cpp
+
+SASRCFILES = $(AGENT_DIR)/src/os/win32/windbg/sawindbg.cpp \
+		$(AGENT_DIR)/src/share/native/sadis.c
 SA_LFLAGS = $(SA_LD_FLAGS) -nologo -subsystem:console -machine:$(MACHINE)
 !if "$(ENABLE_FULL_DEBUG_SYMBOLS)" == "1"
 SA_LFLAGS = $(SA_LFLAGS) -map -debug
@@ -111,22 +114,24 @@
 SA_LFLAGS = $(SAFESEH_FLAG) $(SA_LFLAGS)
 !endif
 
+SA_CFLAGS = $(SA_CFLAGS) $(MP_FLAG)
+
 # Note that we do not keep sawindbg.obj around as it would then
 # get included in the dumpbin command in build_vm_def.sh
 
 # In VS2005 or VS2008 the link command creates a .manifest file that we want
 # to insert into the linked artifact so we do not need to track it separately.
 # Use ";#2" for .dll and ";#1" for .exe in the MT command below:
-$(SAWINDBG): $(SASRCFILE)
+$(SAWINDBG): $(SASRCFILES)
 	set INCLUDE=$(SA_INCLUDE)$(INCLUDE)
 	$(CXX) @<<
 	  -I"$(BootStrapDir)/include" -I"$(BootStrapDir)/include/win32" 
 	  -I"$(GENERATED)" $(SA_CFLAGS)
-	  $(SASRCFILE)
+	  $(SASRCFILES)
 	  -out:$*.obj
 <<
 	set LIB=$(SA_LIB)$(LIB)
-	$(LD) -out:$@ -DLL $*.obj dbgeng.lib $(SA_LFLAGS)
+	$(LD) -out:$@ -DLL sawindbg.obj sadis.obj dbgeng.lib $(SA_LFLAGS)
 !if "$(MT)" != ""
 	$(MT) -manifest $(@F).manifest -outputresource:$(@F);#2
 !endif
--- a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -37,6 +37,9 @@
 #include "runtime/vframeArray.hpp"
 #include "utilities/macros.hpp"
 #include "vmreg_sparc.inline.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
+#endif
 
 // Implementation of StubAssembler
 
@@ -912,7 +915,7 @@
         Register tmp2 = G3_scratch;
         jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;
 
-        Label not_already_dirty, restart, refill;
+        Label not_already_dirty, restart, refill, young_card;
 
 #ifdef _LP64
         __ srlx(addr, CardTableModRefBS::card_shift, addr);
@@ -924,9 +927,15 @@
         __ set(rs, cardtable);         // cardtable := <card table base>
         __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
 
+        __ cmp_and_br_short(tmp, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
+
+        __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
+        __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
+
         assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
         __ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);
 
+        __ bind(young_card);
         // We didn't take the branch, so we're already dirty: return.
         // Use return-from-leaf
         __ retl();
--- a/src/cpu/sparc/vm/frame_sparc.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/cpu/sparc/vm/frame_sparc.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -764,7 +764,7 @@
 #ifdef CC_INTERP
         *oop_result = istate->_oop_temp;
 #else
-        oop obj = (oop) at(interpreter_frame_oop_temp_offset);
+        oop obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
         assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
         *oop_result = obj;
 #endif // CC_INTERP
@@ -788,7 +788,7 @@
     switch(type) {
       case T_OBJECT:
       case T_ARRAY: {
-        oop obj = (oop)*tos_addr;
+        oop obj = cast_to_oop(*tos_addr);
         assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
         *oop_result = obj;
         break;
--- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -3752,7 +3752,7 @@
 #define __ masm.
   address start = __ pc();
 
-  Label not_already_dirty, restart, refill;
+  Label not_already_dirty, restart, refill, young_card;
 
 #ifdef _LP64
   __ srlx(O0, CardTableModRefBS::card_shift, O0);
@@ -3763,9 +3763,15 @@
   __ set(addrlit, O1); // O1 := <card table base>
   __ ldub(O0, O1, O2); // O2 := [O0 + O1]
 
+  __ cmp_and_br_short(O2, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
+
+  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
+  __ ldub(O0, O1, O2); // O2 := [O0 + O1]
+
   assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
   __ cmp_and_br_short(O2, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);
 
+  __ bind(young_card);
   // We didn't take the branch, so we're already dirty: return.
   // Use return-from-leaf
   __ retl();
--- a/src/cpu/sparc/vm/methodHandles_sparc.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/cpu/sparc/vm/methodHandles_sparc.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -121,6 +121,7 @@
 
 void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register target, Register temp,
                                             bool for_compiler_entry) {
+  Label L_no_such_method;
   assert(method == G5_method, "interpreter calling convention");
   assert_different_registers(method, target, temp);
 
@@ -133,6 +134,9 @@
     const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
     __ ld(interp_only, temp);
     __ cmp_and_br_short(temp, 0, Assembler::zero, Assembler::pt, run_compiled_code);
+    // Null method test is replicated below in the compiled case;
+    // it might be possible to share it across the verify_thread() call.
+    __ br_null_short(G5_method, Assembler::pn, L_no_such_method);
     __ ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), target);
     __ jmp(target, 0);
     __ delayed()->nop();
@@ -141,11 +145,19 @@
     // it doesn't matter, since this is interpreter code.
   }
 
+  // Compiled case, either static or fall-through from runtime conditional
+  __ br_null_short(G5_method, Assembler::pn, L_no_such_method);
+
   const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
                                                      Method::from_interpreted_offset();
   __ ld_ptr(G5_method, in_bytes(entry_offset), target);
   __ jmp(target, 0);
   __ delayed()->nop();
+
+  __ bind(L_no_such_method);
+  AddressLiteral ame(StubRoutines::throw_AbstractMethodError_entry());
+  __ jump_to(ame, temp);
+  __ delayed()->nop();
 }
 
 void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
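
Both method-handle changes (here and in methodHandles_x86.cpp below) enforce the same contract: a null Method* at the invocation point must surface as AbstractMethodError rather than a jump through a null entry. A Java-level rendering of that contract (illustrative only):

    // Illustrative: what the stub-level null check amounts to when linkage
    // produced no concrete target method.
    class LinkageSketch {
        static void invokeOrThrow(Runnable target) {
            if (target == null) {
                throw new AbstractMethodError();  // instead of crashing on a null entry
            }
            target.run();
        }
    }
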
--- a/src/cpu/sparc/vm/nativeInst_sparc.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/cpu/sparc/vm/nativeInst_sparc.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -358,7 +358,7 @@
         oop_Relocation *r = iter.oop_reloc();
         if (oop_addr == NULL) {
           oop_addr = r->oop_addr();
-          *oop_addr = (oop)x;
+          *oop_addr = cast_to_oop(x);
         } else {
           assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
         }
@@ -478,7 +478,7 @@
         oop_Relocation *r = iter.oop_reloc();
         if (oop_addr == NULL) {
           oop_addr = r->oop_addr();
-          *oop_addr = (oop)x;
+          *oop_addr = cast_to_oop(x);
         } else {
           assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
         }
--- a/src/cpu/sparc/vm/sparc.ad	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/cpu/sparc/vm/sparc.ad	Mon Oct 21 14:08:09 2013 +0100
@@ -2018,6 +2018,15 @@
   return L7_REGP_mask();
 }
 
+const RegMask Matcher::mathExactI_result_proj_mask() {
+  return G1_REGI_mask();
+}
+
+const RegMask Matcher::mathExactI_flags_proj_mask() {
+  return INT_FLAGS_mask();
+}
+
+
 %}
 
 
@@ -4245,12 +4254,16 @@
     greater_equal(0xB);
     less_equal(0x2);
     greater(0xA);
+    overflow(0x7);
+    no_overflow(0xF);
   %}
 %}
 
 // Comparison Op, unsigned
 operand cmpOpU() %{
   match(Bool);
+  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
+            n->as_Bool()->_test._test != BoolTest::no_overflow);
 
   format %{ "u" %}
   interface(COND_INTER) %{
@@ -4260,12 +4273,16 @@
     greater_equal(0xD);
     less_equal(0x4);
     greater(0xC);
+    overflow(0x7);
+    no_overflow(0xF);
   %}
 %}
 
 // Comparison Op, pointer (same as unsigned)
 operand cmpOpP() %{
   match(Bool);
+  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
+            n->as_Bool()->_test._test != BoolTest::no_overflow);
 
   format %{ "p" %}
   interface(COND_INTER) %{
@@ -4275,12 +4292,16 @@
     greater_equal(0xD);
     less_equal(0x4);
     greater(0xC);
+    overflow(0x7);
+    no_overflow(0xF);
   %}
 %}
 
 // Comparison Op, branch-register encoding
 operand cmpOp_reg() %{
   match(Bool);
+  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
+            n->as_Bool()->_test._test != BoolTest::no_overflow);
 
   format %{ "" %}
   interface(COND_INTER) %{
@@ -4290,12 +4311,16 @@
     greater_equal(0x7);
     less_equal   (0x2);
     greater      (0x6);
+    overflow(0x7); // not supported
+    no_overflow(0xF); // not supported
   %}
 %}
 
 // Comparison Code, floating, unordered same as less
 operand cmpOpF() %{
   match(Bool);
+  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
+            n->as_Bool()->_test._test != BoolTest::no_overflow);
 
   format %{ "fl" %}
   interface(COND_INTER) %{
@@ -4305,12 +4330,17 @@
     greater_equal(0xB);
     less_equal(0xE);
     greater(0x6);
+
+    overflow(0x7); // not supported
+    no_overflow(0xF); // not supported
   %}
 %}
 
 // Used by long compare
 operand cmpOp_commute() %{
   match(Bool);
+  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
+            n->as_Bool()->_test._test != BoolTest::no_overflow);
 
   format %{ "" %}
   interface(COND_INTER) %{
@@ -4320,6 +4350,8 @@
     greater_equal(0x2);
     less_equal(0xB);
     greater(0x3);
+    overflow(0x7);
+    no_overflow(0xF);
   %}
 %}
 
--- a/src/cpu/x86/vm/assembler_x86.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/cpu/x86/vm/assembler_x86.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -4769,7 +4769,7 @@
 }
 
 void Assembler::adcq(Register dst, Register src) {
-  (int) prefixq_and_encode(dst->encoding(), src->encoding());
+  (void) prefixq_and_encode(dst->encoding(), src->encoding());
   emit_arith(0x13, 0xC0, dst, src);
 }
 
@@ -4824,7 +4824,7 @@
 }
 
 void Assembler::andq(Register dst, Register src) {
-  (int) prefixq_and_encode(dst->encoding(), src->encoding());
+  (void) prefixq_and_encode(dst->encoding(), src->encoding());
   emit_arith(0x23, 0xC0, dst, src);
 }
 
--- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -38,6 +38,9 @@
 #include "runtime/vframeArray.hpp"
 #include "utilities/macros.hpp"
 #include "vmreg_x86.inline.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
+#endif
 
 
 // Implementation of StubAssembler
@@ -1753,13 +1756,17 @@
         __ leal(card_addr, __ as_Address(ArrayAddress(cardtable, index)));
 #endif
 
-        __ cmpb(Address(card_addr, 0), 0);
+        __ cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
+        __ jcc(Assembler::equal, done);
+
+        __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
+        __ cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
         __ jcc(Assembler::equal, done);
 
         // storing region crossing non-NULL, card is clean.
         // dirty card and log.
 
-        __ movb(Address(card_addr, 0), 0);
+        __ movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
 
         __ cmpl(queue_index, 0);
         __ jcc(Assembler::equal, runtime);
--- a/src/cpu/x86/vm/frame_x86.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/cpu/x86/vm/frame_x86.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -639,7 +639,7 @@
 #ifdef CC_INTERP
         obj = istate->_oop_temp;
 #else
-        obj = (oop) at(interpreter_frame_oop_temp_offset);
+        obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
 #endif // CC_INTERP
       } else {
         oop* obj_p = (oop*)tos_addr;
--- a/src/cpu/x86/vm/macroAssembler_x86.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/cpu/x86/vm/macroAssembler_x86.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -3389,13 +3389,18 @@
   const Register card_addr = tmp;
   lea(card_addr, as_Address(ArrayAddress(cardtable, index)));
 #endif
-  cmpb(Address(card_addr, 0), 0);
+  cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
   jcc(Assembler::equal, done);
 
+  membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
+  cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
+  jcc(Assembler::equal, done);
+
+
   // storing a region crossing, non-NULL oop, card is clean.
   // dirty card and log.
 
-  movb(Address(card_addr, 0), 0);
+  movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
 
   cmpl(queue_index, 0);
   jcc(Assembler::equal, runtime);
--- a/src/cpu/x86/vm/methodHandles_x86.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/cpu/x86/vm/methodHandles_x86.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -114,6 +114,11 @@
 void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp,
                                             bool for_compiler_entry) {
   assert(method == rbx, "interpreter calling convention");
+
+  Label L_no_such_method;
+  __ testptr(rbx, rbx);
+  __ jcc(Assembler::zero, L_no_such_method);
+
   __ verify_method_ptr(method);
 
   if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) {
@@ -138,6 +143,9 @@
   const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
                                                      Method::from_interpreted_offset();
   __ jmp(Address(method, entry_offset));
+
+  __ bind(L_no_such_method);
+  __ jump(RuntimeAddress(StubRoutines::throw_AbstractMethodError_entry()));
 }
 
 void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
@@ -475,7 +483,7 @@
   const char* mh_reg_name = has_mh ? "rcx_mh" : "rcx";
   tty->print_cr("MH %s %s="PTR_FORMAT" sp="PTR_FORMAT,
                 adaptername, mh_reg_name,
-                mh, entry_sp);
+                (void *)mh, entry_sp);
 
   if (Verbose) {
     tty->print_cr("Registers:");
--- a/src/cpu/x86/vm/x86_32.ad	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/cpu/x86/vm/x86_32.ad	Mon Oct 21 14:08:09 2013 +0100
@@ -351,7 +351,7 @@
         int format) {
 #ifdef ASSERT
   if (rspec.reloc()->type() == relocInfo::oop_type && d32 != 0 && d32 != (int)Universe::non_oop_word()) {
-    assert(oop(d32)->is_oop() && (ScavengeRootsInCode || !oop(d32)->is_scavengable()), "cannot embed scavengable oops in code");
+    assert(cast_to_oop(d32)->is_oop() && (ScavengeRootsInCode || !cast_to_oop(d32)->is_scavengable()), "cannot embed scavengable oops in code");
   }
 #endif
   cbuf.relocate(cbuf.insts_mark(), rspec, format);
@@ -1534,6 +1534,14 @@
   return EBP_REG_mask();
 }
 
+const RegMask Matcher::mathExactI_result_proj_mask() {
+  return EAX_REG_mask();
+}
+
+const RegMask Matcher::mathExactI_flags_proj_mask() {
+  return INT_FLAGS_mask();
+}
+
 // Returns true if the high 32 bits of the value is known to be zero.
 bool is_operand_hi32_zero(Node* n) {
   int opc = n->Opcode();
@@ -4922,6 +4930,8 @@
     greater_equal(0xD, "ge");
     less_equal(0xE, "le");
     greater(0xF, "g");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
@@ -4939,6 +4949,8 @@
     greater_equal(0x3, "nb");
     less_equal(0x6, "be");
     greater(0x7, "nbe");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
@@ -4957,6 +4969,8 @@
     greater_equal(0x3, "nb");
     less_equal(0x6, "be");
     greater(0x7, "nbe");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
@@ -4974,6 +4988,8 @@
     greater_equal(0x3, "nb");
     less_equal(0x6, "be");
     greater(0x7, "nbe");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
@@ -4981,6 +4997,8 @@
 operand cmpOp_fcmov() %{
   match(Bool);
 
+  predicate(n->as_Bool()->_test._test != BoolTest::overflow &&
+            n->as_Bool()->_test._test != BoolTest::no_overflow);
   format %{ "" %}
   interface(COND_INTER) %{
     equal        (0x0C8);
@@ -4989,6 +5007,8 @@
     greater_equal(0x1C0);
     less_equal   (0x0D0);
     greater      (0x1D0);
+    overflow(0x0, "o"); // not really supported by the instruction
+    no_overflow(0x1, "no"); // not really supported by the instruction
   %}
 %}
 
@@ -5004,6 +5024,8 @@
     greater_equal(0xE, "le");
     less_equal(0xD, "ge");
     greater(0xC, "l");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
@@ -7496,6 +7518,31 @@
 
 //----------Arithmetic Instructions--------------------------------------------
 //----------Addition Instructions----------------------------------------------
+
+instruct addExactI_rReg(eAXRegI dst, rRegI src, eFlagsReg cr)
+%{
+  match(AddExactI dst src);
+  effect(DEF cr);
+
+  format %{ "ADD    $dst, $src\t# addExact int" %}
+  ins_encode %{
+    __ addl($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct addExactI_rReg_imm(eAXRegI dst, immI src, eFlagsReg cr)
+%{
+  match(AddExactI dst src);
+  effect(DEF cr);
+
+  format %{ "ADD    $dst, $src\t# addExact int" %}
+  ins_encode %{
+    __ addl($dst$$Register, $src$$constant);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
 // Integer Addition Instructions
 instruct addI_eReg(rRegI dst, rRegI src, eFlagsReg cr) %{
   match(Set dst (AddI dst src));
--- a/src/cpu/x86/vm/x86_64.ad	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/cpu/x86/vm/x86_64.ad	Mon Oct 21 14:08:09 2013 +0100
@@ -529,7 +529,7 @@
   if (rspec.reloc()->type() == relocInfo::oop_type &&
       d32 != 0 && d32 != (intptr_t) Universe::non_oop_word()) {
     assert(Universe::heap()->is_in_reserved((address)(intptr_t)d32), "should be real oop");
-    assert(oop((intptr_t)d32)->is_oop() && (ScavengeRootsInCode || !oop((intptr_t)d32)->is_scavengable()), "cannot embed scavengable oops in code");
+    assert(cast_to_oop((intptr_t)d32)->is_oop() && (ScavengeRootsInCode || !cast_to_oop((intptr_t)d32)->is_scavengable()), "cannot embed scavengable oops in code");
   }
 #endif
   cbuf.relocate(cbuf.insts_mark(), rspec, format);
@@ -556,7 +556,7 @@
   if (rspec.reloc()->type() == relocInfo::oop_type &&
       d64 != 0 && d64 != (int64_t) Universe::non_oop_word()) {
     assert(Universe::heap()->is_in_reserved((address)d64), "should be real oop");
-    assert(oop(d64)->is_oop() && (ScavengeRootsInCode || !oop(d64)->is_scavengable()),
+    assert(cast_to_oop(d64)->is_oop() && (ScavengeRootsInCode || !cast_to_oop(d64)->is_scavengable()),
            "cannot embed scavengable oops in code");
   }
 #endif
@@ -1649,6 +1649,14 @@
   return PTR_RBP_REG_mask();
 }
 
+const RegMask Matcher::mathExactI_result_proj_mask() {
+  return INT_RAX_REG_mask();
+}
+
+const RegMask Matcher::mathExactI_flags_proj_mask() {
+  return INT_FLAGS_mask();
+}
+
 %}
 
 //----------ENCODING BLOCK-----------------------------------------------------
@@ -4133,6 +4141,8 @@
     greater_equal(0xD, "ge");
     less_equal(0xE, "le");
     greater(0xF, "g");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
@@ -4151,6 +4161,8 @@
     greater_equal(0x3, "nb");
     less_equal(0x6, "be");
     greater(0x7, "nbe");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
@@ -4170,6 +4182,8 @@
     greater_equal(0x3, "nb");
     less_equal(0x6, "be");
     greater(0x7, "nbe");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
@@ -4187,6 +4201,8 @@
     greater_equal(0x3, "nb");
     less_equal(0x6, "be");
     greater(0x7, "nbe");
+    overflow(0x0, "o");
+    no_overflow(0x1, "no");
   %}
 %}
 
@@ -6922,6 +6938,30 @@
 //----------Arithmetic Instructions--------------------------------------------
 //----------Addition Instructions----------------------------------------------
 
+instruct addExactI_rReg(rax_RegI dst, rRegI src, rFlagsReg cr)
+%{
+  match(AddExactI dst src);
+  effect(DEF cr);
+
+  format %{ "addl    $dst, $src\t# addExact int" %}
+  ins_encode %{
+    __ addl($dst$$Register, $src$$Register);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
+instruct addExactI_rReg_imm(rax_RegI dst, immI src, rFlagsReg cr)
+%{
+  match(AddExactI dst src);
+  effect(DEF cr);
+
+  format %{ "addl    $dst, $src\t# addExact int" %}
+  ins_encode %{
+    __ addl($dst$$Register, $src$$constant);
+  %}
+  ins_pipe(ialu_reg_reg);
+%}
+
 instruct addI_rReg(rRegI dst, rRegI src, rFlagsReg cr)
 %{
   match(Set dst (AddI dst src));
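
The AddExactI rules and the new overflow/no_overflow condition codes on both x86 ports let the JIT compile overflow-checked addition down to a plain ADD followed by a jump-on-overflow, instead of an explicit sign test. The Java-level operation being matched has the shape of Math.addExact(int, int), whose portable check looks like this:

    class AddExactSketch {
        // What an AddExactI node computes: an add whose overflow condition
        // is observable. In pure Java the check is the usual sign trick;
        // the matched instruction replaces it with a branch on the O flag.
        static int addExact(int a, int b) {
            int r = a + b;
            // Overflow iff both operands have a sign different from the result
            if (((a ^ r) & (b ^ r)) < 0) {
                throw new ArithmeticException("integer overflow");
            }
            return r;
        }
    }
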
--- a/src/os/bsd/vm/osThread_bsd.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/os/bsd/vm/osThread_bsd.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -42,7 +42,7 @@
 #ifdef __APPLE__
   typedef thread_t thread_id_t;
 #else
-  typedef pthread_t thread_id_t;
+  typedef pid_t thread_id_t;
 #endif
 
   // _pthread_id is the pthread id, which is used by library calls
--- a/src/os/bsd/vm/os_bsd.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/os/bsd/vm/os_bsd.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -100,6 +100,7 @@
 # include <stdint.h>
 # include <inttypes.h>
 # include <sys/ioctl.h>
+# include <sys/syscall.h>
 
 #if defined(__FreeBSD__) || defined(__NetBSD__)
 # include <elf.h>
@@ -152,6 +153,7 @@
 // utility functions
 
 static int SR_initialize();
+static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
 
 julong os::available_memory() {
   return Bsd::available_memory();
@@ -247,7 +249,17 @@
    * since it returns a 64 bit value)
    */
   mib[0] = CTL_HW;
+
+#if defined (HW_MEMSIZE) // Apple
   mib[1] = HW_MEMSIZE;
+#elif defined(HW_PHYSMEM) // Most of BSD
+  mib[1] = HW_PHYSMEM;
+#elif defined(HW_REALMEM) // Old FreeBSD
+  mib[1] = HW_REALMEM;
+#else
+  #error No way to get physmem
+#endif
+
   len = sizeof(mem_val);
   if (sysctl(mib, 2, &mem_val, &len, NULL, 0) != -1) {
        assert(len == sizeof(mem_val), "unexpected data size");
@@ -679,18 +691,12 @@
     return NULL;
   }
 
+  osthread->set_thread_id(os::Bsd::gettid());
+
 #ifdef __APPLE__
-  // thread_id is mach thread on macos, which pthreads graciously caches and provides for us
-  mach_port_t thread_id = ::pthread_mach_thread_np(::pthread_self());
-  guarantee(thread_id != 0, "thread id missing from pthreads");
-  osthread->set_thread_id(thread_id);
-
-  uint64_t unique_thread_id = locate_unique_thread_id(thread_id);
+  uint64_t unique_thread_id = locate_unique_thread_id(osthread->thread_id());
   guarantee(unique_thread_id != 0, "unique thread id was not found");
   osthread->set_unique_thread_id(unique_thread_id);
-#else
-  // thread_id is pthread_id on BSD
-  osthread->set_thread_id(::pthread_self());
 #endif
   // initialize signal mask for this thread
   os::Bsd::hotspot_sigmask(thread);
@@ -847,18 +853,13 @@
     return false;
   }
 
+  osthread->set_thread_id(os::Bsd::gettid());
+
   // Store pthread info into the OSThread
 #ifdef __APPLE__
-  // thread_id is mach thread on macos, which pthreads graciously caches and provides for us
-  mach_port_t thread_id = ::pthread_mach_thread_np(::pthread_self());
-  guarantee(thread_id != 0, "just checking");
-  osthread->set_thread_id(thread_id);
-
-  uint64_t unique_thread_id = locate_unique_thread_id(thread_id);
+  uint64_t unique_thread_id = locate_unique_thread_id(osthread->thread_id());
   guarantee(unique_thread_id != 0, "just checking");
   osthread->set_unique_thread_id(unique_thread_id);
-#else
-  osthread->set_thread_id(::pthread_self());
 #endif
   osthread->set_pthread_id(::pthread_self());
 
@@ -1125,6 +1126,30 @@
   return n;
 }
 
+// Information about the current thread in a variety of formats
+pid_t os::Bsd::gettid() {
+  int retval = -1;
+
+#ifdef __APPLE__ // XNU kernel
+  // Despite the fact that a mach port is not really a thread id, use it
+  // instead of syscall(SYS_thread_selfid) as it certainly fits in a u4.
+  retval = ::pthread_mach_thread_np(::pthread_self());
+  guarantee(retval != 0, "just checking");
+  return retval;
+
+#elif __FreeBSD__
+  retval = syscall(SYS_thr_self);
+#elif __OpenBSD__
+  retval = syscall(SYS_getthrid);
+#elif __NetBSD__
+  retval = (pid_t) syscall(SYS__lwp_self);
+#endif
+
+  if (retval == -1) {
+    return getpid();
+  }
+  return retval;
+}
+
 intx os::current_thread_id() {
 #ifdef __APPLE__
   return (intx)::pthread_mach_thread_np(::pthread_self());
@@ -1132,6 +1157,7 @@
   return (intx)::pthread_self();
 #endif
 }
+
 int os::current_process_id() {
 
   // Under the old bsd thread library, bsd gives each thread
@@ -1904,7 +1930,7 @@
     bool timedwait(unsigned int sec, int nsec);
   private:
     jlong currenttime() const;
-    semaphore_t _semaphore;
+    os_semaphore_t _semaphore;
 };
 
 Semaphore::Semaphore() : _semaphore(0) {
@@ -1972,7 +1998,7 @@
 
 bool Semaphore::timedwait(unsigned int sec, int nsec) {
   struct timespec ts;
-  jlong endtime = unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
+  unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
 
   while (1) {
     int result = sem_timedwait(&_semaphore, &ts);
--- a/src/os/bsd/vm/os_bsd.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/os/bsd/vm/os_bsd.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -84,6 +84,7 @@
   static void hotspot_sigmask(Thread* thread);
 
   static bool is_initial_thread(void);
+  static pid_t gettid();
 
   static int page_size(void)                                        { return _page_size; }
   static void set_page_size(int val)                                { _page_size = val; }
--- a/src/os/linux/vm/globals_linux.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/os/linux/vm/globals_linux.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -53,7 +53,7 @@
 // Defines Linux-specific default values. The flags are available on all
 // platforms, but they may have different default values on other platforms.
 //
-define_pd_global(bool, UseLargePages, true);
+define_pd_global(bool, UseLargePages, false);
 define_pd_global(bool, UseLargePagesIndividualAllocation, false);
 define_pd_global(bool, UseOSErrorReporting, false);
 define_pd_global(bool, UseThreadPriorities, true) ;
--- a/src/os/linux/vm/os_linux.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/os/linux/vm/os_linux.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -3361,13 +3361,15 @@
   if (FLAG_IS_DEFAULT(UseHugeTLBFS) &&
       FLAG_IS_DEFAULT(UseSHM) &&
       FLAG_IS_DEFAULT(UseTransparentHugePages)) {
-    // If UseLargePages is specified on the command line try all methods,
-    // if it's default, then try only UseTransparentHugePages.
-    if (FLAG_IS_DEFAULT(UseLargePages)) {
-      UseTransparentHugePages = true;
-    } else {
-      UseHugeTLBFS = UseTransparentHugePages = UseSHM = true;
-    }
+
+    // The type of large pages has not been specified by the user.
+
+    // Try UseHugeTLBFS and then UseSHM.
+    UseHugeTLBFS = UseSHM = true;
+
+    // Don't try UseTransparentHugePages since there are known
+    // performance issues with it turned on. This might change in the future.
+    UseTransparentHugePages = false;
   }
 
   if (UseTransparentHugePages) {
@@ -3393,9 +3395,19 @@
 }
 
 void os::large_page_init() {
-  if (!UseLargePages) {
+  if (!UseLargePages &&
+      !UseTransparentHugePages &&
+      !UseHugeTLBFS &&
+      !UseSHM) {
+    // Not using large pages.
+    return;
+  }
+
+  if (!FLAG_IS_DEFAULT(UseLargePages) && !UseLargePages) {
+    // The user explicitly turned off large pages.
+    // Ignore the rest of the large pages flags.
+    UseTransparentHugePages = false;
     UseHugeTLBFS = false;
-    UseTransparentHugePages = false;
     UseSHM = false;
     return;
   }
@@ -4839,6 +4851,10 @@
 
   Linux::capture_initial_stack(JavaThread::stack_size_at_create());
 
+#if defined(IA32)
+  workaround_expand_exec_shield_cs_limit();
+#endif
+
   Linux::libpthread_init();
   if (PrintMiscellaneous && (Verbose || WizardMode)) {
      tty->print_cr("[HotSpot is running with %s, %s(%s)]\n",
--- a/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/os_cpu/linux_x86/vm/os_linux_x86.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -876,3 +876,46 @@
 #endif
 }
 #endif
+
+
+/*
+ * IA32 only: execute code at a high address in case buggy NX emulation is present, i.e. to avoid CS limit
+ * updates (JDK-8023956).
+ */
+void os::workaround_expand_exec_shield_cs_limit() {
+#if defined(IA32)
+  size_t page_size = os::vm_page_size();
+  /*
+   * Take the highest VA the OS will give us and exec
+   *
+   * Although using -(pagesz) as the mmap hint works on newer kernels as you would
+   * expect, older variants affected by this work-around don't (they search forward only).
+   *
+   * On the affected distributions, we understand the memory layout to be:
+   *
+   *   TASK_LIMIT = 3G, main stack base close to TASK_LIMIT.
+   *
+   * A few pages south of the main stack will do it.
+   *
+   * If we are embedded in an app other than the launcher (initial != main stack),
+   * we don't have much control over or understanding of the address space; just let it slide.
+   */
+  char* hint = (char*) (Linux::initial_thread_stack_bottom() -
+                        ((StackYellowPages + StackRedPages + 1) * page_size));
+  char* codebuf = os::reserve_memory(page_size, hint);
+  if ( (codebuf == NULL) || (!os::commit_memory(codebuf, page_size, true)) ) {
+    return; // No matter, we tried, best effort.
+  }
+  if (PrintMiscellaneous && (Verbose || WizardMode)) {
+     tty->print_cr("[CS limit NX emulation work-around, exec code at: %p]", codebuf);
+  }
+
+  // Some code to exec: the 'ret' instruction
+  codebuf[0] = 0xC3;
+
+  // Call the code in the codebuf
+  __asm__ volatile("call *%0" : : "r"(codebuf));
+
+  // keep the page mapped so CS limit isn't reduced.
+#endif
+}
--- a/src/os_cpu/linux_x86/vm/os_linux_x86.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/os_cpu/linux_x86/vm/os_linux_x86.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -36,4 +36,17 @@
   // Note: Currently only used in 64 bit Windows implementations
   static bool register_code_area(char *low, char *high) { return true; }
 
+  /*
+   * Work-around for broken NX emulation using CS limit, Red Hat patch "Exec-Shield"
+   * (IA32 only).
+   *
+   * Map and execute at a high VA to prevent a CS lazy-update race with SMP MM
+   * invalidation. Further code generation by the JVM will no longer cause CS limit
+   * updates.
+   *
+   * Affects IA32: RHEL 5 & 6, Ubuntu 10.04 (LTS), 10.10, 11.04, 11.10, 12.04.
+   * @see JDK-8023956
+   */
+  static void workaround_expand_exec_shield_cs_limit();
+
 #endif // OS_CPU_LINUX_X86_VM_OS_LINUX_X86_HPP
--- a/src/share/vm/adlc/adlparse.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/adlc/adlparse.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -3395,12 +3395,16 @@
   char *greater_equal;
   char *less_equal;
   char *greater;
+  char *overflow;
+  char *no_overflow;
   const char *equal_format = "eq";
   const char *not_equal_format = "ne";
   const char *less_format = "lt";
   const char *greater_equal_format = "ge";
   const char *less_equal_format = "le";
   const char *greater_format = "gt";
+  const char *overflow_format = "o";
+  const char *no_overflow_format = "no";
 
   if (_curchar != '%') {
     parse_err(SYNERR, "Missing '%%{' for 'cond_interface' block.\n");
@@ -3437,6 +3441,12 @@
     else if ( strcmp(field,"greater") == 0 ) {
       greater = interface_field_parse(&greater_format);
     }
+    else if ( strcmp(field,"overflow") == 0 ) {
+      overflow = interface_field_parse(&overflow_format);
+    }
+    else if ( strcmp(field,"no_overflow") == 0 ) {
+      no_overflow = interface_field_parse(&no_overflow_format);
+    }
     else {
       parse_err(SYNERR, "Expected keyword, base|index|scale|disp,  or '%%}' ending interface.\n");
       return NULL;
@@ -3455,7 +3465,9 @@
                                        less,          less_format,
                                        greater_equal, greater_equal_format,
                                        less_equal,    less_equal_format,
-                                       greater,       greater_format);
+                                       greater,       greater_format,
+                                       overflow,      overflow_format,
+                                       no_overflow,   no_overflow_format);
   return inter;
 }
 
--- a/src/share/vm/adlc/archDesc.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/adlc/archDesc.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -1192,6 +1192,8 @@
          || strcmp(idealName,"CmpF") == 0
          || strcmp(idealName,"FastLock") == 0
          || strcmp(idealName,"FastUnlock") == 0
+         || strcmp(idealName,"AddExactI") == 0
+         || strcmp(idealName,"FlagsProj") == 0
          || strcmp(idealName,"Bool") == 0
          || strcmp(idealName,"Binary") == 0 ) {
       // Removed ConI from the must_clone list.  CPUs that cannot use
--- a/src/share/vm/adlc/formssel.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/adlc/formssel.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -2757,14 +2757,18 @@
                              const char* less,          const char* less_format,
                              const char* greater_equal, const char* greater_equal_format,
                              const char* less_equal,    const char* less_equal_format,
-                             const char* greater,       const char* greater_format)
+                             const char* greater,       const char* greater_format,
+                             const char* overflow,      const char* overflow_format,
+                             const char* no_overflow,   const char* no_overflow_format)
   : Interface("COND_INTER"),
     _equal(equal),                 _equal_format(equal_format),
     _not_equal(not_equal),         _not_equal_format(not_equal_format),
     _less(less),                   _less_format(less_format),
     _greater_equal(greater_equal), _greater_equal_format(greater_equal_format),
     _less_equal(less_equal),       _less_equal_format(less_equal_format),
-    _greater(greater),             _greater_format(greater_format) {
+    _greater(greater),             _greater_format(greater_format),
+    _overflow(overflow),           _overflow_format(overflow_format),
+    _no_overflow(no_overflow),     _no_overflow_format(no_overflow_format) {
 }
 CondInterface::~CondInterface() {
   // not owner of any character arrays
@@ -2777,12 +2781,14 @@
 // Write info to output files
 void CondInterface::output(FILE *fp) {
   Interface::output(fp);
-  if ( _equal  != NULL )     fprintf(fp," equal       == %s\n", _equal);
-  if ( _not_equal  != NULL ) fprintf(fp," not_equal   == %s\n", _not_equal);
-  if ( _less  != NULL )      fprintf(fp," less        == %s\n", _less);
-  if ( _greater_equal  != NULL ) fprintf(fp," greater_equal   == %s\n", _greater_equal);
-  if ( _less_equal  != NULL ) fprintf(fp," less_equal  == %s\n", _less_equal);
-  if ( _greater  != NULL )    fprintf(fp," greater     == %s\n", _greater);
+  if ( _equal  != NULL )     fprintf(fp," equal        == %s\n", _equal);
+  if ( _not_equal  != NULL ) fprintf(fp," not_equal    == %s\n", _not_equal);
+  if ( _less  != NULL )      fprintf(fp," less         == %s\n", _less);
+  if ( _greater_equal  != NULL ) fprintf(fp," greater_equal    == %s\n", _greater_equal);
+  if ( _less_equal  != NULL ) fprintf(fp," less_equal   == %s\n", _less_equal);
+  if ( _greater  != NULL )    fprintf(fp," greater      == %s\n", _greater);
+  if ( _overflow != NULL )    fprintf(fp," overflow     == %s\n", _overflow);
+  if ( _no_overflow != NULL ) fprintf(fp," no_overflow  == %s\n", _no_overflow);
   // fprintf(fp,"\n");
 }
 
--- a/src/share/vm/adlc/formssel.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/adlc/formssel.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -798,12 +798,16 @@
   const char *_greater_equal;
   const char *_less_equal;
   const char *_greater;
+  const char *_overflow;
+  const char *_no_overflow;
   const char *_equal_format;
   const char *_not_equal_format;
   const char *_less_format;
   const char *_greater_equal_format;
   const char *_less_equal_format;
   const char *_greater_format;
+  const char *_overflow_format;
+  const char *_no_overflow_format;
 
   // Public Methods
   CondInterface(const char* equal,         const char* equal_format,
@@ -811,7 +815,9 @@
                 const char* less,          const char* less_format,
                 const char* greater_equal, const char* greater_equal_format,
                 const char* less_equal,    const char* less_equal_format,
-                const char* greater,       const char* greater_format);
+                const char* greater,       const char* greater_format,
+                const char* overflow,      const char* overflow_format,
+                const char* no_overflow,   const char* no_overflow_format);
   ~CondInterface();
 
   void dump();
--- a/src/share/vm/adlc/output_h.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/adlc/output_h.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -388,6 +388,8 @@
   fprintf(fp, "  else if( _c%d == BoolTest::ge ) st->print(\"%s\");\n",i,cond->_greater_equal_format);
   fprintf(fp, "  else if( _c%d == BoolTest::lt ) st->print(\"%s\");\n",i,cond->_less_format);
   fprintf(fp, "  else if( _c%d == BoolTest::gt ) st->print(\"%s\");\n",i,cond->_greater_format);
+  fprintf(fp, "  else if( _c%d == BoolTest::overflow ) st->print(\"%s\");\n",i,cond->_overflow_format);
+  fprintf(fp, "  else if( _c%d == BoolTest::no_overflow ) st->print(\"%s\");\n",i,cond->_no_overflow_format);
 }
 
 // Output code that dumps constant values, increment "i" if type is constant
@@ -1208,6 +1210,8 @@
       fprintf(fp,"    case  BoolTest::ne : return not_equal();\n");
       fprintf(fp,"    case  BoolTest::le : return less_equal();\n");
       fprintf(fp,"    case  BoolTest::ge : return greater_equal();\n");
+      fprintf(fp,"    case  BoolTest::overflow : return overflow();\n");
+      fprintf(fp,"    case  BoolTest::no_overflow: return no_overflow();\n");
       fprintf(fp,"    default : ShouldNotReachHere(); return 0;\n");
       fprintf(fp,"    }\n");
       fprintf(fp,"  };\n");
@@ -1373,6 +1377,14 @@
         if( greater != NULL ) {
           define_oper_interface(fp, *oper, _globalNames, "greater", greater);
         }
+        const char *overflow = cInterface->_overflow;
+        if( overflow != NULL ) {
+          define_oper_interface(fp, *oper, _globalNames, "overflow", overflow);
+        }
+        const char *no_overflow = cInterface->_no_overflow;
+        if( no_overflow != NULL ) {
+          define_oper_interface(fp, *oper, _globalNames, "no_overflow", no_overflow);
+        }
       } // end Conditional Interface
       // Check if it is a Constant Interface
       else if (oper->_interface->is_ConstInterface() != NULL ) {
--- a/src/share/vm/c1/c1_Runtime1.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/c1/c1_Runtime1.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -1019,7 +1019,7 @@
               n_copy->set_data((intx) (load_klass()));
             } else {
               assert(mirror() != NULL, "klass not set");
-              n_copy->set_data((intx) (mirror()));
+              n_copy->set_data(cast_from_oop<intx>(mirror()));
             }
 
             if (TracePatching) {
@@ -1031,7 +1031,7 @@
           assert(n_copy->data() == 0 ||
                  n_copy->data() == (intptr_t)Universe::non_oop_word(),
                  "illegal init value");
-          n_copy->set_data((intx) (appendix()));
+          n_copy->set_data(cast_from_oop<intx>(appendix()));
 
           if (TracePatching) {
             Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
@@ -1078,14 +1078,17 @@
           // replace instructions
           // first replace the tail, then the call
 #ifdef ARM
-          if(load_klass_or_mirror_patch_id && !VM_Version::supports_movw()) {
+          if ((load_klass_or_mirror_patch_id ||
+              stub_id == Runtime1::load_appendix_patching_id) &&
+             !VM_Version::supports_movw()) {
             nmethod* nm = CodeCache::find_nmethod(instr_pc);
             address addr = NULL;
             assert(nm != NULL, "invalid nmethod_pc");
             RelocIterator mds(nm, copy_buff, copy_buff + 1);
             while (mds.next()) {
               if (mds.type() == relocInfo::oop_type) {
-                assert(stub_id == Runtime1::load_mirror_patching_id, "wrong stub id");
+                assert(stub_id == Runtime1::load_mirror_patching_id ||
+                       stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
                 oop_Relocation* r = mds.oop_reloc();
                 addr = (address)r->oop_addr();
                 break;
--- a/src/share/vm/classfile/classFileParser.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/classfile/classFileParser.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -1787,7 +1787,7 @@
     if (_location != _in_method)  break;  // only allow for methods
     if (!privileged)              break;  // only allow in privileged code
     return _method_LambdaForm_Hidden;
-  case vmSymbols::VM_SYMBOL_ENUM_NAME(sun_invoke_Stable_signature):
+  case vmSymbols::VM_SYMBOL_ENUM_NAME(java_lang_invoke_Stable_signature):
     if (_location != _in_field)   break;  // only allow for fields
     if (!privileged)              break;  // only allow in privileged code
     return _field_Stable;
@@ -2545,7 +2545,9 @@
       if (method->is_final()) {
         *has_final_method = true;
       }
-      if (is_interface && !method->is_abstract() && !method->is_static()) {
+      if (is_interface && !(*has_default_methods)
+        && !method->is_abstract() && !method->is_static()
+        && !method->is_private()) {
         // default method
         *has_default_methods = true;
       }
@@ -4078,8 +4080,7 @@
 
     // Generate any default methods - default methods are interface methods
     // that have a default implementation.  This is new with Lambda project.
-    if (has_default_methods && !access_flags.is_interface() &&
-        local_interfaces->length() > 0) {
+    if (has_default_methods && !access_flags.is_interface()) {
       DefaultMethods::generate_default_methods(
           this_klass(), &all_mirandas, CHECK_(nullHandle));
     }
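
The tightened test above counts an interface method as a default method only when it is concrete, non-static, and non-private, and stops re-testing once the flag is set. In source terms:

    interface Example {
        default void m() { }      // concrete instance method: a default method
        static void s() { }       // static: not a default method
        void a();                 // abstract: not a default method
        // private void p() { }   // private (later source levels): not a default method
    }
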
--- a/src/share/vm/classfile/classLoaderData.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/classfile/classLoaderData.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -261,7 +261,7 @@
                   k,
                   k->external_name(),
                   k->class_loader_data(),
-                  k->class_loader(),
+                  (void *)k->class_loader(),
                   loader_name());
   }
 }
@@ -297,7 +297,7 @@
   if (TraceClassLoaderData) {
     ResourceMark rm;
     tty->print("[ClassLoaderData: unload loader data "PTR_FORMAT, this);
-    tty->print(" for instance "PTR_FORMAT" of %s", class_loader(),
+    tty->print(" for instance "PTR_FORMAT" of %s", (void *)class_loader(),
                loader_name());
     if (is_anonymous()) {
       tty->print(" for anonymous class  "PTR_FORMAT " ", _klasses);
@@ -458,7 +458,7 @@
 void ClassLoaderData::dump(outputStream * const out) {
   ResourceMark rm;
   out->print("ClassLoaderData CLD: "PTR_FORMAT", loader: "PTR_FORMAT", loader_klass: "PTR_FORMAT" %s {",
-      this, class_loader(),
+      this, (void *)class_loader(),
       class_loader() != NULL ? class_loader()->klass() : NULL, loader_name());
   if (claimed()) out->print(" claimed ");
   if (is_unloading()) out->print(" unloading ");
@@ -553,7 +553,7 @@
         ResourceMark rm;
         tty->print("[ClassLoaderData: ");
         tty->print("create class loader data "PTR_FORMAT, cld);
-        tty->print(" for instance "PTR_FORMAT" of %s", cld->class_loader(),
+        tty->print(" for instance "PTR_FORMAT" of %s", (void *)cld->class_loader(),
                    cld->loader_name());
         tty->print_cr("]");
       }
--- a/src/share/vm/classfile/defaultMethods.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/classfile/defaultMethods.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -325,6 +325,7 @@
 
   Method* _selected_target;  // Filled in later, if a unique target exists
   Symbol* _exception_message; // If no unique target is found
+  Symbol* _exception_name;    // If no unique target is found
 
   bool contains_method(Method* method) {
     int* lookup = _member_index.get(method);
@@ -344,13 +345,12 @@
   }
 
   Symbol* generate_no_defaults_message(TRAPS) const;
-  Symbol* generate_abstract_method_message(Method* method, TRAPS) const;
   Symbol* generate_conflicts_message(GrowableArray<Method*>* methods, TRAPS) const;
 
  public:
 
   MethodFamily()
-      : _selected_target(NULL), _exception_message(NULL) {}
+      : _selected_target(NULL), _exception_message(NULL), _exception_name(NULL) {}
 
   void set_target_if_empty(Method* m) {
     if (_selected_target == NULL && !m->is_overpass()) {
@@ -383,6 +383,7 @@
 
   Method* get_selected_target() { return _selected_target; }
   Symbol* get_exception_message() { return _exception_message; }
+  Symbol* get_exception_name() { return _exception_name; }
 
   // Either sets the target or the exception error message
   void determine_target(InstanceKlass* root, TRAPS) {
@@ -400,19 +401,21 @@
 
     if (qualified_methods.length() == 0) {
       _exception_message = generate_no_defaults_message(CHECK);
+      _exception_name = vmSymbols::java_lang_AbstractMethodError();
     } else if (qualified_methods.length() == 1) {
+      // Leave abstract methods alone; they will be found via the normal search path.
       Method* method = qualified_methods.at(0);
-      if (method->is_abstract()) {
-        _exception_message = generate_abstract_method_message(method, CHECK);
-      } else {
+      if (!method->is_abstract()) {
         _selected_target = qualified_methods.at(0);
       }
     } else {
       _exception_message = generate_conflicts_message(&qualified_methods,CHECK);
+      _exception_name = vmSymbols::java_lang_IncompatibleClassChangeError();
+      if (TraceDefaultMethods) {
+        _exception_message->print_value_on(tty);
+        tty->print_cr("");
+      }
     }
-
-    assert((has_target() ^ throws_exception()) == 1,
-           "One and only one must be true");
   }
 
   bool contains_signature(Symbol* query) {
@@ -459,8 +462,9 @@
 
   void print_exception(outputStream* str, int indent) {
     assert(throws_exception(), "Should be called otherwise");
+    assert(_exception_name != NULL, "exception_name should be set");
     streamIndentor si(str, indent * 2);
-    str->indent().print_cr("%s", _exception_message->as_C_string());
+    str->indent().print_cr("%s: %s", _exception_name->as_C_string(), _exception_message->as_C_string());
   }
 #endif // ndef PRODUCT
 };
@@ -469,20 +473,6 @@
   return SymbolTable::new_symbol("No qualifying defaults found", CHECK_NULL);
 }
 
-Symbol* MethodFamily::generate_abstract_method_message(Method* method, TRAPS) const {
-  Symbol* klass = method->klass_name();
-  Symbol* name = method->name();
-  Symbol* sig = method->signature();
-  stringStream ss;
-  ss.print("Method ");
-  ss.write((const char*)klass->bytes(), klass->utf8_length());
-  ss.print(".");
-  ss.write((const char*)name->bytes(), name->utf8_length());
-  ss.write((const char*)sig->bytes(), sig->utf8_length());
-  ss.print(" is abstract");
-  return SymbolTable::new_symbol(ss.base(), (int)ss.size(), CHECK_NULL);
-}
-
 Symbol* MethodFamily::generate_conflicts_message(GrowableArray<Method*>* methods, TRAPS) const {
   stringStream ss;
   ss.print("Conflicting default methods:");
@@ -589,6 +579,18 @@
 #endif // ndef PRODUCT
 };
 
+static bool already_in_vtable_slots(GrowableArray<EmptyVtableSlot*>* slots, Method* m) {
+  bool found = false;
+  for (int j = 0; j < slots->length(); ++j) {
+    if (slots->at(j)->name() == m->name() &&
+        slots->at(j)->signature() == m->signature()) {
+      found = true;
+      break;
+    }
+  }
+  return found;
+}
+
 static GrowableArray<EmptyVtableSlot*>* find_empty_vtable_slots(
     InstanceKlass* klass, GrowableArray<Method*>* mirandas, TRAPS) {
 
@@ -598,8 +600,10 @@
 
   // All miranda methods are obvious candidates
   for (int i = 0; i < mirandas->length(); ++i) {
-    EmptyVtableSlot* slot = new EmptyVtableSlot(mirandas->at(i));
-    slots->append(slot);
+    Method* m = mirandas->at(i);
+    if (!already_in_vtable_slots(slots, m)) {
+      slots->append(new EmptyVtableSlot(m));
+    }
   }
 
   // Also any overpasses in our superclasses, that we haven't implemented.
@@ -615,7 +619,26 @@
         // unless we have a real implementation of it in the current class.
         Method* impl = klass->lookup_method(m->name(), m->signature());
         if (impl == NULL || impl->is_overpass()) {
-          slots->append(new EmptyVtableSlot(m));
+          if (!already_in_vtable_slots(slots, m)) {
+            slots->append(new EmptyVtableSlot(m));
+          }
+        }
+      }
+    }
+
+    // Also any default methods in our superclasses
+    if (super->default_methods() != NULL) {
+      for (int i = 0; i < super->default_methods()->length(); ++i) {
+        Method* m = super->default_methods()->at(i);
+        // m is a method that would have been a miranda if not for the
+        // default method processing that occurred on behalf of our superclass,
+        // so it's a method we want to re-examine in this new context.  That is,
+        // unless we have a real implementation of it in the current class.
+        Method* impl = klass->lookup_method(m->name(), m->signature());
+        if (impl == NULL || impl->is_overpass()) {
+          if (!already_in_vtable_slots(slots, m)) {
+            slots->append(new EmptyVtableSlot(m));
+          }
         }
       }
     }
@@ -670,7 +693,10 @@
     InstanceKlass* iklass = current_class();
 
     Method* m = iklass->find_method(_method_name, _method_signature);
-    if (m != NULL) {
+    // Private interface methods are not candidates for default methods;
+    // invokespecial to a private interface method does not use the default
+    // method logic. Future: take access controls into account for superclass
+    // methods.
+    if (m != NULL && !m->is_static() && (!iklass->is_interface() || m->is_public())) {
       if (_family == NULL) {
         _family = new StatefulMethodFamily();
       }
@@ -691,7 +717,7 @@
 
 
 
-static void create_overpasses(
+static void create_defaults_and_exceptions(
     GrowableArray<EmptyVtableSlot*>* slots, InstanceKlass* klass, TRAPS);
 
 static void generate_erased_defaults(
@@ -712,6 +738,8 @@
 
 static void merge_in_new_methods(InstanceKlass* klass,
     GrowableArray<Method*>* new_methods, TRAPS);
+static void create_default_methods(InstanceKlass* klass,
+    GrowableArray<Method*>* new_methods, TRAPS);
 
 // This is the guts of the default methods implementation.  This is called just
 // after the classfile has been parsed if some ancestor has default methods.
@@ -773,7 +801,7 @@
   }
 #endif // ndef PRODUCT
 
-  create_overpasses(empty_slots, klass, CHECK);
+  create_defaults_and_exceptions(empty_slots, klass, CHECK);
 
 #ifndef PRODUCT
   if (TraceDefaultMethods) {
@@ -782,263 +810,9 @@
 #endif // ndef PRODUCT
 }
 
-/**
- * Interface inheritance rules were used to find a unique default method
- * candidate for the resolved class. This
- * method is only viable if it would also be in the set of default method
- * candidates if we ran a full analysis on the current class.
- *
- * The only reason that the method would not be in the set of candidates for
- * the current class is if that there's another matching method
- * which is "more specific" than the found method -- i.e., one could find a
- * path in the interface hierarchy in which the matching method appears
- * before we get to '_target'.
- *
- * In order to determine this, we examine all of the implemented
- * interfaces.  If we find path that leads to the '_target' interface, then
- * we examine that path to see if there are any methods that would shadow
- * the selected method along that path.
- */
-class ShadowChecker : public HierarchyVisitor<ShadowChecker> {
- protected:
-  Thread* THREAD;
+static int assemble_method_error(
+    BytecodeConstantPool* cp, BytecodeBuffer* buffer, Symbol* errorName, Symbol* message, TRAPS) {
 
-  InstanceKlass* _target;
-
-  Symbol* _method_name;
-  InstanceKlass* _method_holder;
-  bool _found_shadow;
-
-
- public:
-
-  ShadowChecker(Thread* thread, Symbol* name, InstanceKlass* holder,
-                InstanceKlass* target)
-                : THREAD(thread), _method_name(name), _method_holder(holder),
-                _target(target), _found_shadow(false) {}
-
-  void* new_node_data(InstanceKlass* cls) { return NULL; }
-  void free_node_data(void* data) { return; }
-
-  bool visit() {
-    InstanceKlass* ik = current_class();
-    if (ik == _target && current_depth() == 1) {
-      return false; // This was the specified super -- no need to search it
-    }
-    if (ik == _method_holder || ik == _target) {
-      // We found a path that should be examined to see if it shadows _method
-      if (path_has_shadow()) {
-        _found_shadow = true;
-        cancel_iteration();
-      }
-      return false; // no need to continue up hierarchy
-    }
-    return true;
-  }
-
-  virtual bool path_has_shadow() = 0;
-  bool found_shadow() { return _found_shadow; }
-};
-
-// Used for Invokespecial.
-// Invokespecial is allowed to invoke a concrete interface method
-// and can be used to disambuiguate among qualified candidates,
-// which are methods in immediate superinterfaces,
-// but may not be used to invoke a candidate that would be shadowed
-// from the perspective of the caller.
-// Invokespecial is also used in the overpass generation today
-// We re-run the shadowchecker because we can't distinguish this case,
-// but it should return the same answer, since the overpass target
-// is now the invokespecial caller.
-class ErasedShadowChecker : public ShadowChecker {
- private:
-  bool path_has_shadow() {
-
-    for (int i = current_depth() - 1; i > 0; --i) {
-      InstanceKlass* ik = class_at_depth(i);
-
-      if (ik->is_interface()) {
-        int end;
-        int start = ik->find_method_by_name(_method_name, &end);
-        if (start != -1) {
-          return true;
-        }
-      }
-    }
-    return false;
-  }
- public:
-
-  ErasedShadowChecker(Thread* thread, Symbol* name, InstanceKlass* holder,
-                InstanceKlass* target)
-    : ShadowChecker(thread, name, holder, target) {}
-};
-
-// Find the unique qualified candidate from the perspective of the super_class
-// which is the resolved_klass, which must be an immediate superinterface
-// of klass
-Method* find_erased_super_default(InstanceKlass* current_class, InstanceKlass* super_class, Symbol* method_name, Symbol* sig, TRAPS) {
-
-  FindMethodsByErasedSig visitor(method_name, sig);
-  visitor.run(super_class);      // find candidates from resolved_klass
-
-  MethodFamily* family;
-  visitor.get_discovered_family(&family);
-
-  if (family != NULL) {
-    family->determine_target(current_class, CHECK_NULL);  // get target from current_class
-
-    if (family->has_target()) {
-      Method* target = family->get_selected_target();
-      InstanceKlass* holder = InstanceKlass::cast(target->method_holder());
-
-      // Verify that the identified method is valid from the context of
-      // the current class, which is the caller class for invokespecial
-      // link resolution, i.e. ensure there it is not shadowed.
-      // You can use invokespecial to disambiguate interface methods, but
-      // you can not use it to skip over an interface method that would shadow it.
-      ErasedShadowChecker checker(THREAD, target->name(), holder, super_class);
-      checker.run(current_class);
-
-      if (checker.found_shadow()) {
-#ifndef PRODUCT
-        if (TraceDefaultMethods) {
-          tty->print_cr("    Only candidate found was shadowed.");
-        }
-#endif // ndef PRODUCT
-        THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
-                   "Accessible default method not found", NULL);
-      } else {
-#ifndef PRODUCT
-        if (TraceDefaultMethods) {
-          family->print_sig_on(tty, target->signature(), 1);
-        }
-#endif // ndef PRODUCT
-        return target;
-      }
-    } else {
-      assert(family->throws_exception(), "must have target or throw");
-      THROW_MSG_(vmSymbols::java_lang_AbstractMethodError(),
-                 family->get_exception_message()->as_C_string(), NULL);
-   }
-  } else {
-    // no method found
-    ResourceMark rm(THREAD);
-    THROW_MSG_(vmSymbols::java_lang_NoSuchMethodError(),
-              Method::name_and_sig_as_C_string(current_class,
-                                               method_name, sig), NULL);
-  }
-}
-// This is called during linktime when we find an invokespecial call that
-// refers to a direct superinterface.  It indicates that we should find the
-// default method in the hierarchy of that superinterface, and if that method
-// would have been a candidate from the point of view of 'this' class, then we
-// return that method.
-// This logic assumes that the super is a direct superclass of the caller
-Method* DefaultMethods::find_super_default(
-    Klass* cls, Klass* super, Symbol* method_name, Symbol* sig, TRAPS) {
-
-  ResourceMark rm(THREAD);
-
-  assert(cls != NULL && super != NULL, "Need real classes");
-
-  InstanceKlass* current_class = InstanceKlass::cast(cls);
-  InstanceKlass* super_class = InstanceKlass::cast(super);
-
-  // Keep entire hierarchy alive for the duration of the computation
-  KeepAliveRegistrar keepAlive(THREAD);
-  KeepAliveVisitor loadKeepAlive(&keepAlive);
-  loadKeepAlive.run(current_class);   // get hierarchy from current class
-
-#ifndef PRODUCT
-  if (TraceDefaultMethods) {
-    tty->print_cr("Finding super default method %s.%s%s from %s",
-      super_class->name()->as_C_string(),
-      method_name->as_C_string(), sig->as_C_string(),
-      current_class->name()->as_C_string());
-  }
-#endif // ndef PRODUCT
-
-  assert(super_class->is_interface(), "only call for default methods");
-
-  Method* target = NULL;
-  target = find_erased_super_default(current_class, super_class,
-                                     method_name, sig, CHECK_NULL);
-
-#ifndef PRODUCT
-  if (target != NULL) {
-    if (TraceDefaultMethods) {
-      tty->print("    Returning ");
-      print_method(tty, target, true);
-      tty->print_cr("");
-    }
-  }
-#endif // ndef PRODUCT
-  return target;
-}
-
-#ifndef PRODUCT
-// Return true is broad type is a covariant return of narrow type
-static bool covariant_return_type(BasicType narrow, BasicType broad) {
-  if (narrow == broad) {
-    return true;
-  }
-  if (broad == T_OBJECT) {
-    return true;
-  }
-  return false;
-}
-#endif // ndef PRODUCT
-
-static int assemble_redirect(
-    BytecodeConstantPool* cp, BytecodeBuffer* buffer,
-    Symbol* incoming, Method* target, TRAPS) {
-
-  BytecodeAssembler assem(buffer, cp);
-
-  SignatureStream in(incoming, true);
-  SignatureStream out(target->signature(), true);
-  u2 parameter_count = 0;
-
-  assem.aload(parameter_count++); // load 'this'
-
-  while (!in.at_return_type()) {
-    assert(!out.at_return_type(), "Parameter counts do not match");
-    BasicType bt = in.type();
-    assert(out.type() == bt, "Parameter types are not compatible");
-    assem.load(bt, parameter_count);
-    if (in.is_object() && in.as_symbol(THREAD) != out.as_symbol(THREAD)) {
-      assem.checkcast(out.as_symbol(THREAD));
-    } else if (bt == T_LONG || bt == T_DOUBLE) {
-      ++parameter_count; // longs and doubles use two slots
-    }
-    ++parameter_count;
-    in.next();
-    out.next();
-  }
-  assert(out.at_return_type(), "Parameter counts do not match");
-  assert(covariant_return_type(out.type(), in.type()), "Return types are not compatible");
-
-  if (parameter_count == 1 && (in.type() == T_LONG || in.type() == T_DOUBLE)) {
-    ++parameter_count; // need room for return value
-  }
-  if (target->method_holder()->is_interface()) {
-    assem.invokespecial(target);
-  } else {
-    assem.invokevirtual(target);
-  }
-
-  if (in.is_object() && in.as_symbol(THREAD) != out.as_symbol(THREAD)) {
-    assem.checkcast(in.as_symbol(THREAD));
-  }
-  assem._return(in.type());
-  return parameter_count;
-}
-
-static int assemble_abstract_method_error(
-    BytecodeConstantPool* cp, BytecodeBuffer* buffer, Symbol* message, TRAPS) {
-
-  Symbol* errorName = vmSymbols::java_lang_AbstractMethodError();
   Symbol* init = vmSymbols::object_initializer_name();
   Symbol* sig = vmSymbols::string_void_signature();
 
@@ -1109,18 +883,18 @@
   }
 }
 
-// A "bridge" is a method created by javac to bridge the gap between
-// an implementation and a generically-compatible, but different, signature.
-// Bridges have actual bytecode implementation in classfiles.
-// An "overpass", on the other hand, performs the same function as a bridge
-// but does not occur in a classfile; the VM creates overpass itself,
-// when it needs a path to get from a call site to an default method, and
-// a bridge doesn't exist.
-static void create_overpasses(
+// Create the default_methods list for the current class.
+// Because the VM processes only erased signatures, it creates an overpass
+// only for a conflict or for the case with no candidates.
+// This allows virtual methods to override the overpass, but ensures that a
+// local method search will find the exception rather than an abstract or
+// default method that is not a valid candidate.
+static void create_defaults_and_exceptions(
     GrowableArray<EmptyVtableSlot*>* slots,
     InstanceKlass* klass, TRAPS) {
 
   GrowableArray<Method*> overpasses;
+  GrowableArray<Method*> defaults;
   BytecodeConstantPool bpool(klass->constants());
 
   for (int i = 0; i < slots->length(); ++i) {
@@ -1128,7 +902,6 @@
 
     if (slot->is_bound()) {
       MethodFamily* method = slot->get_binding();
-      int max_stack = 0;
       BytecodeBuffer buffer;
 
 #ifndef PRODUCT
@@ -1138,27 +911,27 @@
         tty->print_cr("");
         if (method->has_target()) {
           method->print_selected(tty, 1);
-        } else {
+        } else if (method->throws_exception()) {
           method->print_exception(tty, 1);
         }
       }
 #endif // ndef PRODUCT
+
       if (method->has_target()) {
         Method* selected = method->get_selected_target();
         if (selected->method_holder()->is_interface()) {
-          max_stack = assemble_redirect(
-            &bpool, &buffer, slot->signature(), selected, CHECK);
+          defaults.push(selected);
         }
       } else if (method->throws_exception()) {
-        max_stack = assemble_abstract_method_error(
-            &bpool, &buffer, method->get_exception_message(), CHECK);
-      }
-      if (max_stack != 0) {
+        int max_stack = assemble_method_error(&bpool, &buffer,
+           method->get_exception_name(), method->get_exception_message(), CHECK);
         AccessFlags flags = accessFlags_from(
           JVM_ACC_PUBLIC | JVM_ACC_SYNTHETIC | JVM_ACC_BRIDGE);
-        Method* m = new_method(&bpool, &buffer, slot->name(), slot->signature(),
+        Method* m = new_method(&bpool, &buffer, slot->name(), slot->signature(),
           flags, max_stack, slot->size_of_parameters(),
           ConstMethod::OVERPASS, CHECK);
+        // Push exception-throwing overpass methods onto the methods list.
         if (m != NULL) {
           overpasses.push(m);
         }
@@ -1169,11 +942,31 @@
 #ifndef PRODUCT
   if (TraceDefaultMethods) {
     tty->print_cr("Created %d overpass methods", overpasses.length());
+    tty->print_cr("Created %d default  methods", defaults.length());
   }
 #endif // ndef PRODUCT
 
-  switchover_constant_pool(&bpool, klass, &overpasses, CHECK);
-  merge_in_new_methods(klass, &overpasses, CHECK);
+  if (overpasses.length() > 0) {
+    switchover_constant_pool(&bpool, klass, &overpasses, CHECK);
+    merge_in_new_methods(klass, &overpasses, CHECK);
+  }
+  if (defaults.length() > 0) {
+    create_default_methods(klass, &defaults, CHECK);
+  }
+}
+
+static void create_default_methods(InstanceKlass* klass,
+    GrowableArray<Method*>* new_methods, TRAPS) {
+
+  int new_size = new_methods->length();
+  Array<Method*>* total_default_methods = MetadataFactory::new_array<Method*>(
+      klass->class_loader_data(), new_size, NULL, CHECK);
+  for (int index = 0; index < new_size; index++) {
+    total_default_methods->at_put(index, new_methods->at(index));
+  }
+  Method::sort_methods(total_default_methods, false, false);
+
+  klass->set_default_methods(total_default_methods);
 }
 
 static void sort_methods(GrowableArray<Method*>* methods) {
@@ -1281,4 +1074,3 @@
     MetadataFactory::free_array(cld, original_ordering);
   }
 }
-
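
Taken together, the defaultMethods.cpp changes above reduce MethodFamily::determine_target to a small selection rule. A self-contained sketch of that rule, assuming simplified Method and Outcome types that are not HotSpot's:

    #include <cstdio>
    #include <vector>

    struct Method { const char* name; bool is_abstract; };

    enum Outcome { SELECTED, SKIPPED_ABSTRACT, THROW_AME, THROW_ICCE };

    // Mirrors the logic above: no qualified candidates yields an
    // AbstractMethodError overpass, a single abstract candidate is left for
    // the normal search path, a single concrete candidate is selected, and
    // multiple candidates yield an IncompatibleClassChangeError overpass.
    static Outcome determine_target(const std::vector<Method>& qualified) {
      if (qualified.empty())     return THROW_AME;
      if (qualified.size() == 1) return qualified[0].is_abstract ? SKIPPED_ABSTRACT
                                                                 : SELECTED;
      return THROW_ICCE;
    }

    int main() {
      std::vector<Method> none;
      std::vector<Method> one  = { {"A.m", false} };
      std::vector<Method> both = { {"A.m", false}, {"B.m", false} };
      std::printf("%d %d %d\n", determine_target(none),   // 2 (THROW_AME)
                  determine_target(one),                  // 0 (SELECTED)
                  determine_target(both));                // 3 (THROW_ICCE)
      return 0;
    }
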
--- a/src/share/vm/classfile/defaultMethods.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/classfile/defaultMethods.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,15 +44,5 @@
   // the class.
   static void generate_default_methods(
       InstanceKlass* klass, GrowableArray<Method*>* mirandas, TRAPS);
-
-
-  // Called during linking when an invokespecial to an direct interface
-  // method is found.  Selects and returns a method if there is a unique
-  // default method in the 'super_iface' part of the hierarchy which is
-  // also a candidate default for 'this_klass'.  Otherwise throws an AME.
-  static Method* find_super_default(
-      Klass* this_klass, Klass* super_iface,
-      Symbol* method_name, Symbol* method_sig, TRAPS);
 };
-
 #endif // SHARE_VM_CLASSFILE_DEFAULTMETHODS_HPP
--- a/src/share/vm/classfile/dictionary.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/classfile/dictionary.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "classfile/dictionary.hpp"
 #include "classfile/systemDictionary.hpp"
+#include "memory/iterator.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiRedefineClassesTrace.hpp"
 #include "utilities/hashtable.inline.hpp"
@@ -38,17 +39,21 @@
   : TwoOopHashtable<Klass*, mtClass>(table_size, sizeof(DictionaryEntry)) {
   _current_class_index = 0;
   _current_class_entry = NULL;
+  _pd_cache_table = new ProtectionDomainCacheTable(defaultProtectionDomainCacheSize);
 };
 
 
-
 Dictionary::Dictionary(int table_size, HashtableBucket<mtClass>* t,
                        int number_of_entries)
   : TwoOopHashtable<Klass*, mtClass>(table_size, sizeof(DictionaryEntry), t, number_of_entries) {
   _current_class_index = 0;
   _current_class_entry = NULL;
+  _pd_cache_table = new ProtectionDomainCacheTable(defaultProtectionDomainCacheSize);
 };
 
+ProtectionDomainCacheEntry* Dictionary::cache_get(oop protection_domain) {
+  return _pd_cache_table->get(protection_domain);
+}
 
 DictionaryEntry* Dictionary::new_entry(unsigned int hash, Klass* klass,
                                        ClassLoaderData* loader_data) {
@@ -105,11 +110,12 @@
 }
 
 
-void DictionaryEntry::add_protection_domain(oop protection_domain) {
+void DictionaryEntry::add_protection_domain(Dictionary* dict, oop protection_domain) {
   assert_locked_or_safepoint(SystemDictionary_lock);
   if (!contains_protection_domain(protection_domain)) {
+    ProtectionDomainCacheEntry* entry = dict->cache_get(protection_domain);
     ProtectionDomainEntry* new_head =
-                new ProtectionDomainEntry(protection_domain, _pd_set);
+                new ProtectionDomainEntry(entry, _pd_set);
     // Warning: Preserve store ordering.  The SystemDictionary is read
     //          without locks.  The new ProtectionDomainEntry must be
     //          complete before other threads can be allowed to see it
@@ -193,7 +199,10 @@
 
 
 void Dictionary::always_strong_oops_do(OopClosure* blk) {
-  // Follow all system classes and temporary placeholders in dictionary
+  // Follow all system classes and temporary placeholders in dictionary; only
+  // protection domain oops contain references into the heap. In a first
+  // pass over the system dictionary, determine which need to be treated as
+  // strongly reachable and mark them as such.
   for (int index = 0; index < table_size(); index++) {
     for (DictionaryEntry *probe = bucket(index);
                           probe != NULL;
@@ -201,10 +210,13 @@
       Klass* e = probe->klass();
       ClassLoaderData* loader_data = probe->loader_data();
       if (is_strongly_reachable(loader_data, e)) {
-        probe->protection_domain_set_oops_do(blk);
+        probe->set_strongly_reachable();
       }
     }
   }
+  // Then iterate over the protection domain cache to apply the closure on the
+  // previously marked ones.
+  _pd_cache_table->always_strong_oops_do(blk);
 }
 
 
@@ -266,18 +278,12 @@
   }
 }
 
-
 void Dictionary::oops_do(OopClosure* f) {
-  for (int index = 0; index < table_size(); index++) {
-    for (DictionaryEntry* probe = bucket(index);
-                          probe != NULL;
-                          probe = probe->next()) {
-      probe->protection_domain_set_oops_do(f);
-    }
-  }
+  // Only the protection domain oops contain references into the heap. Iterate
+  // over all of them.
+  _pd_cache_table->oops_do(f);
 }
 
-
 void Dictionary::methods_do(void f(Method*)) {
   for (int index = 0; index < table_size(); index++) {
     for (DictionaryEntry* probe = bucket(index);
@@ -292,6 +298,11 @@
   }
 }
 
+void Dictionary::unlink(BoolObjectClosure* is_alive) {
+  // Only the protection domain cache table may contain references to the heap
+  // that need to be unlinked.
+  _pd_cache_table->unlink(is_alive);
+}
 
 Klass* Dictionary::try_get_next_class() {
   while (true) {
@@ -306,7 +317,6 @@
   // never reached
 }
 
-
 // Add a loaded class to the system dictionary.
 // Readers of the SystemDictionary aren't always locked, so _buckets
 // is volatile. The store of the next field in the constructor is
@@ -396,7 +406,7 @@
   assert(protection_domain() != NULL,
          "real protection domain should be present");
 
-  entry->add_protection_domain(protection_domain());
+  entry->add_protection_domain(this, protection_domain());
 
   assert(entry->contains_protection_domain(protection_domain()),
          "now protection domain should be present");
@@ -446,6 +456,146 @@
   }
 }
 
+ProtectionDomainCacheTable::ProtectionDomainCacheTable(int table_size)
+  : Hashtable<oop, mtClass>(table_size, sizeof(ProtectionDomainCacheEntry))
+{
+}
+
+void ProtectionDomainCacheTable::unlink(BoolObjectClosure* is_alive) {
+  assert(SafepointSynchronize::is_at_safepoint(), "must be");
+  for (int i = 0; i < table_size(); ++i) {
+    ProtectionDomainCacheEntry** p = bucket_addr(i);
+    ProtectionDomainCacheEntry* entry = bucket(i);
+    while (entry != NULL) {
+      if (is_alive->do_object_b(entry->literal())) {
+        p = entry->next_addr();
+      } else {
+        *p = entry->next();
+        free_entry(entry);
+      }
+      entry = *p;
+    }
+  }
+}
+
+void ProtectionDomainCacheTable::oops_do(OopClosure* f) {
+  for (int index = 0; index < table_size(); index++) {
+    for (ProtectionDomainCacheEntry* probe = bucket(index);
+                                     probe != NULL;
+                                     probe = probe->next()) {
+      probe->oops_do(f);
+    }
+  }
+}
+
+uint ProtectionDomainCacheTable::bucket_size() {
+  return sizeof(ProtectionDomainCacheEntry);
+}
+
+#ifndef PRODUCT
+void ProtectionDomainCacheTable::print() {
+  tty->print_cr("Protection domain cache table (table_size=%d, classes=%d)",
+                table_size(), number_of_entries());
+  for (int index = 0; index < table_size(); index++) {
+    for (ProtectionDomainCacheEntry* probe = bucket(index);
+                                     probe != NULL;
+                                     probe = probe->next()) {
+      probe->print();
+    }
+  }
+}
+
+void ProtectionDomainCacheEntry::print() {
+  tty->print_cr("entry "PTR_FORMAT" value "PTR_FORMAT" strongly_reachable %d next "PTR_FORMAT,
+                this, (void*)literal(), _strongly_reachable, next());
+}
+#endif
+
+void ProtectionDomainCacheTable::verify() {
+  int element_count = 0;
+  for (int index = 0; index < table_size(); index++) {
+    for (ProtectionDomainCacheEntry* probe = bucket(index);
+                                     probe != NULL;
+                                     probe = probe->next()) {
+      probe->verify();
+      element_count++;
+    }
+  }
+  guarantee(number_of_entries() == element_count,
+            "Verify of protection domain cache table failed");
+  debug_only(verify_lookup_length((double)number_of_entries() / table_size()));
+}
+
+void ProtectionDomainCacheEntry::verify() {
+  guarantee(literal()->is_oop(), "must be an oop");
+}
+
+void ProtectionDomainCacheTable::always_strong_oops_do(OopClosure* f) {
+  // The caller marked the protection domain cache entries on which we need
+  // to apply the closure. Only process those.
+  for (int index = 0; index < table_size(); index++) {
+    for (ProtectionDomainCacheEntry* probe = bucket(index);
+                                     probe != NULL;
+                                     probe = probe->next()) {
+      if (probe->is_strongly_reachable()) {
+        probe->reset_strongly_reachable();
+        probe->oops_do(f);
+      }
+    }
+  }
+}
+
+ProtectionDomainCacheEntry* ProtectionDomainCacheTable::get(oop protection_domain) {
+  unsigned int hash = compute_hash(protection_domain);
+  int index = hash_to_index(hash);
+
+  ProtectionDomainCacheEntry* entry = find_entry(index, protection_domain);
+  if (entry == NULL) {
+    entry = add_entry(index, hash, protection_domain);
+  }
+  return entry;
+}
+
+ProtectionDomainCacheEntry* ProtectionDomainCacheTable::find_entry(int index, oop protection_domain) {
+  for (ProtectionDomainCacheEntry* e = bucket(index); e != NULL; e = e->next()) {
+    if (e->protection_domain() == protection_domain) {
+      return e;
+    }
+  }
+
+  return NULL;
+}
+
+ProtectionDomainCacheEntry* ProtectionDomainCacheTable::add_entry(int index, unsigned int hash, oop protection_domain) {
+  assert_locked_or_safepoint(SystemDictionary_lock);
+  assert(index == index_for(protection_domain), "incorrect index?");
+  assert(find_entry(index, protection_domain) == NULL, "no double entry");
+
+  ProtectionDomainCacheEntry* p = new_entry(hash, protection_domain);
+  Hashtable<oop, mtClass>::add_entry(index, p);
+  return p;
+}
+
+void ProtectionDomainCacheTable::free(ProtectionDomainCacheEntry* to_delete) {
+  unsigned int hash = compute_hash(to_delete->protection_domain());
+  int index = hash_to_index(hash);
+
+  ProtectionDomainCacheEntry** p = bucket_addr(index);
+  ProtectionDomainCacheEntry* entry = bucket(index);
+  while (true) {
+    assert(entry != NULL, "sanity");
+
+    if (entry == to_delete) {
+      *p = entry->next();
+      Hashtable<oop, mtClass>::free_entry(entry);
+      break;
+    } else {
+      p = entry->next_addr();
+      entry = *p;
+    }
+  }
+}
+
 SymbolPropertyTable::SymbolPropertyTable(int table_size)
   : Hashtable<Symbol*, mtSymbol>(table_size, sizeof(SymbolPropertyEntry))
 {
@@ -532,11 +682,13 @@
       tty->cr();
     }
   }
+  tty->cr();
+  _pd_cache_table->print();
+  tty->cr();
 }
 
 #endif
 
-
 void Dictionary::verify() {
   guarantee(number_of_entries() >= 0, "Verify of system dictionary failed");
 
@@ -563,5 +715,7 @@
   guarantee(number_of_entries() == element_count,
             "Verify of system dictionary failed");
   debug_only(verify_lookup_length((double)number_of_entries() / table_size()));
+
+  _pd_cache_table->verify();
 }
 
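ProtectionDomainCacheTable::unlink above removes dead entries from a bucket chain with the pointer-to-pointer splice, avoiding a separate previous-node pointer. A self-contained sketch of the same pattern under hypothetical names (Entry, unlink_dead):

    #include <cstdio>

    struct Entry { int value; Entry* next; };

    // Splice dead entries out of a singly linked chain, as unlink does above:
    // 'p' always points at the link field that would have to be rewritten.
    static void unlink_dead(Entry** bucket, bool (*is_alive)(const Entry*)) {
      Entry** p = bucket;
      Entry*  e = *bucket;
      while (e != NULL) {
        if (is_alive(e)) {
          p = &e->next;      // keep entry, advance to its link field
        } else {
          *p = e->next;      // splice the dead entry out of the chain
          delete e;
        }
        e = *p;
      }
    }

    static bool is_even(const Entry* e) { return e->value % 2 == 0; }

    int main() {
      Entry* head = new Entry{1, new Entry{2, new Entry{3, NULL}}};
      unlink_dead(&head, is_even);
      for (Entry* e = head; e != NULL; e = e->next) std::printf("%d ", e->value);
      std::printf("\n");     // prints: 2
      return 0;
    }
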
--- a/src/share/vm/classfile/dictionary.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/classfile/dictionary.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,11 +27,14 @@
 
 #include "classfile/systemDictionary.hpp"
 #include "oops/instanceKlass.hpp"
-#include "oops/oop.hpp"
+#include "oops/oop.inline.hpp"
 #include "utilities/hashtable.hpp"
 
 class DictionaryEntry;
 class PSPromotionManager;
+class ProtectionDomainCacheTable;
+class ProtectionDomainCacheEntry;
+class BoolObjectClosure;
 
 //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 // The data structure for the system dictionary (and the shared system
@@ -45,6 +48,8 @@
   // pointer to the current hash table entry.
   static DictionaryEntry*       _current_class_entry;
 
+  ProtectionDomainCacheTable*   _pd_cache_table;
+
   DictionaryEntry* get_entry(int index, unsigned int hash,
                              Symbol* name, ClassLoaderData* loader_data);
 
@@ -93,6 +98,7 @@
 
   void methods_do(void f(Method*));
 
+  void unlink(BoolObjectClosure* is_alive);
 
   // Classes loaded by the bootstrap loader are always strongly reachable.
   // If we're not doing class unloading, all classes are strongly reachable.
@@ -118,6 +124,7 @@
   // Sharing support
   void reorder_dictionary();
 
+  ProtectionDomainCacheEntry* cache_get(oop protection_domain);
 
 #ifndef PRODUCT
   void print();
@@ -126,21 +133,112 @@
 };
 
 // The following classes can be in dictionary.cpp, but we need these
-// to be in header file so that SA's vmStructs can access.
+// to be in header file so that SA's vmStructs can access them.
+class ProtectionDomainCacheEntry : public HashtableEntry<oop, mtClass> {
+  friend class VMStructs;
+ private:
+  // Flag indicating whether this protection domain entry is strongly reachable.
+  // Used while iterating over the system dictionary to remember oops that need
+  // to be updated.
+  bool _strongly_reachable;
+ public:
+  oop protection_domain() { return literal(); }
+
+  void init() {
+    _strongly_reachable = false;
+  }
+
+  ProtectionDomainCacheEntry* next() {
+    return (ProtectionDomainCacheEntry*)HashtableEntry<oop, mtClass>::next();
+  }
+
+  ProtectionDomainCacheEntry** next_addr() {
+    return (ProtectionDomainCacheEntry**)HashtableEntry<oop, mtClass>::next_addr();
+  }
+
+  void oops_do(OopClosure* f) {
+    f->do_oop(literal_addr());
+  }
+
+  void set_strongly_reachable()   { _strongly_reachable = true; }
+  bool is_strongly_reachable()    { return _strongly_reachable; }
+  void reset_strongly_reachable() { _strongly_reachable = false; }
+
+  void print() PRODUCT_RETURN;
+  void verify();
+};
+
+// The ProtectionDomainCacheTable contains all protection domain oops. The system
+// dictionary entries reference its entries instead of having references to oops
+// directly.
+// This speeds up system dictionary iteration: the protection domain oops are
+// the only ones in the dictionary that refer to the Java heap, so when they
+// need to be updated we iterate over this set instead of over every entry of
+// the system dictionary.
+// The number of distinct protection domains in use is typically orders of
+// magnitude smaller than the number of system dictionary entries (loaded classes).
+class ProtectionDomainCacheTable : public Hashtable<oop, mtClass> {
+  friend class VMStructs;
+private:
+  ProtectionDomainCacheEntry* bucket(int i) {
+    return (ProtectionDomainCacheEntry*) Hashtable<oop, mtClass>::bucket(i);
+  }
+
+  // The following method is not MT-safe and must be called under a lock.
+  ProtectionDomainCacheEntry** bucket_addr(int i) {
+    return (ProtectionDomainCacheEntry**) Hashtable<oop, mtClass>::bucket_addr(i);
+  }
+
+  ProtectionDomainCacheEntry* new_entry(unsigned int hash, oop protection_domain) {
+    ProtectionDomainCacheEntry* entry = (ProtectionDomainCacheEntry*) Hashtable<oop, mtClass>::new_entry(hash, protection_domain);
+    entry->init();
+    return entry;
+  }
+
+  static unsigned int compute_hash(oop protection_domain) {
+    return (unsigned int)(protection_domain->identity_hash());
+  }
+
+  int index_for(oop protection_domain) {
+    return hash_to_index(compute_hash(protection_domain));
+  }
+
+  ProtectionDomainCacheEntry* add_entry(int index, unsigned int hash, oop protection_domain);
+  ProtectionDomainCacheEntry* find_entry(int index, oop protection_domain);
+
+public:
+
+  ProtectionDomainCacheTable(int table_size);
+
+  ProtectionDomainCacheEntry* get(oop protection_domain);
+  void free(ProtectionDomainCacheEntry* entry);
+
+  void unlink(BoolObjectClosure* cl);
+
+  // GC support
+  void oops_do(OopClosure* f);
+  void always_strong_oops_do(OopClosure* f);
+
+  static uint bucket_size();
+
+  void print() PRODUCT_RETURN;
+  void verify();
+};
+
 
 class ProtectionDomainEntry :public CHeapObj<mtClass> {
   friend class VMStructs;
  public:
   ProtectionDomainEntry* _next;
-  oop                    _protection_domain;
+  ProtectionDomainCacheEntry* _pd_cache;
 
-  ProtectionDomainEntry(oop protection_domain, ProtectionDomainEntry* next) {
-    _protection_domain = protection_domain;
-    _next              = next;
+  ProtectionDomainEntry(ProtectionDomainCacheEntry* pd_cache, ProtectionDomainEntry* next) {
+    _pd_cache = pd_cache;
+    _next     = next;
   }
 
   ProtectionDomainEntry* next() { return _next; }
-  oop protection_domain() { return _protection_domain; }
+  oop protection_domain() { return _pd_cache->protection_domain(); }
 };
 
 // An entry in the system dictionary, this describes a class as
@@ -151,6 +249,24 @@
  private:
   // Contains the set of approved protection domains that can access
   // this system dictionary entry.
+  //
+  // This protection domain set is a set of tuples:
+  //
+  // (InstanceKlass C, initiating class loader ICL, Protection Domain PD)
+  //
+  // [Note that C.protection_domain(), which is stored in the java.lang.Class
+  // mirror of C, is NOT the same as PD]
+  //
+  // If such an entry (C, ICL, PD) exists in the table, it means that
+  // it is okay for a class Foo to reference C, where
+  //
+  //    Foo.protection_domain() == PD, and
+  //    Foo's defining class loader == ICL
+  //
+  // The usage of the PD set can be seen in SystemDictionary::validate_protection_domain()
+  // It is essentially a cache to avoid repeated Java up-calls to
+  // ClassLoader.checkPackageAccess().
+  //
   ProtectionDomainEntry* _pd_set;
   ClassLoaderData*       _loader_data;
 
@@ -158,7 +274,7 @@
   // Tells whether a protection is in the approved set.
   bool contains_protection_domain(oop protection_domain) const;
   // Adds a protection domain to the approved set.
-  void add_protection_domain(oop protection_domain);
+  void add_protection_domain(Dictionary* dict, oop protection_domain);
 
   Klass* klass() const { return (Klass*)literal(); }
   Klass** klass_addr() { return (Klass**)literal_addr(); }
@@ -189,12 +305,11 @@
          : contains_protection_domain(protection_domain());
   }
 
-
-  void protection_domain_set_oops_do(OopClosure* f) {
+  void set_strongly_reachable() {
     for (ProtectionDomainEntry* current = _pd_set;
                                 current != NULL;
                                 current = current->_next) {
-      f->do_oop(&(current->_protection_domain));
+      current->_pd_cache->set_strongly_reachable();
     }
   }
 
@@ -202,7 +317,7 @@
     for (ProtectionDomainEntry* current = _pd_set;
                                 current != NULL;
                                 current = current->_next) {
-      current->_protection_domain->verify();
+      current->_pd_cache->protection_domain()->verify();
     }
   }
 
@@ -264,7 +379,7 @@
     }
     if (method_type() != NULL) {
       if (printed)  st->print(" and ");
-      st->print(INTPTR_FORMAT, method_type());
+      st->print(INTPTR_FORMAT, (void *)method_type());
       printed = true;
     }
     st->print_cr(printed ? "" : "(empty)");
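
The comment block above describes the interning scheme: many DictionaryEntry chains share one cache entry per distinct protection domain oop, so a GC pass touches each oop once. A sketch of the identity-keyed get-or-add lookup, using std::unordered_map as a stand-in for the real hashtable (all names hypothetical):

    #include <cstdio>
    #include <unordered_map>

    struct PDCacheEntry { const void* pd; bool strongly_reachable; };

    // Get-or-add keyed on object identity, mirroring
    // ProtectionDomainCacheTable::get above.
    static PDCacheEntry* cache_get(
        std::unordered_map<const void*, PDCacheEntry>& table, const void* pd) {
      auto it = table.find(pd);
      if (it == table.end()) {
        it = table.emplace(pd, PDCacheEntry{pd, false}).first;
      }
      return &it->second;
    }

    int main() {
      std::unordered_map<const void*, PDCacheEntry> table;
      int pd1, pd2;
      PDCacheEntry* a = cache_get(table, &pd1);
      PDCacheEntry* b = cache_get(table, &pd1);  // same shared entry
      PDCacheEntry* c = cache_get(table, &pd2);
      (void)c;                                   // silence unused warning
      std::printf("shared=%d distinct=%zu\n", a == b, (size_t)table.size());
      return 0;                                  // prints: shared=1 distinct=2
    }
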
--- a/src/share/vm/classfile/javaClasses.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/classfile/javaClasses.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -1376,8 +1376,15 @@
   const char* klass_name  = holder->external_name();
   int buf_len = (int)strlen(klass_name);
 
-  // pushing to the stack trace added one.
+  // The method id may point to an obsolete method; if so, we can't get any
+  // more stack information.
   Method* method = holder->method_with_idnum(method_id);
+  if (method == NULL) {
+    char* buf = NEW_RESOURCE_ARRAY(char, buf_len + 64);
+    // This is what the Java code prints in this case, with "(Redefined)" added.
+    sprintf(buf, "\tat %s.null (Redefined)", klass_name);
+    return buf;
+  }
+
   char* method_name = method->name()->as_C_string();
   buf_len += (int)strlen(method_name);
 
@@ -1773,7 +1780,8 @@
   return element;
 }
 
-oop java_lang_StackTraceElement::create(Handle mirror, int method_id, int version, int bci, TRAPS) {
+oop java_lang_StackTraceElement::create(Handle mirror, int method_id,
+                                        int version, int bci, TRAPS) {
   // Allocate java.lang.StackTraceElement instance
   Klass* k = SystemDictionary::StackTraceElement_klass();
   assert(k != NULL, "must be loaded in 1.4+");
@@ -1790,8 +1798,16 @@
   oop classname = StringTable::intern((char*) str, CHECK_0);
   java_lang_StackTraceElement::set_declaringClass(element(), classname);
 
+  Method* method = holder->method_with_idnum(method_id);
+  // The method on the stack may be obsolete because it was redefined, so it
+  // cannot be found by idnum.
+  if (method == NULL) {
+    // leave name and fileName null
+    java_lang_StackTraceElement::set_lineNumber(element(), -1);
+    return element();
+  }
+
   // Fill in method name
-  Method* method = holder->method_with_idnum(method_id);
   oop methodname = StringTable::intern(method->name(), CHECK_0);
   java_lang_StackTraceElement::set_methodName(element(), methodname);
 
--- a/src/share/vm/classfile/symbolTable.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/classfile/symbolTable.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -840,7 +840,7 @@
   if (str1 == str2) {
     tty->print_cr("ERROR: identical oop values (0x" PTR_FORMAT ") "
                   "in entry @ bucket[%d][%d] and entry @ bucket[%d][%d]",
-                  str1, bkt1, e_cnt1, bkt2, e_cnt2);
+                  (void *)str1, bkt1, e_cnt1, bkt2, e_cnt2);
     return _verify_fail_continue;
   }
 
--- a/src/share/vm/classfile/symbolTable.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/classfile/symbolTable.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -107,18 +107,13 @@
     add(loader_data, cp, names_count, name, lengths, cp_indices, hashValues, THREAD);
   }
 
-  // Table size
-  enum {
-    symbol_table_size = 20011
-  };
-
   Symbol* lookup(int index, const char* name, int len, unsigned int hash);
 
   SymbolTable()
-    : Hashtable<Symbol*, mtSymbol>(symbol_table_size, sizeof (HashtableEntry<Symbol*, mtSymbol>)) {}
+    : Hashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>)) {}
 
   SymbolTable(HashtableBucket<mtSymbol>* t, int number_of_entries)
-    : Hashtable<Symbol*, mtSymbol>(symbol_table_size, sizeof (HashtableEntry<Symbol*, mtSymbol>), t,
+    : Hashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>), t,
                 number_of_entries) {}
 
   // Arena for permanent symbols (null class loader) that are never unloaded
@@ -136,6 +131,9 @@
   // The symbol table
   static SymbolTable* the_table() { return _the_table; }
 
+  // Size of one bucket in the symbol table.  Used when checking for rollover.
+  static uint bucket_size() { return sizeof(HashtableBucket<mtSymbol>); }
+
   static void create_table() {
     assert(_the_table == NULL, "One symbol table allowed.");
     _the_table = new SymbolTable();
@@ -145,8 +143,11 @@
   static void create_table(HashtableBucket<mtSymbol>* t, int length,
                            int number_of_entries) {
     assert(_the_table == NULL, "One symbol table allowed.");
-    assert(length == symbol_table_size * sizeof(HashtableBucket<mtSymbol>),
-           "bad shared symbol size.");
+
+    // If the CDS archive used a different symbol table size, use that size
+    // instead of failing with an error.
+    SymbolTableSize = length/bucket_size();
+
     _the_table = new SymbolTable(t, number_of_entries);
     // if CDS give symbol table a default arena size since most symbols
     // are already allocated in the shared misc section.
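
The assignment SymbolTableSize = length/bucket_size() above recovers the bucket count from the archived bucket array's byte length. A worked example of that arithmetic (the pointer-sized bucket is an assumption for illustration, not the actual HashtableBucket layout):

    #include <cstdio>

    int main() {
      const unsigned bucket_size = sizeof(void*);    // assumed bucket layout
      const unsigned length = 20011 * bucket_size;   // byte length in archive
      std::printf("table size = %u\n", length / bucket_size);  // 20011
      return 0;
    }
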
--- a/src/share/vm/classfile/systemDictionary.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/classfile/systemDictionary.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -1697,6 +1697,24 @@
   return newsize;
 }
 
+#ifdef ASSERT
+class VerifySDReachableAndLiveClosure : public OopClosure {
+private:
+  BoolObjectClosure* _is_alive;
+
+  template <class T> void do_oop_work(T* p) {
+    oop obj = oopDesc::load_decode_heap_oop(p);
+    guarantee(_is_alive->do_object_b(obj), "Oop in system dictionary must be live");
+  }
+
+public:
+  VerifySDReachableAndLiveClosure(BoolObjectClosure* is_alive) : OopClosure(), _is_alive(is_alive) { }
+
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+};
+#endif
+
 // Assumes classes in the SystemDictionary are only unloaded at a safepoint
 // Note: anonymous classes are not in the SD.
 bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
@@ -1707,7 +1725,15 @@
     unloading_occurred = dictionary()->do_unloading();
     constraints()->purge_loader_constraints();
     resolution_errors()->purge_resolution_errors();
-}
+  }
+  // Oops referenced by the system dictionary may become unreachable
+  // independently of the class loader (e.g. cached protection domain oops),
+  // so we need to unlink them explicitly here rather than in
+  // Dictionary::do_unloading.
+  dictionary()->unlink(is_alive);
+#ifdef ASSERT
+  VerifySDReachableAndLiveClosure cl(is_alive);
+  dictionary()->oops_do(&cl);
+#endif
   return unloading_occurred;
 }
 
--- a/src/share/vm/classfile/verifier.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/classfile/verifier.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -2439,8 +2439,14 @@
              && !ref_class_type.equals(current_type())
              && !ref_class_type.equals(VerificationType::reference_type(
                   current_class()->super()->name()))) {
-    bool subtype = ref_class_type.is_assignable_from(
-      current_type(), this, CHECK_VERIFY(this));
+    bool subtype = false;
+    if (!current_class()->is_anonymous()) {
+      subtype = ref_class_type.is_assignable_from(
+                 current_type(), this, CHECK_VERIFY(this));
+    } else {
+      subtype = ref_class_type.is_assignable_from(VerificationType::reference_type(
+                 current_class()->host_klass()->name()), this, CHECK_VERIFY(this));
+    }
     if (!subtype) {
       verify_error(ErrorContext::bad_code(bci),
           "Bad invokespecial instruction: "
@@ -2461,7 +2467,24 @@
     } else {   // other methods
       // Ensures that target class is assignable to method class.
       if (opcode == Bytecodes::_invokespecial) {
-        current_frame->pop_stack(current_type(), CHECK_VERIFY(this));
+        if (!current_class()->is_anonymous()) {
+          current_frame->pop_stack(current_type(), CHECK_VERIFY(this));
+        } else {
+          // anonymous class invokespecial calls: check if the
+          // objectref is a subtype of the host_klass of the current class
+          // to allow an anonymous class to reference methods in the host_klass
+          VerificationType top = current_frame->pop_stack(CHECK_VERIFY(this));
+          VerificationType hosttype =
+            VerificationType::reference_type(current_class()->host_klass()->name());
+          bool subtype = hosttype.is_assignable_from(top, this, CHECK_VERIFY(this));
+          if (!subtype) {
+            verify_error( ErrorContext::bad_type(current_frame->offset(),
+              current_frame->stack_top_ctx(),
+              TypeOrigin::implicit(top)),
+              "Bad type on operand stack");
+            return;
+          }
+        }
       } else if (opcode == Bytecodes::_invokevirtual) {
         VerificationType stack_object_type =
           current_frame->pop_stack(ref_class_type, CHECK_VERIFY(this));
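
Both verifier hunks above substitute the host class's type when the class being verified is anonymous: for these invokespecial checks, the anonymous class's host_klass stands in for the current type. A tiny sketch of that substitution, with hypothetical KlassInfo/verification_name names:

    #include <cstdio>

    struct KlassInfo {
      const char* name;
      bool        is_anonymous;
      const char* host_name;   // meaningful only when is_anonymous
    };

    // The reference type the verifier should use for the current class:
    // anonymous classes are checked against their host class instead.
    static const char* verification_name(const KlassInfo& k) {
      return k.is_anonymous ? k.host_name : k.name;
    }

    int main() {
      KlassInfo plain = { "Foo",          false, NULL  };
      KlassInfo anon  = { "Foo$$Lambda1", true,  "Foo" };
      std::printf("%s %s\n", verification_name(plain),  // Foo
                             verification_name(anon));  // Foo
      return 0;
    }
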
--- a/src/share/vm/classfile/vmSymbols.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/classfile/vmSymbols.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -270,7 +270,7 @@
   template(java_lang_invoke_LambdaForm,               "java/lang/invoke/LambdaForm")              \
   template(java_lang_invoke_ForceInline_signature,    "Ljava/lang/invoke/ForceInline;")           \
   template(java_lang_invoke_DontInline_signature,     "Ljava/lang/invoke/DontInline;")            \
-  template(sun_invoke_Stable_signature,               "Lsun/invoke/Stable;")                      \
+  template(java_lang_invoke_Stable_signature,         "Ljava/lang/invoke/Stable;")                \
   template(java_lang_invoke_LambdaForm_Compiled_signature, "Ljava/lang/invoke/LambdaForm$Compiled;") \
   template(java_lang_invoke_LambdaForm_Hidden_signature, "Ljava/lang/invoke/LambdaForm$Hidden;")  \
   template(java_lang_invoke_MagicLambdaImpl,          "java/lang/invoke/MagicLambdaImpl")         \
@@ -631,6 +631,10 @@
   do_name(log_name,"log")       do_name(log10_name,"log10")     do_name(pow_name,"pow")                                 \
   do_name(exp_name,"exp")       do_name(min_name,"min")         do_name(max_name,"max")                                 \
                                                                                                                         \
+  do_name(addExact_name,"addExact")                                                                                     \
+  do_name(subtractExact_name,"subtractExact")                                                                           \
+  do_name(multiplyExact_name,"multiplyExact")                                                                           \
+                                                                                                                        \
   do_intrinsic(_dabs,                     java_lang_Math,         abs_name,   double_double_signature,           F_S)   \
   do_intrinsic(_dsin,                     java_lang_Math,         sin_name,   double_double_signature,           F_S)   \
   do_intrinsic(_dcos,                     java_lang_Math,         cos_name,   double_double_signature,           F_S)   \
@@ -643,6 +647,7 @@
   do_intrinsic(_dexp,                     java_lang_Math,         exp_name,   double_double_signature,           F_S)   \
   do_intrinsic(_min,                      java_lang_Math,         min_name,   int2_int_signature,                F_S)   \
   do_intrinsic(_max,                      java_lang_Math,         max_name,   int2_int_signature,                F_S)   \
+  do_intrinsic(_addExact,                 java_lang_Math,         addExact_name, int2_int_signature,             F_S)   \
                                                                                                                         \
   do_intrinsic(_floatToRawIntBits,        java_lang_Float,        floatToRawIntBits_name,   float_int_signature, F_S)   \
    do_name(     floatToRawIntBits_name,                          "floatToRawIntBits")                                   \
--- a/src/share/vm/code/codeCache.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/code/codeCache.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -124,7 +124,6 @@
 int CodeCache::_number_of_nmethods_with_dependencies = 0;
 bool CodeCache::_needs_cache_clean = false;
 nmethod* CodeCache::_scavenge_root_nmethods = NULL;
-nmethod* CodeCache::_saved_nmethods = NULL;
 
 int CodeCache::_codemem_full_count = 0;
 
@@ -464,96 +463,11 @@
 }
 #endif //PRODUCT
 
-/**
- * Remove and return nmethod from the saved code list in order to reanimate it.
- */
-nmethod* CodeCache::reanimate_saved_code(Method* m) {
-  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-  nmethod* saved = _saved_nmethods;
-  nmethod* prev = NULL;
-  while (saved != NULL) {
-    if (saved->is_in_use() && saved->method() == m) {
-      if (prev != NULL) {
-        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
-      } else {
-        _saved_nmethods = saved->saved_nmethod_link();
-      }
-      assert(saved->is_speculatively_disconnected(), "shouldn't call for other nmethods");
-      saved->set_speculatively_disconnected(false);
-      saved->set_saved_nmethod_link(NULL);
-      if (PrintMethodFlushing) {
-        saved->print_on(tty, " ### nmethod is reconnected");
-      }
-      if (LogCompilation && (xtty != NULL)) {
-        ttyLocker ttyl;
-        xtty->begin_elem("nmethod_reconnected compile_id='%3d'", saved->compile_id());
-        xtty->method(m);
-        xtty->stamp();
-        xtty->end_elem();
-      }
-      return saved;
-    }
-    prev = saved;
-    saved = saved->saved_nmethod_link();
-  }
-  return NULL;
-}
-
-/**
- * Remove nmethod from the saved code list in order to discard it permanently
- */
-void CodeCache::remove_saved_code(nmethod* nm) {
-  // For conc swpr this will be called with CodeCache_lock taken by caller
-  assert_locked_or_safepoint(CodeCache_lock);
-  assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
-  nmethod* saved = _saved_nmethods;
-  nmethod* prev = NULL;
-  while (saved != NULL) {
-    if (saved == nm) {
-      if (prev != NULL) {
-        prev->set_saved_nmethod_link(saved->saved_nmethod_link());
-      } else {
-        _saved_nmethods = saved->saved_nmethod_link();
-      }
-      if (LogCompilation && (xtty != NULL)) {
-        ttyLocker ttyl;
-        xtty->begin_elem("nmethod_removed compile_id='%3d'", nm->compile_id());
-        xtty->stamp();
-        xtty->end_elem();
-      }
-      return;
-    }
-    prev = saved;
-    saved = saved->saved_nmethod_link();
-  }
-  ShouldNotReachHere();
-}
-
-void CodeCache::speculatively_disconnect(nmethod* nm) {
-  assert_locked_or_safepoint(CodeCache_lock);
-  assert(nm->is_in_use() && !nm->is_speculatively_disconnected(), "should only disconnect live nmethods");
-  nm->set_saved_nmethod_link(_saved_nmethods);
-  _saved_nmethods = nm;
-  if (PrintMethodFlushing) {
-    nm->print_on(tty, " ### nmethod is speculatively disconnected");
-  }
-  if (LogCompilation && (xtty != NULL)) {
-    ttyLocker ttyl;
-    xtty->begin_elem("nmethod_disconnected compile_id='%3d'", nm->compile_id());
-    xtty->method(nm->method());
-    xtty->stamp();
-    xtty->end_elem();
-  }
-  nm->method()->clear_code();
-  nm->set_speculatively_disconnected(true);
-}
-
 
 void CodeCache::gc_prologue() {
   assert(!nmethod::oops_do_marking_is_active(), "oops_do_marking_epilogue must be called");
 }
 
-
 void CodeCache::gc_epilogue() {
   assert_locked_or_safepoint(CodeCache_lock);
   FOR_ALL_ALIVE_BLOBS(cb) {
--- a/src/share/vm/code/codeCache.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/code/codeCache.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -57,7 +57,6 @@
   static int _number_of_nmethods_with_dependencies;
   static bool _needs_cache_clean;
   static nmethod* _scavenge_root_nmethods;  // linked via nm->scavenge_root_link()
-  static nmethod* _saved_nmethods;          // Linked list of speculatively disconnected nmethods.
 
   static void verify_if_often() PRODUCT_RETURN;
 
@@ -167,17 +166,12 @@
   static size_t  capacity()                      { return _heap->capacity(); }
   static size_t  max_capacity()                  { return _heap->max_capacity(); }
   static size_t  unallocated_capacity()          { return _heap->unallocated_capacity(); }
-  static bool    needs_flushing()                { return unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace; }
   static double  reverse_free_ratio();
 
   static bool needs_cache_clean()                { return _needs_cache_clean; }
   static void set_needs_cache_clean(bool v)      { _needs_cache_clean = v;    }
   static void clear_inline_caches();             // clear all inline caches
 
-  static nmethod* reanimate_saved_code(Method* m);
-  static void remove_saved_code(nmethod* nm);
-  static void speculatively_disconnect(nmethod* nm);
-
   // Deoptimization
   static int  mark_for_deoptimization(DepChange& changes);
 #ifdef HOTSWAP
--- a/src/share/vm/code/dependencies.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/code/dependencies.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -812,8 +812,8 @@
     Klass* k = ctxk;
     Method* lm = k->lookup_method(m->name(), m->signature());
     if (lm == NULL && k->oop_is_instance()) {
-      // It might be an abstract interface method, devoid of mirandas.
-      lm = ((InstanceKlass*)k)->lookup_method_in_all_interfaces(m->name(),
+      // It might be an interface method.
+      lm = ((InstanceKlass*)k)->lookup_method_in_ordered_interfaces(m->name(),
                                                                 m->signature());
     }
     if (lm == m)
--- a/src/share/vm/code/nmethod.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/code/nmethod.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -462,7 +462,6 @@
   _state                      = alive;
   _marked_for_reclamation     = 0;
   _has_flushed_dependencies   = 0;
-  _speculatively_disconnected = 0;
   _has_unsafe_access          = 0;
   _has_method_handle_invokes  = 0;
   _lazy_critical_native       = 0;
@@ -481,7 +480,6 @@
   _osr_link                = NULL;
   _scavenge_root_link      = NULL;
   _scavenge_root_state     = 0;
-  _saved_nmethod_link      = NULL;
   _compiler                = NULL;
 
 #ifdef HAVE_DTRACE_H
@@ -686,6 +684,7 @@
     _osr_entry_point         = NULL;
     _exception_cache         = NULL;
     _pc_desc_cache.reset_to(NULL);
+    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
 
     code_buffer->copy_values_to(this);
     if (ScavengeRootsInCode && detect_scavenge_root_oops()) {
@@ -770,6 +769,7 @@
     _osr_entry_point         = NULL;
     _exception_cache         = NULL;
     _pc_desc_cache.reset_to(NULL);
+    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
 
     code_buffer->copy_values_to(this);
     debug_only(verify_scavenge_root_oops());
@@ -842,6 +842,7 @@
     _comp_level              = comp_level;
     _compiler                = compiler;
     _orig_pc_offset          = orig_pc_offset;
+    _hotness_counter         = NMethodSweeper::hotness_counter_reset_val();
 
     // Section offsets
     _consts_offset           = content_offset()      + code_buffer->total_offset_of(code_buffer->consts());
@@ -1176,7 +1177,7 @@
 
 // This is a private interface with the sweeper.
 void nmethod::mark_as_seen_on_stack() {
-  assert(is_not_entrant(), "must be a non-entrant method");
+  assert(is_alive(), "Must be an alive method");
   // Set the traversal mark to ensure that the sweeper does 2
   // cleaning passes before moving to zombie.
   set_stack_traversal_mark(NMethodSweeper::traversal_count());
@@ -1261,7 +1262,7 @@
 
   set_osr_link(NULL);
   //set_scavenge_root_link(NULL); // done by prune_scavenge_root_nmethods
-  NMethodSweeper::notify(this);
+  NMethodSweeper::notify();
 }
 
 void nmethod::invalidate_osr_method() {
@@ -1351,6 +1352,15 @@
       nmethod_needs_unregister = true;
     }
 
+    // Must happen before state change. Otherwise we have a race condition in
+    // nmethod::can_not_entrant_be_converted(), which would let a method
+    // transition its state from 'not_entrant' to 'zombie' immediately,
+    // without waiting for stack scanning.
+    if (state == not_entrant) {
+      mark_as_seen_on_stack();
+      OrderAccess::storestore();
+    }
+
     // Change state
     _state = state;
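
The reordering above is the crux of this hunk: the traversal mark written by
mark_as_seen_on_stack() must become visible before the 'not_entrant' state
does, otherwise a concurrently sweeping thread could observe the new state
together with a stale mark and convert the method to zombie before stack
scanning has run. A minimal sketch of this publication pattern, using
std::atomic in place of HotSpot's OrderAccess and simplified field names:

    #include <atomic>

    // Sketch only: models the mark-then-publish order used above,
    // not the real nmethod layout.
    struct NMethodSketch {
      std::atomic<long> stack_traversal_mark{0};
      std::atomic<int>  state{0};               // 0 = alive, 1 = not_entrant

      void make_not_entrant(long traversal_count) {
        stack_traversal_mark.store(traversal_count, std::memory_order_relaxed);
        // Release ordering plays the role of OrderAccess::storestore(): a
        // sweeper that observes state == not_entrant also observes the mark.
        state.store(1, std::memory_order_release);
      }

      bool sweeper_may_convert(long current_traversal) const {
        if (state.load(std::memory_order_acquire) != 1) return false;
        // The mark read here is at least as fresh as the state read above,
        // so the "2 cleaning passes" rule cannot be skipped by accident.
        return stack_traversal_mark.load(std::memory_order_relaxed) + 2 < current_traversal;
      }
    };
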
 
@@ -1369,11 +1379,6 @@
       HandleMark hm;
       method()->clear_code();
     }
-
-    if (state == not_entrant) {
-      mark_as_seen_on_stack();
-    }
-
   } // leave critical region under Patching_lock
 
   // When the nmethod becomes zombie it is no longer alive so the
@@ -1416,7 +1421,7 @@
   }
 
   // Make sweeper aware that there is a zombie method that needs to be removed
-  NMethodSweeper::notify(this);
+  NMethodSweeper::notify();
 
   return true;
 }
@@ -1451,10 +1456,6 @@
     CodeCache::drop_scavenge_root_nmethod(this);
   }
 
-  if (is_speculatively_disconnected()) {
-    CodeCache::remove_saved_code(this);
-  }
-
 #ifdef SHARK
   ((SharkCompiler *) compiler())->free_compiled_method(insts_begin());
 #endif // SHARK
@@ -1965,7 +1966,7 @@
     if (!_detected_scavenge_root)  _print_nm->print_on(tty, "new scavenge root");
     tty->print_cr(""PTR_FORMAT"[offset=%d] detected scavengable oop "PTR_FORMAT" (found at "PTR_FORMAT")",
                   _print_nm, (int)((intptr_t)p - (intptr_t)_print_nm),
-                  (intptr_t)(*p), (intptr_t)p);
+                  (void *)(*p), (intptr_t)p);
     (*p)->print();
   }
 #endif //PRODUCT
@@ -2345,7 +2346,7 @@
       _ok = false;
     }
     tty->print_cr("*** non-oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
-                  (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
+                  (void *)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
   }
   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
 };
@@ -2466,7 +2467,7 @@
       _ok = false;
     }
     tty->print_cr("*** scavengable oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
-                  (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
+                  (void *)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
     (*p)->print();
   }
   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
--- a/src/share/vm/code/nmethod.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/code/nmethod.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -119,7 +119,6 @@
   // To support simple linked-list chaining of nmethods:
   nmethod*  _osr_link;         // from InstanceKlass::osr_nmethods_head
   nmethod*  _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
-  nmethod*  _saved_nmethod_link; // from CodeCache::speculatively_disconnect
 
   static nmethod* volatile _oops_do_mark_nmethods;
   nmethod*        volatile _oops_do_mark_link;
@@ -165,7 +164,6 @@
 
   // protected by CodeCache_lock
   bool _has_flushed_dependencies;            // Used for maintenance of dependencies (CodeCache_lock)
-  bool _speculatively_disconnected;          // Marked for potential unload
 
   bool _marked_for_reclamation;              // Used by NMethodSweeper (set only by sweeper)
   bool _marked_for_deoptimization;           // Used for stack deoptimization
@@ -180,7 +178,7 @@
   unsigned int _has_wide_vectors:1;          // Preserve wide vectors at safepoints
 
   // Protected by Patching_lock
-  unsigned char _state;                      // {alive, not_entrant, zombie, unloaded}
+  volatile unsigned char _state;             // {alive, not_entrant, zombie, unloaded}
 
 #ifdef ASSERT
   bool _oops_are_stale;  // indicates that it's no longer safe to access oops section
@@ -202,11 +200,18 @@
 
   // not_entrant method removal. Each mark_sweep pass will update
   // this mark to current sweep invocation count if it is seen on the
-  // stack.  An not_entrant method can be removed when there is no
+  // stack.  A not_entrant method can be removed when there are no
   // more activations, i.e., when the _stack_traversal_mark is less than
   // current sweep traversal index.
   long _stack_traversal_mark;
 
+  // The _hotness_counter indicates the hotness of a method. The higher
+  // the value, the hotter the method. The hotness counter of an nmethod is
+  // set to [(ReservedCodeCacheSize / (1024 * 1024)) * 2] each time the method
+  // is active while stack scanning (mark_active_nmethods()). The hotness
+  // counter is decreased (by 1) while sweeping.
+  int _hotness_counter;
+
   ExceptionCache *_exception_cache;
   PcDescCache     _pc_desc_cache;
 
@@ -382,6 +387,10 @@
 
   int total_size        () const;
 
+  void dec_hotness_counter()        { _hotness_counter--; }
+  void set_hotness_counter(int val) { _hotness_counter = val; }
+  int  hotness_counter() const      { return _hotness_counter; }
+
   // Containment
   bool consts_contains       (address addr) const { return consts_begin       () <= addr && addr < consts_end       (); }
   bool insts_contains        (address addr) const { return insts_begin        () <= addr && addr < insts_end        (); }
@@ -408,8 +417,8 @@
   // alive.  It is used when an uncommon trap happens.  Returns true
   // if this thread changed the state of the nmethod or false if
   // another thread performed the transition.
-  bool  make_not_entrant()                        { return make_not_entrant_or_zombie(not_entrant); }
-  bool  make_zombie()                             { return make_not_entrant_or_zombie(zombie); }
+  bool  make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
+  bool  make_zombie()      { return make_not_entrant_or_zombie(zombie); }
 
   // used by jvmti to track if the unload event has been reported
   bool  unload_reported()                         { return _unload_reported; }
@@ -437,9 +446,6 @@
   bool  has_method_handle_invokes() const         { return _has_method_handle_invokes; }
   void  set_has_method_handle_invokes(bool z)     { _has_method_handle_invokes = z; }
 
-  bool  is_speculatively_disconnected() const     { return _speculatively_disconnected; }
-  void  set_speculatively_disconnected(bool z)    { _speculatively_disconnected = z; }
-
   bool  is_lazy_critical_native() const           { return _lazy_critical_native; }
   void  set_lazy_critical_native(bool z)          { _lazy_critical_native = z; }
 
@@ -499,9 +505,6 @@
   nmethod* scavenge_root_link() const                  { return _scavenge_root_link; }
   void     set_scavenge_root_link(nmethod *n)          { _scavenge_root_link = n; }
 
-  nmethod* saved_nmethod_link() const                  { return _saved_nmethod_link; }
-  void     set_saved_nmethod_link(nmethod *n)          { _saved_nmethod_link = n; }
-
  public:
 
   // Sweeper support
--- a/src/share/vm/compiler/compileBroker.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/compiler/compileBroker.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -634,19 +634,36 @@
   NMethodSweeper::possibly_sweep();
 
   MutexLocker locker(lock());
-  // Wait for an available CompileTask.
+  // If _first is NULL we have no more compile jobs. There are two reasons for
+  // having no compile jobs: First, we compiled everything we wanted. Second,
+  // we ran out of code cache so compilation has been disabled. In the latter
+  // case we perform code cache sweeps to free memory such that we can re-enable
+  // compilation.
   while (_first == NULL) {
-    // There is no work to be done right now.  Wait.
-    if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() || CodeCache::needs_flushing())) {
-      // During the emergency sweeping periods, wake up and sweep occasionally
-      bool timedout = lock()->wait(!Mutex::_no_safepoint_check_flag, NmethodSweepCheckInterval*1000);
-      if (timedout) {
+    if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs()) {
+      // Wait a certain amount of time to possibly do another sweep.
+      // We must wait until stack scanning has happened so that we can
+      // transition a method's state from 'not_entrant' to 'zombie'.
+      long wait_time = NmethodSweepCheckInterval * 1000;
+      if (FLAG_IS_DEFAULT(NmethodSweepCheckInterval)) {
+        // Only one thread at a time can do sweeping. Scale the
+        // wait time according to the number of compiler threads.
+        // As a result, a sweep is likely to happen roughly every 100ms,
+        // regardless of how many threads are doing the sweeping.
+        wait_time = 100 * CICompilerCount;
+      }
+      bool timeout = lock()->wait(!Mutex::_no_safepoint_check_flag, wait_time);
+      if (timeout) {
         MutexUnlocker ul(lock());
-        // When otherwise not busy, run nmethod sweeping
         NMethodSweeper::possibly_sweep();
       }
     } else {
-      // During normal operation no need to wake up on timer
+      // If there are no compilation tasks and we can compile new jobs
+      // (i.e., there is enough free space in the code cache) there is
+      // no need to invoke the sweeper. As a result, the hotness of methods
+      // remains unchanged. This behavior is desired, since we want to keep
+      // the code cache in a stable state, i.e., we do not want to evict
+      // methods from it unnecessarily.
       lock()->wait();
     }
   }
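
The scaled wait keeps the overall sweep cadence independent of the number of
compiler threads: each of the CICompilerCount threads waits CICompilerCount
times longer, so collectively a sweep attempt still fires roughly every
100ms. A sketch of the selection logic (hypothetical free function; in the
hunk above it is inlined into the wait loop):

    // 'interval_is_default' stands in for FLAG_IS_DEFAULT(NmethodSweepCheckInterval).
    static long sweep_wait_millis(bool interval_is_default,
                                  long nmethod_sweep_check_interval, // seconds
                                  int  ci_compiler_count) {
      if (interval_is_default) {
        // N threads each waiting 100*N ms => ~one wakeup every 100ms overall.
        return 100L * ci_compiler_count;
      }
      return nmethod_sweep_check_interval * 1000L;
    }
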
@@ -1227,16 +1244,9 @@
         return method_code;
       }
     }
-    if (method->is_not_compilable(comp_level)) return NULL;
-
-    if (UseCodeCacheFlushing) {
-      nmethod* saved = CodeCache::reanimate_saved_code(method());
-      if (saved != NULL) {
-        method->set_code(method, saved);
-        return saved;
-      }
+    if (method->is_not_compilable(comp_level)) {
+      return NULL;
     }
-
   } else {
     // osr compilation
 #ifndef TIERED
@@ -1585,9 +1595,6 @@
       if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
         // the code cache is really full
         handle_full_code_cache();
-      } else if (UseCodeCacheFlushing && CodeCache::needs_flushing()) {
-        // Attempt to start cleaning the code cache while there is still a little headroom
-        NMethodSweeper::handle_full_code_cache(false);
       }
 
       CompileTask* task = queue->get();
@@ -1943,7 +1950,11 @@
     }
 #endif
     if (UseCodeCacheFlushing) {
-      NMethodSweeper::handle_full_code_cache(true);
+      // Since code cache is full, immediately stop new compiles
+      if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
+        NMethodSweeper::log_sweep("disable_compiler");
+        NMethodSweeper::possibly_sweep();
+      }
     } else {
       UseCompiler               = false;
       AlwaysCompileLoopMethods  = false;
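
Only one thread should react to a full code cache, which is why the new code
is guarded by the return value of set_should_compile_new_jobs(): the flag
transition is a compare-and-swap, and only the thread that actually performs
it sees 'true' and starts the sweep. A sketch of that idiom with std::atomic
(simplified names; the real flag carries more states than a plain bool):

    #include <atomic>

    static std::atomic<bool> compilation_enabled{true};

    // Returns true only for the caller that actually flips the flag.
    static bool stop_compilation_sketch() {
      bool expected = true;
      return compilation_enabled.compare_exchange_strong(expected, false);
    }

    static void on_full_code_cache_sketch() {
      if (stop_compilation_sketch()) {
        // log_sweep("disable_compiler"); possibly_sweep();  // as above
      }
    }
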
--- a/src/share/vm/compiler/oopMap.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/compiler/oopMap.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -628,7 +628,7 @@
 
 
 // Returns value of location as an int
-intptr_t value_of_loc(oop *pointer) { return (intptr_t)(*pointer); }
+intptr_t value_of_loc(oop *pointer) { return cast_from_oop<intptr_t>((*pointer)); }
 
 
 void DerivedPointerTable::add(oop *derived_loc, oop *base_loc) {
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -9065,7 +9065,7 @@
   return !stack->isEmpty();
 }
 
-#define BUSY  (oop(0x1aff1aff))
+#define BUSY  (cast_to_oop<intptr_t>(0x1aff1aff))
 // (MT-safe) Get a prefix of at most "num" from the list.
 // The overflow list is chained through the mark word of
 // each object in the list. We fetch the entire list,
@@ -9098,7 +9098,7 @@
     return false;
   }
   // Grab the entire list; we'll put back a suffix
-  oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
+  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
   Thread* tid = Thread::current();
   // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
   // set to ParallelGCThreads.
@@ -9113,7 +9113,7 @@
       return false;
     } else if (_overflow_list != BUSY) {
       // Try and grab the prefix
-      prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
+      prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
     }
   }
   // If the list was found to be empty, or we spun long
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -2694,7 +2694,7 @@
 
     if (print_it) {
       _out->print_cr(" "PTR_FORMAT"%s",
-                     o, (over_tams) ? " >" : (marked) ? " M" : "");
+                     (void *)o, (over_tams) ? " >" : (marked) ? " M" : "");
       PrintReachableOopClosure oopCl(_out, _vo, _all);
       o->oop_iterate_no_header(&oopCl);
     }
--- a/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -81,7 +81,7 @@
                                          size_t* marked_bytes_array,
                                          BitMap* task_card_bm) {
   G1CollectedHeap* g1h = _g1h;
-  CardTableModRefBS* ct_bs = (CardTableModRefBS*) (g1h->barrier_set());
+  CardTableModRefBS* ct_bs = g1h->g1_barrier_set();
 
   HeapWord* start = mr.start();
   HeapWord* end = mr.end();
--- a/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CardCounts.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -65,9 +65,7 @@
     // threshold limit is no more than this.
     guarantee(G1ConcRSHotCardLimit <= max_jubyte, "sanity");
 
-    ModRefBarrierSet* bs = _g1h->mr_bs();
-    guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
-    _ct_bs = (CardTableModRefBS*)bs;
+    _ct_bs = _g1h->g1_barrier_set();
     _ct_bot = _ct_bs->byte_for_const(_g1h->reserved_region().start());
 
     // Allocate/Reserve the counts table
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -125,10 +125,8 @@
   int _histo[256];
 public:
   ClearLoggedCardTableEntryClosure() :
-    _calls(0)
+    _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set())
   {
-    _g1h = G1CollectedHeap::heap();
-    _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
     for (int i = 0; i < 256; i++) _histo[i] = 0;
   }
   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
@@ -158,11 +156,8 @@
   CardTableModRefBS* _ctbs;
 public:
   RedirtyLoggedCardTableEntryClosure() :
-    _calls(0)
-  {
-    _g1h = G1CollectedHeap::heap();
-    _ctbs = (CardTableModRefBS*)_g1h->barrier_set();
-  }
+    _calls(0), _g1h(G1CollectedHeap::heap()), _ctbs(_g1h->g1_barrier_set()) {}
+
   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
     if (_g1h->is_in_reserved(_ctbs->addr_for(card_ptr))) {
       _calls++;
@@ -478,7 +473,7 @@
 
 void G1CollectedHeap::check_ct_logs_at_safepoint() {
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
-  CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
+  CardTableModRefBS* ct_bs = g1_barrier_set();
 
   // Count the dirty cards at the start.
   CountNonCleanMemRegionClosure count1(this);
@@ -1205,7 +1200,7 @@
 };
 
 void G1CollectedHeap::clear_rsets_post_compaction() {
-  PostMCRemSetClearClosure rs_clear(this, mr_bs());
+  PostMCRemSetClearClosure rs_clear(this, g1_barrier_set());
   heap_region_iterate(&rs_clear);
 }
 
@@ -1777,7 +1772,6 @@
 }
 
 bool G1CollectedHeap::expand(size_t expand_bytes) {
-  size_t old_mem_size = _g1_storage.committed_size();
   size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
   aligned_expand_bytes = align_size_up(aligned_expand_bytes,
                                        HeapRegion::GrainBytes);
@@ -1787,6 +1781,13 @@
                 ergo_format_byte("attempted expansion amount"),
                 expand_bytes, aligned_expand_bytes);
 
+  if (_g1_storage.uncommitted_size() == 0) {
+    ergo_verbose0(ErgoHeapSizing,
+                      "did not expand the heap",
+                      ergo_format_reason("heap already fully expanded"));
+    return false;
+  }
+
   // First commit the memory.
   HeapWord* old_end = (HeapWord*) _g1_storage.high();
   bool successful = _g1_storage.expand_by(aligned_expand_bytes);
@@ -1845,7 +1846,6 @@
 }
 
 void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
-  size_t old_mem_size = _g1_storage.committed_size();
   size_t aligned_shrink_bytes =
     ReservedSpace::page_align_size_down(shrink_bytes);
   aligned_shrink_bytes = align_size_down(aligned_shrink_bytes,
@@ -2045,20 +2045,13 @@
   // Create the gen rem set (and barrier set) for the entire reserved region.
   _rem_set = collector_policy()->create_rem_set(_reserved, 2);
   set_barrier_set(rem_set()->bs());
-  if (barrier_set()->is_a(BarrierSet::ModRef)) {
-    _mr_bs = (ModRefBarrierSet*)_barrier_set;
-  } else {
-    vm_exit_during_initialization("G1 requires a mod ref bs.");
+  if (!barrier_set()->is_a(BarrierSet::G1SATBCTLogging)) {
+    vm_exit_during_initialization("G1 requires a G1SATBLoggingCardTableModRefBS");
     return JNI_ENOMEM;
   }
 
   // Also create a G1 rem set.
-  if (mr_bs()->is_a(BarrierSet::CardTableModRef)) {
-    _g1_rem_set = new G1RemSet(this, (CardTableModRefBS*)mr_bs());
-  } else {
-    vm_exit_during_initialization("G1 requires a cardtable mod ref bs.");
-    return JNI_ENOMEM;
-  }
+  _g1_rem_set = new G1RemSet(this, g1_barrier_set());
 
   // Carve out the G1 part of the heap.
 
@@ -3681,6 +3674,11 @@
   assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
   // Fill TLAB's and such
   ensure_parsability(true);
+
+  if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
+      (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
+    g1_rem_set()->print_periodic_summary_info("Before GC RS summary");
+  }
 }
 
 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
@@ -3689,7 +3687,7 @@
       (G1SummarizeRSetStatsPeriod > 0) &&
       // we are at the end of the GC. Total collections has already been increased.
       ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
-    g1_rem_set()->print_periodic_summary_info();
+    g1_rem_set()->print_periodic_summary_info("After GC RS summary");
   }
 
   // FIXME: what is this about?
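
Note how the two periodic checks bracket the same pause: the prologue tests
total_collections() before the counter is incremented during the GC, while
the epilogue tests total_collections() - 1 after the increment, so the
"Before GC" and "After GC" summaries always refer to the same collection. A
toy arithmetic check of that pairing (assumed period value):

    #include <cassert>

    int main() {
      const unsigned P = 5;                      // G1SummarizeRSetStatsPeriod
      for (unsigned n = 0; n < 100; n++) {       // n = count at the prologue
        bool before = (n % P == 0);              // prologue check
        bool after  = (((n + 1) - 1) % P == 0);  // epilogue, after the bump
        assert(before == after);
      }
      return 0;
    }
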
@@ -4550,7 +4548,7 @@
   : _g1h(g1h),
     _refs(g1h->task_queue(queue_num)),
     _dcq(&g1h->dirty_card_queue_set()),
-    _ct_bs((CardTableModRefBS*)_g1h->barrier_set()),
+    _ct_bs(g1h->g1_barrier_set()),
     _g1_rem(g1h->g1_rem_set()),
     _hash_seed(17), _queue_num(queue_num),
     _term_attempts(0),
@@ -4617,7 +4615,7 @@
   assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
   oop p = oopDesc::load_decode_heap_oop(ref);
   assert(_g1h->is_in_g1_reserved(p),
-         err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
+         err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
   return true;
 }
 
@@ -4627,11 +4625,11 @@
     // Must be in the collection set--it's already been copied.
     oop p = clear_partial_array_mask(ref);
     assert(_g1h->obj_in_cs(p),
-           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
+           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
   } else {
     oop p = oopDesc::load_decode_heap_oop(ref);
     assert(_g1h->is_in_g1_reserved(p),
-           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
+           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
   }
   return true;
 }
@@ -5979,11 +5977,11 @@
 }
 
 class G1ParCleanupCTTask : public AbstractGangTask {
-  CardTableModRefBS* _ct_bs;
+  G1SATBCardTableModRefBS* _ct_bs;
   G1CollectedHeap* _g1h;
   HeapRegion* volatile _su_head;
 public:
-  G1ParCleanupCTTask(CardTableModRefBS* ct_bs,
+  G1ParCleanupCTTask(G1SATBCardTableModRefBS* ct_bs,
                      G1CollectedHeap* g1h) :
     AbstractGangTask("G1 Par Cleanup CT Task"),
     _ct_bs(ct_bs), _g1h(g1h) { }
@@ -6006,9 +6004,9 @@
 #ifndef PRODUCT
 class G1VerifyCardTableCleanup: public HeapRegionClosure {
   G1CollectedHeap* _g1h;
-  CardTableModRefBS* _ct_bs;
+  G1SATBCardTableModRefBS* _ct_bs;
 public:
-  G1VerifyCardTableCleanup(G1CollectedHeap* g1h, CardTableModRefBS* ct_bs)
+  G1VerifyCardTableCleanup(G1CollectedHeap* g1h, G1SATBCardTableModRefBS* ct_bs)
     : _g1h(g1h), _ct_bs(ct_bs) { }
   virtual bool doHeapRegion(HeapRegion* r) {
     if (r->is_survivor()) {
@@ -6022,7 +6020,7 @@
 
 void G1CollectedHeap::verify_not_dirty_region(HeapRegion* hr) {
   // All of the region should be clean.
-  CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
+  G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
   MemRegion mr(hr->bottom(), hr->end());
   ct_bs->verify_not_dirty_region(mr);
 }
@@ -6035,13 +6033,17 @@
   // not dirty that area (one less thing to have to do while holding
   // a lock). So we can only verify that [bottom(),pre_dummy_top()]
   // is dirty.
-  CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
+  G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
   MemRegion mr(hr->bottom(), hr->pre_dummy_top());
-  ct_bs->verify_dirty_region(mr);
+  if (hr->is_young()) {
+    ct_bs->verify_g1_young_region(mr);
+  } else {
+    ct_bs->verify_dirty_region(mr);
+  }
 }
 
 void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
-  CardTableModRefBS* ct_bs = (CardTableModRefBS*) barrier_set();
+  G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
   for (HeapRegion* hr = head; hr != NULL; hr = hr->get_next_young_region()) {
     verify_dirty_region(hr);
   }
@@ -6053,7 +6055,7 @@
 #endif
 
 void G1CollectedHeap::cleanUpCardTable() {
-  CardTableModRefBS* ct_bs = (CardTableModRefBS*) (barrier_set());
+  G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
   double start = os::elapsedTime();
 
   {
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -31,6 +31,7 @@
 #include "gc_implementation/g1/g1HRPrinter.hpp"
 #include "gc_implementation/g1/g1MonitoringSupport.hpp"
 #include "gc_implementation/g1/g1RemSet.hpp"
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/g1YCTypes.hpp"
 #include "gc_implementation/g1/heapRegionSeq.hpp"
 #include "gc_implementation/g1/heapRegionSets.hpp"
@@ -703,7 +704,7 @@
     if (_g1_committed.contains((HeapWord*) obj)) {
       // no need to subtract the bottom of the heap from obj,
       // _in_cset_fast_test is biased
-      uintx index = (uintx) obj >> HeapRegion::LogOfHRGrainBytes;
+      uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
       bool ret = _in_cset_fast_test[index];
       // let's make sure the result is consistent with what the slower
       // test returns
@@ -791,8 +792,6 @@
 
   // The g1 remembered set of the heap.
   G1RemSet* _g1_rem_set;
-  // And it's mod ref barrier set, used to track updates for the above.
-  ModRefBarrierSet* _mr_bs;
 
   // A set of cards that cover the objects for which the Rsets should be updated
   // concurrently after the collection.
@@ -1127,7 +1126,6 @@
 
   // The rem set and barrier set.
   G1RemSet* g1_rem_set() const { return _g1_rem_set; }
-  ModRefBarrierSet* mr_bs() const { return _mr_bs; }
 
   unsigned get_gc_time_stamp() {
     return _gc_time_stamp;
@@ -1346,6 +1344,10 @@
 
   virtual bool is_in_closed_subset(const void* p) const;
 
+  G1SATBCardTableModRefBS* g1_barrier_set() {
+    return (G1SATBCardTableModRefBS*) barrier_set();
+  }
+
   // This resets the card table to all zeros.  It is used after
   // a collection pause which used the card table to claim cards.
   void cleanUpCardTable();
@@ -1875,7 +1877,7 @@
   G1CollectedHeap* _g1h;
   RefToScanQueue*  _refs;
   DirtyCardQueue   _dcq;
-  CardTableModRefBS* _ct_bs;
+  G1SATBCardTableModRefBS* _ct_bs;
   G1RemSet* _g1_rem;
 
   G1ParGCAllocBufferContainer  _surviving_alloc_buffer;
@@ -1914,7 +1916,7 @@
   void   add_to_undo_waste(size_t waste)         { _undo_waste += waste; }
 
   DirtyCardQueue& dirty_card_queue()             { return _dcq;  }
-  CardTableModRefBS* ctbs()                      { return _ct_bs; }
+  G1SATBCardTableModRefBS* ctbs()                { return _ct_bs; }
 
   template <class T> void immediate_rs_update(HeapRegion* from, T* p, int tid) {
     if (!from->is_survivor()) {
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -29,6 +29,7 @@
 #include "gc_implementation/g1/g1CollectedHeap.hpp"
 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
+#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
 #include "utilities/taskqueue.hpp"
 
@@ -134,7 +135,7 @@
   assert(containing_hr->is_in(end - 1), "it should also contain end - 1");
 
   MemRegion mr(start, end);
-  ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
+  g1_barrier_set()->g1_mark_as_young(mr);
 }
 
 inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -319,10 +319,10 @@
 }
 
 void G1CollectorPolicy::initialize_flags() {
-  set_min_alignment(HeapRegion::GrainBytes);
+  _min_alignment = HeapRegion::GrainBytes;
   size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name());
   size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
-  set_max_alignment(MAX3(card_table_alignment, min_alignment(), page_size));
+  _max_alignment = MAX3(card_table_alignment, _min_alignment, page_size);
   if (SurvivorRatio < 1) {
     vm_exit_during_initialization("Invalid survivor ratio specified");
   }
--- a/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -41,11 +41,11 @@
 private:
   G1CollectedHeap* _g1;
   DirtyCardQueue *_dcq;
-  CardTableModRefBS* _ct_bs;
+  G1SATBCardTableModRefBS* _ct_bs;
 
 public:
   UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
-    _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {}
+    _g1(g1), _ct_bs(_g1->g1_barrier_set()), _dcq(dcq) {}
 
   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
   virtual void do_oop(      oop* p) { do_oop_work(p); }
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -220,7 +220,7 @@
 public:
   G1PrepareCompactClosure(CompactibleSpace* cs)
   : _g1h(G1CollectedHeap::heap()),
-    _mrbs(G1CollectedHeap::heap()->mr_bs()),
+    _mrbs(_g1h->g1_barrier_set()),
     _cp(NULL, cs, cs->initialize_threshold()),
     _humongous_proxy_set("G1MarkSweep Humongous Proxy Set") { }
 
--- a/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -91,12 +91,12 @@
 }
 
 template <class T> inline T* set_partial_array_mask(T obj) {
-  assert(((uintptr_t)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
-  return (T*) ((uintptr_t)obj | G1_PARTIAL_ARRAY_MASK);
+  assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
+  return (T*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
 }
 
 template <class T> inline oop clear_partial_array_mask(T* ref) {
-  return oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
+  return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
 }
 
 class G1ParScanPartialArrayClosure : public G1ParClosureSuper {
--- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -83,7 +83,9 @@
   for (uint i = 0; i < n_workers(); i++) {
     _cset_rs_update_cl[i] = NULL;
   }
-  _prev_period_summary.initialize(this, n_workers());
+  if (G1SummarizeRSetStats) {
+    _prev_period_summary.initialize(this);
+  }
 }
 
 G1RemSet::~G1RemSet() {
@@ -109,7 +111,7 @@
   CodeBlobToOopClosure* _code_root_cl;
 
   G1BlockOffsetSharedArray* _bot_shared;
-  CardTableModRefBS *_ct_bs;
+  G1SATBCardTableModRefBS *_ct_bs;
 
   double _strong_code_root_scan_time_sec;
   int    _worker_i;
@@ -130,7 +132,7 @@
   {
     _g1h = G1CollectedHeap::heap();
     _bot_shared = _g1h->bot_shared();
-    _ct_bs = (CardTableModRefBS*) (_g1h->barrier_set());
+    _ct_bs = _g1h->g1_barrier_set();
     _block_size = MAX2<int>(G1RSetScanBlockSize, 1);
   }
 
@@ -505,12 +507,7 @@
   ScrubRSClosure(BitMap* region_bm, BitMap* card_bm) :
     _g1h(G1CollectedHeap::heap()),
     _region_bm(region_bm), _card_bm(card_bm),
-    _ctbs(NULL)
-  {
-    ModRefBarrierSet* bs = _g1h->mr_bs();
-    guarantee(bs->is_a(BarrierSet::CardTableModRef), "Precondition");
-    _ctbs = (CardTableModRefBS*)bs;
-  }
+    _ctbs(_g1h->g1_barrier_set()) {}
 
   bool doHeapRegion(HeapRegion* r) {
     if (!r->continuesHumongous()) {
@@ -731,19 +728,19 @@
   return has_refs_into_cset;
 }
 
-void G1RemSet::print_periodic_summary_info() {
+void G1RemSet::print_periodic_summary_info(const char* header) {
   G1RemSetSummary current;
-  current.initialize(this, n_workers());
+  current.initialize(this);
 
   _prev_period_summary.subtract_from(&current);
-  print_summary_info(&_prev_period_summary);
+  print_summary_info(&_prev_period_summary, header);
 
   _prev_period_summary.set(&current);
 }
 
 void G1RemSet::print_summary_info() {
   G1RemSetSummary current;
-  current.initialize(this, n_workers());
+  current.initialize(this);
 
   print_summary_info(&current, " Cumulative RS summary");
 }
--- a/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/g1RemSet.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -145,7 +145,7 @@
   virtual void print_summary_info();
 
   // Print accumulated summary info from the last time called.
-  virtual void print_periodic_summary_info();
+  virtual void print_periodic_summary_info(const char* header);
 
   // Prepare remembered set for verification.
   virtual void prepare_for_verify();
--- a/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -77,12 +77,12 @@
   return _rs_threads_vtimes[thread];
 }
 
-void G1RemSetSummary::initialize(G1RemSet* remset, uint num_workers) {
+void G1RemSetSummary::initialize(G1RemSet* remset) {
   assert(_rs_threads_vtimes == NULL, "just checking");
   assert(remset != NULL, "just checking");
 
   _remset = remset;
-  _num_vtimes = num_workers;
+  _num_vtimes = ConcurrentG1Refine::thread_num();
   _rs_threads_vtimes = NEW_C_HEAP_ARRAY(double, _num_vtimes, mtGC);
   memset(_rs_threads_vtimes, 0, sizeof(double) * _num_vtimes);
 
@@ -125,25 +125,115 @@
   _sampling_thread_vtime = other->sampling_thread_vtime() - _sampling_thread_vtime;
 }
 
+static double percent_of(size_t numerator, size_t denominator) {
+  if (denominator != 0) {
+    return (double)numerator / denominator * 100.0f;
+  } else {
+    return 0.0f;
+  }
+}
+
+static size_t round_to_K(size_t value) {
+  return value / K;
+}
+
+class RegionTypeCounter VALUE_OBJ_CLASS_SPEC {
+private:
+  const char* _name;
+
+  size_t _rs_mem_size;
+  size_t _cards_occupied;
+  size_t _amount;
+
+  size_t _code_root_mem_size;
+  size_t _code_root_elems;
+
+  double rs_mem_size_percent_of(size_t total) {
+    return percent_of(_rs_mem_size, total);
+  }
+
+  double cards_occupied_percent_of(size_t total) {
+    return percent_of(_cards_occupied, total);
+  }
+
+  double code_root_mem_size_percent_of(size_t total) {
+    return percent_of(_code_root_mem_size, total);
+  }
+
+  double code_root_elems_percent_of(size_t total) {
+    return percent_of(_code_root_elems, total);
+  }
+
+  size_t amount() const { return _amount; }
+
+public:
+
+  RegionTypeCounter(const char* name) : _name(name), _rs_mem_size(0), _cards_occupied(0),
+    _amount(0), _code_root_mem_size(0), _code_root_elems(0) { }
+
+  void add(size_t rs_mem_size, size_t cards_occupied, size_t code_root_mem_size,
+    size_t code_root_elems) {
+    _rs_mem_size += rs_mem_size;
+    _cards_occupied += cards_occupied;
+    _code_root_mem_size += code_root_mem_size;
+    _code_root_elems += code_root_elems;
+    _amount++;
+  }
+
+  size_t rs_mem_size() const { return _rs_mem_size; }
+  size_t cards_occupied() const { return _cards_occupied; }
+
+  size_t code_root_mem_size() const { return _code_root_mem_size; }
+  size_t code_root_elems() const { return _code_root_elems; }
+
+  void print_rs_mem_info_on(outputStream * out, size_t total) {
+    out->print_cr("    %8dK (%5.1f%%) by %zd %s regions", round_to_K(rs_mem_size()), rs_mem_size_percent_of(total), amount(), _name);
+  }
+
+  void print_cards_occupied_info_on(outputStream * out, size_t total) {
+    out->print_cr("     %8d (%5.1f%%) entries by %zd %s regions", cards_occupied(), cards_occupied_percent_of(total), amount(), _name);
+  }
+
+  void print_code_root_mem_info_on(outputStream * out, size_t total) {
+    out->print_cr("    %8dK (%5.1f%%) by %zd %s regions", round_to_K(code_root_mem_size()), code_root_mem_size_percent_of(total), amount(), _name);
+  }
+
+  void print_code_root_elems_info_on(outputStream * out, size_t total) {
+    out->print_cr("     %8d (%5.1f%%) elements by %zd %s regions", code_root_elems(), code_root_elems_percent_of(total), amount(), _name);
+  }
+};
+
+
 class HRRSStatsIter: public HeapRegionClosure {
-  size_t _occupied;
+private:
+  RegionTypeCounter _young;
+  RegionTypeCounter _humongous;
+  RegionTypeCounter _free;
+  RegionTypeCounter _old;
+  RegionTypeCounter _all;
 
-  size_t _total_rs_mem_sz;
   size_t _max_rs_mem_sz;
   HeapRegion* _max_rs_mem_sz_region;
 
-  size_t _total_code_root_mem_sz;
+  size_t total_rs_mem_sz() const            { return _all.rs_mem_size(); }
+  size_t total_cards_occupied() const       { return _all.cards_occupied(); }
+
+  size_t max_rs_mem_sz() const              { return _max_rs_mem_sz; }
+  HeapRegion* max_rs_mem_sz_region() const  { return _max_rs_mem_sz_region; }
+
   size_t _max_code_root_mem_sz;
   HeapRegion* _max_code_root_mem_sz_region;
+
+  size_t total_code_root_mem_sz() const     { return _all.code_root_mem_size(); }
+  size_t total_code_root_elems() const      { return _all.code_root_elems(); }
+
+  size_t max_code_root_mem_sz() const       { return _max_code_root_mem_sz; }
+  HeapRegion* max_code_root_mem_sz_region() const { return _max_code_root_mem_sz_region; }
+
 public:
-  HRRSStatsIter() :
-    _occupied(0),
-    _total_rs_mem_sz(0),
-    _max_rs_mem_sz(0),
-    _max_rs_mem_sz_region(NULL),
-    _total_code_root_mem_sz(0),
-    _max_code_root_mem_sz(0),
-    _max_code_root_mem_sz_region(NULL)
+  HRRSStatsIter() : _all("All"), _young("Young"), _humongous("Humongous"),
+    _free("Free"), _old("Old"), _max_code_root_mem_sz_region(NULL), _max_rs_mem_sz_region(NULL),
+    _max_rs_mem_sz(0), _max_code_root_mem_sz(0)
   {}
 
   bool doHeapRegion(HeapRegion* r) {
@@ -156,46 +246,95 @@
       _max_rs_mem_sz = rs_mem_sz;
       _max_rs_mem_sz_region = r;
     }
-    _total_rs_mem_sz += rs_mem_sz;
-
+    size_t occupied_cards = hrrs->occupied();
     size_t code_root_mem_sz = hrrs->strong_code_roots_mem_size();
-    if (code_root_mem_sz > _max_code_root_mem_sz) {
-      _max_code_root_mem_sz = code_root_mem_sz;
+    if (code_root_mem_sz > max_code_root_mem_sz()) {
       _max_code_root_mem_sz_region = r;
     }
-    _total_code_root_mem_sz += code_root_mem_sz;
+    size_t code_root_elems = hrrs->strong_code_roots_list_length();
 
-    size_t occ = hrrs->occupied();
-    _occupied += occ;
+    RegionTypeCounter* current = NULL;
+    if (r->is_young()) {
+      current = &_young;
+    } else if (r->isHumongous()) {
+      current = &_humongous;
+    } else if (r->is_empty()) {
+      current = &_free;
+    } else {
+      current = &_old;
+    }
+    current->add(rs_mem_sz, occupied_cards, code_root_mem_sz, code_root_elems);
+    _all.add(rs_mem_sz, occupied_cards, code_root_mem_sz, code_root_elems);
+
     return false;
   }
-  size_t total_rs_mem_sz() { return _total_rs_mem_sz; }
-  size_t max_rs_mem_sz() { return _max_rs_mem_sz; }
-  HeapRegion* max_rs_mem_sz_region() { return _max_rs_mem_sz_region; }
-  size_t total_code_root_mem_sz() { return _total_code_root_mem_sz; }
-  size_t max_code_root_mem_sz() { return _max_code_root_mem_sz; }
-  HeapRegion* max_code_root_mem_sz_region() { return _max_code_root_mem_sz_region; }
-  size_t occupied() { return _occupied; }
+
+  void print_summary_on(outputStream* out) {
+    RegionTypeCounter* counters[] = { &_young, &_humongous, &_free, &_old, NULL };
+
+    out->print_cr("\n Current rem set statistics");
+    out->print_cr("  Total per region rem sets sizes = "SIZE_FORMAT"K."
+                  " Max = "SIZE_FORMAT"K.",
+                  round_to_K(total_rs_mem_sz()), round_to_K(max_rs_mem_sz()));
+    for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) {
+      (*current)->print_rs_mem_info_on(out, total_rs_mem_sz());
+    }
+
+    out->print_cr("   Static structures = "SIZE_FORMAT"K,"
+                  " free_lists = "SIZE_FORMAT"K.",
+                  round_to_K(HeapRegionRemSet::static_mem_size()),
+                  round_to_K(HeapRegionRemSet::fl_mem_size()));
+
+    out->print_cr("    "SIZE_FORMAT" occupied cards represented.",
+                  total_cards_occupied());
+    for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) {
+      (*current)->print_cards_occupied_info_on(out, total_cards_occupied());
+    }
+
+    // Largest sized rem set region statistics
+    HeapRegionRemSet* rem_set = max_rs_mem_sz_region()->rem_set();
+    out->print_cr("    Region with largest rem set = "HR_FORMAT", "
+                  "size = "SIZE_FORMAT "K, occupied = "SIZE_FORMAT"K.",
+                  HR_FORMAT_PARAMS(max_rs_mem_sz_region()),
+                  round_to_K(rem_set->mem_size()),
+                  round_to_K(rem_set->occupied()));
+
+    // Strong code root statistics
+    HeapRegionRemSet* max_code_root_rem_set = max_code_root_mem_sz_region()->rem_set();
+    out->print_cr("  Total heap region code root sets sizes = "SIZE_FORMAT"K."
+                  "  Max = "SIZE_FORMAT"K.",
+                  round_to_K(total_code_root_mem_sz()),
+                  round_to_K(max_code_root_rem_set->strong_code_roots_mem_size()));
+    for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) {
+      (*current)->print_code_root_mem_info_on(out, total_code_root_mem_sz());
+    }
+
+    out->print_cr("    "SIZE_FORMAT" code roots represented.",
+                  total_code_root_elems());
+    for (RegionTypeCounter** current = &counters[0]; *current != NULL; current++) {
+      (*current)->print_code_root_elems_info_on(out, total_code_root_elems());
+    }
+
+    out->print_cr("    Region with largest amount of code roots = "HR_FORMAT", "
+                  "size = "SIZE_FORMAT "K, num_elems = "SIZE_FORMAT".",
+                  HR_FORMAT_PARAMS(max_code_root_mem_sz_region()),
+                  round_to_K(max_code_root_rem_set->strong_code_roots_mem_size()),
+                  round_to_K(max_code_root_rem_set->strong_code_roots_list_length()));
+  }
 };
 
-double calc_percentage(size_t numerator, size_t denominator) {
-  if (denominator != 0) {
-    return (double)numerator / denominator * 100.0;
-  } else {
-    return 0.0f;
-  }
-}
-
 void G1RemSetSummary::print_on(outputStream* out) {
-  out->print_cr("\n Concurrent RS processed "SIZE_FORMAT" cards",
+  out->print_cr("\n Recent concurrent refinement statistics");
+  out->print_cr("  Processed "SIZE_FORMAT" cards",
                 num_concurrent_refined_cards());
   out->print_cr("  Of %d completed buffers:", num_processed_buf_total());
   out->print_cr("     %8d (%5.1f%%) by concurrent RS threads.",
                 num_processed_buf_total(),
-                calc_percentage(num_processed_buf_rs_threads(), num_processed_buf_total()));
+                percent_of(num_processed_buf_rs_threads(), num_processed_buf_total()));
   out->print_cr("     %8d (%5.1f%%) by mutator threads.",
                 num_processed_buf_mutator(),
-                calc_percentage(num_processed_buf_mutator(), num_processed_buf_total()));
+                percent_of(num_processed_buf_mutator(), num_processed_buf_total()));
+  out->print_cr("  Did %d coarsenings.", num_coarsenings());
   out->print_cr("  Concurrent RS threads times (s)");
   out->print("     ");
   for (uint i = 0; i < _num_vtimes; i++) {
@@ -207,33 +346,5 @@
 
   HRRSStatsIter blk;
   G1CollectedHeap::heap()->heap_region_iterate(&blk);
-  // RemSet stats
-  out->print_cr("  Total heap region rem set sizes = "SIZE_FORMAT"K."
-                "  Max = "SIZE_FORMAT"K.",
-                blk.total_rs_mem_sz()/K, blk.max_rs_mem_sz()/K);
-  out->print_cr("  Static structures = "SIZE_FORMAT"K,"
-                " free_lists = "SIZE_FORMAT"K.",
-                HeapRegionRemSet::static_mem_size() / K,
-                HeapRegionRemSet::fl_mem_size() / K);
-  out->print_cr("    "SIZE_FORMAT" occupied cards represented.",
-                blk.occupied());
-  HeapRegion* max_rs_mem_sz_region = blk.max_rs_mem_sz_region();
-  HeapRegionRemSet* max_rs_rem_set = max_rs_mem_sz_region->rem_set();
-  out->print_cr("    Max size region = "HR_FORMAT", "
-                "size = "SIZE_FORMAT "K, occupied = "SIZE_FORMAT"K.",
-                HR_FORMAT_PARAMS(max_rs_mem_sz_region),
-                (max_rs_rem_set->mem_size() + K - 1)/K,
-                (max_rs_rem_set->occupied() + K - 1)/K);
-  out->print_cr("    Did %d coarsenings.", num_coarsenings());
-  // Strong code root stats
-  out->print_cr("  Total heap region code-root set sizes = "SIZE_FORMAT"K."
-                "  Max = "SIZE_FORMAT"K.",
-                blk.total_code_root_mem_sz()/K, blk.max_code_root_mem_sz()/K);
-  HeapRegion* max_code_root_mem_sz_region = blk.max_code_root_mem_sz_region();
-  HeapRegionRemSet* max_code_root_rem_set = max_code_root_mem_sz_region->rem_set();
-  out->print_cr("    Max size region = "HR_FORMAT", "
-                "size = "SIZE_FORMAT "K, num_elems = "SIZE_FORMAT".",
-                HR_FORMAT_PARAMS(max_code_root_mem_sz_region),
-                (max_code_root_rem_set->strong_code_roots_mem_size() + K - 1)/K,
-                (max_code_root_rem_set->strong_code_roots_list_length()));
+  blk.print_summary_on(out);
 }
--- a/src/share/vm/gc_implementation/g1/g1RemSetSummary.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/g1RemSetSummary.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -84,7 +84,7 @@
   void subtract_from(G1RemSetSummary* other);
 
   // initialize and get the first sampling
-  void initialize(G1RemSet* remset, uint num_workers);
+  void initialize(G1RemSet* remset);
 
   void print_on(outputStream* out);
 
--- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -64,6 +64,46 @@
   }
 }
 
+bool G1SATBCardTableModRefBS::mark_card_deferred(size_t card_index) {
+  jbyte val = _byte_map[card_index];
+  // It's already processed
+  if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
+    return false;
+  }
+
+  if (val == g1_young_gen) {
+    // The card is for a young gen region; we don't need to track pointers into young regions.
+    return false;
+  }
+
+  // Cached bit can be installed either on a clean card or on a claimed card.
+  jbyte new_val = val;
+  if (val == clean_card_val()) {
+    new_val = (jbyte)deferred_card_val();
+  } else {
+    if (val & claimed_card_val()) {
+      new_val = val | (jbyte)deferred_card_val();
+    }
+  }
+  if (new_val != val) {
+    Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
+  }
+  return true;
+}
+
+void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) {
+  jbyte *const first = byte_for(mr.start());
+  jbyte *const last = byte_after(mr.last());
+
+  memset(first, g1_young_gen, last - first);
+}
+
+#ifndef PRODUCT
+void G1SATBCardTableModRefBS::verify_g1_young_region(MemRegion mr) {
+  verify_region(mr, g1_young_gen,  true);
+}
+#endif
+
 G1SATBCardTableLoggingModRefBS::
 G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
                                int max_covered_regions) :
@@ -76,7 +116,11 @@
 void
 G1SATBCardTableLoggingModRefBS::write_ref_field_work(void* field,
                                                      oop new_val) {
-  jbyte* byte = byte_for(field);
+  volatile jbyte* byte = byte_for(field);
+  if (*byte == g1_young_gen) {
+    return;
+  }
+  OrderAccess::storeload();
   if (*byte != dirty_card) {
     *byte = dirty_card;
     Thread* thr = Thread::current();
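
The rewritten write barrier gains a young-card filter ahead of the dirty-card
check; the StoreLoad barrier between them is required so that the preceding
reference store is globally visible before the card byte is re-read,
otherwise a concurrent refinement thread could clean the card and miss the
new pointer. A condensed model of the fast path (card values and helper names
are assumptions, with std::atomic_thread_fence standing in for
OrderAccess::storeload()):

    #include <atomic>

    enum : signed char { kDirtyCard = 0, kG1YoungGen = 32 };  // values assumed

    static void post_barrier_sketch(volatile signed char* card) {
      if (*card == kG1YoungGen) {
        return;  // stores into young regions never need refinement
      }
      // Order the earlier reference store before the card re-check below.
      std::atomic_thread_fence(std::memory_order_seq_cst);
      if (*card != kDirtyCard) {
        *card = kDirtyCard;
        // enqueue(card) on the thread's dirty card queue (omitted)
      }
    }
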
@@ -95,7 +139,7 @@
 G1SATBCardTableLoggingModRefBS::write_ref_field_static(void* field,
                                                        oop new_val) {
   uintptr_t field_uint = (uintptr_t)field;
-  uintptr_t new_val_uint = (uintptr_t)new_val;
+  uintptr_t new_val_uint = cast_from_oop<uintptr_t>(new_val);
   uintptr_t comb = field_uint ^ new_val_uint;
   comb = comb >> HeapRegion::LogOfHRGrainBytes;
   if (comb == 0) return;
@@ -108,7 +152,7 @@
 
 void
 G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr, bool whole_heap) {
-  jbyte* byte = byte_for(mr.start());
+  volatile jbyte* byte = byte_for(mr.start());
   jbyte* last_byte = byte_for(mr.last());
   Thread* thr = Thread::current();
   if (whole_heap) {
@@ -117,25 +161,35 @@
       byte++;
     }
   } else {
-    // Enqueue if necessary.
-    if (thr->is_Java_thread()) {
-      JavaThread* jt = (JavaThread*)thr;
-      while (byte <= last_byte) {
-        if (*byte != dirty_card) {
-          *byte = dirty_card;
-          jt->dirty_card_queue().enqueue(byte);
+    // skip all consecutive young cards
+    for (; byte <= last_byte && *byte == g1_young_gen; byte++);
+
+    if (byte <= last_byte) {
+      OrderAccess::storeload();
+      // Enqueue if necessary.
+      if (thr->is_Java_thread()) {
+        JavaThread* jt = (JavaThread*)thr;
+        for (; byte <= last_byte; byte++) {
+          if (*byte == g1_young_gen) {
+            continue;
+          }
+          if (*byte != dirty_card) {
+            *byte = dirty_card;
+            jt->dirty_card_queue().enqueue(byte);
+          }
         }
-        byte++;
-      }
-    } else {
-      MutexLockerEx x(Shared_DirtyCardQ_lock,
-                      Mutex::_no_safepoint_check_flag);
-      while (byte <= last_byte) {
-        if (*byte != dirty_card) {
-          *byte = dirty_card;
-          _dcqs.shared_dirty_card_queue()->enqueue(byte);
+      } else {
+        MutexLockerEx x(Shared_DirtyCardQ_lock,
+                        Mutex::_no_safepoint_check_flag);
+        for (; byte <= last_byte; byte++) {
+          if (*byte == g1_young_gen) {
+            continue;
+          }
+          if (*byte != dirty_card) {
+            *byte = dirty_card;
+            _dcqs.shared_dirty_card_queue()->enqueue(byte);
+          }
         }
-        byte++;
       }
     }
   }
--- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -38,7 +38,14 @@
 // snapshot-at-the-beginning marking.
 
 class G1SATBCardTableModRefBS: public CardTableModRefBSForCTRS {
+protected:
+  enum G1CardValues {
+    g1_young_gen = CT_MR_BS_last_reserved << 1
+  };
+
 public:
+  static int g1_young_card_val()   { return g1_young_gen; }
+
   // Add "pre_val" to a set of objects that may have been disconnected from the
   // pre-marking object graph.
   static void enqueue(oop pre_val);
@@ -89,6 +96,45 @@
       write_ref_array_pre_work(dst, count);
     }
   }
+
+/*
+   Claimed and deferred bits are used together in G1 during the evacuation
+   pause. These bits can have the following state transitions:
+   1. The claimed bit can be set over any other card state, except that
+      the "dirty -> dirty and claimed" transition is checked for in
+      G1 code and is not used.
+   2. The deferred bit can be set only if the previous state of the card
+      was either clean or claimed. mark_card_deferred() is wait-free.
+      We do not care whether the operation succeeds: if it does not, the
+      only consequence is a duplicate entry in the update buffer due to
+      the "cache miss", so it is not worth spinning.
+ */
+
+  bool is_card_claimed(size_t card_index) {
+    jbyte val = _byte_map[card_index];
+    return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val();
+  }
+
+  void set_card_claimed(size_t card_index) {
+    jbyte val = _byte_map[card_index];
+    if (val == clean_card_val()) {
+      val = (jbyte)claimed_card_val();
+    } else {
+      val |= (jbyte)claimed_card_val();
+    }
+    _byte_map[card_index] = val;
+  }
+
+  void verify_g1_young_region(MemRegion mr) PRODUCT_RETURN;
+  void g1_mark_as_young(const MemRegion& mr);
+
+  bool mark_card_deferred(size_t card_index);
+
+  bool is_card_deferred(size_t card_index) {
+    jbyte val = _byte_map[card_index];
+    return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
+  }
+
 };
 
 // Adds card-table logging to the post-barrier.
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -637,7 +637,7 @@
           gclog_or_tty->print_cr("Object "PTR_FORMAT" in region "
                                  "["PTR_FORMAT", "PTR_FORMAT") is above "
                                  "top "PTR_FORMAT,
-                                 obj, _hr->bottom(), _hr->end(), _hr->top());
+                                 (void *)obj, _hr->bottom(), _hr->end(), _hr->top());
           _failures = true;
           return;
         }
@@ -951,12 +951,12 @@
         Klass* klass = obj->klass();
         if (!klass->is_metaspace_object()) {
           gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
-                                 "not metadata", klass, obj);
+                                 "not metadata", klass, (void *)obj);
           *failures = true;
           return;
         } else if (!klass->is_klass()) {
           gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
-                                 "not a klass", klass, obj);
+                                 "not a klass", klass, (void *)obj);
           *failures = true;
           return;
         } else {
@@ -971,7 +971,7 @@
           }
         }
       } else {
-        gclog_or_tty->print_cr(PTR_FORMAT" no an oop", obj);
+        gclog_or_tty->print_cr(PTR_FORMAT" not an oop", (void *)obj);
         *failures = true;
         return;
       }
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -91,8 +91,8 @@
       gclog_or_tty->print_cr("    PRT::Add_reference_work(" PTR_FORMAT "->" PTR_FORMAT").",
                              from,
                              UseCompressedOops
-                             ? oopDesc::load_decode_heap_oop((narrowOop*)from)
-                             : oopDesc::load_decode_heap_oop((oop*)from));
+                             ? (void *)oopDesc::load_decode_heap_oop((narrowOop*)from)
+                             : (void *)oopDesc::load_decode_heap_oop((oop*)from));
     }
 
     HeapRegion* loc_hr = hr();
@@ -403,8 +403,8 @@
     gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
                                                     from,
                                                     UseCompressedOops
-                                                    ? oopDesc::load_decode_heap_oop((narrowOop*)from)
-                                                    : oopDesc::load_decode_heap_oop((oop*)from));
+                                                    ? (void *)oopDesc::load_decode_heap_oop((narrowOop*)from)
+                                                    : (void *)oopDesc::load_decode_heap_oop((oop*)from));
   }
 
   int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);
--- a/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -80,6 +80,10 @@
 
   void reset() { if (_buf != NULL) _index = _sz; }
 
+  void enqueue(volatile void* ptr) {
+    enqueue((void*)(ptr));
+  }
+
   // Enqueues the given "obj".
   void enqueue(void* ptr) {
     if (!_active) return;
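The added overload exists because a volatile void* does not implicitly convert to void*, so enqueueing a volatile-qualified pointer would otherwise fail to compile; the overload simply casts volatility away and forwards. A minimal sketch of the idiom (illustrative Queue type, not the real PtrQueue):

struct Queue {
  void enqueue(void* ptr) { (void)ptr; /* buffer the pointer */ }
  void enqueue(volatile void* ptr) {
    enqueue((void*)ptr);   // cast away volatility and forward
  }
};

int main() {
  Queue q;
  volatile int slot = 0;
  q.enqueue(&slot);        // &slot is volatile int*; picks the volatile overload
  return 0;
}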
--- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -1103,7 +1103,7 @@
   }
 }
 
-static const oop ClaimedForwardPtr = oop(0x4);
+static const oop ClaimedForwardPtr = cast_to_oop<intptr_t>(0x4);
 
 // Because of concurrency, there are times where an object for which
 // "is_forwarded()" is true contains an "interim" forwarding pointer
@@ -1226,7 +1226,7 @@
   if (TraceScavenge) {
     gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
        is_in_reserved(new_obj) ? "copying" : "tenuring",
-       new_obj->klass()->internal_name(), old, new_obj, new_obj->size());
+       new_obj->klass()->internal_name(), (void *)old, (void *)new_obj, new_obj->size());
   }
 #endif
 
@@ -1347,7 +1347,7 @@
   if (TraceScavenge) {
     gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
        is_in_reserved(new_obj) ? "copying" : "tenuring",
-       new_obj->klass()->internal_name(), old, new_obj, new_obj->size());
+       new_obj->klass()->internal_name(), (void *)old, (void *)new_obj, new_obj->size());
   }
 #endif
 
@@ -1436,7 +1436,7 @@
 // (although some performance comparisons would be useful since
 // single global lists have their own performance disadvantages
 // as we were made painfully aware not long ago, see 6786503).
-#define BUSY (oop(0x1aff1aff))
+#define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
 void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
   assert(is_in_reserved(from_space_obj), "Should be from this generation");
   if (ParGCUseLocalOverflow) {
@@ -1512,7 +1512,7 @@
   if (_overflow_list == NULL) return false;
 
   // Otherwise, there was something there; try claiming the list.
-  oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
+  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
   // Trim off a prefix of at most objsFromOverflow items
   Thread* tid = Thread::current();
   size_t spin_count = (size_t)ParallelGCThreads;
@@ -1526,7 +1526,7 @@
       return false;
     } else if (_overflow_list != BUSY) {
      // try and grab the prefix
-     prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
+     prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
     }
   }
   if (prefix == NULL || prefix == BUSY) {
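The BUSY sentinel and Atomic::xchg_ptr together implement a lock-free claim of the shared overflow list: a thread swaps BUSY into the head, and any thread that reads BUSY back knows the list is momentarily owned elsewhere. A simplified standalone sketch with std::atomic (unlike this sketch, the real code bounds the spin and can give up):

#include <atomic>

struct Node { Node* next; };

// Sentinel that is never a real object address, as with BUSY above.
static Node* const BUSY = reinterpret_cast<Node*>(0x1aff1aff);
static std::atomic<Node*> overflow_list{nullptr};

Node* claim_list() {
  // Like Atomic::xchg_ptr(BUSY, &_overflow_list): take whatever is there
  // and leave BUSY behind so other threads know the list is claimed.
  Node* prefix = overflow_list.exchange(BUSY);
  while (prefix == BUSY) {
    if (overflow_list.load() != BUSY) {
      prefix = overflow_list.exchange(BUSY);   // try to grab it again
    }
  }
  return prefix;   // nullptr means the list was empty
}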
--- a/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -84,7 +84,7 @@
         Space* sp = gch->space_containing(p);
         oop obj = oop(sp->block_start(p));
         assert((HeapWord*)obj < (HeapWord*)p, "Error");
-        tty->print_cr("Object: " PTR_FORMAT, obj);
+        tty->print_cr("Object: " PTR_FORMAT, (void *)obj);
         tty->print_cr("-------");
         obj->print();
         tty->print_cr("-----");
@@ -110,7 +110,7 @@
         if (TraceScavenge) {
           gclog_or_tty->print_cr("{%s %s ( " PTR_FORMAT " ) " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
              "forwarded ",
-             new_obj->klass()->internal_name(), p, obj, new_obj, new_obj->size());
+             new_obj->klass()->internal_name(), p, (void *)obj, (void *)new_obj, new_obj->size());
         }
 #endif
 
--- a/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/generationSizer.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -40,10 +40,8 @@
 
   void initialize_flags() {
     // Do basic sizing work
-    this->TwoGenerationCollectorPolicy::initialize_flags();
+    TwoGenerationCollectorPolicy::initialize_flags();
 
-    // If the user hasn't explicitly set the number of worker
-    // threads, set the count.
     assert(UseSerialGC ||
            !FLAG_IS_DEFAULT(ParallelGCThreads) ||
            (ParallelGCThreads > 0),
--- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -23,7 +23,6 @@
  */
 
 #include "precompiled.hpp"
-#include "gc_implementation/parallelScavenge/generationSizer.hpp"
 #include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
 #include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
--- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -53,7 +53,6 @@
 
 // Forward decls
 class elapsedTimer;
-class GenerationSizer;
 
 class PSAdaptiveSizePolicy : public AdaptiveSizePolicy {
  friend class PSGCAdaptivePolicyCounters;
--- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -26,7 +26,6 @@
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
-#include "gc_implementation/parallelScavenge/generationSizer.hpp"
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
 #include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
 #include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -27,7 +27,6 @@
 #include "classfile/systemDictionary.hpp"
 #include "code/codeCache.hpp"
 #include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
-#include "gc_implementation/parallelScavenge/generationSizer.hpp"
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp"
 #include "gc_implementation/parallelScavenge/pcTasks.hpp"
 #include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -333,7 +333,7 @@
     gclog_or_tty->print_cr("{%s %s 0x%x (%d)}",
                            "promotion-failure",
                            obj->klass()->internal_name(),
-                           obj, obj->size());
+                           (void *)obj, obj->size());
 
   }
 #endif
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -126,7 +126,7 @@
 
   oop* mask_chunked_array_oop(oop obj) {
     assert(!is_oop_masked((oop*) obj), "invariant");
-    oop* ret = (oop*) ((uintptr_t)obj | PS_CHUNKED_ARRAY_OOP_MASK);
+    oop* ret = (oop*) (cast_from_oop<uintptr_t>(obj) | PS_CHUNKED_ARRAY_OOP_MASK);
     assert(is_oop_masked(ret), "invariant");
     return ret;
   }
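cast_from_oop<uintptr_t> makes the tagging arithmetic explicit: a low bit that aligned object addresses never use marks the entry as a chunked array oop. A minimal sketch of the tag/test/untag round trip, with an illustrative mask value in place of PS_CHUNKED_ARRAY_OOP_MASK:

#include <cassert>
#include <cstdint>

const uintptr_t CHUNKED_MASK = 0x1;   // low bit is free because of alignment

void* mask_ptr(void* p)   { return (void*)((uintptr_t)p | CHUNKED_MASK); }
void* unmask_ptr(void* p) { return (void*)((uintptr_t)p & ~CHUNKED_MASK); }
bool  is_masked(void* p)  { return ((uintptr_t)p & CHUNKED_MASK) != 0; }

int main() {
  int obj = 0;   // stands in for an aligned heap object
  void* tagged = mask_ptr(&obj);
  assert(is_masked(tagged));
  assert(unmask_ptr(tagged) == &obj);
  return 0;
}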
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -225,7 +225,7 @@
   if (TraceScavenge) {
     gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
        PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
-       new_obj->klass()->internal_name(), o, new_obj, new_obj->size());
+       new_obj->klass()->internal_name(), (void *)o, (void *)new_obj, new_obj->size());
   }
 #endif
 
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -27,7 +27,6 @@
 #include "code/codeCache.hpp"
 #include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
 #include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
-#include "gc_implementation/parallelScavenge/generationSizer.hpp"
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
 #include "gc_implementation/parallelScavenge/psAdaptiveSizePolicy.hpp"
 #include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
--- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -81,7 +81,7 @@
   if (TraceScavenge &&  o->is_forwarded()) {
     gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
        "forwarding",
-       new_obj->klass()->internal_name(), o, new_obj, new_obj->size());
+       new_obj->klass()->internal_name(), (void *)o, (void *)new_obj, new_obj->size());
   }
 #endif
 
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -214,9 +214,6 @@
     : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
       _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) {
   }
-  ~VM_CollectForMetadataAllocation()  {
-    MetaspaceGC::set_expand_after_GC(false);
-  }
   virtual VMOp_Type type() const { return VMOp_CollectForMetadataAllocation; }
   virtual void doit();
   MetaWord* result() const       { return _result; }
--- a/src/share/vm/gc_interface/collectedHeap.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_interface/collectedHeap.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -202,12 +202,6 @@
       ShouldNotReachHere(); // Unexpected use of this function
   }
 }
-MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(
-                                              ClassLoaderData* loader_data,
-                                              size_t size, Metaspace::MetadataType mdtype) {
-  return collector_policy()->satisfy_failed_metadata_allocation(loader_data, size, mdtype);
-}
-
 
 void CollectedHeap::pre_initialize() {
   // Used for ReduceInitialCardMarks (when COMPILER2 is used);
--- a/src/share/vm/gc_interface/collectedHeap.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/gc_interface/collectedHeap.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -475,11 +475,6 @@
   // the context of the vm thread.
   virtual void collect_as_vm_thread(GCCause::Cause cause);
 
-  // Callback from VM_CollectForMetadataAllocation operation.
-  MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
-                                               size_t size,
-                                               Metaspace::MetadataType mdtype);
-
   // Returns the barrier set for this heap
   BarrierSet* barrier_set() { return _barrier_set; }
 
--- a/src/share/vm/interpreter/bytecodeTracer.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/interpreter/bytecodeTracer.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -215,7 +215,7 @@
       st->print_cr(" %s", buf);
     }
   } else {
-    st->print_cr(" " PTR_FORMAT, (intptr_t) value);
+    st->print_cr(" " PTR_FORMAT, (void *)value);
   }
 }
 
--- a/src/share/vm/interpreter/linkResolver.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/interpreter/linkResolver.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -222,8 +222,17 @@
 //
 // According to JVM spec. $5.4.3c & $5.4.3d
 
+// Look up the method in klasses, including static methods;
+// then look up local default methods.
 void LinkResolver::lookup_method_in_klasses(methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS) {
   Method* result_oop = klass->uncached_lookup_method(name, signature);
+  if (result_oop == NULL) {
+    Array<Method*>* default_methods = InstanceKlass::cast(klass())->default_methods();
+    if (default_methods != NULL) {
+      result_oop = InstanceKlass::find_method(default_methods, name, signature);
+    }
+  }
+
   if (EnableInvokeDynamic && result_oop != NULL) {
     vmIntrinsics::ID iid = result_oop->intrinsic_id();
     if (MethodHandles::is_signature_polymorphic(iid)) {
@@ -235,6 +244,7 @@
 }
 
 // returns first instance method
+// Looks up the method in classes, then in local default methods.
 void LinkResolver::lookup_instance_method_in_klasses(methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS) {
   Method* result_oop = klass->uncached_lookup_method(name, signature);
   result = methodHandle(THREAD, result_oop);
@@ -242,13 +252,38 @@
     klass = KlassHandle(THREAD, result->method_holder()->super());
     result = methodHandle(THREAD, klass->uncached_lookup_method(name, signature));
   }
+
+  if (result.is_null()) {
+    Array<Method*>* default_methods = InstanceKlass::cast(klass())->default_methods();
+    if (default_methods != NULL) {
+      result = methodHandle(InstanceKlass::find_method(default_methods, name, signature));
+      assert(result.is_null() || !result->is_static(), "static defaults not allowed");
+    }
+  }
 }
 
+int LinkResolver::vtable_index_of_interface_method(KlassHandle klass,
+                                          methodHandle resolved_method, TRAPS) {
 
-int LinkResolver::vtable_index_of_miranda_method(KlassHandle klass, Symbol* name, Symbol* signature, TRAPS) {
-  ResourceMark rm(THREAD);
-  klassVtable *vt = InstanceKlass::cast(klass())->vtable();
-  return vt->index_of_miranda(name, signature);
+  int vtable_index = Method::invalid_vtable_index;
+  Symbol* name = resolved_method->name();
+  Symbol* signature = resolved_method->signature();
+
+  // First check in default method array
+  if (!resolved_method->is_abstract() &&
+      (InstanceKlass::cast(klass())->default_methods() != NULL)) {
+    int index = InstanceKlass::find_method_index(InstanceKlass::cast(klass())->default_methods(), name, signature);
+    if (index >= 0) {
+      vtable_index = InstanceKlass::cast(klass())->default_vtable_indices()->at(index);
+    }
+  }
+  if (vtable_index == Method::invalid_vtable_index) {
+    // get vtable_index for miranda methods
+    ResourceMark rm(THREAD);
+    klassVtable *vt = InstanceKlass::cast(klass())->vtable();
+    vtable_index = vt->index_of_miranda(name, signature);
+  }
+  return vtable_index;
 }
 
 void LinkResolver::lookup_method_in_interfaces(methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS) {
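Both lookup paths above now share a two-phase order: search the klass hierarchy first, and only on a miss consult the klass's local default-method array. A minimal sketch of that ordering (illustrative types, not the HotSpot Klass/Method API):

#include <cstddef>
#include <map>
#include <string>
#include <vector>

struct Method { std::string name; };

struct Klass {
  std::map<std::string, Method*> methods;   // plays uncached_lookup_method()
  std::vector<Method*> default_methods;     // plays default_methods()
};

Method* lookup(const Klass& k, const std::string& name) {
  std::map<std::string, Method*>::const_iterator it = k.methods.find(name);
  if (it != k.methods.end()) return it->second;    // phase 1: klass lookup
  for (size_t i = 0; i < k.default_methods.size(); i++) {
    if (k.default_methods[i]->name == name)        // phase 2: local defaults
      return k.default_methods[i];
  }
  return NULL;                                     // not found
}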
@@ -573,6 +608,16 @@
   }
 
   if (check_access) {
+    // JDK8 adds non-public interface methods and an accessibility check requirement.
+    assert(current_klass.not_null() , "current_klass should not be null");
+
+    // check if method can be accessed by the referring class
+    check_method_accessability(current_klass,
+                               resolved_klass,
+                               KlassHandle(THREAD, resolved_method->method_holder()),
+                               resolved_method,
+                               CHECK);
+
     HandleMark hm(THREAD);
     Handle loader (THREAD, InstanceKlass::cast(current_klass())->class_loader());
     Handle class_loader (THREAD, resolved_method->method_holder()->class_loader());
@@ -604,6 +649,26 @@
       }
     }
   }
+
+  if (TraceItables && Verbose) {
+    ResourceMark rm(THREAD);
+    tty->print("invokeinterface resolved method: caller-class:%s, compile-time-class:%s, method:%s, method_holder:%s, access_flags: ",
+                   (current_klass.is_null() ? "<NULL>" : current_klass->internal_name()),
+                   (resolved_klass.is_null() ? "<NULL>" : resolved_klass->internal_name()),
+                   Method::name_and_sig_as_C_string(resolved_klass(),
+                                                    resolved_method->name(),
+                                                    resolved_method->signature()),
+                   resolved_method->method_holder()->internal_name()
+                  );
+    resolved_method->access_flags().print_on(tty);
+    if (resolved_method->is_default_method()) {
+      tty->print("default");
+    }
+    if (resolved_method->is_overpass()) {
+      tty->print("overpass");
+    }
+    tty->cr();
+  }
 }
 
 //------------------------------------------------------------------------------------------------------------------------
@@ -795,26 +860,12 @@
                                                    Symbol* method_name, Symbol* method_signature,
                                                    KlassHandle current_klass, bool check_access, TRAPS) {
 
-  if (resolved_klass->is_interface() && current_klass() != NULL) {
-    // If the target class is a direct interface, treat this as a "super"
-    // default call.
-    //
-    // If the current method is an overpass that happens to call a direct
-    // super-interface's method, then we'll end up rerunning the default method
-    // analysis even though we don't need to, but that's ok since it will end
-    // up with the same answer.
-    InstanceKlass* ik = InstanceKlass::cast(current_klass());
-    Array<Klass*>* interfaces = ik->local_interfaces();
-    int num_interfaces = interfaces->length();
-    for (int index = 0; index < num_interfaces; index++) {
-      if (interfaces->at(index) == resolved_klass()) {
-        Method* method = DefaultMethods::find_super_default(current_klass(),
-            resolved_klass(), method_name, method_signature, CHECK);
-        resolved_method = methodHandle(THREAD, method);
-        return;
-      }
-    }
-  }
+  // Invokespecial is used in several distinct cases:
+  //   <init>
+  //   local private method invocation, for classes and interfaces
+  //   superclass.method, which can also resolve to a default method;
+  //     the selected method is recalculated relative to the direct superclass
+  //   superinterface.method, which explicitly does not check shadowing
 
   resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK);
 
@@ -844,6 +895,26 @@
                                                          resolved_method->signature()));
     THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
   }
+
+  if (TraceItables && Verbose) {
+    ResourceMark rm(THREAD);
+    tty->print("invokespecial resolved method: caller-class:%s, compile-time-class:%s, method:%s, method_holder:%s, access_flags: ",
+                (current_klass.is_null() ? "<NULL>" : current_klass->internal_name()),
+                (resolved_klass.is_null() ? "<NULL>" : resolved_klass->internal_name()),
+                Method::name_and_sig_as_C_string(resolved_klass(),
+                                                 resolved_method->name(),
+                                                 resolved_method->signature()),
+                resolved_method->method_holder()->internal_name()
+               );
+    resolved_method->access_flags().print_on(tty);
+    if (resolved_method->is_default_method()) {
+      tty->print("default");
+    }
+    if (resolved_method->is_overpass()) {
+      tty->print("overpass");
+    }
+    tty->cr();
+  }
 }
 
 // throws runtime exceptions
@@ -851,23 +922,24 @@
                                                   KlassHandle current_klass, bool check_access, TRAPS) {
 
   // resolved method is selected method unless we have an old-style lookup
+  // for a superclass method.
+  // For invokespecial on a superinterface, the resolved method is the
+  // selected method; no shadowing checks are performed.
   methodHandle sel_method(THREAD, resolved_method());
 
   // check if this is an old-style super call and do a new lookup if so
   { KlassHandle method_klass  = KlassHandle(THREAD,
                                             resolved_method->method_holder());
 
-    const bool direct_calling_default_method =
-      resolved_klass() != NULL && resolved_method() != NULL &&
-      resolved_klass->is_interface() && !resolved_method->is_abstract();
-
-    if (!direct_calling_default_method &&
-        check_access &&
+    if (check_access &&
         // a) check if ACC_SUPER flag is set for the current class
         (current_klass->is_super() || !AllowNonVirtualCalls) &&
-        // b) check if the method class is a superclass of the current class (superclass relation is not reflexive!)
-        current_klass->is_subtype_of(method_klass()) &&
-        current_klass() != method_klass() &&
+        // b) check if resolved_klass is a superclass (not merely a supertype,
+        // which would admit interfaces) of the current class.
+        // This check is not performed for super.invoke of interface methods
+        // in superinterfaces.
+        current_klass->is_subclass_of(resolved_klass()) &&
+        current_klass() != resolved_klass() &&
         // c) check if the method is not <init>
         resolved_method->name() != vmSymbols::object_initializer_name()) {
       // Lookup super method
@@ -905,6 +977,25 @@
                                                       sel_method->signature()));
   }
 
+  if (TraceItables && Verbose) {
+    ResourceMark rm(THREAD);
+    tty->print("invokespecial selected method: resolved-class:%s, method:%s, method_holder:%s, access_flags: ",
+                 (resolved_klass.is_null() ? "<NULL>" : resolved_klass->internal_name()),
+                 Method::name_and_sig_as_C_string(resolved_klass(),
+                                                  sel_method->name(),
+                                                  sel_method->signature()),
+                 sel_method->method_holder()->internal_name()
+                );
+    sel_method->access_flags().print_on(tty);
+    if (sel_method->is_default_method()) {
+      tty->print("default");
+    }
+    if (sel_method->is_overpass()) {
+      tty->print("overpass");
+    }
+    tty->cr();
+  }
+
   // setup result
   result.set_static(resolved_klass, sel_method, CHECK);
 }
@@ -927,6 +1018,18 @@
   assert(resolved_method->name() != vmSymbols::object_initializer_name(), "should have been checked in verifier");
   assert(resolved_method->name() != vmSymbols::class_initializer_name (), "should have been checked in verifier");
 
+  // check if private interface method
+  if (resolved_klass->is_interface() && resolved_method->is_private()) {
+    ResourceMark rm(THREAD);
+    char buf[200];
+    jio_snprintf(buf, sizeof(buf), "private interface method requires invokespecial, not invokevirtual: method %s, caller-class:%s",
+                 Method::name_and_sig_as_C_string(resolved_klass(),
+                                                  resolved_method->name(),
+                                                  resolved_method->signature()),
+                   (current_klass.is_null() ? "<NULL>" : current_klass->internal_name()));
+    THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
+  }
+
   // check if not static
   if (resolved_method->is_static()) {
     ResourceMark rm(THREAD);
@@ -936,6 +1039,26 @@
                                                                                                              resolved_method->signature()));
     THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
   }
+
+  if (PrintVtables && Verbose) {
+    ResourceMark rm(THREAD);
+    tty->print("invokevirtual resolved method: caller-class:%s, compile-time-class:%s, method:%s, method_holder:%s, access_flags: ",
+                   (current_klass.is_null() ? "<NULL>" : current_klass->internal_name()),
+                   (resolved_klass.is_null() ? "<NULL>" : resolved_klass->internal_name()),
+                   Method::name_and_sig_as_C_string(resolved_klass(),
+                                                    resolved_method->name(),
+                                                    resolved_method->signature()),
+                   resolved_method->method_holder()->internal_name()
+                  );
+    resolved_method->access_flags().print_on(tty);
+    if (resolved_method->is_default_method()) {
+      tty->print("default");
+    }
+    if (resolved_method->is_overpass()) {
+      tty->print("overpass");
+    }
+    tty->cr();
+  }
 }
 
 // throws runtime exceptions
@@ -965,10 +1088,8 @@
 
   // do lookup based on receiver klass using the vtable index
   if (resolved_method->method_holder()->is_interface()) { // miranda method
-    vtable_index = vtable_index_of_miranda_method(resolved_klass,
-                           resolved_method->name(),
-                           resolved_method->signature(), CHECK);
-
+    vtable_index = vtable_index_of_interface_method(resolved_klass,
+                           resolved_method, CHECK);
     assert(vtable_index >= 0 , "we should have valid vtable index at this point");
 
     InstanceKlass* inst = InstanceKlass::cast(recv_klass());
@@ -1012,6 +1133,26 @@
                                                       selected_method->signature()));
   }
 
+  if (PrintVtables && Verbose) {
+    ResourceMark rm(THREAD);
+    tty->print("invokevirtual selected method: receiver-class:%s, resolved-class:%s, method:%s, method_holder:%s, vtable_index:%d, access_flags: ",
+                   (recv_klass.is_null() ? "<NULL>" : recv_klass->internal_name()),
+                   (resolved_klass.is_null() ? "<NULL>" : resolved_klass->internal_name()),
+                   Method::name_and_sig_as_C_string(resolved_klass(),
+                                                    resolved_method->name(),
+                                                    resolved_method->signature()),
+                   selected_method->method_holder()->internal_name(),
+                   vtable_index
+                  );
+    selected_method->access_flags().print_on(tty);
+    if (selected_method->is_default_method()) {
+      tty->print("default");
+    }
+    if (selected_method->is_overpass()) {
+      tty->print("overpass");
+    }
+    tty->cr();
+  }
   // setup result
   result.set_virtual(resolved_klass, recv_klass, resolved_method, selected_method, vtable_index, CHECK);
 }
@@ -1042,6 +1183,17 @@
     THROW(vmSymbols::java_lang_NullPointerException());
   }
 
+  // check if private interface method
+  if (resolved_klass->is_interface() && resolved_method->is_private()) {
+    ResourceMark rm(THREAD);
+    char buf[200];
+    jio_snprintf(buf, sizeof(buf), "private interface method requires invokespecial, not invokeinterface: method %s",
+                 Method::name_and_sig_as_C_string(resolved_klass(),
+                                                  resolved_method->name(),
+                                                  resolved_method->signature()));
+    THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
+  }
+
   // check if receiver klass implements the resolved interface
   if (!recv_klass->is_subtype_of(resolved_klass())) {
     ResourceMark rm(THREAD);
@@ -1071,27 +1223,13 @@
                                                       resolved_method->signature()));
   }
   // check access
-  if (sel_method->method_holder()->is_interface()) {
-    // Method holder is an interface. Throw Illegal Access Error if sel_method
-    // is neither public nor private.
-    if (!(sel_method->is_public() || sel_method->is_private())) {
-      ResourceMark rm(THREAD);
-      THROW_MSG(vmSymbols::java_lang_IllegalAccessError(),
-                Method::name_and_sig_as_C_string(recv_klass(),
-                                                 sel_method->name(),
-                                                 sel_method->signature()));
-    }
-  }
-  else {
-    // Method holder is a class. Throw Illegal Access Error if sel_method
-    // is not public.
-    if (!sel_method->is_public()) {
-      ResourceMark rm(THREAD);
-      THROW_MSG(vmSymbols::java_lang_IllegalAccessError(),
-                Method::name_and_sig_as_C_string(recv_klass(),
-                                                 sel_method->name(),
-                                                 sel_method->signature()));
-    }
+  // Throw Illegal Access Error if sel_method is not public.
+  if (!sel_method->is_public()) {
+    ResourceMark rm(THREAD);
+    THROW_MSG(vmSymbols::java_lang_IllegalAccessError(),
+              Method::name_and_sig_as_C_string(recv_klass(),
+                                               sel_method->name(),
+                                               sel_method->signature()));
   }
   // check if abstract
   if (check_null_and_abstract && sel_method->is_abstract()) {
@@ -1109,6 +1247,26 @@
     return;
   }
   int itable_index = resolved_method()->itable_index();
+
+  if (TraceItables && Verbose) {
+    ResourceMark rm(THREAD);
+    tty->print("invokeinterface selected method: receiver-class:%s, resolved-class:%s, method:%s, method_holder:%s, access_flags: ",
+                   (recv_klass.is_null() ? "<NULL>" : recv_klass->internal_name()),
+                   (resolved_klass.is_null() ? "<NULL>" : resolved_klass->internal_name()),
+                   Method::name_and_sig_as_C_string(resolved_klass(),
+                                                    resolved_method->name(),
+                                                    resolved_method->signature()),
+                   sel_method->method_holder()->internal_name()
+                  );
+    sel_method->access_flags().print_on(tty);
+    if (sel_method->is_default_method()) {
+      tty->print("default");
+    }
+    if (sel_method->is_overpass()) {
+      tty->print("overpass");
+    }
+    tty->cr();
+  }
   result.set_interface(resolved_klass, recv_klass, resolved_method, sel_method, itable_index, CHECK);
 }
 
@@ -1384,7 +1542,7 @@
                                                      THREAD);
   if (HAS_PENDING_EXCEPTION) {
     if (TraceMethodHandles) {
-      tty->print_cr("invokedynamic throws BSME for "INTPTR_FORMAT, PENDING_EXCEPTION);
+      tty->print_cr("invokedynamic throws BSME for "INTPTR_FORMAT, (void *)PENDING_EXCEPTION);
       PENDING_EXCEPTION->print();
     }
     if (PENDING_EXCEPTION->is_a(SystemDictionary::BootstrapMethodError_klass())) {
--- a/src/share/vm/interpreter/linkResolver.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/interpreter/linkResolver.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -130,8 +130,7 @@
   static void lookup_polymorphic_method         (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature,
                                                  KlassHandle current_klass, Handle *appendix_result_or_null, Handle *method_type_result, TRAPS);
 
-  static int vtable_index_of_miranda_method(KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
-
+  static int vtable_index_of_interface_method(KlassHandle klass, methodHandle resolved_method, TRAPS);
   static void resolve_klass           (KlassHandle& result, constantPoolHandle  pool, int index, TRAPS);
 
   static void resolve_pool  (KlassHandle& resolved_klass, Symbol*& method_name, Symbol*& method_signature, KlassHandle& current_klass, constantPoolHandle pool, int index, TRAPS);
--- a/src/share/vm/memory/cardTableModRefBS.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/memory/cardTableModRefBS.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -423,60 +423,6 @@
   inline_write_ref_field(field, newVal);
 }
 
-/*
-   Claimed and deferred bits are used together in G1 during the evacuation
-   pause. These bits can have the following state transitions:
-   1. The claimed bit can be put over any other card state. Except that
-      the "dirty -> dirty and claimed" transition is checked for in
-      G1 code and is not used.
-   2. Deferred bit can be set only if the previous state of the card
-      was either clean or claimed. mark_card_deferred() is wait-free.
-      We do not care if the operation is be successful because if
-      it does not it will only result in duplicate entry in the update
-      buffer because of the "cache-miss". So it's not worth spinning.
- */
-
-
-bool CardTableModRefBS::claim_card(size_t card_index) {
-  jbyte val = _byte_map[card_index];
-  assert(val != dirty_card_val(), "Shouldn't claim a dirty card");
-  while (val == clean_card_val() ||
-         (val & (clean_card_mask_val() | claimed_card_val())) != claimed_card_val()) {
-    jbyte new_val = val;
-    if (val == clean_card_val()) {
-      new_val = (jbyte)claimed_card_val();
-    } else {
-      new_val = val | (jbyte)claimed_card_val();
-    }
-    jbyte res = Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
-    if (res == val) {
-      return true;
-    }
-    val = res;
-  }
-  return false;
-}
-
-bool CardTableModRefBS::mark_card_deferred(size_t card_index) {
-  jbyte val = _byte_map[card_index];
-  // It's already processed
-  if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
-    return false;
-  }
-  // Cached bit can be installed either on a clean card or on a claimed card.
-  jbyte new_val = val;
-  if (val == clean_card_val()) {
-    new_val = (jbyte)deferred_card_val();
-  } else {
-    if (val & claimed_card_val()) {
-      new_val = val | (jbyte)deferred_card_val();
-    }
-  }
-  if (new_val != val) {
-    Atomic::cmpxchg(new_val, &_byte_map[card_index], val);
-  }
-  return true;
-}
 
 void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
                                                                  MemRegion mr,
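The claim_card() deleted above is the standard compare-and-swap retry loop: recompute the desired byte from the observed one and retry until this thread installs the claimed bit or sees that another thread won. A simplified standalone sketch with std::atomic standing in for Atomic::cmpxchg:

#include <atomic>

typedef signed char jbyte;
const jbyte clean_card    = (jbyte)-1;
const jbyte claimed_card  = 2;
const jbyte deferred_card = 4;
const jbyte clean_mask    = (jbyte)(clean_card & ~(claimed_card | deferred_card));

bool claim(std::atomic<jbyte>& card) {
  jbyte val = card.load();
  while (val == clean_card ||
         (val & (clean_mask | claimed_card)) != claimed_card) {
    jbyte new_val = (val == clean_card) ? claimed_card
                                        : (jbyte)(val | claimed_card);
    if (card.compare_exchange_strong(val, new_val)) {
      return true;     // this thread claimed the card
    }
    // on failure, val is refreshed with the current value; loop re-checks
  }
  return false;        // another thread claimed the card first
}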
--- a/src/share/vm/memory/cardTableModRefBS.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/memory/cardTableModRefBS.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -339,34 +339,10 @@
     _byte_map[card_index] = dirty_card_val();
   }
 
-  bool is_card_claimed(size_t card_index) {
-    jbyte val = _byte_map[card_index];
-    return (val & (clean_card_mask_val() | claimed_card_val())) == claimed_card_val();
-  }
-
-  void set_card_claimed(size_t card_index) {
-      jbyte val = _byte_map[card_index];
-      if (val == clean_card_val()) {
-        val = (jbyte)claimed_card_val();
-      } else {
-        val |= (jbyte)claimed_card_val();
-      }
-      _byte_map[card_index] = val;
-  }
-
-  bool claim_card(size_t card_index);
-
   bool is_card_clean(size_t card_index) {
     return _byte_map[card_index] == clean_card_val();
   }
 
-  bool is_card_deferred(size_t card_index) {
-    jbyte val = _byte_map[card_index];
-    return (val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val();
-  }
-
-  bool mark_card_deferred(size_t card_index);
-
   // Card marking array base (adjusted for heap low boundary)
   // This would be the 0th element of _byte_map, if the heap started at 0x0.
   // But since the heap starts at some higher address, this points to somewhere
--- a/src/share/vm/memory/collectorPolicy.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/memory/collectorPolicy.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -47,83 +47,53 @@
 
 // CollectorPolicy methods.
 
-// Align down. If the aligning result in 0, return 'alignment'.
-static size_t restricted_align_down(size_t size, size_t alignment) {
-  return MAX2(alignment, align_size_down_(size, alignment));
-}
-
 void CollectorPolicy::initialize_flags() {
-  assert(max_alignment() >= min_alignment(),
-      err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
-          max_alignment(), min_alignment()));
-  assert(max_alignment() % min_alignment() == 0,
-      err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT,
-          max_alignment(), min_alignment()));
+  assert(_max_alignment >= _min_alignment,
+         err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
+                 _max_alignment, _min_alignment));
+  assert(_max_alignment % _min_alignment == 0,
+         err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT,
+                 _max_alignment, _min_alignment));
 
   if (MaxHeapSize < InitialHeapSize) {
     vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
   }
 
-  if (!is_size_aligned(MaxMetaspaceSize, max_alignment())) {
-    FLAG_SET_ERGO(uintx, MaxMetaspaceSize,
-        restricted_align_down(MaxMetaspaceSize, max_alignment()));
-  }
-
-  if (MetaspaceSize > MaxMetaspaceSize) {
-    FLAG_SET_ERGO(uintx, MetaspaceSize, MaxMetaspaceSize);
-  }
-
-  if (!is_size_aligned(MetaspaceSize, min_alignment())) {
-    FLAG_SET_ERGO(uintx, MetaspaceSize,
-        restricted_align_down(MetaspaceSize, min_alignment()));
-  }
-
-  assert(MetaspaceSize <= MaxMetaspaceSize, "Must be");
-
-  MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, min_alignment());
-  MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, min_alignment());
-
-  MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment());
-
-  assert(MetaspaceSize    % min_alignment() == 0, "metapace alignment");
-  assert(MaxMetaspaceSize % max_alignment() == 0, "maximum metaspace alignment");
-  if (MetaspaceSize < 256*K) {
-    vm_exit_during_initialization("Too small initial Metaspace size");
-  }
+  MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, _min_alignment);
 }
 
 void CollectorPolicy::initialize_size_info() {
   // User inputs from -mx and ms must be aligned
-  set_min_heap_byte_size(align_size_up(Arguments::min_heap_size(), min_alignment()));
-  set_initial_heap_byte_size(align_size_up(InitialHeapSize, min_alignment()));
-  set_max_heap_byte_size(align_size_up(MaxHeapSize, max_alignment()));
+  _min_heap_byte_size = align_size_up(Arguments::min_heap_size(), _min_alignment);
+  _initial_heap_byte_size = align_size_up(InitialHeapSize, _min_alignment);
+  _max_heap_byte_size = align_size_up(MaxHeapSize, _max_alignment);
 
   // Check heap parameter properties
-  if (initial_heap_byte_size() < M) {
+  if (_initial_heap_byte_size < M) {
     vm_exit_during_initialization("Too small initial heap");
   }
   // Check heap parameter properties
-  if (min_heap_byte_size() < M) {
+  if (_min_heap_byte_size < M) {
     vm_exit_during_initialization("Too small minimum heap");
   }
-  if (initial_heap_byte_size() <= NewSize) {
+  if (_initial_heap_byte_size <= NewSize) {
      // make sure there is at least some room in old space
     vm_exit_during_initialization("Too small initial heap for new size specified");
   }
-  if (max_heap_byte_size() < min_heap_byte_size()) {
+  if (_max_heap_byte_size < _min_heap_byte_size) {
     vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
   }
-  if (initial_heap_byte_size() < min_heap_byte_size()) {
+  if (_initial_heap_byte_size < _min_heap_byte_size) {
     vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
   }
-  if (max_heap_byte_size() < initial_heap_byte_size()) {
+  if (_max_heap_byte_size < _initial_heap_byte_size) {
     vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
   }
 
   if (PrintGCDetails && Verbose) {
     gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT "  Initial heap "
       SIZE_FORMAT "  Maximum heap " SIZE_FORMAT,
-      min_heap_byte_size(), initial_heap_byte_size(), max_heap_byte_size());
+      _min_heap_byte_size, _initial_heap_byte_size, _max_heap_byte_size);
   }
 }
 
@@ -135,15 +105,8 @@
 
 GenRemSet* CollectorPolicy::create_rem_set(MemRegion whole_heap,
                                            int max_covered_regions) {
-  switch (rem_set_name()) {
-  case GenRemSet::CardTable: {
-    CardTableRS* res = new CardTableRS(whole_heap, max_covered_regions);
-    return res;
-  }
-  default:
-    guarantee(false, "unrecognized GenRemSet::Name");
-    return NULL;
-  }
+  assert(rem_set_name() == GenRemSet::CardTable, "unrecognized GenRemSet::Name");
+  return new CardTableRS(whole_heap, max_covered_regions);
 }
 
 void CollectorPolicy::cleared_all_soft_refs() {
@@ -185,15 +148,15 @@
 
 size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
   size_t x = base_size / (NewRatio+1);
-  size_t new_gen_size = x > min_alignment() ?
-                     align_size_down(x, min_alignment()) :
-                     min_alignment();
+  size_t new_gen_size = x > _min_alignment ?
+                     align_size_down(x, _min_alignment) :
+                     _min_alignment;
   return new_gen_size;
 }
 
 size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
                                                  size_t maximum_size) {
-  size_t alignment = min_alignment();
+  size_t alignment = _min_alignment;
   size_t max_minus = maximum_size - alignment;
   return desired_size < max_minus ? desired_size : max_minus;
 }
@@ -212,8 +175,8 @@
 
 void GenCollectorPolicy::initialize_flags() {
   // All sizes must be multiples of the generation granularity.
-  set_min_alignment((uintx) Generation::GenGrain);
-  set_max_alignment(compute_max_alignment());
+  _min_alignment = (uintx) Generation::GenGrain;
+  _max_alignment = compute_max_alignment();
 
   CollectorPolicy::initialize_flags();
 
@@ -223,26 +186,26 @@
   if (NewSize > MaxNewSize) {
     MaxNewSize = NewSize;
   }
-  NewSize = align_size_down(NewSize, min_alignment());
-  MaxNewSize = align_size_down(MaxNewSize, min_alignment());
+  NewSize = align_size_down(NewSize, _min_alignment);
+  MaxNewSize = align_size_down(MaxNewSize, _min_alignment);
 
   // Check validity of heap flags
-  assert(NewSize     % min_alignment() == 0, "eden space alignment");
-  assert(MaxNewSize  % min_alignment() == 0, "survivor space alignment");
+  assert(NewSize     % _min_alignment == 0, "eden space alignment");
+  assert(MaxNewSize  % _min_alignment == 0, "survivor space alignment");
 
-  if (NewSize < 3*min_alignment()) {
+  if (NewSize < 3 * _min_alignment) {
      // make sure there room for eden and two survivor spaces
     vm_exit_during_initialization("Too small new size specified");
   }
   if (SurvivorRatio < 1 || NewRatio < 1) {
-    vm_exit_during_initialization("Invalid heap ratio specified");
+    vm_exit_during_initialization("Invalid young gen ratio specified");
   }
 }
 
 void TwoGenerationCollectorPolicy::initialize_flags() {
   GenCollectorPolicy::initialize_flags();
 
-  OldSize = align_size_down(OldSize, min_alignment());
+  OldSize = align_size_down(OldSize, _min_alignment);
 
   if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(NewSize)) {
     // NewRatio will be used later to set the young generation size so we use
@@ -251,11 +214,11 @@
     assert(NewRatio > 0, "NewRatio should have been set up earlier");
     size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1);
 
-    calculated_heapsize = align_size_up(calculated_heapsize, max_alignment());
+    calculated_heapsize = align_size_up(calculated_heapsize, _max_alignment);
     MaxHeapSize = calculated_heapsize;
     InitialHeapSize = calculated_heapsize;
   }
-  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());
+  MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment);
 
   // adjust max heap size if necessary
   if (NewSize + OldSize > MaxHeapSize) {
@@ -265,18 +228,18 @@
       uintx calculated_size = NewSize + OldSize;
       double shrink_factor = (double) MaxHeapSize / calculated_size;
       // align
-      NewSize = align_size_down((uintx) (NewSize * shrink_factor), min_alignment());
+      NewSize = align_size_down((uintx) (NewSize * shrink_factor), _min_alignment);
       // OldSize is already aligned because above we aligned MaxHeapSize to
-      // max_alignment(), and we just made sure that NewSize is aligned to
-      // min_alignment(). In initialize_flags() we verified that max_alignment()
-      // is a multiple of min_alignment().
+      // _max_alignment, and we just made sure that NewSize is aligned to
+      // _min_alignment. In initialize_flags() we verified that _max_alignment
+      // is a multiple of _min_alignment.
       OldSize = MaxHeapSize - NewSize;
     } else {
       MaxHeapSize = NewSize + OldSize;
     }
   }
   // need to do this again
-  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());
+  MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment);
 
   // adjust max heap size if necessary
   if (NewSize + OldSize > MaxHeapSize) {
@@ -286,24 +249,24 @@
       uintx calculated_size = NewSize + OldSize;
       double shrink_factor = (double) MaxHeapSize / calculated_size;
       // align
-      NewSize = align_size_down((uintx) (NewSize * shrink_factor), min_alignment());
+      NewSize = align_size_down((uintx) (NewSize * shrink_factor), _min_alignment);
       // OldSize is already aligned because above we aligned MaxHeapSize to
-      // max_alignment(), and we just made sure that NewSize is aligned to
-      // min_alignment(). In initialize_flags() we verified that max_alignment()
-      // is a multiple of min_alignment().
+      // _max_alignment, and we just made sure that NewSize is aligned to
+      // _min_alignment. In initialize_flags() we verified that _max_alignment
+      // is a multiple of _min_alignment.
       OldSize = MaxHeapSize - NewSize;
     } else {
       MaxHeapSize = NewSize + OldSize;
     }
   }
   // need to do this again
-  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());
+  MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment);
 
   always_do_update_barrier = UseConcMarkSweepGC;
 
   // Check validity of heap flags
-  assert(OldSize     % min_alignment() == 0, "old space alignment");
-  assert(MaxHeapSize % max_alignment() == 0, "maximum heap alignment");
+  assert(OldSize     % _min_alignment == 0, "old space alignment");
+  assert(MaxHeapSize % _max_alignment == 0, "maximum heap alignment");
 }
 
 // Values set on the command line win over any ergonomically
@@ -318,7 +281,7 @@
 void GenCollectorPolicy::initialize_size_info() {
   CollectorPolicy::initialize_size_info();
 
-  // min_alignment() is used for alignment within a generation.
+  // _min_alignment is used for alignment within a generation.
   // There is additional alignment done down stream for some
   // collectors that sometimes causes unwanted rounding up of
   // generations sizes.
@@ -327,18 +290,18 @@
 
   size_t max_new_size = 0;
   if (FLAG_IS_CMDLINE(MaxNewSize) || FLAG_IS_ERGO(MaxNewSize)) {
-    if (MaxNewSize < min_alignment()) {
-      max_new_size = min_alignment();
+    if (MaxNewSize < _min_alignment) {
+      max_new_size = _min_alignment;
     }
-    if (MaxNewSize >= max_heap_byte_size()) {
-      max_new_size = align_size_down(max_heap_byte_size() - min_alignment(),
-                                     min_alignment());
+    if (MaxNewSize >= _max_heap_byte_size) {
+      max_new_size = align_size_down(_max_heap_byte_size - _min_alignment,
+                                     _min_alignment);
       warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or "
         "greater than the entire heap (" SIZE_FORMAT "k).  A "
         "new generation size of " SIZE_FORMAT "k will be used.",
-        MaxNewSize/K, max_heap_byte_size()/K, max_new_size/K);
+        MaxNewSize/K, _max_heap_byte_size/K, max_new_size/K);
     } else {
-      max_new_size = align_size_down(MaxNewSize, min_alignment());
+      max_new_size = align_size_down(MaxNewSize, _min_alignment);
     }
 
   // The case for FLAG_IS_ERGO(MaxNewSize) could be treated
@@ -356,7 +319,7 @@
   // just accept those choices.  The choices currently made are
   // not always "wise".
   } else {
-    max_new_size = scale_by_NewRatio_aligned(max_heap_byte_size());
+    max_new_size = scale_by_NewRatio_aligned(_max_heap_byte_size);
     // Bound the maximum size by NewSize below (since it historically
     // would have been NewSize and because the NewRatio calculation could
     // yield a size that is too small) and bound it by MaxNewSize above.
@@ -369,13 +332,13 @@
   // Given the maximum gen0 size, determine the initial and
   // minimum gen0 sizes.
 
-  if (max_heap_byte_size() == min_heap_byte_size()) {
+  if (_max_heap_byte_size == _min_heap_byte_size) {
     // The maximum and minimum heap sizes are the same so
     // the generations minimum and initial must be the
     // same as its maximum.
-    set_min_gen0_size(max_new_size);
-    set_initial_gen0_size(max_new_size);
-    set_max_gen0_size(max_new_size);
+    _min_gen0_size = max_new_size;
+    _initial_gen0_size = max_new_size;
+    _max_gen0_size = max_new_size;
   } else {
     size_t desired_new_size = 0;
     if (!FLAG_IS_DEFAULT(NewSize)) {
@@ -396,43 +359,37 @@
       // Use the default NewSize as the floor for these values.  If
       // NewRatio is overly large, the resulting sizes can be too
       // small.
-      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(min_heap_byte_size()),
-                          NewSize);
+      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(_min_heap_byte_size), NewSize);
       desired_new_size =
-        MAX2(scale_by_NewRatio_aligned(initial_heap_byte_size()),
-             NewSize);
+        MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
     }
 
     assert(_min_gen0_size > 0, "Sanity check");
-    set_initial_gen0_size(desired_new_size);
-    set_max_gen0_size(max_new_size);
+    _initial_gen0_size = desired_new_size;
+    _max_gen0_size = max_new_size;
 
     // At this point the desirable initial and minimum sizes have been
     // determined without regard to the maximum sizes.
 
     // Bound the sizes by the corresponding overall heap sizes.
-    set_min_gen0_size(
-      bound_minus_alignment(_min_gen0_size, min_heap_byte_size()));
-    set_initial_gen0_size(
-      bound_minus_alignment(_initial_gen0_size, initial_heap_byte_size()));
-    set_max_gen0_size(
-      bound_minus_alignment(_max_gen0_size, max_heap_byte_size()));
+    _min_gen0_size = bound_minus_alignment(_min_gen0_size, _min_heap_byte_size);
+    _initial_gen0_size = bound_minus_alignment(_initial_gen0_size, _initial_heap_byte_size);
+    _max_gen0_size = bound_minus_alignment(_max_gen0_size, _max_heap_byte_size);
 
     // At this point all three sizes have been checked against the
     // maximum sizes but have not been checked for consistency
     // among the three.
 
     // Final check min <= initial <= max
-    set_min_gen0_size(MIN2(_min_gen0_size, _max_gen0_size));
-    set_initial_gen0_size(
-      MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size));
-    set_min_gen0_size(MIN2(_min_gen0_size, _initial_gen0_size));
+    _min_gen0_size = MIN2(_min_gen0_size, _max_gen0_size);
+    _initial_gen0_size = MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size);
+    _min_gen0_size = MIN2(_min_gen0_size, _initial_gen0_size);
   }
 
   if (PrintGCDetails && Verbose) {
     gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
       SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
-      min_gen0_size(), initial_gen0_size(), max_gen0_size());
+      _min_gen0_size, _initial_gen0_size, _max_gen0_size);
   }
 }
 
@@ -452,19 +409,17 @@
 
   if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) {
     if ((heap_size < (*gen0_size_ptr + min_gen1_size)) &&
-        (heap_size >= min_gen1_size + min_alignment())) {
+        (heap_size >= min_gen1_size + _min_alignment)) {
       // Adjust gen0 down to accommodate min_gen1_size
       *gen0_size_ptr = heap_size - min_gen1_size;
       *gen0_size_ptr =
-        MAX2((uintx)align_size_down(*gen0_size_ptr, min_alignment()),
-             min_alignment());
+        MAX2((uintx)align_size_down(*gen0_size_ptr, _min_alignment), _min_alignment);
       assert(*gen0_size_ptr > 0, "Min gen0 is too large");
       result = true;
     } else {
       *gen1_size_ptr = heap_size - *gen0_size_ptr;
       *gen1_size_ptr =
-        MAX2((uintx)align_size_down(*gen1_size_ptr, min_alignment()),
-                       min_alignment());
+        MAX2((uintx)align_size_down(*gen1_size_ptr, _min_alignment), _min_alignment);
     }
   }
   return result;
@@ -485,10 +440,9 @@
   // The maximum gen1 size can be determined from the maximum gen0
   // and maximum heap size since no explicit flags exits
   // for setting the gen1 maximum.
-  _max_gen1_size = max_heap_byte_size() - _max_gen0_size;
+  _max_gen1_size = _max_heap_byte_size - _max_gen0_size;
   _max_gen1_size =
-    MAX2((uintx)align_size_down(_max_gen1_size, min_alignment()),
-         min_alignment());
+    MAX2((uintx)align_size_down(_max_gen1_size, _min_alignment), _min_alignment);
   // If no explicit command line flag has been set for the
   // gen1 size, use what is left for gen1.
   if (FLAG_IS_DEFAULT(OldSize) || FLAG_IS_ERGO(OldSize)) {
@@ -497,70 +451,66 @@
     // with the overall heap size).  In either case make
     // the minimum, maximum and initial sizes consistent
     // with the gen0 sizes and the overall heap sizes.
-    assert(min_heap_byte_size() > _min_gen0_size,
+    assert(_min_heap_byte_size > _min_gen0_size,
       "gen0 has an unexpected minimum size");
-    set_min_gen1_size(min_heap_byte_size() - min_gen0_size());
-    set_min_gen1_size(
-      MAX2((uintx)align_size_down(_min_gen1_size, min_alignment()),
-           min_alignment()));
-    set_initial_gen1_size(initial_heap_byte_size() - initial_gen0_size());
-    set_initial_gen1_size(
-      MAX2((uintx)align_size_down(_initial_gen1_size, min_alignment()),
-           min_alignment()));
-
+    _min_gen1_size = _min_heap_byte_size - _min_gen0_size;
+    _min_gen1_size =
+      MAX2((uintx)align_size_down(_min_gen1_size, _min_alignment), _min_alignment);
+    _initial_gen1_size = _initial_heap_byte_size - _initial_gen0_size;
+    _initial_gen1_size =
+      MAX2((uintx)align_size_down(_initial_gen1_size, _min_alignment), _min_alignment);
   } else {
     // It's been explicitly set on the command line.  Use the
     // OldSize and then determine the consequences.
-    set_min_gen1_size(OldSize);
-    set_initial_gen1_size(OldSize);
+    _min_gen1_size = OldSize;
+    _initial_gen1_size = OldSize;
 
     // If the user has explicitly set an OldSize that is inconsistent
     // with other command line flags, issue a warning.
     // The generation minimums and the overall heap minimum should
     // be within one heap alignment.
-    if ((_min_gen1_size + _min_gen0_size + min_alignment()) <
-           min_heap_byte_size()) {
+    if ((_min_gen1_size + _min_gen0_size + _min_alignment) < _min_heap_byte_size) {
       warning("Inconsistency between minimum heap size and minimum "
-          "generation sizes: using minimum heap = " SIZE_FORMAT,
-          min_heap_byte_size());
+              "generation sizes: using minimum heap = " SIZE_FORMAT,
+              _min_heap_byte_size);
     }
     if ((OldSize > _max_gen1_size)) {
       warning("Inconsistency between maximum heap size and maximum "
-          "generation sizes: using maximum heap = " SIZE_FORMAT
-          " -XX:OldSize flag is being ignored",
-          max_heap_byte_size());
+              "generation sizes: using maximum heap = " SIZE_FORMAT
+              " -XX:OldSize flag is being ignored",
+              _max_heap_byte_size);
     }
     // If there is an inconsistency between the OldSize and the minimum and/or
     // initial size of gen0, since OldSize was explicitly set, OldSize wins.
     if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size,
-                          min_heap_byte_size(), OldSize)) {
+                          _min_heap_byte_size, OldSize)) {
       if (PrintGCDetails && Verbose) {
         gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
               SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
-              min_gen0_size(), initial_gen0_size(), max_gen0_size());
+              _min_gen0_size, _initial_gen0_size, _max_gen0_size);
       }
     }
     // Initial size
     if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
-                         initial_heap_byte_size(), OldSize)) {
+                          _initial_heap_byte_size, OldSize)) {
       if (PrintGCDetails && Verbose) {
         gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
           SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
-          min_gen0_size(), initial_gen0_size(), max_gen0_size());
+          _min_gen0_size, _initial_gen0_size, _max_gen0_size);
       }
     }
   }
   // Enforce the maximum gen1 size.
-  set_min_gen1_size(MIN2(_min_gen1_size, _max_gen1_size));
+  _min_gen1_size = MIN2(_min_gen1_size, _max_gen1_size);
 
   // Check that min gen1 <= initial gen1 <= max gen1
-  set_initial_gen1_size(MAX2(_initial_gen1_size, _min_gen1_size));
-  set_initial_gen1_size(MIN2(_initial_gen1_size, _max_gen1_size));
+  _initial_gen1_size = MAX2(_initial_gen1_size, _min_gen1_size);
+  _initial_gen1_size = MIN2(_initial_gen1_size, _max_gen1_size);
 
   if (PrintGCDetails && Verbose) {
     gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT "  Initial gen1 "
       SIZE_FORMAT "  Maximum gen1 " SIZE_FORMAT,
-      min_gen1_size(), initial_gen1_size(), max_gen1_size());
+      _min_gen1_size, _initial_gen1_size, _max_gen1_size);
   }
 }
 
--- a/src/share/vm/memory/collectorPolicy.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/memory/collectorPolicy.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -101,17 +101,12 @@
   // Return maximum heap alignment that may be imposed by the policy
   static size_t compute_max_alignment();
 
-  void set_min_alignment(size_t align)         { _min_alignment = align; }
   size_t min_alignment()                       { return _min_alignment; }
-  void set_max_alignment(size_t align)         { _max_alignment = align; }
   size_t max_alignment()                       { return _max_alignment; }
 
   size_t initial_heap_byte_size() { return _initial_heap_byte_size; }
-  void set_initial_heap_byte_size(size_t v) { _initial_heap_byte_size = v; }
   size_t max_heap_byte_size()     { return _max_heap_byte_size; }
-  void set_max_heap_byte_size(size_t v) { _max_heap_byte_size = v; }
   size_t min_heap_byte_size()     { return _min_heap_byte_size; }
-  void set_min_heap_byte_size(size_t v) { _min_heap_byte_size = v; }
 
   enum Name {
     CollectorPolicyKind,
@@ -248,12 +243,9 @@
 
  public:
   // Accessors
-  size_t min_gen0_size() { return _min_gen0_size; }
-  void set_min_gen0_size(size_t v) { _min_gen0_size = v; }
+  size_t min_gen0_size()     { return _min_gen0_size; }
   size_t initial_gen0_size() { return _initial_gen0_size; }
-  void set_initial_gen0_size(size_t v) { _initial_gen0_size = v; }
-  size_t max_gen0_size() { return _max_gen0_size; }
-  void set_max_gen0_size(size_t v) { _max_gen0_size = v; }
+  size_t max_gen0_size()     { return _max_gen0_size; }
 
   virtual int number_of_generations() = 0;
 
@@ -302,12 +294,9 @@
 
  public:
   // Accessors
-  size_t min_gen1_size() { return _min_gen1_size; }
-  void set_min_gen1_size(size_t v) { _min_gen1_size = v; }
+  size_t min_gen1_size()     { return _min_gen1_size; }
   size_t initial_gen1_size() { return _initial_gen1_size; }
-  void set_initial_gen1_size(size_t v) { _initial_gen1_size = v; }
-  size_t max_gen1_size() { return _max_gen1_size; }
-  void set_max_gen1_size(size_t v) { _max_gen1_size = v; }
+  size_t max_gen1_size()     { return _max_gen1_size; }
 
   // Inherited methods
   TwoGenerationCollectorPolicy* as_two_generation_policy() { return this; }
--- a/src/share/vm/memory/filemap.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/memory/filemap.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -26,6 +26,7 @@
 #define SHARE_VM_MEMORY_FILEMAP_HPP
 
 #include "memory/metaspaceShared.hpp"
+#include "memory/metaspace.hpp"
 
 // Layout of the file:
 //  header: dump of archive instance plus versioning info, datestamp, etc.
--- a/src/share/vm/memory/genRemSet.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/memory/genRemSet.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -32,13 +32,8 @@
 // enumeration.)
 
 uintx GenRemSet::max_alignment_constraint(Name nm) {
-  switch (nm) {
-  case GenRemSet::CardTable:
-    return CardTableRS::ct_max_alignment_constraint();
-  default:
-    guarantee(false, "Unrecognized GenRemSet type.");
-    return (0); // Make Windows compiler happy
-  }
+  assert(nm == GenRemSet::CardTable, "Unrecognized GenRemSet type.");
+  return CardTableRS::ct_max_alignment_constraint();
 }
 
 class HasAccumulatedModifiedOopsClosure : public KlassClosure {
--- a/src/share/vm/memory/heapInspection.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/memory/heapInspection.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -73,6 +73,10 @@
         "Number of bytes used by the InstanceKlass::methods() array") \
     f(method_ordering_bytes, IK_method_ordering, \
         "Number of bytes used by the InstanceKlass::method_ordering() array") \
+    f(default_methods_array_bytes, IK_default_methods, \
+        "Number of bytes used by the InstanceKlass::default_methods() array") \
+    f(default_vtable_indices_bytes, IK_default_vtable_indices, \
+        "Number of bytes used by the InstanceKlass::default_vtable_indices() array") \
     f(local_interfaces_bytes, IK_local_interfaces, \
         "Number of bytes used by the InstanceKlass::local_interfaces() array") \
     f(transitive_interfaces_bytes, IK_transitive_interfaces, \
@@ -150,11 +154,11 @@
   HEAP_INSPECTION_COLUMNS_DO(DECLARE_KLASS_SIZE_STATS_FIELD)
 
   static int count(oop x) {
-    return (HeapWordSize * ((x) ? (x)->size() : 0));
+    return (HeapWordSize * (((x) != NULL) ? (x)->size() : 0));
   }
 
   static int count_array(objArrayOop x) {
-    return (HeapWordSize * ((x) ? (x)->size() : 0));
+    return (HeapWordSize * (((x) != NULL) ? (x)->size() : 0));
   }
 
   template <class T> static int count(T* x) {
--- a/src/share/vm/memory/metaspace.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/memory/metaspace.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -29,17 +29,21 @@
 #include "memory/collectorPolicy.hpp"
 #include "memory/filemap.hpp"
 #include "memory/freeList.hpp"
+#include "memory/gcLocker.hpp"
 #include "memory/metablock.hpp"
 #include "memory/metachunk.hpp"
 #include "memory/metaspace.hpp"
 #include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
+#include "runtime/atomic.inline.hpp"
 #include "runtime/globals.hpp"
+#include "runtime/init.hpp"
 #include "runtime/java.hpp"
 #include "runtime/mutex.hpp"
 #include "runtime/orderAccess.hpp"
 #include "services/memTracker.hpp"
+#include "services/memoryService.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/debug.hpp"
 
@@ -84,13 +88,7 @@
   return (ChunkIndex) (i+1);
 }
 
-// Originally _capacity_until_GC was set to MetaspaceSize here but
-// the default MetaspaceSize before argument processing was being
-// used which was not the desired value.  See the code
-// in should_expand() to see how the initialization is handled
-// now.
-size_t MetaspaceGC::_capacity_until_GC = 0;
-bool MetaspaceGC::_expand_after_GC = false;
+volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
 uint MetaspaceGC::_shrink_factor = 0;
 bool MetaspaceGC::_should_concurrent_collect = false;
 
@@ -293,9 +291,10 @@
   MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
 
   size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
-  size_t expanded_words() const  { return _virtual_space.committed_size() / BytesPerWord; }
   size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
 
+  bool is_pre_committed() const { return _virtual_space.special(); }
+
   // address of next available space in _virtual_space;
   // Accessors
   VirtualSpaceNode* next() { return _next; }
@@ -337,7 +336,7 @@
 
   // Expands/shrinks the committed space in a virtual space.  Delegates
   // to Virtualspace
-  bool expand_by(size_t words, bool pre_touch = false);
+  bool expand_by(size_t min_words, size_t preferred_words);
 
   // In preparation for deleting this node, remove all the chunks
   // in the node from any freelist.
@@ -351,29 +350,64 @@
   void print_on(outputStream* st) const;
 };
 
+#define assert_is_ptr_aligned(ptr, alignment) \
+  assert(is_ptr_aligned(ptr, alignment),      \
+    err_msg(PTR_FORMAT " is not aligned to "  \
+      SIZE_FORMAT, ptr, alignment))
+
+#define assert_is_size_aligned(size, alignment) \
+  assert(is_size_aligned(size, alignment),      \
+    err_msg(SIZE_FORMAT " is not aligned to "   \
+       SIZE_FORMAT, size, alignment))
+
+
+// Decide if large pages should be committed when the memory is reserved.
+static bool should_commit_large_pages_when_reserving(size_t bytes) {
+  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
+    size_t words = bytes / BytesPerWord;
+    bool is_class = false; // We never reserve large pages for the class space.
+    if (MetaspaceGC::can_expand(words, is_class) &&
+        MetaspaceGC::allowed_expansion() >= words) {
+      return true;
+    }
+  }
+
+  return false;
+}
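
A freestanding restatement of that decision, with the flag and OS queries
passed in as parameters (all names hypothetical): large pages are committed
eagerly only when the OS cannot commit them piecemeal and the GC policy would
permit committing the whole reservation anyway.

    #include <cstddef>

    static bool commit_large_pages_eagerly(bool use_large_pages_in_metaspace,
                                           bool os_can_commit_large,
                                           size_t words,
                                           size_t allowed_expansion_words) {
      return use_large_pages_in_metaspace
          && !os_can_commit_large
          && allowed_expansion_words >= words;   // policy allows full commit
    }
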
+
   // byte_size is the size of the associated virtualspace.
-VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
-  // align up to vm allocation granularity
-  byte_size = align_size_up(byte_size, os::vm_allocation_granularity());
+VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
+  assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
 
   // This allocates memory with mmap.  For DumpSharedspaces, try to reserve
   // configurable address, generally at the top of the Java heap so other
   // memory addresses don't conflict.
   if (DumpSharedSpaces) {
-    char* shared_base = (char*)SharedBaseAddress;
-    _rs = ReservedSpace(byte_size, 0, false, shared_base, 0);
+    bool large_pages = false; // No large pages when dumping the CDS archive.
+    char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
+
+    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0);
     if (_rs.is_reserved()) {
       assert(shared_base == 0 || _rs.base() == shared_base, "should match");
     } else {
       // Get a mmap region anywhere if the SharedBaseAddress fails.
-      _rs = ReservedSpace(byte_size);
+      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
     }
     MetaspaceShared::set_shared_rs(&_rs);
   } else {
-    _rs = ReservedSpace(byte_size);
+    bool large_pages = should_commit_large_pages_when_reserving(bytes);
+
+    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
   }
 
-  MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
+  if (_rs.is_reserved()) {
+    assert(_rs.base() != NULL, "Catch if we get a NULL address");
+    assert(_rs.size() != 0, "Catch if we get a 0 size");
+    assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
+    assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());
+
+    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
+  }
 }
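
The assert_is_ptr_aligned/assert_is_size_aligned macros used here wrap
HotSpot's is_ptr_aligned/is_size_aligned predicates. A self-contained sketch of
the underlying checks, assuming power-of-two alignments:

    #include <cstddef>
    #include <cstdint>

    static bool size_is_aligned(size_t size, size_t alignment) {
      return (size & (alignment - 1)) == 0;   // alignment: power of two
    }
    static bool ptr_is_aligned(const void* p, size_t alignment) {
      return size_is_aligned((size_t)(uintptr_t)p, alignment);
    }
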
 
 void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
@@ -410,8 +444,6 @@
 #endif
 
 // List of VirtualSpaces for metadata allocation.
-// It has a  _next link for singly linked list and a MemRegion
-// for total space in the VirtualSpace.
 class VirtualSpaceList : public CHeapObj<mtClass> {
   friend class VirtualSpaceNode;
 
@@ -419,16 +451,13 @@
     VirtualSpaceSize = 256 * K
   };
 
-  // Global list of virtual spaces
   // Head of the list
   VirtualSpaceNode* _virtual_space_list;
   // virtual space currently being used for allocations
   VirtualSpaceNode* _current_virtual_space;
 
-  // Can this virtual list allocate >1 spaces?  Also, used to determine
-  // whether to allocate unlimited small chunks in this virtual space
+  // Is this VirtualSpaceList used for the compressed class space?
   bool _is_class;
-  bool can_grow() const { return !is_class() || !UseCompressedClassPointers; }
 
   // Sum of reserved and committed memory in the virtual spaces
   size_t _reserved_words;
@@ -453,7 +482,7 @@
   // Get another virtual space and add it to the list.  This
   // is typically prompted by a failed attempt to allocate a chunk
   // and is typically followed by the allocation of a chunk.
-  bool grow_vs(size_t vs_word_size);
+  bool create_new_virtual_space(size_t vs_word_size);
 
  public:
   VirtualSpaceList(size_t word_size);
@@ -465,12 +494,12 @@
                            size_t grow_chunks_by_words,
                            size_t medium_chunk_bunch);
 
-  bool expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch = false);
-
-  // Get the first chunk for a Metaspace.  Used for
-  // special cases such as the boot class loader, reflection
-  // class loader and anonymous class loader.
-  Metachunk* get_initialization_chunk(size_t word_size, size_t chunk_bunch);
+  bool expand_node_by(VirtualSpaceNode* node,
+                      size_t min_words,
+                      size_t preferred_words);
+
+  bool expand_by(size_t min_words,
+                 size_t preferred_words);
 
   VirtualSpaceNode* current_virtual_space() {
     return _current_virtual_space;
@@ -478,8 +507,7 @@
 
   bool is_class() const { return _is_class; }
 
-  // Allocate the first virtualspace.
-  void initialize(size_t word_size);
+  bool initialization_succeeded() { return _virtual_space_list != NULL; }
 
   size_t reserved_words()  { return _reserved_words; }
   size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
@@ -708,6 +736,9 @@
   // and allocates from that chunk.
   MetaWord* grow_and_allocate(size_t word_size);
 
+  // Report memory usage to MemoryService.
+  void track_metaspace_memory_usage();
+
   // debugging support.
 
   void dump(outputStream* const out) const;
@@ -869,6 +900,12 @@
   MetaWord* chunk_limit = top();
   assert(chunk_limit != NULL, "Not safe to call this method");
 
+  // The virtual spaces are always expanded in multiples of the
+  // commit granularity to enforce the following condition.
+  // Without this, the is_available check will not work correctly.
+  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
+      "The committed memory doesn't match the expanded memory.");
+
   if (!is_available(chunk_word_size)) {
     if (TraceMetadataChunkAllocation) {
       gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size);
@@ -888,14 +925,21 @@
 
 
 // Expand the virtual space (commit more of the reserved space)
-bool VirtualSpaceNode::expand_by(size_t words, bool pre_touch) {
-  size_t bytes = words * BytesPerWord;
-  bool result =  virtual_space()->expand_by(bytes, pre_touch);
-  if (TraceMetavirtualspaceAllocation && !result) {
-    gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed "
-                           "for byte size " SIZE_FORMAT, bytes);
-    virtual_space()->print_on(gclog_or_tty);
+bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
+  size_t min_bytes = min_words * BytesPerWord;
+  size_t preferred_bytes = preferred_words * BytesPerWord;
+
+  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
+
+  if (uncommitted < min_bytes) {
+    return false;
   }
+
+  size_t commit = MIN2(preferred_bytes, uncommitted);
+  bool result = virtual_space()->expand_by(commit, false);
+
+  assert(result, "Failed to commit memory");
+
   return result;
 }
 
@@ -914,12 +958,23 @@
     return false;
   }
 
-  // An allocation out of this Virtualspace that is larger
-  // than an initial commit size can waste that initial committed
-  // space.
-  size_t committed_byte_size = 0;
-  bool result = virtual_space()->initialize(_rs, committed_byte_size);
+  // These are necessary restrictions to make sure that the virtual space always
+  // grows in steps of Metaspace::commit_alignment(). If both base and size are
+  // aligned, only the middle alignment of the VirtualSpace is used.
+  assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
+  assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());
+
+  // ReservedSpaces marked as special will have the entire memory
+  // pre-committed. Setting a committed size will make sure that
+  // committed_size and actual_committed_size agree.
+  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
+
+  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
+                                            Metaspace::commit_alignment());
   if (result) {
+    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
+        "Checking that the pre-committed memory was registered by the VirtualSpace");
+
     set_top((MetaWord*)virtual_space()->low());
     set_reserved(MemRegion((HeapWord*)_rs.base(),
                  (HeapWord*)(_rs.base() + _rs.size())));
@@ -976,13 +1031,23 @@
   _reserved_words = _reserved_words - v;
 }
 
+#define assert_committed_below_limit()                             \
+  assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize,      \
+      err_msg("Too much committed memory. Committed: " SIZE_FORMAT \
+              " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
+          MetaspaceAux::committed_bytes(), MaxMetaspaceSize));
+
 void VirtualSpaceList::inc_committed_words(size_t v) {
   assert_lock_strong(SpaceManager::expand_lock());
   _committed_words = _committed_words + v;
+
+  assert_committed_below_limit();
 }
 void VirtualSpaceList::dec_committed_words(size_t v) {
   assert_lock_strong(SpaceManager::expand_lock());
   _committed_words = _committed_words - v;
+
+  assert_committed_below_limit();
 }
 
 void VirtualSpaceList::inc_virtual_space_count() {
@@ -1025,8 +1090,8 @@
     if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
       // Unlink it from the list
       if (prev_vsl == vsl) {
-        // This is the case of the current note being the first note.
-        assert(vsl == virtual_space_list(), "Expected to be the first note");
+        // This is the case of the current node being the first node.
+        assert(vsl == virtual_space_list(), "Expected to be the first node");
         set_virtual_space_list(vsl->next());
       } else {
         prev_vsl->set_next(vsl->next());
@@ -1054,7 +1119,7 @@
 #endif
 }
 
-VirtualSpaceList::VirtualSpaceList(size_t word_size ) :
+VirtualSpaceList::VirtualSpaceList(size_t word_size) :
                                    _is_class(false),
                                    _virtual_space_list(NULL),
                                    _current_virtual_space(NULL),
@@ -1063,9 +1128,7 @@
                                    _virtual_space_count(0) {
   MutexLockerEx cl(SpaceManager::expand_lock(),
                    Mutex::_no_safepoint_check_flag);
-  bool initialization_succeeded = grow_vs(word_size);
-  assert(initialization_succeeded,
-    " VirtualSpaceList initialization should not fail");
+  create_new_virtual_space(word_size);
 }
 
 VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
@@ -1079,8 +1142,9 @@
                    Mutex::_no_safepoint_check_flag);
   VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
   bool succeeded = class_entry->initialize();
-  assert(succeeded, " VirtualSpaceList initialization should not fail");
-  link_vs(class_entry);
+  if (succeeded) {
+    link_vs(class_entry);
+  }
 }
 
 size_t VirtualSpaceList::free_bytes() {
@@ -1088,14 +1152,24 @@
 }
 
 // Allocate another meta virtual space and add it to the list.
-bool VirtualSpaceList::grow_vs(size_t vs_word_size) {
+bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
   assert_lock_strong(SpaceManager::expand_lock());
-  if (vs_word_size == 0) {
+
+  if (is_class()) {
+    assert(false, "We currently don't support more than one VirtualSpace for"
+                  " the compressed class space. The initialization of the"
+                  " CCS uses another code path and should not hit this path.");
     return false;
   }
+
+  if (vs_word_size == 0) {
+    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
+    return false;
+  }
+
   // Reserve the space
   size_t vs_byte_size = vs_word_size * BytesPerWord;
-  assert(vs_byte_size % os::vm_allocation_granularity() == 0, "Not aligned");
+  assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());
 
   // Allocate the meta virtual space and initialize it.
   VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
@@ -1103,7 +1177,8 @@
     delete new_entry;
     return false;
   } else {
-    assert(new_entry->reserved_words() == vs_word_size, "Must be");
+    assert(new_entry->reserved_words() == vs_word_size,
+        "Reserved memory size differs from requested memory size");
     // ensure lock-free iteration sees fully initialized node
     OrderAccess::storestore();
     link_vs(new_entry);
@@ -1130,20 +1205,67 @@
   }
 }
 
-bool VirtualSpaceList::expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch) {
+bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
+                                      size_t min_words,
+                                      size_t preferred_words) {
   size_t before = node->committed_words();
 
-  bool result = node->expand_by(word_size, pre_touch);
+  bool result = node->expand_by(min_words, preferred_words);
 
   size_t after = node->committed_words();
 
   // after and before can be the same if the memory was pre-committed.
-  assert(after >= before, "Must be");
+  assert(after >= before, "Inconsistency");
   inc_committed_words(after - before);
 
   return result;
 }
 
+bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
+  assert_is_size_aligned(min_words,       Metaspace::commit_alignment_words());
+  assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
+  assert(min_words <= preferred_words, "Invalid arguments");
+
+  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
+    return false;
+  }
+
+  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
+  if (allowed_expansion_words < min_words) {
+    return false;
+  }
+
+  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
+
+  // Commit more memory from the current virtual space.
+  bool vs_expanded = expand_node_by(current_virtual_space(),
+                                    min_words,
+                                    max_expansion_words);
+  if (vs_expanded) {
+    return true;
+  }
+
+  // Get another virtual space.
+  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
+  grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());
+
+  if (create_new_virtual_space(grow_vs_words)) {
+    if (current_virtual_space()->is_pre_committed()) {
+      // The memory was pre-committed, so we are done here.
+      assert(min_words <= current_virtual_space()->committed_words(),
+          "The new VirtualSpace was pre-committed, so it"
+          "should be large enough to fit the alloc request.");
+      return true;
+    }
+
+    return expand_node_by(current_virtual_space(),
+                          min_words,
+                          max_expansion_words);
+  }
+
+  return false;
+}
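
A compact model of the two-step policy in VirtualSpaceList::expand_by above:
commit into the current space if it has room for at least min_words, otherwise
reserve a new space and retry the commit there. Everything here is
illustrative; 'Space' and the helpers stand in for the real VirtualSpaceNode
machinery, and the default reservation size is made up:

    #include <algorithm>
    #include <cstddef>

    struct Space { size_t reserved, committed; };

    static bool commit_in(Space& s, size_t min_w, size_t preferred_w) {
      size_t uncommitted = s.reserved - s.committed;
      if (uncommitted < min_w) return false;
      s.committed += std::min(preferred_w, uncommitted);
      return true;
    }

    static bool expand(Space& current, size_t min_w, size_t preferred_w,
                       size_t allowed_w /* from the GC policy */) {
      if (allowed_w < min_w) return false;                // policy refuses
      size_t target = std::min(preferred_w, allowed_w);
      if (commit_in(current, min_w, target)) return true; // grow in place
      current = Space{ std::max<size_t>(32 * 1024, preferred_w), 0 };
      return commit_in(current, min_w, target);           // retry in new space
    }
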
+
 Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
                                            size_t grow_chunks_by_words,
                                            size_t medium_chunk_bunch) {
@@ -1151,63 +1273,27 @@
   // Allocate a chunk out of the current virtual space.
   Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
 
-  if (next == NULL) {
-    // Not enough room in current virtual space.  Try to commit
-    // more space.
-    size_t expand_vs_by_words = MAX2(medium_chunk_bunch,
-                                     grow_chunks_by_words);
-    size_t page_size_words = os::vm_page_size() / BytesPerWord;
-    size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words,
-                                                        page_size_words);
-    bool vs_expanded =
-      expand_by(current_virtual_space(), aligned_expand_vs_by_words);
-    if (!vs_expanded) {
-      // Should the capacity of the metaspaces be expanded for
-      // this allocation?  If it's the virtual space for classes and is
-      // being used for CompressedHeaders, don't allocate a new virtualspace.
-      if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
-        // Get another virtual space.
-        size_t allocation_aligned_expand_words =
-            align_size_up(aligned_expand_vs_by_words, os::vm_allocation_granularity() / BytesPerWord);
-        size_t grow_vs_words =
-            MAX2((size_t)VirtualSpaceSize, allocation_aligned_expand_words);
-        if (grow_vs(grow_vs_words)) {
-          // Got it.  It's on the list now.  Get a chunk from it.
-          assert(current_virtual_space()->expanded_words() == 0,
-              "New virtual space nodes should not have expanded");
-
-          size_t grow_chunks_by_words_aligned = align_size_up(grow_chunks_by_words,
-                                                              page_size_words);
-          // We probably want to expand by aligned_expand_vs_by_words here.
-          expand_by(current_virtual_space(), grow_chunks_by_words_aligned);
-          next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
-        }
-      } else {
-        // Allocation will fail and induce a GC
-        if (TraceMetadataChunkAllocation && Verbose) {
-          gclog_or_tty->print_cr("VirtualSpaceList::get_new_chunk():"
-            " Fail instead of expand the metaspace");
-        }
-      }
-    } else {
-      // The virtual space expanded, get a new chunk
-      next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
-      assert(next != NULL, "Just expanded, should succeed");
-    }
+  if (next != NULL) {
+    return next;
   }
 
-  assert(next == NULL || (next->next() == NULL && next->prev() == NULL),
-         "New chunk is still on some list");
-  return next;
-}
-
-Metachunk* VirtualSpaceList::get_initialization_chunk(size_t chunk_word_size,
-                                                      size_t chunk_bunch) {
-  // Get a chunk from the chunk freelist
-  Metachunk* new_chunk = get_new_chunk(chunk_word_size,
-                                       chunk_word_size,
-                                       chunk_bunch);
-  return new_chunk;
+  // The expand amount is currently only determined by the requested sizes
+  // and not how much committed memory is left in the current virtual space.
+
+  size_t min_word_size       = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
+  size_t preferred_word_size = align_size_up(medium_chunk_bunch,   Metaspace::commit_alignment_words());
+  if (min_word_size >= preferred_word_size) {
+    // Can happen when humongous chunks are allocated.
+    preferred_word_size = min_word_size;
+  }
+
+  bool expanded = expand_by(min_word_size, preferred_word_size);
+  if (expanded) {
+    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
+    assert(next != NULL, "The allocation was expected to succeed after the expansion");
+  }
+
+  return next;
 }
 
 void VirtualSpaceList::print_on(outputStream* st) const {
@@ -1256,96 +1342,96 @@
 // Calculate the amount to increase the high water mark (HWM).
 // Increase by a minimum amount (MinMetaspaceExpansion) so that
 // another expansion is not requested too soon.  If that is not
-// enough to satisfy the allocation (i.e. big enough for a word_size
-// allocation), increase by MaxMetaspaceExpansion.  If that is still
-// not enough, expand by the size of the allocation (word_size) plus
-// some.
-size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) {
-  size_t before_inc = MetaspaceGC::capacity_until_GC();
-  size_t min_delta_words = MinMetaspaceExpansion / BytesPerWord;
-  size_t max_delta_words = MaxMetaspaceExpansion / BytesPerWord;
-  size_t page_size_words = os::vm_page_size() / BytesPerWord;
-  size_t size_delta_words = align_size_up(word_size, page_size_words);
-  size_t delta_words = MAX2(size_delta_words, min_delta_words);
-  if (delta_words > min_delta_words) {
+// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
+// If that is still not enough, expand by the size of the allocation
+// plus some.
+size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
+  size_t min_delta = MinMetaspaceExpansion;
+  size_t max_delta = MaxMetaspaceExpansion;
+  size_t delta = align_size_up(bytes, Metaspace::commit_alignment());
+
+  if (delta <= min_delta) {
+    delta = min_delta;
+  } else if (delta <= max_delta) {
     // Don't want to hit the high water mark on the next
     // allocation so make the delta greater than just enough
     // for this allocation.
-    delta_words = MAX2(delta_words, max_delta_words);
-    if (delta_words > max_delta_words) {
-      // This allocation is large but the next ones are probably not
-      // so increase by the minimum.
-      delta_words = delta_words + min_delta_words;
-    }
+    delta = max_delta;
+  } else {
+    // This allocation is large but the next ones are probably not
+    // so increase by the minimum.
+    delta = delta + min_delta;
   }
-  return delta_words;
+
+  assert_is_size_aligned(delta, Metaspace::commit_alignment());
+
+  return delta;
 }
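
Plugging hypothetical values into the policy above (256K minimum step, 4M
maximum step, 64K commit alignment; the real flag values are platform and
command-line dependent) shows the three regimes: small requests grow by the
minimum step, medium ones overshoot to the maximum step, and large ones grow
by their own size plus the minimum:

    #include <cstddef>

    static size_t align_up(size_t s, size_t a) { return (s + a - 1) & ~(a - 1); }

    static size_t delta_until_gc(size_t bytes) {
      const size_t min_delta = 256 * 1024;
      const size_t max_delta = 4 * 1024 * 1024;
      size_t delta = align_up(bytes, 64 * 1024);
      if (delta <= min_delta)      return min_delta;          // tiny request
      else if (delta <= max_delta) return max_delta;          // medium: overshoot
      else                         return delta + min_delta;  // huge: size + slack
    }
    // delta_until_gc(100 * 1024)      == 256K
    // delta_until_gc(1 * 1024 * 1024) == 4M
    // delta_until_gc(8 * 1024 * 1024) == 8M + 256K
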
 
-bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {
-
-  // If the user wants a limit, impose one.
-  // The reason for someone using this flag is to limit reserved space.  So
-  // for non-class virtual space, compare against virtual spaces that are reserved.
-  // For class virtual space, we only compare against the committed space, not
-  // reserved space, because this is a larger space prereserved for compressed
-  // class pointers.
-  if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) {
-    size_t nonclass_allocated = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
-    size_t class_allocated    = MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
-    size_t real_allocated     = nonclass_allocated + class_allocated;
-    if (real_allocated >= MaxMetaspaceSize) {
+size_t MetaspaceGC::capacity_until_GC() {
+  size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
+  assert(value >= MetaspaceSize, "Not initialized properly?");
+  return value;
+}
+
+size_t MetaspaceGC::inc_capacity_until_GC(size_t v) {
+  assert_is_size_aligned(v, Metaspace::commit_alignment());
+
+  return (size_t)Atomic::add_ptr(v, &_capacity_until_GC);
+}
+
+size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
+  assert_is_size_aligned(v, Metaspace::commit_alignment());
+
+  return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
+}
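
Atomic::add_ptr returns the updated value. The equivalent pattern with
standard C++ atomics, for illustration (fetch_add returns the old value, so
the delta is added back to recover the new one):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    static std::atomic<intptr_t> capacity_until_gc{0};

    static size_t inc_capacity(size_t v) {
      return (size_t)(capacity_until_gc.fetch_add((intptr_t)v) + (intptr_t)v);
    }
    static size_t dec_capacity(size_t v) {
      return (size_t)(capacity_until_gc.fetch_sub((intptr_t)v) - (intptr_t)v);
    }

The rewritten expand_and_allocate later in this change relies on this shape:
both before_inc and after_inc are derived from the single add's return value,
so no concurrent update can slip in between a separate read and the increment.
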
+
+bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
+  // Check if the compressed class space is full.
+  if (is_class && Metaspace::using_class_space()) {
+    size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
+    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
       return false;
     }
   }
 
-  // Class virtual space should always be expanded.  Call GC for the other
-  // metadata virtual space.
-  if (Metaspace::using_class_space() &&
-      (vsl == Metaspace::class_space_list())) return true;
-
-  // If this is part of an allocation after a GC, expand
-  // unconditionally.
-  if (MetaspaceGC::expand_after_GC()) {
-    return true;
+  // Check if the user has imposed a limit on the metaspace memory.
+  size_t committed_bytes = MetaspaceAux::committed_bytes();
+  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
+    return false;
   }
 
-
-  // If the capacity is below the minimum capacity, allow the
-  // expansion.  Also set the high-water-mark (capacity_until_GC)
-  // to that minimum capacity so that a GC will not be induced
-  // until that minimum capacity is exceeded.
-  size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes();
-  size_t metaspace_size_bytes = MetaspaceSize;
-  if (committed_capacity_bytes < metaspace_size_bytes ||
-      capacity_until_GC() == 0) {
-    set_capacity_until_GC(metaspace_size_bytes);
-    return true;
-  } else {
-    if (committed_capacity_bytes < capacity_until_GC()) {
-      return true;
-    } else {
-      if (TraceMetadataChunkAllocation && Verbose) {
-        gclog_or_tty->print_cr("  allocation request size " SIZE_FORMAT
-                        "  capacity_until_GC " SIZE_FORMAT
-                        "  allocated_capacity_bytes " SIZE_FORMAT,
-                        word_size,
-                        capacity_until_GC(),
-                        MetaspaceAux::allocated_capacity_bytes());
-      }
-      return false;
-    }
+  return true;
+}
+
+size_t MetaspaceGC::allowed_expansion() {
+  size_t committed_bytes = MetaspaceAux::committed_bytes();
+
+  size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
+
+  // Always grant expansion if we are initializing the JVM,
+  // or if the GC_locker is preventing GCs.
+  if (!is_init_completed() || GC_locker::is_active_and_needs_gc()) {
+    return left_until_max / BytesPerWord;
   }
+
+  size_t capacity_until_gc = capacity_until_GC();
+
+  if (capacity_until_gc <= committed_bytes) {
+    return 0;
+  }
+
+  size_t left_until_GC = capacity_until_gc - committed_bytes;
+  size_t left_to_commit = MIN2(left_until_GC, left_until_max);
+
+  return left_to_commit / BytesPerWord;
 }
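
A worked example with hypothetical numbers: 120M committed, the HWM
(capacity_until_GC) at 128M, and MaxMetaspaceSize at 256M leaves 8M until the
HWM and 136M until the hard limit, so 8M worth of words may still be committed
without triggering a GC:

    #include <cstddef>

    static size_t allowed_expansion_words() {
      const size_t committed      = 120u << 20;   // 120M committed
      const size_t capacity_to_gc = 128u << 20;   // HWM at 128M
      const size_t max_metaspace  = 256u << 20;   // hard limit
      const size_t bytes_per_word = 8;            // 64-bit platform assumed

      size_t left_until_max = max_metaspace - committed;    // 136M
      if (capacity_to_gc <= committed) return 0;            // must GC first
      size_t left_until_gc  = capacity_to_gc - committed;   // 8M
      size_t left_to_commit = left_until_gc < left_until_max
                                ? left_until_gc : left_until_max;
      return left_to_commit / bytes_per_word;               // 8M of words
    }
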
 
-
-
 void MetaspaceGC::compute_new_size() {
   assert(_shrink_factor <= 100, "invalid shrink factor");
   uint current_shrink_factor = _shrink_factor;
   _shrink_factor = 0;
 
-  // Until a faster way of calculating the "used" quantity is implemented,
-  // use "capacity".
   const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
   const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
 
@@ -1377,9 +1463,10 @@
     // If we have less capacity below the metaspace HWM, then
     // increment the HWM.
     size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
+    expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
     // Don't expand unless it's significant
     if (expand_bytes >= MinMetaspaceExpansion) {
-      MetaspaceGC::set_capacity_until_GC(capacity_until_GC + expand_bytes);
+      MetaspaceGC::inc_capacity_until_GC(expand_bytes);
     }
     if (PrintGCDetails && Verbose) {
       size_t new_capacity_until_GC = capacity_until_GC;
@@ -1436,6 +1523,9 @@
       // on the third call, and 100% by the fourth call.  But if we recompute
       // size without shrinking, it goes back to 0%.
       shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
+
+      shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
+
       assert(shrink_bytes <= max_shrink_bytes,
         err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
           shrink_bytes, max_shrink_bytes));
@@ -1467,7 +1557,7 @@
   // Don't shrink unless it's significant
   if (shrink_bytes >= MinMetaspaceExpansion &&
       ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
-    MetaspaceGC::set_capacity_until_GC(capacity_until_GC - shrink_bytes);
+    MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
   }
 }
 
@@ -1700,7 +1790,6 @@
     assert(free_list != NULL, "Sanity check");
 
     chunk = free_list->head();
-    debug_only(Metachunk* debug_head = chunk;)
 
     if (chunk == NULL) {
       return NULL;
@@ -1709,9 +1798,6 @@
     // Remove the chunk as the head of the list.
     free_list->remove_chunk(chunk);
 
-    // Chunk is being removed from the chunks free list.
-    dec_free_chunks_total(chunk->capacity_word_size());
-
     if (TraceMetadataChunkAllocation && Verbose) {
       gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
                              PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
@@ -1722,21 +1808,22 @@
       word_size,
       FreeBlockDictionary<Metachunk>::atLeast);
 
-    if (chunk != NULL) {
-      if (TraceMetadataHumongousAllocation) {
-        size_t waste = chunk->word_size() - word_size;
-        gclog_or_tty->print_cr("Free list allocate humongous chunk size "
-                               SIZE_FORMAT " for requested size " SIZE_FORMAT
-                               " waste " SIZE_FORMAT,
-                               chunk->word_size(), word_size, waste);
-      }
-      // Chunk is being removed from the chunks free list.
-      dec_free_chunks_total(chunk->capacity_word_size());
-    } else {
+    if (chunk == NULL) {
       return NULL;
     }
+
+    if (TraceMetadataHumongousAllocation) {
+      size_t waste = chunk->word_size() - word_size;
+      gclog_or_tty->print_cr("Free list allocate humongous chunk size "
+                             SIZE_FORMAT " for requested size " SIZE_FORMAT
+                             " waste " SIZE_FORMAT,
+                             chunk->word_size(), word_size, waste);
+    }
   }
 
+  // Chunk is being removed from the chunks free list.
+  dec_free_chunks_total(chunk->capacity_word_size());
+
   // Remove it from the links to this freelist
   chunk->set_next(NULL);
   chunk->set_prev(NULL);
@@ -1977,6 +2064,15 @@
   return chunk_word_size;
 }
 
+void SpaceManager::track_metaspace_memory_usage() {
+  if (is_init_completed()) {
+    if (is_class()) {
+      MemoryService::track_compressed_class_memory_usage();
+    }
+    MemoryService::track_metaspace_memory_usage();
+  }
+}
+
 MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
   assert(vs_list()->current_virtual_space() != NULL,
          "Should have been set");
@@ -2002,15 +2098,24 @@
   size_t grow_chunks_by_words = calc_chunk_size(word_size);
   Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
 
+  if (next != NULL) {
+    Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
+  }
+
+  MetaWord* mem = NULL;
+
   // If a chunk was available, add it to the in-use chunk list
   // and do an allocation from it.
   if (next != NULL) {
-    Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
     // Add to this manager's list of chunks in use.
     add_chunk(next, false);
-    return next->allocate(word_size);
+    mem = next->allocate(word_size);
   }
-  return NULL;
+
+  // Track metaspace memory usage statistics.
+  track_metaspace_memory_usage();
+
+  return mem;
 }
 
 void SpaceManager::print_on(outputStream* st) const {
@@ -2366,6 +2471,7 @@
     inc_used_metrics(word_size);
     return current_chunk()->allocate(word_size); // caller handles null result
   }
+
   if (current_chunk() != NULL) {
     result = current_chunk()->allocate(word_size);
   }
@@ -2373,7 +2479,8 @@
   if (result == NULL) {
     result = grow_and_allocate(word_size);
   }
-  if (result != 0) {
+
+  if (result != NULL) {
     inc_used_metrics(word_size);
     assert(result != (MetaWord*) chunks_in_use(MediumIndex),
            "Head of the list is being allocated");
@@ -2639,24 +2746,26 @@
 void MetaspaceAux::print_on(outputStream* out) {
   Metaspace::MetadataType nct = Metaspace::NonClassType;
 
-  out->print_cr(" Metaspace total "
-                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
-                " reserved " SIZE_FORMAT "K",
-                allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_bytes()/K);
-
-  out->print_cr("  data space     "
-                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
-                " reserved " SIZE_FORMAT "K",
-                allocated_capacity_bytes(nct)/K,
-                allocated_used_bytes(nct)/K,
-                reserved_bytes(nct)/K);
+  out->print_cr(" Metaspace       "
+                "used "      SIZE_FORMAT "K, "
+                "capacity "  SIZE_FORMAT "K, "
+                "committed " SIZE_FORMAT "K, "
+                "reserved "  SIZE_FORMAT "K",
+                allocated_used_bytes()/K,
+                allocated_capacity_bytes()/K,
+                committed_bytes()/K,
+                reserved_bytes()/K);
+
   if (Metaspace::using_class_space()) {
     Metaspace::MetadataType ct = Metaspace::ClassType;
     out->print_cr("  class space    "
-                  SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
-                  " reserved " SIZE_FORMAT "K",
+                  "used "      SIZE_FORMAT "K, "
+                  "capacity "  SIZE_FORMAT "K, "
+                  "committed " SIZE_FORMAT "K, "
+                  "reserved "  SIZE_FORMAT "K",
+                  allocated_used_bytes(ct)/K,
                   allocated_capacity_bytes(ct)/K,
-                  allocated_used_bytes(ct)/K,
+                  committed_bytes(ct)/K,
                   reserved_bytes(ct)/K);
   }
 }
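
With this layout, a metaspace summary line in a verbose GC log takes the
following shape (all numbers invented for illustration):

     Metaspace       used 2425K, capacity 4498K, committed 4864K, reserved 8192K
      class space    used 262K, capacity 386K, committed 512K, reserved 1048576K
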
@@ -2808,6 +2917,9 @@
 size_t Metaspace::_first_chunk_word_size = 0;
 size_t Metaspace::_first_class_chunk_word_size = 0;
 
+size_t Metaspace::_commit_alignment = 0;
+size_t Metaspace::_reserve_alignment = 0;
+
 Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
   initialize(lock, type);
 }
@@ -2869,21 +2981,30 @@
   assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
   assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
          "Metaspace size is too big");
+  assert_is_ptr_aligned(requested_addr,          _reserve_alignment);
+  assert_is_ptr_aligned(cds_base,                _reserve_alignment);
+  assert_is_size_aligned(class_metaspace_size(), _reserve_alignment);
+
+  // Don't use large pages for the class space.
+  bool large_pages = false;
 
   ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(),
-                                             os::vm_allocation_granularity(),
-                                             false, requested_addr, 0);
+                                             _reserve_alignment,
+                                             large_pages,
+                                             requested_addr, 0);
   if (!metaspace_rs.is_reserved()) {
     if (UseSharedSpaces) {
+      size_t increment = align_size_up(1*G, _reserve_alignment);
+
       // Keep trying to allocate the metaspace, increasing the requested_addr
       // by 1GB each time, until we reach an address that will no longer allow
       // use of CDS with compressed klass pointers.
       char *addr = requested_addr;
-      while (!metaspace_rs.is_reserved() && (addr + 1*G > addr) &&
-             can_use_cds_with_metaspace_addr(addr + 1*G, cds_base)) {
-        addr = addr + 1*G;
+      while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
+             can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
+        addr = addr + increment;
         metaspace_rs = ReservedSpace(class_metaspace_size(),
-                                     os::vm_allocation_granularity(), false, addr, 0);
+                                     _reserve_alignment, large_pages, addr, 0);
       }
     }
 
@@ -2894,7 +3015,7 @@
     // So, UseCompressedClassPointers cannot be turned off at this point.
     if (!metaspace_rs.is_reserved()) {
       metaspace_rs = ReservedSpace(class_metaspace_size(),
-                                   os::vm_allocation_granularity(), false);
+                                   _reserve_alignment, large_pages);
       if (!metaspace_rs.is_reserved()) {
         vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes",
                                               class_metaspace_size()));
@@ -2933,34 +3054,96 @@
   assert(using_class_space(), "Must be using class space");
   _class_space_list = new VirtualSpaceList(rs);
   _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
+
+  if (!_class_space_list->initialization_succeeded()) {
+    vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
+  }
 }
 
 #endif
 
+// Align down. If aligning down results in 0, return 'alignment'.
+static size_t restricted_align_down(size_t size, size_t alignment) {
+  return MAX2(alignment, align_size_down_(size, alignment));
+}
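
For example, with a 64K alignment (values hypothetical), 10K aligns down to 0
and is therefore floored at 64K, while 100K aligns down to 64K:

    #include <cstddef>

    static size_t align_down(size_t s, size_t a) { return s & ~(a - 1); }
    static size_t floored_align_down(size_t s, size_t a) {
      size_t d = align_down(s, a);
      return d > a ? d : a;          // never below one alignment unit
    }
    // floored_align_down( 10 * 1024, 64 * 1024) == 64K (floored)
    // floored_align_down(100 * 1024, 64 * 1024) == 64K (aligned down)
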
+
+void Metaspace::ergo_initialize() {
+  if (DumpSharedSpaces) {
+    // Using large pages when dumping the shared archive is currently not implemented.
+    FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
+  }
+
+  size_t page_size = os::vm_page_size();
+  if (UseLargePages && UseLargePagesInMetaspace) {
+    page_size = os::large_page_size();
+  }
+
+  _commit_alignment  = page_size;
+  _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
+
+  // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since that would
+  // clobber the record of whether MaxMetaspaceSize was set on the command line.
+  // This information is needed later to conform to the specification of the
+  // java.lang.management.MemoryUsage API.
+  //
+  // Ideally, we would be able to set the default value of MaxMetaspaceSize in
+  // globals.hpp to the aligned value, but this is not possible, since the
+  // alignment depends on other flags being parsed.
+  MaxMetaspaceSize = restricted_align_down(MaxMetaspaceSize, _reserve_alignment);
+
+  if (MetaspaceSize > MaxMetaspaceSize) {
+    MetaspaceSize = MaxMetaspaceSize;
+  }
+
+  MetaspaceSize = restricted_align_down(MetaspaceSize, _commit_alignment);
+
+  assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
+
+  if (MetaspaceSize < 256*K) {
+    vm_exit_during_initialization("Too small initial Metaspace size");
+  }
+
+  MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, _commit_alignment);
+  MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, _commit_alignment);
+
+  CompressedClassSpaceSize = restricted_align_down(CompressedClassSpaceSize, _reserve_alignment);
+  set_class_metaspace_size(CompressedClassSpaceSize);
+}
+
 void Metaspace::global_initialize() {
   // Initialize the alignment for shared spaces.
   int max_alignment = os::vm_page_size();
   size_t cds_total = 0;
 
-  set_class_metaspace_size(align_size_up(CompressedClassSpaceSize,
-                                         os::vm_allocation_granularity()));
-
   MetaspaceShared::set_max_alignment(max_alignment);
 
   if (DumpSharedSpaces) {
-    SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
+    SharedReadOnlySize  = align_size_up(SharedReadOnlySize,  max_alignment);
     SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
-    SharedMiscDataSize  = align_size_up(SharedMiscDataSize, max_alignment);
-    SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize, max_alignment);
+    SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
+    SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);
 
     // Initialize with the sum of the shared space sizes.  The read-only
     // and read write metaspace chunks will be allocated out of this and the
     // remainder is the misc code and data chunks.
     cds_total = FileMapInfo::shared_spaces_size();
+    cds_total = align_size_up(cds_total, _reserve_alignment);
     _space_list = new VirtualSpaceList(cds_total/wordSize);
     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
 
+    if (!_space_list->initialization_succeeded()) {
+      vm_exit_during_initialization("Unable to dump shared archive.", NULL);
+    }
+
 #ifdef _LP64
+    if (cds_total + class_metaspace_size() > (uint64_t)max_juint) {
+      vm_exit_during_initialization("Unable to dump shared archive.",
+          err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
+                  SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
+                  "klass limit: " SIZE_FORMAT, cds_total, class_metaspace_size(),
+                  cds_total + class_metaspace_size(), (size_t)max_juint));
+    }
+
     // Set the compressed klass pointer base so that decoding of these pointers works
     // properly when creating the shared archive.
     assert(UseCompressedOops && UseCompressedClassPointers,
@@ -2971,9 +3154,6 @@
                              _space_list->current_virtual_space()->bottom());
     }
 
-    // Set the shift to zero.
-    assert(class_metaspace_size() < (uint64_t)(max_juint) - cds_total,
-           "CDS region is too large");
     Universe::set_narrow_klass_shift(0);
 #endif
 
@@ -2992,12 +3172,12 @@
       // Map in spaces now also
       if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
         FileMapInfo::set_current_info(mapinfo);
+        cds_total = FileMapInfo::shared_spaces_size();
+        cds_address = (address)mapinfo->region_base(0);
       } else {
         assert(!mapinfo->is_open() && !UseSharedSpaces,
                "archive file not closed or shared spaces not disabled.");
       }
-      cds_total = FileMapInfo::shared_spaces_size();
-      cds_address = (address)mapinfo->region_base(0);
     }
 
 #ifdef _LP64
@@ -3005,7 +3185,9 @@
     // above the heap and above the CDS area (if it exists).
     if (using_class_space()) {
       if (UseSharedSpaces) {
-        allocate_metaspace_compressed_klass_ptrs((char *)(cds_address + cds_total), cds_address);
+        char* cds_end = (char*)(cds_address + cds_total);
+        cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
+        allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
       } else {
         allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0);
       }
@@ -3023,11 +3205,19 @@
     _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
     // Arbitrarily set the initial virtual space to a multiple
     // of the boot class loader size.
-    size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();
+    size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
+    word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
+
     // Initialize the list of virtual spaces.
     _space_list = new VirtualSpaceList(word_size);
     _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
+
+    if (!_space_list->initialization_succeeded()) {
+      vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
+    }
   }
+
+  MetaspaceGC::initialize();
 }
 
 Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
@@ -3039,7 +3229,7 @@
     return chunk;
   }
 
-  return get_space_list(mdtype)->get_initialization_chunk(chunk_word_size, chunk_bunch);
+  return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch);
 }
 
 void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
@@ -3104,7 +3294,7 @@
 MetaWord* Metaspace::allocate(size_t word_size, MetadataType mdtype) {
   // DumpSharedSpaces doesn't use class metadata area (yet)
   // Also, don't use class_vsm() unless UseCompressedClassPointers is true.
-  if (mdtype == ClassType && using_class_space()) {
+  if (is_class_space_allocation(mdtype)) {
     return  class_vsm()->allocate(word_size);
   } else {
     return  vsm()->allocate(word_size);
@@ -3112,19 +3302,18 @@
 }
 
 MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
-  MetaWord* result;
-  MetaspaceGC::set_expand_after_GC(true);
-  size_t before_inc = MetaspaceGC::capacity_until_GC();
-  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size) * BytesPerWord;
-  MetaspaceGC::inc_capacity_until_GC(delta_bytes);
+  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
+  assert(delta_bytes > 0, "Must be");
+
+  size_t after_inc = MetaspaceGC::inc_capacity_until_GC(delta_bytes);
+  size_t before_inc = after_inc - delta_bytes;
+
   if (PrintGCDetails && Verbose) {
     gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
-      " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC());
+        " to " SIZE_FORMAT, before_inc, after_inc);
   }
 
-  result = allocate(word_size, mdtype);
-
-  return result;
+  return allocate(word_size, mdtype);
 }
 
 // Space allocated in the Metaspace.  This may
@@ -3206,6 +3395,7 @@
   }
 }
 
+
 Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                               bool read_only, MetaspaceObj::Type type, TRAPS) {
   if (HAS_PENDING_EXCEPTION) {
@@ -3213,20 +3403,16 @@
     return NULL;  // caller does a CHECK_NULL too
   }
 
-  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
-
-  // SSS: Should we align the allocations and make sure the sizes are aligned.
-  MetaWord* result = NULL;
-
   assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
         "ClassLoaderData::the_null_class_loader_data() should have been used.");
+
   // Allocate in metaspaces without taking out a lock, because it deadlocks
   // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
   // to revisit this for application class data sharing.
   if (DumpSharedSpaces) {
     assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
     Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
-    result = space->allocate(word_size, NonClassType);
+    MetaWord* result = space->allocate(word_size, NonClassType);
     if (result == NULL) {
       report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
     } else {
@@ -3235,42 +3421,64 @@
     return Metablock::initialize(result, word_size);
   }
 
-  result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
+  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
+
+  // Try to allocate metadata.
+  MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
 
   if (result == NULL) {
-    // Try to clean out some memory and retry.
-    result =
-      Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
-        loader_data, word_size, mdtype);
-
-    // If result is still null, we are out of memory.
-    if (result == NULL) {
-      if (Verbose && TraceMetadataChunkAllocation) {
-        gclog_or_tty->print_cr("Metaspace allocation failed for size "
-          SIZE_FORMAT, word_size);
-        if (loader_data->metaspace_or_null() != NULL) loader_data->dump(gclog_or_tty);
-        MetaspaceAux::dump(gclog_or_tty);
-      }
-      // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
-      const char* space_string = (mdtype == ClassType) ? "Compressed class space" :
-                                                         "Metadata space";
-      report_java_out_of_memory(space_string);
-
-      if (JvmtiExport::should_post_resource_exhausted()) {
-        JvmtiExport::post_resource_exhausted(
-            JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
-            space_string);
-      }
-      if (mdtype == ClassType) {
-        THROW_OOP_0(Universe::out_of_memory_error_class_metaspace());
-      } else {
-        THROW_OOP_0(Universe::out_of_memory_error_metaspace());
-      }
+    // Allocation failed.
+    if (is_init_completed()) {
+      // Only start a GC if bootstrapping has completed.
+
+      // Try to clean out some memory and retry.
+      result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
+          loader_data, word_size, mdtype);
     }
   }
+
+  if (result == NULL) {
+    report_metadata_oome(loader_data, word_size, mdtype, THREAD);
+    // Will not reach here.
+    return NULL;
+  }
+
   return Metablock::initialize(result, word_size);
 }
 
+void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetadataType mdtype, TRAPS) {
+  // The caller's allocation returned NULL: we are out of memory.
+  if (Verbose && TraceMetadataChunkAllocation) {
+    gclog_or_tty->print_cr("Metaspace allocation failed for size "
+        SIZE_FORMAT, word_size);
+    if (loader_data->metaspace_or_null() != NULL) {
+      loader_data->dump(gclog_or_tty);
+    }
+    MetaspaceAux::dump(gclog_or_tty);
+  }
+
+  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
+  const char* space_string = is_class_space_allocation(mdtype) ? "Compressed class space" :
+                                                                 "Metadata space";
+  report_java_out_of_memory(space_string);
+
+  if (JvmtiExport::should_post_resource_exhausted()) {
+    JvmtiExport::post_resource_exhausted(
+        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
+        space_string);
+  }
+
+  if (!is_init_completed()) {
+    vm_exit_during_initialization("OutOfMemoryError", space_string);
+  }
+
+  if (is_class_space_allocation(mdtype)) {
+    THROW_OOP(Universe::out_of_memory_error_class_metaspace());
+  } else {
+    THROW_OOP(Universe::out_of_memory_error_metaspace());
+  }
+}
+
 void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
   assert(DumpSharedSpaces, "sanity");
 
--- a/src/share/vm/memory/metaspace.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/memory/metaspace.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -87,9 +87,10 @@
   friend class MetaspaceAux;
 
  public:
-  enum MetadataType {ClassType = 0,
-                     NonClassType = ClassType + 1,
-                     MetadataTypeCount = ClassType + 2
+  enum MetadataType {
+    ClassType,
+    NonClassType,
+    MetadataTypeCount
   };
   enum MetaspaceType {
     StandardMetaspaceType,
@@ -103,6 +104,9 @@
  private:
   void initialize(Mutex* lock, MetaspaceType type);
 
+  // Get the first chunk for a Metaspace.  Used for
+  // special cases such as the boot class loader, reflection
+  // class loader and anonymous class loader.
   Metachunk* get_initialization_chunk(MetadataType mdtype,
                                       size_t chunk_word_size,
                                       size_t chunk_bunch);
@@ -123,6 +127,9 @@
   static size_t _first_chunk_word_size;
   static size_t _first_class_chunk_word_size;
 
+  static size_t _commit_alignment;
+  static size_t _reserve_alignment;
+
   SpaceManager* _vsm;
   SpaceManager* vsm() const { return _vsm; }
 
@@ -191,12 +198,17 @@
   Metaspace(Mutex* lock, MetaspaceType type);
   ~Metaspace();
 
-  // Initialize globals for Metaspace
+  static void ergo_initialize();
   static void global_initialize();
 
   static size_t first_chunk_word_size() { return _first_chunk_word_size; }
   static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; }
 
+  static size_t reserve_alignment()       { return _reserve_alignment; }
+  static size_t reserve_alignment_words() { return _reserve_alignment / BytesPerWord; }
+  static size_t commit_alignment()        { return _commit_alignment; }
+  static size_t commit_alignment_words()  { return _commit_alignment / BytesPerWord; }
+
   char*  bottom() const;
   size_t used_words_slow(MetadataType mdtype) const;
   size_t free_words_slow(MetadataType mdtype) const;
@@ -219,6 +231,9 @@
   static void purge(MetadataType mdtype);
   static void purge();
 
+  static void report_metadata_oome(ClassLoaderData* loader_data, size_t word_size,
+                                   MetadataType mdtype, TRAPS);
+
   void print_on(outputStream* st) const;
   // Debugging support
   void verify();
@@ -235,6 +250,9 @@
     return NOT_LP64(false) LP64_ONLY(UseCompressedClassPointers && !DumpSharedSpaces);
   }
 
+  static bool is_class_space_allocation(MetadataType mdtype) {
+    return mdtype == ClassType && using_class_space();
+  }
 };
 
 class MetaspaceAux : AllStatic {
@@ -349,17 +367,10 @@
 
 class MetaspaceGC : AllStatic {
 
-  // The current high-water-mark for inducing a GC.  When
-  // the capacity of all space in the virtual lists reaches this value,
-  // a GC is induced and the value is increased.  This should be changed
-  // to the space actually used for allocations to avoid affects of
-  // fragmentation losses to partially used chunks.  Size is in words.
-  static size_t _capacity_until_GC;
-
-  // After a GC is done any allocation that fails should try to expand
-  // the capacity of the Metaspaces.  This flag is set during attempts
-  // to allocate in the VMGCOperation that does the GC.
-  static bool _expand_after_GC;
+  // The current high-water-mark for inducing a GC.
+  // When committed memory of all metaspaces reaches this value,
+  // a GC is induced and the value is increased. Size is in bytes.
+  static volatile intptr_t _capacity_until_GC;
 
   // For a CMS collection, signal that a concurrent collection should
   // be started.
@@ -367,20 +378,16 @@
 
   static uint _shrink_factor;
 
-  static void set_capacity_until_GC(size_t v) { _capacity_until_GC = v; }
-
   static size_t shrink_factor() { return _shrink_factor; }
   void set_shrink_factor(uint v) { _shrink_factor = v; }
 
  public:
 
-  static size_t capacity_until_GC() { return _capacity_until_GC; }
-  static void inc_capacity_until_GC(size_t v) { _capacity_until_GC += v; }
-  static void dec_capacity_until_GC(size_t v) {
-    _capacity_until_GC = _capacity_until_GC > v ? _capacity_until_GC - v : 0;
-  }
-  static bool expand_after_GC()           { return _expand_after_GC; }
-  static void set_expand_after_GC(bool v) { _expand_after_GC = v; }
+  static void initialize() { _capacity_until_GC = MetaspaceSize; }
+
+  static size_t capacity_until_GC();
+  static size_t inc_capacity_until_GC(size_t v);
+  static size_t dec_capacity_until_GC(size_t v);
 
   static bool should_concurrent_collect() { return _should_concurrent_collect; }
   static void set_should_concurrent_collect(bool v) {
@@ -388,11 +395,14 @@
   }
 
   // The amount to increase the high-water-mark (_capacity_until_GC)
-  static size_t delta_capacity_until_GC(size_t word_size);
+  static size_t delta_capacity_until_GC(size_t bytes);
 
-  // It is expected that this will be called when the current capacity
-  // has been used and a GC should be considered.
-  static bool should_expand(VirtualSpaceList* vsl, size_t word_size);
+  // Tells if we can expand metaspace without hitting set limits.
+  static bool can_expand(size_t words, bool is_class);
+
+  // Returns the amount by which we can expand without triggering a GC,
+  // measured in words.
+  static size_t allowed_expansion();
 
   // Calculate the new high-water mark at which to induce
   // a GC.
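
(Aside: the can_expand()/allowed_expansion() pair replaces should_expand(VirtualSpaceList*, size_t). A hypothetical caller -- not part of this changeset, names and locking elided -- would clamp its growth request before committing memory:

    // Hypothetical usage sketch only.
    size_t grow_words = MIN2(requested_words, MetaspaceGC::allowed_expansion());
    if (grow_words > 0 && MetaspaceGC::can_expand(grow_words, /* is_class */ false)) {
      // ... expand the virtual space by grow_words under the expand lock ...
    }
)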
--- a/src/share/vm/memory/referenceProcessor.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/memory/referenceProcessor.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -367,7 +367,7 @@
       next_d = java_lang_ref_Reference::discovered(obj);
       if (TraceReferenceGC && PrintGCDetails) {
         gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
-                               obj, next_d);
+                               (void *)obj, (void *)next_d);
       }
       assert(java_lang_ref_Reference::next(obj) == NULL,
              "Reference not active; should not be discovered");
@@ -392,7 +392,7 @@
       next_d = java_lang_ref_Reference::discovered(obj);
       if (TraceReferenceGC && PrintGCDetails) {
         gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
-                               obj, next_d);
+                               (void *)obj, (void *)next_d);
       }
       assert(java_lang_ref_Reference::next(obj) == NULL,
              "The reference should not be enqueued");
@@ -562,7 +562,7 @@
         !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) {
       if (TraceReferenceGC) {
         gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s"  ") by policy",
-                               iter.obj(), iter.obj()->klass()->internal_name());
+                               (void *)iter.obj(), iter.obj()->klass()->internal_name());
       }
       // Remove Reference object from list
       iter.remove();
@@ -601,7 +601,7 @@
     if (iter.is_referent_alive()) {
       if (TraceReferenceGC) {
         gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
-                               iter.obj(), iter.obj()->klass()->internal_name());
+                               (void *)iter.obj(), iter.obj()->klass()->internal_name());
       }
       // The referent is reachable after all.
       // Remove Reference object from list.
@@ -687,7 +687,7 @@
     if (TraceReferenceGC) {
       gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
                              clear_referent ? "cleared " : "",
-                             iter.obj(), iter.obj()->klass()->internal_name());
+                             (void *)iter.obj(), iter.obj()->klass()->internal_name());
     }
     assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
     iter.next();
@@ -1003,7 +1003,7 @@
           gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
             INTPTR_FORMAT " with next field: " INTPTR_FORMAT
             " and referent: " INTPTR_FORMAT,
-            iter.obj(), next, iter.referent());
+            (void *)iter.obj(), (void *)next, (void *)iter.referent());
         }
       )
       // Remove Reference object from list
@@ -1103,14 +1103,14 @@
 
     if (TraceReferenceGC) {
       gclog_or_tty->print_cr("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
-                             obj, obj->klass()->internal_name());
+                             (void *)obj, obj->klass()->internal_name());
     }
   } else {
     // If retest was non NULL, another thread beat us to it:
     // The reference has already been discovered...
     if (TraceReferenceGC) {
       gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
-                             obj, obj->klass()->internal_name());
+                             (void *)obj, obj->klass()->internal_name());
     }
   }
 }
@@ -1125,7 +1125,7 @@
   assert(da ? referent->is_oop() : referent->is_oop_or_null(),
          err_msg("Bad referent " INTPTR_FORMAT " found in Reference "
                  INTPTR_FORMAT " during %satomic discovery ",
-                 (intptr_t)referent, (intptr_t)obj, da ? "" : "non-"));
+                 (void *)referent, (void *)obj, da ? "" : "non-"));
 }
 #endif
 
@@ -1205,7 +1205,7 @@
     // The reference has already been discovered...
     if (TraceReferenceGC) {
       gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
-                             obj, obj->klass()->internal_name());
+                             (void *)obj, obj->klass()->internal_name());
     }
     if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
       // assumes that an object is not processed twice;
@@ -1273,7 +1273,7 @@
 
     if (TraceReferenceGC) {
       gclog_or_tty->print_cr("Discovered reference (" INTPTR_FORMAT ": %s)",
-                                obj, obj->klass()->internal_name());
+                                (void *)obj, obj->klass()->internal_name());
     }
   }
   assert(obj->is_oop(), "Discovered a bad reference");
@@ -1372,7 +1372,7 @@
       // active; we need to trace and mark its cohort.
       if (TraceReferenceGC) {
         gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)",
-                               iter.obj(), iter.obj()->klass()->internal_name());
+                               (void *)iter.obj(), iter.obj()->klass()->internal_name());
       }
       // Remove Reference object from list
       iter.remove();
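
(Aside on the (void *) casts throughout this file: INTPTR_FORMAT expands to a pointer-sized conversion specifier, and in builds where oop is a class type (CHECK_UNHANDLED_OOPS) passing an oop straight through print_cr's varargs is not well-defined. Casting first hands the vararg machinery a plain pointer. Sketch of the pattern:

    oop obj = iter.obj();
    // Passing the class-typed oop itself through "..." is not well-defined;
    // cast so the vararg sees a raw void*.
    gclog_or_tty->print_cr("obj " INTPTR_FORMAT, (void *)obj);
)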
--- a/src/share/vm/oops/constantPool.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/oops/constantPool.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -1918,7 +1918,7 @@
     st->print_cr(" - holder: " INTPTR_FORMAT, pool_holder());
   }
   st->print_cr(" - cache: " INTPTR_FORMAT, cache());
-  st->print_cr(" - resolved_references: " INTPTR_FORMAT, resolved_references());
+  st->print_cr(" - resolved_references: " INTPTR_FORMAT, (void *)resolved_references());
   st->print_cr(" - reference_map: " INTPTR_FORMAT, reference_map());
 
   for (int index = 1; index < length(); index++) {      // Index 0 is unused
--- a/src/share/vm/oops/cpCache.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/oops/cpCache.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -306,8 +306,8 @@
   if (TraceInvokeDynamic) {
     tty->print_cr("set_method_handle bc=%d appendix="PTR_FORMAT"%s method_type="PTR_FORMAT"%s method="PTR_FORMAT" ",
                   invoke_code,
-                  (intptr_t)appendix(),    (has_appendix    ? "" : " (unused)"),
-                  (intptr_t)method_type(), (has_method_type ? "" : " (unused)"),
+                  (void *)appendix(),    (has_appendix    ? "" : " (unused)"),
+                  (void *)method_type(), (has_method_type ? "" : " (unused)"),
                   (intptr_t)adapter());
     adapter->print();
     if (has_appendix)  appendix()->print();
--- a/src/share/vm/oops/instanceKlass.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/oops/instanceKlass.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -106,7 +106,7 @@
       len = name->utf8_length();                                 \
     }                                                            \
     HS_DTRACE_PROBE4(hotspot, class__initialization__##type,     \
-      data, len, (clss)->class_loader(), thread_type);           \
+      data, len, SOLARIS_ONLY((void *))(clss)->class_loader(), thread_type);           \
   }
 
 #define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait) \
@@ -119,7 +119,7 @@
       len = name->utf8_length();                                 \
     }                                                            \
     HS_DTRACE_PROBE5(hotspot, class__initialization__##type,     \
-      data, len, (clss)->class_loader(), thread_type, wait);     \
+      data, len, SOLARIS_ONLY((void *))(clss)->class_loader(), thread_type, wait);     \
   }
 #else /* USDT2 */
 
@@ -238,6 +238,13 @@
   }
 }
 
+// create a new array of vtable_indices for default methods
+Array<int>* InstanceKlass::create_new_default_vtable_indices(int len, TRAPS) {
+  Array<int>* vtable_indices = MetadataFactory::new_array<int>(class_loader_data(), len, CHECK_NULL);
+  assert(default_vtable_indices() == NULL, "only create once");
+  set_default_vtable_indices(vtable_indices);
+  return vtable_indices;
+}
 
 InstanceKlass::InstanceKlass(int vtable_len,
                              int itable_len,
@@ -263,6 +270,8 @@
   set_array_klasses(NULL);
   set_methods(NULL);
   set_method_ordering(NULL);
+  set_default_methods(NULL);
+  set_default_vtable_indices(NULL);
   set_local_interfaces(NULL);
   set_transitive_interfaces(NULL);
   init_implementor();
@@ -376,6 +385,21 @@
   }
   set_method_ordering(NULL);
 
+  // default methods can be empty
+  if (default_methods() != NULL &&
+      default_methods() != Universe::the_empty_method_array()) {
+    MetadataFactory::free_array<Method*>(loader_data, default_methods());
+  }
+  // Do NOT deallocate the methods themselves; they are owned by superinterfaces.
+  set_default_methods(NULL);
+
+  // default methods vtable indices can be empty
+  if (default_vtable_indices() != NULL) {
+    MetadataFactory::free_array<int>(loader_data, default_vtable_indices());
+  }
+  set_default_vtable_indices(NULL);
+
   // This array is in Klass, but remove it with the InstanceKlass since
   // this place would be the only caller and it can share memory with transitive
   // interfaces.
@@ -456,14 +480,14 @@
   return java_lang_Class::signers(java_mirror());
 }
 
-volatile oop InstanceKlass::init_lock() const {
+oop InstanceKlass::init_lock() const {
   // return the init lock from the mirror
   return java_lang_Class::init_lock(java_mirror());
 }
 
 void InstanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) {
   EXCEPTION_MARK;
-  volatile oop init_lock = this_oop->init_lock();
+  oop init_lock = this_oop->init_lock();
   ObjectLocker ol(init_lock, THREAD);
 
   // abort if someone beat us to the initialization
@@ -608,7 +632,7 @@
 
   // verification & rewriting
   {
-    volatile oop init_lock = this_oop->init_lock();
+    oop init_lock = this_oop->init_lock();
     ObjectLocker ol(init_lock, THREAD);
     // rewritten will have been set if loader constraint error found
     // on an earlier link attempt
@@ -731,7 +755,7 @@
   // refer to the JVM book page 47 for description of steps
   // Step 1
   {
-    volatile oop init_lock = this_oop->init_lock();
+    oop init_lock = this_oop->init_lock();
     ObjectLocker ol(init_lock, THREAD);
 
     Thread *self = THREAD; // it's passed the current thread
@@ -879,7 +903,7 @@
 }
 
 void InstanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle this_oop, ClassState state, TRAPS) {
-  volatile oop init_lock = this_oop->init_lock();
+  oop init_lock = this_oop->init_lock();
   ObjectLocker ol(init_lock, THREAD);
   this_oop->set_init_state(state);
   ol.notify_all(CHECK);
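
(Aside: the volatile qualifier dropped from init_lock() in the hunks above was meaningless -- the function returns the oop by value, and a qualifier on a by-value return conveys nothing to the caller. Generic illustration, hypothetical and not HotSpot code:

    volatile int f();   // qualifier on the returned value is ignored by callers
    int g();            // behaves identically from the caller's side
)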
@@ -1354,32 +1378,44 @@
   return -1;
 }
 
+// find_method looks up the name/signature in the local methods array
 Method* InstanceKlass::find_method(Symbol* name, Symbol* signature) const {
   return InstanceKlass::find_method(methods(), name, signature);
 }
 
+// find_method looks up the name/signature in the local methods array
 Method* InstanceKlass::find_method(
     Array<Method*>* methods, Symbol* name, Symbol* signature) {
+  int hit = find_method_index(methods, name, signature);
+  return hit >= 0 ? methods->at(hit) : NULL;
+}
+
+// find_method_index looks in the local methods array and returns the index
+// of the matching name/signature.  Used directly for default_methods to find
+// the index into default_vtable_indices, and indirectly by find_method.
+int InstanceKlass::find_method_index(
+    Array<Method*>* methods, Symbol* name, Symbol* signature) {
   int hit = binary_search(methods, name);
   if (hit != -1) {
     Method* m = methods->at(hit);
     // Do linear search to find matching signature.  First, quick check
     // for common case
-    if (m->signature() == signature) return m;
+    if (m->signature() == signature) return hit;
     // search downwards through overloaded methods
     int i;
     for (i = hit - 1; i >= 0; --i) {
         Method* m = methods->at(i);
         assert(m->is_method(), "must be method");
         if (m->name() != name) break;
-        if (m->signature() == signature) return m;
+        if (m->signature() == signature) return i;
     }
     // search upwards
     for (i = hit + 1; i < methods->length(); ++i) {
         Method* m = methods->at(i);
         assert(m->is_method(), "must be method");
         if (m->name() != name) break;
-        if (m->signature() == signature) return m;
+        if (m->signature() == signature) return i;
     }
     // not found
 #ifdef ASSERT
@@ -1387,9 +1423,8 @@
     assert(index == -1, err_msg("binary search should have found entry %d", index));
 #endif
   }
-  return NULL;
+  return -1;
 }
-
 int InstanceKlass::find_method_by_name(Symbol* name, int* end) {
   return find_method_by_name(methods(), name, end);
 }
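
(Aside: find_method_index() above is an instance of a common pattern for arrays sorted on a primary key with duplicates -- here method name, with overloads differing by signature: binary-search on the primary key, then scan the contiguous run of equal keys in both directions for the secondary key. A self-contained sketch with hypothetical types, not HotSpot code:

    #include <string>
    #include <vector>

    struct Entry { std::string name; std::string sig; };

    // Index of the entry matching (name, sig) in a vector sorted by name,
    // or -1 if absent.  Equal names are contiguous, so one binary-search
    // hit plus a two-way linear scan covers all overloads.
    static int find_index(const std::vector<Entry>& v,
                          const std::string& name, const std::string& sig) {
      int lo = 0, hi = (int)v.size() - 1, hit = -1;
      while (lo <= hi) {                       // binary search on the name
        int mid = lo + (hi - lo) / 2;
        if (v[mid].name < name)      lo = mid + 1;
        else if (name < v[mid].name) hi = mid - 1;
        else { hit = mid; break; }
      }
      if (hit < 0) return -1;
      for (int i = hit; i >= 0 && v[i].name == name; --i)                  // scan down
        if (v[i].sig == sig) return i;
      for (int i = hit + 1; i < (int)v.size() && v[i].name == name; ++i)  // scan up
        if (v[i].sig == sig) return i;
      return -1;
    }
)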
@@ -1408,6 +1443,7 @@
   return -1;
 }
 
+// lookup_method searches both the local methods array and the methods arrays of all superclasses
 Method* InstanceKlass::uncached_lookup_method(Symbol* name, Symbol* signature) const {
   Klass* klass = const_cast<InstanceKlass*>(this);
   while (klass != NULL) {
@@ -1418,7 +1454,24 @@
   return NULL;
 }
 
+// lookup a method in the default methods list then in all transitive interfaces
+// Do NOT return private or static methods
+Method* InstanceKlass::lookup_method_in_ordered_interfaces(Symbol* name,
+                                                         Symbol* signature) const {
+  Method* m = NULL;
+  if (default_methods() != NULL) {
+    m = find_method(default_methods(), name, signature);
+  }
+  // Look up interfaces
+  if (m == NULL) {
+    m = lookup_method_in_all_interfaces(name, signature);
+  }
+  return m;
+}
+
 // lookup a method in all the interfaces that this class implements
+// Do NOT return private or static methods; these are new in JDK8 and are
+// not externally visible.  They should only be found in the initial InterfaceMethodRef.
 Method* InstanceKlass::lookup_method_in_all_interfaces(Symbol* name,
                                                          Symbol* signature) const {
   Array<Klass*>* all_ifs = transitive_interfaces();
@@ -1427,7 +1480,7 @@
   for (int i = 0; i < num_ifs; i++) {
     ik = InstanceKlass::cast(all_ifs->at(i));
     Method* m = ik->lookup_method(name, signature);
-    if (m != NULL) {
+    if (m != NULL && m->is_public() && !m->is_static()) {
       return m;
     }
   }
@@ -2303,7 +2356,7 @@
 }
 
 address InstanceKlass::static_field_addr(int offset) {
-  return (address)(offset + InstanceMirrorKlass::offset_of_static_fields() + (intptr_t)java_mirror());
+  return (address)(offset + InstanceMirrorKlass::offset_of_static_fields() + cast_from_oop<intptr_t>(java_mirror()));
 }
 
 
@@ -2546,6 +2599,42 @@
   return m;
 }
 
+
+#if INCLUDE_JVMTI
+// Update default_methods for RedefineClasses, for methods that are
+// not yet in the vtable due to a concurrent subclass define and
+// superinterface redefinition.
+// Note: methods already in the vtable should have been updated via adjust_method_entries.
+void InstanceKlass::adjust_default_methods(Method** old_methods, Method** new_methods,
+                                           int methods_length, bool* trace_name_printed) {
+  // search the default_methods for uses of either obsolete or EMCP methods
+  if (default_methods() != NULL) {
+    for (int j = 0; j < methods_length; j++) {
+      Method* old_method = old_methods[j];
+      Method* new_method = new_methods[j];
+
+      for (int index = 0; index < default_methods()->length(); index++) {
+        if (default_methods()->at(index) == old_method) {
+          default_methods()->at_put(index, new_method);
+          if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
+            if (!(*trace_name_printed)) {
+              // RC_TRACE_MESG macro has an embedded ResourceMark
+              RC_TRACE_MESG(("adjust: klassname=%s default methods from name=%s",
+                             external_name(),
+                             old_method->method_holder()->external_name()));
+              *trace_name_printed = true;
+            }
+            RC_TRACE(0x00100000, ("default method update: %s(%s) ",
+                                  new_method->name()->as_C_string(),
+                                  new_method->signature()->as_C_string()));
+          }
+        }
+      }
+    }
+  }
+}
+#endif // INCLUDE_JVMTI
+
 // On-stack replacement stuff
 void InstanceKlass::add_osr_nmethod(nmethod* n) {
   // only one compilation can be active
@@ -2740,11 +2829,21 @@
   st->print(BULLET"methods:           "); methods()->print_value_on(st);                  st->cr();
   if (Verbose || WizardMode) {
     Array<Method*>* method_array = methods();
-    for(int i = 0; i < method_array->length(); i++) {
+    for (int i = 0; i < method_array->length(); i++) {
       st->print("%d : ", i); method_array->at(i)->print_value(); st->cr();
     }
   }
-  st->print(BULLET"method ordering:   "); method_ordering()->print_value_on(st);       st->cr();
+  st->print(BULLET"method ordering:   "); method_ordering()->print_value_on(st);      st->cr();
+  st->print(BULLET"default_methods:   "); default_methods()->print_value_on(st);      st->cr();
+  if (Verbose && default_methods() != NULL) {
+    Array<Method*>* method_array = default_methods();
+    for (int i = 0; i < method_array->length(); i++) {
+      st->print("%d : ", i); method_array->at(i)->print_value(); st->cr();
+    }
+  }
+  if (default_vtable_indices() != NULL) {
+    st->print(BULLET"default vtable indices:   "); default_vtable_indices()->print_value_on(st);       st->cr();
+  }
   st->print(BULLET"local interfaces:  "); local_interfaces()->print_value_on(st);      st->cr();
   st->print(BULLET"trans. interfaces: "); transitive_interfaces()->print_value_on(st); st->cr();
   st->print(BULLET"constants:         "); constants()->print_value_on(st);         st->cr();
@@ -3097,6 +3196,19 @@
     }
   }
 
+  // Verify default methods
+  if (default_methods() != NULL) {
+    Array<Method*>* methods = this->default_methods();
+    for (int j = 0; j < methods->length(); j++) {
+      guarantee(methods->at(j)->is_method(), "non-method in methods array");
+    }
+    for (int j = 0; j < methods->length() - 1; j++) {
+      Method* m1 = methods->at(j);
+      Method* m2 = methods->at(j + 1);
+      guarantee(m1->name()->fast_compare(m2->name()) <= 0, "methods not sorted correctly");
+    }
+  }
+
   // Verify JNI static field identifiers
   if (jni_ids() != NULL) {
     jni_ids()->verify(this);
--- a/src/share/vm/oops/instanceKlass.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/oops/instanceKlass.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -269,12 +269,18 @@
 
   // Method array.
   Array<Method*>* _methods;
+  // Default Method Array, concrete methods inherited from interfaces
+  Array<Method*>* _default_methods;
   // Interface (Klass*s) this class declares locally to implement.
   Array<Klass*>* _local_interfaces;
   // Interface (Klass*s) this class implements transitively.
   Array<Klass*>* _transitive_interfaces;
   // Int array containing the original order of method in the class file (for JVMTI).
   Array<int>*     _method_ordering;
+  // Int array containing the vtable_indices for default_methods
+  // offset matches _default_methods offset
+  Array<int>*     _default_vtable_indices;
+
   // Instance and static variable information, starts with 6-tuples of shorts
   // [access, name index, sig index, initval index, low_offset, high_offset]
   // for all fields, followed by the generic signature data at the end of
@@ -356,6 +362,15 @@
   void set_method_ordering(Array<int>* m) { _method_ordering = m; }
   void copy_method_ordering(intArray* m, TRAPS);
 
+  // default_methods
+  Array<Method*>* default_methods() const  { return _default_methods; }
+  void set_default_methods(Array<Method*>* a) { _default_methods = a; }
+
+  // default method vtable_indices
+  Array<int>* default_vtable_indices() const { return _default_vtable_indices; }
+  void set_default_vtable_indices(Array<int>* v) { _default_vtable_indices = v; }
+  Array<int>* create_new_default_vtable_indices(int len, TRAPS);
+
   // interfaces
   Array<Klass*>* local_interfaces() const          { return _local_interfaces; }
   void set_local_interfaces(Array<Klass*>* a)      {
@@ -501,12 +516,18 @@
   Method* find_method(Symbol* name, Symbol* signature) const;
   static Method* find_method(Array<Method*>* methods, Symbol* name, Symbol* signature);
 
+  // find a local method index in default_methods (returns -1 if not found)
+  static int find_method_index(Array<Method*>* methods, Symbol* name, Symbol* signature);
+
   // lookup operation (returns NULL if not found)
   Method* uncached_lookup_method(Symbol* name, Symbol* signature) const;
 
   // lookup a method in all the interfaces that this class implements
   // (returns NULL if not found)
   Method* lookup_method_in_all_interfaces(Symbol* name, Symbol* signature) const;
+  // lookup a method in the local default methods, then in all interfaces
+  // (returns NULL if not found)
+  Method* lookup_method_in_ordered_interfaces(Symbol* name, Symbol* signature) const;
 
   // Find method indices by name.  If a method with the specified name is
   // found the index to the first method is returned, and 'end' is filled in
@@ -910,6 +931,11 @@
   klassItable* itable() const;        // return new klassItable wrapper
   Method* method_at_itable(Klass* holder, int index, TRAPS);
 
+#if INCLUDE_JVMTI
+  void adjust_default_methods(Method** old_methods, Method** new_methods,
+                              int methods_length, bool* trace_name_printed);
+#endif // INCLUDE_JVMTI
+
   // Garbage collection
   void oop_follow_contents(oop obj);
   int  oop_adjust_pointers(oop obj);
@@ -995,7 +1021,7 @@
   // Must be one per class and it has to be a VM internal object so java code
   // cannot lock it (like the mirror).
   // It has to be an object not a Mutex because it's held through java calls.
-  volatile oop init_lock() const;
+  oop init_lock() const;
 private:
 
   // Static methods that are used to implement member methods where an exposed this pointer
--- a/src/share/vm/oops/instanceMirrorKlass.hpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/oops/instanceMirrorKlass.hpp	Mon Oct 21 14:08:09 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -66,7 +66,7 @@
   // Static field offset is an offset into the Heap, should be converted by
   // based on UseCompressedOop for traversal
   static HeapWord* start_of_static_fields(oop obj) {
-    return (HeapWord*)((intptr_t)obj + offset_of_static_fields());
+    return (HeapWord*)(cast_from_oop<intptr_t>(obj) + offset_of_static_fields());
   }
 
   static void init_offset_of_static_fields() {
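
(Aside: cast_from_oop<intptr_t>(...) as used above replaces raw (intptr_t) casts on oops. When CHECK_UNHANDLED_OOPS is enabled, oop is a wrapper class rather than oopDesc*, so a direct C-style cast to an integer type is not portable. A plausible shape for the helper -- a sketch; the real definition lives in oopsHierarchy.hpp and may differ:

    // Sketch -- go through the raw pointer, then to the requested type.
    template <class T>
    inline T cast_from_oop(oop o) {
      return (T)(void*)o;
    }
)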
--- a/src/share/vm/oops/instanceRefKlass.cpp	Thu Oct 03 19:13:12 2013 +0100
+++ b/src/share/vm/oops/instanceRefKlass.cpp	Mon Oct 21 14:08:09 2013 +0100
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All