changeset 13653:06b85e94a022 mvt

Merge
author dsimms
date Tue, 26 Sep 2017 13:09:56 +0200
parents 488f0d311b80 5ab7a67bc155
children
files src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.aarch64/src/org/graalvm/compiler/hotspot/aarch64/AArch64RawNativeCallNode.java src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64RawNativeCallNode.java src/os_cpu/linux_sparc/vm/atomic_linux_sparc.hpp src/share/vm/classfile/classLoader.cpp src/share/vm/classfile/classLoaderData.cpp src/share/vm/classfile/classLoaderData.hpp src/share/vm/classfile/systemDictionary.cpp src/share/vm/classfile/systemDictionary.hpp src/share/vm/code/nmethod.cpp src/share/vm/compiler/compileBroker.cpp src/share/vm/gc/parallel/psParallelCompact.cpp src/share/vm/interpreter/oopMapCache.cpp src/share/vm/interpreter/rewriter.cpp src/share/vm/logging/logTag.hpp src/share/vm/oops/arrayKlass.cpp src/share/vm/oops/arrayKlass.hpp src/share/vm/oops/constantPool.cpp src/share/vm/oops/cpCache.cpp src/share/vm/oops/cpCache.hpp src/share/vm/oops/instanceKlass.cpp src/share/vm/oops/instanceKlass.hpp src/share/vm/oops/klass.hpp src/share/vm/oops/method.cpp src/share/vm/opto/lcm.cpp src/share/vm/opto/runtime.cpp src/share/vm/prims/jni.cpp src/share/vm/runtime/arguments.cpp src/share/vm/runtime/deoptimization.cpp src/share/vm/runtime/deoptimization.hpp src/share/vm/runtime/fprofiler.cpp src/share/vm/runtime/fprofiler.hpp src/share/vm/runtime/globals.hpp src/share/vm/runtime/javaCalls.cpp src/share/vm/runtime/sharedRuntime.cpp src/share/vm/runtime/thread.cpp src/share/vm/runtime/thread.hpp src/share/vm/runtime/vmStructs.cpp src/share/vm/services/diagnosticCommand.cpp test/runtime/MinimalVM/Xprof.java
diffstat 205 files changed, 3095 insertions(+), 4188 deletions(-) [+]
line wrap: on
line diff
--- a/.hgtags	Fri Sep 22 13:51:12 2017 +0200
+++ b/.hgtags	Tue Sep 26 13:09:56 2017 +0200
@@ -605,3 +605,6 @@
 d7baadc223e790c08bc69bf7e553bce65b4e7e40 jdk-9+180
 4a443796f6f57842d6a0434ac27ca3d1033ccc20 jdk-9+181
 e93ed1a092409351c90b3a76d80b9aa8b44d5e6a jdk-10+20
+bdb2dbc43ff065b74c2121bdfb0d6e1fa8684b73 jdk-10+21
+71337910df60ff2b62daf10357f553def25e2d0b jdk-10+22
+1a9c2e07a82682c3eff42492d7e32f1d8c6639b8 jdk-10+23
--- a/make/lib/JvmFeatures.gmk	Fri Sep 22 13:51:12 2017 +0200
+++ b/make/lib/JvmFeatures.gmk	Tue Sep 26 13:09:56 2017 +0200
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -88,11 +88,6 @@
   JVM_EXCLUDE_FILES += jvmciCodeInstaller_$(HOTSPOT_TARGET_CPU_ARCH).cpp
 endif
 
-ifneq ($(call check-jvm-feature, fprof), true)
-  JVM_CFLAGS_FEATURES += -DINCLUDE_FPROF=0
-  JVM_EXCLUDE_FILES += fprofiler.cpp
-endif
-
 ifneq ($(call check-jvm-feature, vm-structs), true)
   JVM_CFLAGS_FEATURES += -DINCLUDE_VM_STRUCTS=0
   JVM_EXCLUDE_FILES += vmStructs.cpp
--- a/src/cpu/aarch64/vm/aarch64.ad	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/cpu/aarch64/vm/aarch64.ad	Tue Sep 26 13:09:56 2017 +0200
@@ -12614,7 +12614,7 @@
   match(Set dst (AndI (URShiftI src rshift) mask));
 
   ins_cost(INSN_COST);
-  format %{ "ubfxw $dst, $src, $mask" %}
+  format %{ "ubfxw $dst, $src, $rshift, $mask" %}
   ins_encode %{
     int rshift = $rshift$$constant;
     long mask = $mask$$constant;
@@ -12629,7 +12629,7 @@
   match(Set dst (AndL (URShiftL src rshift) mask));
 
   ins_cost(INSN_COST);
-  format %{ "ubfx $dst, $src, $mask" %}
+  format %{ "ubfx $dst, $src, $rshift, $mask" %}
   ins_encode %{
     int rshift = $rshift$$constant;
     long mask = $mask$$constant;
@@ -12647,7 +12647,7 @@
   match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
 
   ins_cost(INSN_COST * 2);
-  format %{ "ubfx $dst, $src, $mask" %}
+  format %{ "ubfx $dst, $src, $rshift, $mask" %}
   ins_encode %{
     int rshift = $rshift$$constant;
     long mask = $mask$$constant;
@@ -12658,6 +12658,64 @@
   ins_pipe(ialu_reg_shift);
 %}
 
+// We can use ubfiz when masking by a positive number and then left shifting the result.
+// We know that the mask is positive because immI_bitmask guarantees it.
+instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
+%{
+  match(Set dst (LShiftI (AndI src mask) lshift));
+  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
+    (exact_log2(n->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= (31+1));
+
+  ins_cost(INSN_COST);
+  format %{ "ubfizw $dst, $src, $lshift, $mask" %}
+  ins_encode %{
+    int lshift = $lshift$$constant;
+    long mask = $mask$$constant;
+    int width = exact_log2(mask+1);
+    __ ubfizw(as_Register($dst$$reg),
+          as_Register($src$$reg), lshift, width);
+  %}
+  ins_pipe(ialu_reg_shift);
+%}
+// We can use ubfiz when masking by a positive number and then left shifting the result.
+// We know that the mask is positive because immL_bitmask guarantees it.
+instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
+%{
+  match(Set dst (LShiftL (AndL src mask) lshift));
+  predicate((unsigned int)n->in(2)->get_int() <= 63 &&
+    (exact_log2_long(n->in(1)->in(2)->get_long()+1) + (unsigned int)n->in(2)->get_int()) <= (63+1));
+
+  ins_cost(INSN_COST);
+  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
+  ins_encode %{
+    int lshift = $lshift$$constant;
+    long mask = $mask$$constant;
+    int width = exact_log2(mask+1);
+    __ ubfiz(as_Register($dst$$reg),
+          as_Register($src$$reg), lshift, width);
+  %}
+  ins_pipe(ialu_reg_shift);
+%}
+
+// If there is a convert I to L block between an AndI and a LShiftL, we can also match ubfiz
+instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
+%{
+  match(Set dst (LShiftL (ConvI2L(AndI src mask)) lshift));
+  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
+    (exact_log2((unsigned int)n->in(1)->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= 32);
+
+  ins_cost(INSN_COST);
+  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
+  ins_encode %{
+    int lshift = $lshift$$constant;
+    long mask = $mask$$constant;
+    int width = exact_log2(mask+1);
+    __ ubfiz(as_Register($dst$$reg),
+             as_Register($src$$reg), lshift, width);
+  %}
+  ins_pipe(ialu_reg_shift);
+%}
+
 // Rotations
 
 instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
--- a/src/cpu/aarch64/vm/aarch64_ad.m4	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/cpu/aarch64/vm/aarch64_ad.m4	Tue Sep 26 13:09:56 2017 +0200
@@ -183,7 +183,7 @@
   match(Set dst (And$1 ($2$1 src rshift) mask));
 
   ins_cost(INSN_COST);
-  format %{ "$3 $dst, $src, $mask" %}
+  format %{ "$3 $dst, $src, $rshift, $mask" %}
   ins_encode %{
     int rshift = $rshift$$constant;
     long mask = $mask$$constant;
@@ -203,7 +203,7 @@
   match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask)));
 
   ins_cost(INSN_COST * 2);
-  format %{ "ubfx $dst, $src, $mask" %}
+  format %{ "ubfx $dst, $src, $rshift, $mask" %}
   ins_encode %{
     int rshift = $rshift$$constant;
     long mask = $mask$$constant;
@@ -214,6 +214,48 @@
   ins_pipe(ialu_reg_shift);
 %}
 
+define(`UBFIZ_INSN',
+// We can use ubfiz when masking by a positive number and then left shifting the result.
+// We know that the mask is positive because imm$1_bitmask guarantees it.
+`instruct $2$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI lshift, imm$1_bitmask mask)
+%{
+  match(Set dst (LShift$1 (And$1 src mask) lshift));
+  predicate((unsigned int)n->in(2)->get_int() <= $3 &&
+    (exact_log2$5(n->in(1)->in(2)->get_$4()+1) + (unsigned int)n->in(2)->get_int()) <= ($3+1));
+
+  ins_cost(INSN_COST);
+  format %{ "$2 $dst, $src, $lshift, $mask" %}
+  ins_encode %{
+    int lshift = $lshift$$constant;
+    long mask = $mask$$constant;
+    int width = exact_log2(mask+1);
+    __ $2(as_Register($dst$$reg),
+          as_Register($src$$reg), lshift, width);
+  %}
+  ins_pipe(ialu_reg_shift);
+%}')
+UBFIZ_INSN(I, ubfizw, 31, int)
+UBFIZ_INSN(L, ubfiz, 63, long, _long)
+
+// If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
+instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
+%{
+  match(Set dst (LShiftL (ConvI2L(AndI src mask)) lshift));
+  predicate((unsigned int)n->in(2)->get_int() <= 31 &&
+    (exact_log2((unsigned int)n->in(1)->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= 32);
+
+  ins_cost(INSN_COST);
+  format %{ "ubfiz $dst, $src, $lshift, $mask" %}
+  ins_encode %{
+    int lshift = $lshift$$constant;
+    long mask = $mask$$constant;
+    int width = exact_log2(mask+1);
+    __ ubfiz(as_Register($dst$$reg),
+             as_Register($src$$reg), lshift, width);
+  %}
+  ins_pipe(ialu_reg_shift);
+%}
+
 // Rotations
 
 define(`EXTRACT_INSN',
--- a/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/cpu/aarch64/vm/macroAssembler_aarch64.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -3630,6 +3630,12 @@
 }
 
 #if INCLUDE_ALL_GCS
+/*
+ * g1_write_barrier_pre -- G1GC pre-write barrier for store of new_val at
+ * store_addr.
+ *
+ * Allocates rscratch1
+ */
 void MacroAssembler::g1_write_barrier_pre(Register obj,
                                           Register pre_val,
                                           Register thread,
@@ -3645,10 +3651,8 @@
   Label done;
   Label runtime;
 
-  assert(pre_val != noreg, "check this code");
-
-  if (obj != noreg)
-    assert_different_registers(obj, pre_val, tmp);
+  assert_different_registers(obj, pre_val, tmp, rscratch1);
+  assert(pre_val != noreg &&  tmp != noreg, "expecting a register");
 
   Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                        SATBMarkQueue::byte_offset_of_active()));
@@ -3722,12 +3726,22 @@
   bind(done);
 }
 
+/*
+ * g1_write_barrier_post -- G1GC post-write barrier for store of new_val at
+ * store_addr
+ *
+ * Allocates rscratch1
+ */
 void MacroAssembler::g1_write_barrier_post(Register store_addr,
                                            Register new_val,
                                            Register thread,
                                            Register tmp,
                                            Register tmp2) {
   assert(thread == rthread, "must be");
+  assert_different_registers(store_addr, new_val, thread, tmp, tmp2,
+                             rscratch1);
+  assert(store_addr != noreg && new_val != noreg && tmp != noreg
+         && tmp2 != noreg, "expecting a register");
 
   Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                        DirtyCardQueue::byte_offset_of_index()));
--- a/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/cpu/aarch64/vm/sharedRuntime_aarch64.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -2067,7 +2067,7 @@
       __ g1_write_barrier_pre(noreg /* obj */,
                               r0 /* pre_val */,
                               rthread /* thread */,
-                              rscratch1 /* tmp */,
+                              rscratch2 /* tmp */,
                               true /* tosca_live */,
                               true /* expand_call */);
     }
--- a/src/cpu/aarch64/vm/templateTable_aarch64.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/cpu/aarch64/vm/templateTable_aarch64.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -170,7 +170,7 @@
           // G1 barrier needs uncompressed oop for region cross check.
           Register new_val = val;
           if (UseCompressedOops) {
-            new_val = rscratch1;
+            new_val = rscratch2;
             __ mov(new_val, val);
           }
           __ store_heap_oop(Address(r3, 0), val);
--- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -292,7 +292,7 @@
   if (VerifyThread) {
     // NOTE: this chops off the heads of the 64-bit O registers.
     // make sure G2_thread contains the right value
-    save_frame_and_mov(0, Lmethod, Lmethod);   // to avoid clobbering O0 (and propagate Lmethod for -Xprof)
+    save_frame_and_mov(0, Lmethod, Lmethod);   // to avoid clobbering O0 (and propagate Lmethod)
     mov(G1, L1);                // avoid clobbering G1
     // G2 saved below
     mov(G3, L3);                // avoid clobbering G3
@@ -398,7 +398,7 @@
 
 #ifdef ASSERT
   // check that it WAS previously set
-    save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod to helper frame for -Xprof
+    save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod to helper frame
     ld_ptr(sp_addr, L0);
     tst(L0);
     breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
@@ -618,7 +618,7 @@
 
 # ifdef ASSERT
     // Check that we are not overwriting any other oop.
-    save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod for -Xprof
+    save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod
     ld_ptr(vm_result_addr, L0);
     tst(L0);
     restore();
--- a/src/cpu/sparc/vm/templateTable_sparc.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/cpu/sparc/vm/templateTable_sparc.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -2928,7 +2928,7 @@
   __ br(Assembler::zero, false, Assembler::pt, notFinal);
   __ delayed()->and3(Rret, 0xFF, G4_scratch);      // gets number of parameters
 
-  if (RewriteBytecodes && !UseSharedSpaces) {
+  if (RewriteBytecodes && !UseSharedSpaces && !DumpSharedSpaces) {
     patch_bytecode(Bytecodes::_fast_invokevfinal, Rscratch, Rtemp);
   }
 
--- a/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/BinaryContainer.java	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/BinaryContainer.java	Tue Sep 26 13:09:56 2017 +0200
@@ -187,11 +187,10 @@
         {"StubRoutines::_arrayof_oop_disjoint_arraycopy", "_aot_stub_routines_arrayof_oop_disjoint_arraycopy"},
         {"StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit", "_aot_stub_routines_arrayof_oop_disjoint_arraycopy_uninit"},
 
+        {"StubRoutines::_unsafe_arraycopy", "_aot_stub_routines_unsafe_arraycopy"},
+
         {"StubRoutines::_checkcast_arraycopy", "_aot_stub_routines_checkcast_arraycopy"},
 
-
-
-
         {"StubRoutines::_aescrypt_encryptBlock", "_aot_stub_routines_aescrypt_encryptBlock"},
         {"StubRoutines::_aescrypt_decryptBlock", "_aot_stub_routines_aescrypt_decryptBlock"},
         {"StubRoutines::_cipherBlockChaining_encryptAESCrypt", "_aot_stub_routines_cipherBlockChaining_encryptAESCrypt"},
@@ -478,8 +477,8 @@
     }
 
     /**
-     * Creates a global symbol of the form {@code "A" + container name}.
-     * Note, linker on Windows does not allow names which start with '.'
+     * Creates a global symbol of the form {@code "A" + container name}. Note, linker on Windows
+     * does not allow names which start with '.'
      *
      * @param container container to create a symbol for
      */
@@ -685,7 +684,8 @@
     }
 
     /**
-     * Add oop symbol by as follows. Extend the oop.got section with another slot for the VM to patch.
+     * Add oop symbol by as follows. Extend the oop.got section with another slot for the VM to
+     * patch.
      *
      * @param oopName name of the oop symbol
      */
@@ -728,10 +728,9 @@
     }
 
     /**
-     * Add klass symbol by as follows.
-     *   - Adding the symbol name to the metaspace.names section
-     *   - Add the offset of the name in metaspace.names to metaspace.offsets
-     *   - Extend the klasses.got section with another slot for the VM to patch
+     * Add klass symbol by as follows. - Adding the symbol name to the metaspace.names section - Add
+     * the offset of the name in metaspace.names to metaspace.offsets - Extend the klasses.got
+     * section with another slot for the VM to patch
      *
      * @param klassName name of the metaspace symbol
      * @return the got offset in the klasses.got of the metaspace symbol
--- a/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoffSection.java	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/jdk.aot/share/classes/jdk.tools.jaotc.binformat/src/jdk/tools/jaotc/binformat/pecoff/PECoffSection.java	Tue Sep 26 13:09:56 2017 +0200
@@ -50,7 +50,7 @@
         byte[] Name = sectName.getBytes();
         int max = Name.length <= IMAGE_SECTION_HEADER.Name.sz ? Name.length : IMAGE_SECTION_HEADER.Name.sz;
 
-        assert (sectAlign < 1 || sectAlign > 1024 || (sectAlign & (sectAlign - 1)) != 0) : "section alignment is not valid: " + sectAlign;
+        assert !(sectAlign < 1 || sectAlign > 1024 || (sectAlign & (sectAlign - 1)) != 0) : "section alignment is not valid: " + sectAlign;
         align = sectAlign;
 
         // Using 32 because IMAGE_SCN_ALIGN_*BYTES is value + 1
--- a/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/AOTBackend.java	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/jdk.aot/share/classes/jdk.tools.jaotc/src/jdk/tools/jaotc/AOTBackend.java	Tue Sep 26 13:09:56 2017 +0200
@@ -27,6 +27,8 @@
 
 import org.graalvm.compiler.code.CompilationResult;
 import org.graalvm.compiler.core.GraalCompiler;
+import org.graalvm.compiler.core.common.CompilationIdentifier;
+import org.graalvm.compiler.core.common.CompilationIdentifier.Verbosity;
 import org.graalvm.compiler.debug.DebugContext;
 import org.graalvm.compiler.hotspot.HotSpotBackend;
 import org.graalvm.compiler.hotspot.HotSpotCompiledCodeBuilder;
@@ -127,7 +129,13 @@
             ProfilingInfo profilingInfo = DefaultProfilingInfo.get(TriState.FALSE);
 
             final boolean isImmutablePIC = true;
-            CompilationResult compilationResult = new CompilationResult(resolvedMethod.getName(), isImmutablePIC);
+            CompilationIdentifier id = new CompilationIdentifier() {
+                @Override
+                public String toString(Verbosity verbosity) {
+                    return resolvedMethod.getName();
+                }
+            };
+            CompilationResult compilationResult = new CompilationResult(id, isImmutablePIC);
 
             return GraalCompiler.compileGraph(graph, resolvedMethod, providers, backend, graphBuilderSuite, OptimisticOptimizations.ALL, profilingInfo, getSuites(), getLirSuites(),
                             compilationResult, CompilationResultBuilderFactory.Default);
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/CompilerToVM.java	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/CompilerToVM.java	Tue Sep 26 13:09:56 2017 +0200
@@ -153,9 +153,9 @@
      * @param resolve force resolution to a {@link ResolvedJavaType}. If true, this method will
      *            either return a {@link ResolvedJavaType} or throw an exception
      * @return the type for {@code name} or 0 if resolution failed and {@code resolve == false}
-     * @throws LinkageError if {@code resolve == true} and the resolution failed
+     * @throws ClassNotFoundException if {@code resolve == true} and the resolution failed
      */
-    native HotSpotResolvedObjectTypeImpl lookupType(String name, Class<?> accessingClass, boolean resolve);
+    native HotSpotResolvedObjectTypeImpl lookupType(String name, Class<?> accessingClass, boolean resolve) throws ClassNotFoundException;
 
     /**
      * Resolves the entry at index {@code cpi} in {@code constantPool} to an object.
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotJVMCIRuntime.java	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotJVMCIRuntime.java	Tue Sep 26 13:09:56 2017 +0200
@@ -367,13 +367,17 @@
 
         // Resolve non-primitive types in the VM.
         HotSpotResolvedObjectTypeImpl hsAccessingType = (HotSpotResolvedObjectTypeImpl) accessingType;
-        final HotSpotResolvedObjectTypeImpl klass = compilerToVm.lookupType(name, hsAccessingType.mirror(), resolve);
+        try {
+            final HotSpotResolvedObjectTypeImpl klass = compilerToVm.lookupType(name, hsAccessingType.mirror(), resolve);
 
-        if (klass == null) {
-            assert resolve == false;
-            return HotSpotUnresolvedJavaType.create(this, name);
+            if (klass == null) {
+                assert resolve == false;
+                return HotSpotUnresolvedJavaType.create(this, name);
+            }
+            return klass;
+        } catch (ClassNotFoundException e) {
+            throw (NoClassDefFoundError) new NoClassDefFoundError().initCause(e);
         }
-        return klass;
     }
 
     public JVMCIBackend getHostJVMCIBackend() {
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotSignature.java	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotSignature.java	Tue Sep 26 13:09:56 2017 +0200
@@ -45,7 +45,9 @@
 
     public HotSpotSignature(HotSpotJVMCIRuntimeProvider runtime, String signature) {
         this.runtime = runtime;
-        assert signature.length() > 0;
+        if (signature.length() == 0) {
+            throw new IllegalArgumentException("Signature cannot be empty");
+        }
         this.originalString = signature;
 
         if (signature.charAt(0) == '(') {
@@ -59,9 +61,11 @@
             cur++;
             int nextCur = parseSignature(signature, cur);
             returnType = signature.substring(cur, nextCur);
-            assert nextCur == signature.length();
+            if (nextCur != signature.length()) {
+                throw new IllegalArgumentException("Extra characters at end of signature: " + signature);
+            }
         } else {
-            returnType = null;
+            throw new IllegalArgumentException("Signature must start with a '(': " + signature);
         }
     }
 
@@ -81,33 +85,41 @@
     }
 
     private static int parseSignature(String signature, int start) {
-        int cur = start;
-        char first;
-        do {
-            first = signature.charAt(cur++);
-        } while (first == '[');
+        try {
+            int cur = start;
+            char first;
+            do {
+                first = signature.charAt(cur);
+                cur++;
+            } while (first == '[');
 
-        switch (first) {
-            case 'L':
-                while (signature.charAt(cur) != ';') {
+            switch (first) {
+                case 'L':
+                    while (signature.charAt(cur) != ';') {
+                        if (signature.charAt(cur) == '.') {
+                            throw new IllegalArgumentException("Class name in signature contains '.' at index " + cur + ": " + signature);
+                        }
+                        cur++;
+                    }
                     cur++;
-                }
-                cur++;
-                break;
-            case 'V':
-            case 'I':
-            case 'B':
-            case 'C':
-            case 'D':
-            case 'F':
-            case 'J':
-            case 'S':
-            case 'Z':
-                break;
-            default:
-                throw new JVMCIError("Invalid character at index %d in signature: %s", cur, signature);
+                    break;
+                case 'V':
+                case 'I':
+                case 'B':
+                case 'C':
+                case 'D':
+                case 'F':
+                case 'J':
+                case 'S':
+                case 'Z':
+                    break;
+                default:
+                    throw new IllegalArgumentException("Invalid character '" + signature.charAt(cur - 1) + "' at index " + (cur - 1) + " in signature: " + signature);
+            }
+            return cur;
+        } catch (StringIndexOutOfBoundsException e) {
+            throw new IllegalArgumentException("Truncated signature: " + signature);
         }
-        return cur;
     }
 
     @Override
--- a/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.meta/src/jdk/vm/ci/meta/MetaAccessProvider.java	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/jdk.internal.vm.ci/share/classes/jdk.vm.ci.meta/src/jdk/vm/ci/meta/MetaAccessProvider.java	Tue Sep 26 13:09:56 2017 +0200
@@ -83,8 +83,9 @@
     /**
      * Parses a
      * <a href="http://docs.oracle.com/javase/specs/jvms/se7/html/jvms-4.html#jvms-4.3.3">method
-     * descriptor</a> into a {@link Signature}. The behavior of this method is undefined if the
-     * method descriptor is not well formed.
+     * descriptor</a> into a {@link Signature}.
+     *
+     * @throws IllegalArgumentException if the method descriptor is not well formed
      */
     Signature parseMethodDescriptor(String methodDescriptor);
 
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.api.replacements/src/org/graalvm/compiler/api/replacements/ClassSubstitution.java	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.api.replacements/src/org/graalvm/compiler/api/replacements/ClassSubstitution.java	Tue Sep 26 13:09:56 2017 +0200
@@ -57,7 +57,9 @@
 
     /**
      * Determines if the substitutions are for classes that may not be part of the runtime.
-     * Substitutions for such classes are omitted if the original classes cannot be found.
+     * Substitutions for such classes are omitted if the original classes cannot be found. If
+     * multiple classes are specified using {@link #className()} and {@link #optional()} is false,
+     * then at least one of the classes is required to be reachable.
      */
     boolean optional() default false;
 }
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.asm.test/src/org/graalvm/compiler/asm/test/AssemblerTest.java	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.asm.test/src/org/graalvm/compiler/asm/test/AssemblerTest.java	Tue Sep 26 13:09:56 2017 +0200
@@ -90,7 +90,7 @@
             StructuredGraph graph = new StructuredGraph.Builder(options, debug).method(method).compilationId(compilationId).build();
             CallingConvention cc = backend.newLIRGenerationResult(compilationId, null, null, graph, null).getCallingConvention();
 
-            CompilationResult compResult = new CompilationResult();
+            CompilationResult compResult = new CompilationResult(graph.compilationId());
             byte[] targetCode = test.generateCode(compResult, codeCache.getTarget(), registerConfig, cc);
             compResult.setTargetCode(targetCode, targetCode.length);
             compResult.setTotalFrameSize(0);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.code/src/org/graalvm/compiler/code/CompilationResult.java	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.code/src/org/graalvm/compiler/code/CompilationResult.java	Tue Sep 26 13:09:56 2017 +0200
@@ -33,6 +33,7 @@
 import java.util.List;
 import java.util.Objects;
 
+import org.graalvm.compiler.core.common.CompilationIdentifier;
 import org.graalvm.compiler.graph.NodeSourcePosition;
 import org.graalvm.util.EconomicSet;
 
@@ -190,6 +191,8 @@
 
     private final String name;
 
+    private final CompilationIdentifier compilationId;
+
     /**
      * The buffer containing the emitted machine code.
      */
@@ -222,21 +225,26 @@
 
     private boolean isImmutablePIC;
 
-    public CompilationResult() {
-        this(null, false);
+    public CompilationResult(CompilationIdentifier compilationId) {
+        this(compilationId, compilationId.toString(CompilationIdentifier.Verbosity.NAME), false);
+    }
+
+    public CompilationResult(CompilationIdentifier compilationId, String name) {
+        this(compilationId, name, false);
+    }
+
+    public CompilationResult(CompilationIdentifier compilationId, boolean isImmutablePIC) {
+        this(compilationId, null, isImmutablePIC);
+    }
+
+    public CompilationResult(CompilationIdentifier compilationId, String name, boolean isImmutablePIC) {
+        this.compilationId = compilationId;
+        this.name = name;
+        this.isImmutablePIC = isImmutablePIC;
     }
 
     public CompilationResult(String name) {
-        this(name, false);
-    }
-
-    public CompilationResult(boolean isImmutablePIC) {
-        this(null, isImmutablePIC);
-    }
-
-    public CompilationResult(String name, boolean isImmutablePIC) {
-        this.name = name;
-        this.isImmutablePIC = isImmutablePIC;
+        this(null, name);
     }
 
     @Override
@@ -266,6 +274,7 @@
                 this.totalFrameSize == that.totalFrameSize &&
                 this.targetCodeSize == that.targetCodeSize &&
                 Objects.equals(this.name, that.name) &&
+                Objects.equals(this.compilationId, that.compilationId) &&
                 Objects.equals(this.annotations, that.annotations) &&
                 Objects.equals(this.dataSection, that.dataSection) &&
                 Objects.equals(this.exceptionHandlers, that.exceptionHandlers) &&
@@ -670,6 +679,10 @@
         return name;
     }
 
+    public CompilationIdentifier getCompilationId() {
+        return compilationId;
+    }
+
     public void setHasUnsafeAccess(boolean hasUnsafeAccess) {
         checkOpen();
         this.hasUnsafeAccess = hasUnsafeAccess;
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/GraalCompilerTest.java	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/GraalCompilerTest.java	Tue Sep 26 13:09:56 2017 +0200
@@ -949,7 +949,7 @@
 
             try (AllocSpy spy = AllocSpy.open(installedCodeOwner); DebugContext.Scope ds = debug.scope("Compiling", new DebugDumpScope(id.toString(CompilationIdentifier.Verbosity.ID), true))) {
                 CompilationPrinter printer = CompilationPrinter.begin(options, id, installedCodeOwner, INVOCATION_ENTRY_BCI);
-                CompilationResult compResult = compile(installedCodeOwner, graphToCompile, new CompilationResult(), id, options);
+                CompilationResult compResult = compile(installedCodeOwner, graphToCompile, new CompilationResult(graphToCompile.compilationId()), id, options);
                 printer.finish(compResult);
 
                 try (DebugContext.Scope s = debug.scope("CodeInstall", getCodeCache(), installedCodeOwner, compResult);
@@ -1019,17 +1019,19 @@
      */
     protected final CompilationResult compile(ResolvedJavaMethod installedCodeOwner, StructuredGraph graph) {
         OptionValues options = graph == null ? getInitialOptions() : graph.getOptions();
-        return compile(installedCodeOwner, graph, new CompilationResult(), getOrCreateCompilationId(installedCodeOwner, graph), options);
+        CompilationIdentifier compilationId = getOrCreateCompilationId(installedCodeOwner, graph);
+        return compile(installedCodeOwner, graph, new CompilationResult(compilationId), compilationId, options);
     }
 
     protected final CompilationResult compile(ResolvedJavaMethod installedCodeOwner, StructuredGraph graph, CompilationIdentifier compilationId) {
         OptionValues options = graph == null ? getInitialOptions() : graph.getOptions();
-        return compile(installedCodeOwner, graph, new CompilationResult(), compilationId, options);
+        return compile(installedCodeOwner, graph, new CompilationResult(compilationId), compilationId, options);
     }
 
     protected final CompilationResult compile(ResolvedJavaMethod installedCodeOwner, StructuredGraph graph, OptionValues options) {
         assert graph == null || graph.getOptions() == options;
-        return compile(installedCodeOwner, graph, new CompilationResult(), getOrCreateCompilationId(installedCodeOwner, graph), options);
+        CompilationIdentifier compilationId = getOrCreateCompilationId(installedCodeOwner, graph);
+        return compile(installedCodeOwner, graph, new CompilationResult(compilationId), compilationId, options);
     }
 
     /**
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/InfopointReasonTest.java	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/InfopointReasonTest.java	Tue Sep 26 13:09:56 2017 +0200
@@ -64,7 +64,7 @@
         final ResolvedJavaMethod method = getResolvedJavaMethod("testMethod");
         final StructuredGraph graph = parseEager(method, AllowAssumptions.YES);
         final CompilationResult cr = compileGraph(graph, graph.method(), getProviders(), getBackend(), getDefaultGraphBuilderSuite(), OptimisticOptimizations.ALL, graph.getProfilingInfo(),
-                        createSuites(graph.getOptions()), createLIRSuites(graph.getOptions()), new CompilationResult(), CompilationResultBuilderFactory.Default);
+                        createSuites(graph.getOptions()), createLIRSuites(graph.getOptions()), new CompilationResult(graph.compilationId()), CompilationResultBuilderFactory.Default);
         for (Infopoint sp : cr.getInfopoints()) {
             assertNotNull(sp.reason);
             if (sp instanceof Call) {
@@ -86,7 +86,7 @@
         assertTrue(graphLineSPs > 0);
         PhaseSuite<HighTierContext> graphBuilderSuite = getCustomGraphBuilderSuite(GraphBuilderConfiguration.getDefault(getDefaultGraphBuilderPlugins()).withFullInfopoints(true));
         final CompilationResult cr = compileGraph(graph, graph.method(), getProviders(), getBackend(), graphBuilderSuite, OptimisticOptimizations.ALL, graph.getProfilingInfo(),
-                        createSuites(graph.getOptions()), createLIRSuites(graph.getOptions()), new CompilationResult(), CompilationResultBuilderFactory.Default);
+                        createSuites(graph.getOptions()), createLIRSuites(graph.getOptions()), new CompilationResult(graph.compilationId()), CompilationResultBuilderFactory.Default);
         int lineSPs = 0;
         for (Infopoint sp : cr.getInfopoints()) {
             assertNotNull(sp.reason);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/tutorial/InvokeGraal.java	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.core.test/src/org/graalvm/compiler/core/test/tutorial/InvokeGraal.java	Tue Sep 26 13:09:56 2017 +0200
@@ -123,7 +123,7 @@
             ProfilingInfo profilingInfo = graph.getProfilingInfo(method);
 
             /* The default class and configuration for compilation results. */
-            CompilationResult compilationResult = new CompilationResult();
+            CompilationResult compilationResult = new CompilationResult(graph.compilationId());
             CompilationResultBuilderFactory factory = CompilationResultBuilderFactory.Default;
 
             /* Invoke the whole Graal compilation pipeline. */
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.graph.test/src/org/graalvm/compiler/graph/test/NodeBitMapTest.java	Tue Sep 26 13:09:56 2017 +0200
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.graph.test;
+
+import static org.graalvm.compiler.nodeinfo.NodeCycles.CYCLES_IGNORED;
+import static org.graalvm.compiler.nodeinfo.NodeSize.SIZE_IGNORED;
+
+import java.util.ConcurrentModificationException;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+
+import org.graalvm.compiler.api.test.Graal;
+import org.graalvm.compiler.graph.Graph;
+import org.graalvm.compiler.graph.Node;
+import org.graalvm.compiler.graph.NodeBitMap;
+import org.graalvm.compiler.graph.NodeClass;
+import org.graalvm.compiler.nodeinfo.NodeInfo;
+import org.graalvm.compiler.options.OptionValues;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+
+public class NodeBitMapTest extends GraphTest {
+
+    @NodeInfo(cycles = CYCLES_IGNORED, size = SIZE_IGNORED)
+    static final class TestNode extends Node {
+        public static final NodeClass<TestNode> TYPE = NodeClass.create(TestNode.class);
+
+        protected TestNode() {
+            super(TYPE);
+        }
+    }
+
+    private Graph graph;
+    private TestNode[] nodes = new TestNode[100];
+    private NodeBitMap map;
+
+    @Before
+    public void before() {
+        // Need to initialize HotSpotGraalRuntime before any Node class is initialized.
+        Graal.getRuntime();
+
+        OptionValues options = getOptions();
+        graph = new Graph(options, getDebug(options));
+        for (int i = 0; i < nodes.length; i++) {
+            nodes[i] = graph.add(new TestNode());
+        }
+        map = graph.createNodeBitMap();
+    }
+
+    @Test
+    public void iterateEmpty() {
+        for (Node n : map) {
+            Assert.fail("no elements expected: " + n);
+        }
+    }
+
+    @Test
+    public void iterateMarkedNodes() {
+        map.mark(nodes[99]);
+        map.mark(nodes[0]);
+        map.mark(nodes[7]);
+        map.mark(nodes[1]);
+        map.mark(nodes[53]);
+
+        Iterator<Node> iter = map.iterator();
+        Assert.assertTrue(iter.hasNext());
+        Assert.assertEquals(nodes[0], iter.next());
+        Assert.assertTrue(iter.hasNext());
+        Assert.assertEquals(nodes[1], iter.next());
+        Assert.assertTrue(iter.hasNext());
+        Assert.assertEquals(nodes[7], iter.next());
+        Assert.assertTrue(iter.hasNext());
+        Assert.assertEquals(nodes[53], iter.next());
+        Assert.assertTrue(iter.hasNext());
+        Assert.assertEquals(nodes[99], iter.next());
+        Assert.assertFalse(iter.hasNext());
+    }
+
+    @Test
+    public void deleteNodeWhileIterating() {
+        map.mark(nodes[99]);
+        map.mark(nodes[0]);
+        map.mark(nodes[7]);
+        map.mark(nodes[1]);
+        map.mark(nodes[53]);
+
+        Iterator<Node> iter = map.iterator();
+        Assert.assertTrue(iter.hasNext());
+        Assert.assertEquals(nodes[0], iter.next());
+        Assert.assertTrue(iter.hasNext());
+        Assert.assertEquals(nodes[1], iter.next());
+        nodes[7].markDeleted();
+        nodes[53].markDeleted();
+        Assert.assertTrue(iter.hasNext());
+        Assert.assertEquals(nodes[99], iter.next());
+        Assert.assertFalse(iter.hasNext());
+    }
+
+    @Test
+    public void deleteAllNodesBeforeIterating() {
+        for (int i = 0; i < nodes.length; i++) {
+            map.mark(nodes[i]);
+            nodes[i].markDeleted();
+        }
+
+        Iterator<Node> iter = map.iterator();
+        Assert.assertFalse(iter.hasNext());
+    }
+
+    @Test
+    public void multipleHasNextInvocations() {
+        map.mark(nodes[7]);
+
+        Iterator<Node> iter = map.iterator();
+        Assert.assertTrue(iter.hasNext());
+        Assert.assertTrue(iter.hasNext());
+        Assert.assertEquals(nodes[7], iter.next());
+        Assert.assertFalse(iter.hasNext());
+    }
+
+    @Test(expected = NoSuchElementException.class)
+    public void noSuchElement() {
+        map.iterator().next();
+    }
+
+    @Test(expected = ConcurrentModificationException.class)
+    public void concurrentModification() {
+        map.mark(nodes[7]);
+
+        map.mark(nodes[99]);
+        map.mark(nodes[0]);
+        map.mark(nodes[7]);
+        map.mark(nodes[1]);
+        map.mark(nodes[53]);
+
+        Iterator<Node> iter = map.iterator();
+        Assert.assertTrue(iter.hasNext());
+        Assert.assertEquals(nodes[0], iter.next());
+        Assert.assertTrue(iter.hasNext());
+        Assert.assertEquals(nodes[1], iter.next());
+        Assert.assertTrue(iter.hasNext());
+        nodes[7].markDeleted();
+        iter.next();
+    }
+
+    @Test
+    public void nextWithoutHasNext() {
+        map.mark(nodes[99]);
+        map.mark(nodes[0]);
+        map.mark(nodes[7]);
+        map.mark(nodes[1]);
+        map.mark(nodes[53]);
+
+        Iterator<Node> iter = map.iterator();
+        Assert.assertEquals(nodes[0], iter.next());
+        Assert.assertEquals(nodes[1], iter.next());
+        Assert.assertEquals(nodes[7], iter.next());
+        Assert.assertEquals(nodes[53], iter.next());
+        Assert.assertEquals(nodes[99], iter.next());
+        Assert.assertFalse(iter.hasNext());
+    }
+
+    @Test
+    public void markWhileIterating() {
+        map.mark(nodes[0]);
+
+        Iterator<Node> iter = map.iterator();
+        Assert.assertTrue(iter.hasNext());
+        Assert.assertEquals(nodes[0], iter.next());
+        map.mark(nodes[7]);
+        Assert.assertTrue(iter.hasNext());
+        map.mark(nodes[1]);
+        Assert.assertEquals(nodes[7], iter.next());
+        map.mark(nodes[99]);
+        map.mark(nodes[53]);
+        Assert.assertTrue(iter.hasNext());
+        Assert.assertEquals(nodes[53], iter.next());
+        Assert.assertTrue(iter.hasNext());
+        Assert.assertEquals(nodes[99], iter.next());
+        Assert.assertFalse(iter.hasNext());
+    }
+}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.graph/src/org/graalvm/compiler/graph/NodeBitMap.java	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.graph/src/org/graalvm/compiler/graph/NodeBitMap.java	Tue Sep 26 13:09:56 2017 +0200
@@ -23,22 +23,23 @@
 package org.graalvm.compiler.graph;
 
 import java.util.Arrays;
+import java.util.ConcurrentModificationException;
 import java.util.Iterator;
+import java.util.NoSuchElementException;
 
 import org.graalvm.compiler.graph.iterators.NodeIterable;
 
-public final class NodeBitMap implements NodeIterable<Node> {
+public final class NodeBitMap extends NodeIdAccessor implements NodeIterable<Node> {
     private static final int SHIFT = 6;
 
     private long[] bits;
     private int nodeCount;
     private int counter;
-    private final Graph graph;
 
     public NodeBitMap(Graph graph) {
+        super(graph);
         this.nodeCount = graph.nodeIdCount();
         this.bits = new long[sizeForNodeCount(nodeCount)];
-        this.graph = graph;
     }
 
     private static int sizeForNodeCount(int nodeCount) {
@@ -50,9 +51,9 @@
     }
 
     private NodeBitMap(NodeBitMap other) {
+        super(other.graph);
         this.bits = other.bits.clone();
         this.nodeCount = other.nodeCount;
-        this.graph = other.graph;
     }
 
     public Graph graph() {
@@ -60,12 +61,12 @@
     }
 
     public boolean isNew(Node node) {
-        return node.id() >= nodeCount;
+        return getNodeId(node) >= nodeCount;
     }
 
     public boolean isMarked(Node node) {
         assert check(node, false);
-        return isMarked(node.id());
+        return isMarked(getNodeId(node));
     }
 
     public boolean checkAndMarkInc(Node node) {
@@ -84,33 +85,33 @@
 
     public boolean isMarkedAndGrow(Node node) {
         assert check(node, true);
-        int id = node.id();
+        int id = getNodeId(node);
         checkGrow(id);
         return isMarked(id);
     }
 
     public void mark(Node node) {
         assert check(node, false);
-        int id = node.id();
+        int id = getNodeId(node);
         bits[id >> SHIFT] |= (1L << id);
     }
 
     public void markAndGrow(Node node) {
         assert check(node, true);
-        int id = node.id();
+        int id = getNodeId(node);
         checkGrow(id);
         bits[id >> SHIFT] |= (1L << id);
     }
 
     public void clear(Node node) {
         assert check(node, false);
-        int id = node.id();
+        int id = getNodeId(node);
         bits[id >> SHIFT] &= ~(1L << id);
     }
 
     public void clearAndGrow(Node node) {
         assert check(node, true);
-        int id = node.id();
+        int id = getNodeId(node);
         checkGrow(id);
         bits[id >> SHIFT] &= ~(1L << id);
     }
@@ -181,15 +182,30 @@
         }
     }
 
-    protected int nextMarkedNodeId(int fromNodeId) {
+    protected Node nextMarkedNode(int fromNodeId) {
         assert fromNodeId >= 0;
         int wordIndex = fromNodeId >> SHIFT;
         int wordsInUse = bits.length;
         if (wordIndex < wordsInUse) {
-            long word = bits[wordIndex] & (0xFFFFFFFFFFFFFFFFL << fromNodeId);
+            long word = getPartOfWord(bits[wordIndex], fromNodeId);
             while (true) {
-                if (word != 0) {
-                    return wordIndex * Long.SIZE + Long.numberOfTrailingZeros(word);
+                while (word != 0) {
+                    int bitIndex = Long.numberOfTrailingZeros(word);
+                    int nodeId = wordIndex * Long.SIZE + bitIndex;
+                    Node result = graph.getNode(nodeId);
+                    if (result == null) {
+                        // node was deleted -> clear the bit and continue searching
+                        bits[wordIndex] = bits[wordIndex] & ~(1L << bitIndex);
+                        int nextNodeId = nodeId + 1;
+                        if ((nextNodeId & (Long.SIZE - 1)) == 0) {
+                            // we reached the end of this word
+                            break;
+                        } else {
+                            word = getPartOfWord(word, nextNodeId);
+                        }
+                    } else {
+                        return result;
+                    }
                 }
                 if (++wordIndex == wordsInUse) {
                     break;
@@ -197,30 +213,56 @@
                 word = bits[wordIndex];
             }
         }
-        return -2;
+        return null;
     }
 
+    private static long getPartOfWord(long word, int firstNodeIdToInclude) {
+        return word & (0xFFFFFFFFFFFFFFFFL << firstNodeIdToInclude);
+    }
+
+    /**
+     * This iterator only returns nodes that are marked in the {@link NodeBitMap} and are alive in
+     * the corresponding {@link Graph}.
+     */
     private class MarkedNodeIterator implements Iterator<Node> {
-        private int nextNodeId;
+        private int currentNodeId;
+        private Node currentNode;
 
         MarkedNodeIterator() {
-            nextNodeId = -1;
+            currentNodeId = -1;
             forward();
         }
 
         private void forward() {
-            nextNodeId = NodeBitMap.this.nextMarkedNodeId(nextNodeId + 1);
+            assert currentNode == null;
+            currentNode = NodeBitMap.this.nextMarkedNode(currentNodeId + 1);
+            if (currentNode != null) {
+                assert currentNode.isAlive();
+                currentNodeId = getNodeId(currentNode);
+            } else {
+                currentNodeId = -1;
+            }
         }
 
         @Override
         public boolean hasNext() {
-            return nextNodeId >= 0;
+            if (currentNode == null && currentNodeId >= 0) {
+                forward();
+            }
+            return currentNodeId >= 0;
         }
 
         @Override
         public Node next() {
-            Node result = graph.getNode(nextNodeId);
-            forward();
+            if (!hasNext()) {
+                throw new NoSuchElementException();
+            }
+            if (!currentNode.isAlive()) {
+                throw new ConcurrentModificationException("NodeBitMap was modified between the calls to hasNext() and next()");
+            }
+
+            Node result = currentNode;
+            currentNode = null;
             return result;
         }
 
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.aarch64/src/org/graalvm/compiler/hotspot/aarch64/AArch64RawNativeCallNode.java	Fri Sep 22 13:51:12 2017 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,117 +0,0 @@
-/*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package org.graalvm.compiler.hotspot.aarch64;
-
-import static org.graalvm.compiler.nodeinfo.NodeCycles.CYCLES_UNKNOWN;
-import static org.graalvm.compiler.nodeinfo.NodeSize.SIZE_UNKNOWN;
-
-import org.graalvm.compiler.core.aarch64.AArch64NodeLIRBuilder;
-import org.graalvm.compiler.core.common.type.RawPointerStamp;
-import org.graalvm.compiler.core.common.type.Stamp;
-import org.graalvm.compiler.core.common.type.StampFactory;
-import org.graalvm.compiler.graph.NodeClass;
-import org.graalvm.compiler.graph.NodeInputList;
-import org.graalvm.compiler.nodeinfo.NodeInfo;
-import org.graalvm.compiler.nodes.FixedWithNextNode;
-import org.graalvm.compiler.nodes.ValueNode;
-import org.graalvm.compiler.nodes.spi.LIRLowerable;
-import org.graalvm.compiler.nodes.spi.NodeLIRBuilderTool;
-
-import jdk.vm.ci.code.CallingConvention;
-import jdk.vm.ci.hotspot.HotSpotCallingConventionType;
-import jdk.vm.ci.meta.JavaConstant;
-import jdk.vm.ci.meta.JavaKind;
-import jdk.vm.ci.meta.JavaType;
-import jdk.vm.ci.meta.MetaAccessProvider;
-import jdk.vm.ci.meta.ResolvedJavaType;
-import jdk.vm.ci.meta.Value;
-
-@NodeInfo(cycles = CYCLES_UNKNOWN, cyclesRationale = "Native call is a block hole", size = SIZE_UNKNOWN)
-public final class AArch64RawNativeCallNode extends FixedWithNextNode implements LIRLowerable {
-    public static final NodeClass<AArch64RawNativeCallNode> TYPE = NodeClass.create(AArch64RawNativeCallNode.class);
-
-    protected final JavaConstant functionPointer;
-    @Input NodeInputList<ValueNode> args;
-
-    public AArch64RawNativeCallNode(JavaKind returnType, JavaConstant functionPointer, ValueNode[] args) {
-        super(TYPE, StampFactory.forKind(returnType));
-        this.functionPointer = functionPointer;
-        this.args = new NodeInputList<>(this, args);
-    }
-
-    private static class PointerType implements JavaType {
-
-        @Override
-        public String getName() {
-            return "void*";
-        }
-
-        @Override
-        public JavaType getComponentType() {
-            return null;
-        }
-
-        @Override
-        public JavaType getArrayClass() {
-            return null;
-        }
-
-        @Override
-        public JavaKind getJavaKind() {
-            // native pointers and java objects use the same registers in the calling convention
-            return JavaKind.Object;
-        }
-
-        @Override
-        public ResolvedJavaType resolve(ResolvedJavaType accessingClass) {
-            return null;
-        }
-    }
-
-    private static JavaType toJavaType(Stamp stamp, MetaAccessProvider metaAccess) {
-        if (stamp instanceof RawPointerStamp) {
-            return new PointerType();
-        } else {
-            return stamp.javaType(metaAccess);
-        }
-    }
-
-    @Override
-    public void generate(NodeLIRBuilderTool generator) {
-        AArch64NodeLIRBuilder gen = (AArch64NodeLIRBuilder) generator;
-        Value[] parameter = new Value[args.count()];
-        JavaType[] parameterTypes = new JavaType[args.count()];
-        for (int i = 0; i < args.count(); i++) {
-            parameter[i] = generator.operand(args.get(i));
-            parameterTypes[i] = toJavaType(args.get(i).stamp(), gen.getLIRGeneratorTool().getMetaAccess());
-        }
-        JavaType returnType = toJavaType(stamp(), gen.getLIRGeneratorTool().getMetaAccess());
-        CallingConvention cc = generator.getLIRGeneratorTool().getCodeCache().getRegisterConfig().getCallingConvention(HotSpotCallingConventionType.NativeCall, returnType, parameterTypes,
-                        generator.getLIRGeneratorTool());
-        gen.getLIRGeneratorTool().emitCCall(functionPointer.asLong(), cc, parameter);
-        if (this.getStackKind() != JavaKind.Void) {
-            generator.setResult(this, gen.getLIRGeneratorTool().emitMove(cc.getReturn()));
-        }
-    }
-
-}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.amd64/src/org/graalvm/compiler/hotspot/amd64/AMD64RawNativeCallNode.java	Fri Sep 22 13:51:12 2017 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,130 +0,0 @@
-/*
- * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-package org.graalvm.compiler.hotspot.amd64;
-
-import static org.graalvm.compiler.nodeinfo.NodeCycles.CYCLES_UNKNOWN;
-
-import org.graalvm.compiler.core.amd64.AMD64NodeLIRBuilder;
-import org.graalvm.compiler.core.common.type.RawPointerStamp;
-import org.graalvm.compiler.core.common.type.Stamp;
-import org.graalvm.compiler.core.common.type.StampFactory;
-import org.graalvm.compiler.graph.NodeClass;
-import org.graalvm.compiler.graph.NodeInputList;
-import org.graalvm.compiler.nodeinfo.NodeInfo;
-import org.graalvm.compiler.nodeinfo.NodeSize;
-import org.graalvm.compiler.nodes.FixedWithNextNode;
-import org.graalvm.compiler.nodes.ValueNode;
-import org.graalvm.compiler.nodes.spi.LIRLowerable;
-import org.graalvm.compiler.nodes.spi.NodeLIRBuilderTool;
-
-import jdk.vm.ci.code.CallingConvention;
-import jdk.vm.ci.hotspot.HotSpotCallingConventionType;
-import jdk.vm.ci.meta.JavaConstant;
-import jdk.vm.ci.meta.JavaKind;
-import jdk.vm.ci.meta.JavaType;
-import jdk.vm.ci.meta.MetaAccessProvider;
-import jdk.vm.ci.meta.ResolvedJavaType;
-import jdk.vm.ci.meta.Value;
-
-@NodeInfo(cycles = CYCLES_UNKNOWN, cyclesRationale = "Native call is a block hole", size = NodeSize.SIZE_UNKNOWN)
-public final class AMD64RawNativeCallNode extends FixedWithNextNode implements LIRLowerable {
-    public static final NodeClass<AMD64RawNativeCallNode> TYPE = NodeClass.create(AMD64RawNativeCallNode.class);
-
-    protected final JavaConstant functionPointer;
-    @Input NodeInputList<ValueNode> args;
-
-    public AMD64RawNativeCallNode(JavaKind returnType, JavaConstant functionPointer, ValueNode[] args) {
-        super(TYPE, StampFactory.forKind(returnType));
-        this.functionPointer = functionPointer;
-        this.args = new NodeInputList<>(this, args);
-    }
-
-    private static class PointerType implements JavaType {
-
-        @Override
-        public String getName() {
-            return "void*";
-        }
-
-        @Override
-        public JavaType getComponentType() {
-            return null;
-        }
-
-        @Override
-        public JavaType getArrayClass() {
-            return null;
-        }
-
-        @Override
-        public JavaKind getJavaKind() {
-            // native pointers and java objects use the same registers in the calling convention
-            return JavaKind.Object;
-        }
-
-        @Override
-        public ResolvedJavaType resolve(ResolvedJavaType accessingClass) {
-            return null;
-        }
-    }
-
-    private static JavaType toJavaType(Stamp stamp, MetaAccessProvider metaAccess) {
-        if (stamp instanceof RawPointerStamp) {
-            return new PointerType();
-        } else {
-            return stamp.javaType(metaAccess);
-        }
-    }
-
-    @Override
-    public void generate(NodeLIRBuilderTool generator) {
-        AMD64NodeLIRBuilder gen = (AMD64NodeLIRBuilder) generator;
-        Value[] parameter = new Value[args.count()];
-        JavaType[] parameterTypes = new JavaType[args.count()];
-        for (int i = 0; i < args.count(); i++) {
-            parameter[i] = generator.operand(args.get(i));
-            parameterTypes[i] = toJavaType(args.get(i).stamp(), gen.getLIRGeneratorTool().getMetaAccess());
-        }
-        JavaType returnType = toJavaType(stamp(), gen.getLIRGeneratorTool().getMetaAccess());
-        CallingConvention cc = generator.getLIRGeneratorTool().getCodeCache().getRegisterConfig().getCallingConvention(HotSpotCallingConventionType.NativeCall, returnType, parameterTypes,
-                        generator.getLIRGeneratorTool());
-        gen.getLIRGeneratorTool().emitCCall(functionPointer.asLong(), cc, parameter, countFloatingTypeArguments(args));
-        if (this.getStackKind() != JavaKind.Void) {
-            generator.setResult(this, gen.getLIRGeneratorTool().emitMove(cc.getReturn()));
-        }
-    }
-
-    private static int countFloatingTypeArguments(NodeInputList<ValueNode> args) {
-        int count = 0;
-        for (ValueNode n : args) {
-            if (n.getStackKind() == JavaKind.Double || n.getStackKind() == JavaKind.Float) {
-                count++;
-            }
-        }
-        if (count > 8) {
-            return 8;
-        }
-        return count;
-    }
-
-}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.test/src/org/graalvm/compiler/hotspot/test/CheckGraalIntrinsics.java	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.test/src/org/graalvm/compiler/hotspot/test/CheckGraalIntrinsics.java	Tue Sep 26 13:09:56 2017 +0200
@@ -207,7 +207,6 @@
                         "oracle/jrockit/jfr/Timing.counterTime()J",
                         "oracle/jrockit/jfr/VMJFR.classID0(Ljava/lang/Class;)J",
                         "oracle/jrockit/jfr/VMJFR.threadID()I",
-                        "sun/misc/Unsafe.copyMemory(Ljava/lang/Object;JLjava/lang/Object;JJ)V",
                         "sun/nio/cs/ISO_8859_1$Encoder.encodeISOArray([CI[BII)I",
                         "sun/security/provider/DigestBase.implCompressMultiBlock([BII)I",
                         "sun/security/provider/SHA.implCompress([BI)V",
@@ -273,7 +272,6 @@
                         "jdk/internal/misc/Unsafe.compareAndExchangeShortRelease(Ljava/lang/Object;JSS)S",
                         "jdk/internal/misc/Unsafe.compareAndSetByte(Ljava/lang/Object;JBB)Z",
                         "jdk/internal/misc/Unsafe.compareAndSetShort(Ljava/lang/Object;JSS)Z",
-                        "jdk/internal/misc/Unsafe.copyMemory0(Ljava/lang/Object;JLjava/lang/Object;JJ)V",
                         "jdk/internal/misc/Unsafe.getAndAddByte(Ljava/lang/Object;JB)B",
                         "jdk/internal/misc/Unsafe.getAndAddShort(Ljava/lang/Object;JS)S",
                         "jdk/internal/misc/Unsafe.getAndSetByte(Ljava/lang/Object;JB)B",
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot.test/src/org/graalvm/compiler/hotspot/test/HotSpotUnsafeSubstitutionTest.java	Tue Sep 26 13:09:56 2017 +0200
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.hotspot.test;
+
+import org.graalvm.compiler.replacements.test.MethodSubstitutionTest;
+import org.junit.Test;
+
+import jdk.vm.ci.code.InstalledCode;
+import jdk.vm.ci.meta.ResolvedJavaMethod;
+import sun.misc.Unsafe;
+
+/**
+ * Tests the VM independent intrinsification of {@link Unsafe} methods.
+ */
+public class HotSpotUnsafeSubstitutionTest extends MethodSubstitutionTest {
+
+    public void testSubstitution(String testMethodName, Class<?> holder, String methodName, Class<?>[] parameterTypes, Object receiver, Object[] args1, Object[] args2) {
+        ResolvedJavaMethod testMethod = getResolvedJavaMethod(testMethodName);
+        ResolvedJavaMethod originalMethod = getResolvedJavaMethod(holder, methodName, parameterTypes);
+
+        // Force compilation
+        InstalledCode code = getCode(testMethod);
+        assert code != null;
+
+        // Verify that the original method and the substitution produce the same value
+        Object expected = invokeSafe(originalMethod, receiver, args1);
+        Object actual = invokeSafe(testMethod, null, args2);
+        assertDeepEquals(expected, actual);
+
+        // Verify that the generated code and the original produce the same value
+        expected = invokeSafe(originalMethod, receiver, args1);
+        actual = executeVarargsSafe(code, args2);
+        assertDeepEquals(expected, actual);
+
+    }
+
+    @Test
+    public void testUnsafeSubstitutions() throws Exception {
+        testGraph("unsafeCopyMemory");
+    }
+
+    public void unsafeCopyMemory(Object srcBase, long srcOffset, Object dstBase, long dstOffset, long bytes) {
+        UNSAFE.copyMemory(srcBase, srcOffset, dstBase, dstOffset, bytes);
+    }
+
+    public byte[] testCopyMemorySnippet(long src, int bytes) {
+        byte[] result = new byte[bytes];
+        UNSAFE.copyMemory(null, src, result, Unsafe.ARRAY_BYTE_BASE_OFFSET, bytes);
+        return result;
+    }
+
+    @Test
+    public void testCopyMemory() {
+        int size = 128;
+        long src = UNSAFE.allocateMemory(size);
+        for (int i = 0; i < size; i++) {
+            UNSAFE.putByte(null, src + i, (byte) i);
+        }
+        test("testCopyMemorySnippet", src, size);
+    }
+}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotBackend.java	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotBackend.java	Tue Sep 26 13:09:56 2017 +0200
@@ -64,9 +64,9 @@
 import org.graalvm.compiler.options.OptionValues;
 import org.graalvm.compiler.phases.tiers.SuitesProvider;
 import org.graalvm.compiler.word.Word;
-import org.graalvm.util.Equivalence;
 import org.graalvm.util.EconomicMap;
 import org.graalvm.util.EconomicSet;
+import org.graalvm.util.Equivalence;
 import org.graalvm.util.MapCursor;
 import org.graalvm.word.Pointer;
 
@@ -258,6 +258,18 @@
     private static native void sha5ImplCompressStub(@ConstantNodeParameter ForeignCallDescriptor descriptor, Word bufAddr, Object state);
 
     /**
+     * @see org.graalvm.compiler.hotspot.meta.HotSpotUnsafeSubstitutions#copyMemory
+     */
+    public static final ForeignCallDescriptor UNSAFE_ARRAYCOPY = new ForeignCallDescriptor("unsafe_arraycopy", void.class, Word.class, Word.class, Word.class);
+
+    public static void unsafeArraycopy(Word srcAddr, Word dstAddr, Word size) {
+        unsafeArraycopyStub(HotSpotBackend.UNSAFE_ARRAYCOPY, srcAddr, dstAddr, size);
+    }
+
+    @NodeIntrinsic(ForeignCallNode.class)
+    private static native void unsafeArraycopyStub(@ConstantNodeParameter ForeignCallDescriptor descriptor, Word srcAddr, Word dstAddr, Word size);
+
+    /**
      * @see VMErrorNode
      */
     public static final ForeignCallDescriptor VM_ERROR = new ForeignCallDescriptor("vm_error", void.class, Object.class, Object.class, long.class);
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotGraalCompiler.java	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/HotSpotGraalCompiler.java	Tue Sep 26 13:09:56 2017 +0200
@@ -198,7 +198,7 @@
 
     public CompilationResult compile(ResolvedJavaMethod method, int entryBCI, boolean useProfilingInfo, CompilationIdentifier compilationId, OptionValues options, DebugContext debug) {
         StructuredGraph graph = createGraph(method, entryBCI, useProfilingInfo, compilationId, options, debug);
-        CompilationResult result = new CompilationResult();
+        CompilationResult result = new CompilationResult(compilationId);
         return compileHelper(CompilationResultBuilderFactory.Default, result, graph, method, entryBCI, useProfilingInfo, options);
     }
 
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/meta/HotSpotGraphBuilderPlugins.java	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/meta/HotSpotGraphBuilderPlugins.java	Tue Sep 26 13:09:56 2017 +0200
@@ -111,6 +111,7 @@
 import jdk.vm.ci.meta.JavaKind;
 import jdk.vm.ci.meta.MetaAccessProvider;
 import jdk.vm.ci.meta.ResolvedJavaMethod;
+import sun.misc.Unsafe;
 
 /**
  * Defines the {@link Plugins} used when running on HotSpot.
@@ -202,6 +203,7 @@
                 registerCRC32Plugins(invocationPlugins, config, replacementBytecodeProvider);
                 registerBigIntegerPlugins(invocationPlugins, config, replacementBytecodeProvider);
                 registerSHAPlugins(invocationPlugins, config, replacementBytecodeProvider);
+                registerUnsafePlugins(invocationPlugins, replacementBytecodeProvider);
                 StandardGraphBuilderPlugins.registerInvocationPlugins(metaAccess, snippetReflection, invocationPlugins, replacementBytecodeProvider, true);
 
                 for (NodeIntrinsicPluginFactory factory : GraalServices.load(NodeIntrinsicPluginFactory.class)) {
@@ -313,6 +315,17 @@
         r.registerMethodSubstitution(ReflectionSubstitutions.class, "getClassAccessFlags", Class.class);
     }
 
+    private static void registerUnsafePlugins(InvocationPlugins plugins, BytecodeProvider replacementBytecodeProvider) {
+        Registration r;
+        if (Java8OrEarlier) {
+            r = new Registration(plugins, Unsafe.class, replacementBytecodeProvider);
+        } else {
+            r = new Registration(plugins, "jdk.internal.misc.Unsafe", replacementBytecodeProvider);
+        }
+        r.registerMethodSubstitution(HotSpotUnsafeSubstitutions.class, HotSpotUnsafeSubstitutions.copyMemoryName, "copyMemory", Receiver.class, Object.class, long.class, Object.class, long.class,
+                        long.class);
+    }
+
     private static final LocationIdentity INSTANCE_KLASS_CONSTANTS = NamedLocationIdentity.immutable("InstanceKlass::_constants");
     private static final LocationIdentity CONSTANT_POOL_LENGTH = NamedLocationIdentity.immutable("ConstantPool::_length");
 
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/meta/HotSpotHostForeignCallsProvider.java	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/meta/HotSpotHostForeignCallsProvider.java	Tue Sep 26 13:09:56 2017 +0200
@@ -51,6 +51,7 @@
 import static org.graalvm.compiler.hotspot.HotSpotBackend.SHA5_IMPL_COMPRESS;
 import static org.graalvm.compiler.hotspot.HotSpotBackend.SHA_IMPL_COMPRESS;
 import static org.graalvm.compiler.hotspot.HotSpotBackend.SQUARE_TO_LEN;
+import static org.graalvm.compiler.hotspot.HotSpotBackend.UNSAFE_ARRAYCOPY;
 import static org.graalvm.compiler.hotspot.HotSpotBackend.UNWIND_EXCEPTION_TO_CALLER;
 import static org.graalvm.compiler.hotspot.HotSpotBackend.VM_ERROR;
 import static org.graalvm.compiler.hotspot.HotSpotBackend.WRONG_METHOD_HANDLER;
@@ -330,6 +331,8 @@
         registerCheckcastArraycopyDescriptor(true, c.checkcastArraycopyUninit);
         registerCheckcastArraycopyDescriptor(false, c.checkcastArraycopy);
 
+        registerForeignCall(UNSAFE_ARRAYCOPY, c.unsafeArraycopy, NativeCall, DESTROYS_REGISTERS, LEAF_NOFP, NOT_REEXECUTABLE, NamedLocationIdentity.any());
+
         if (c.useMultiplyToLenIntrinsic()) {
             registerForeignCall(MULTIPLY_TO_LEN, c.multiplyToLen, NativeCall, DESTROYS_REGISTERS, LEAF_NOFP, NOT_REEXECUTABLE, NamedLocationIdentity.getArrayLocation(JavaKind.Int));
         }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/meta/HotSpotUnsafeSubstitutions.java	Tue Sep 26 13:09:56 2017 +0200
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+package org.graalvm.compiler.hotspot.meta;
+
+import static org.graalvm.compiler.serviceprovider.JDK9Method.Java8OrEarlier;
+
+import org.graalvm.compiler.api.replacements.ClassSubstitution;
+import org.graalvm.compiler.api.replacements.MethodSubstitution;
+import org.graalvm.compiler.hotspot.HotSpotBackend;
+import org.graalvm.compiler.hotspot.nodes.ComputeObjectAddressNode;
+import org.graalvm.compiler.word.Word;
+import org.graalvm.word.WordFactory;
+
+@ClassSubstitution(className = {"jdk.internal.misc.Unsafe", "sun.misc.Unsafe"})
+public class HotSpotUnsafeSubstitutions {
+
+    public static final String copyMemoryName = Java8OrEarlier ? "copyMemory" : "copyMemory0";
+
+    @SuppressWarnings("unused")
+    @MethodSubstitution(isStatic = false)
+    static void copyMemory(Object receiver, Object srcBase, long srcOffset, Object destBase, long destOffset, long bytes) {
+        Word srcAddr = WordFactory.unsigned(ComputeObjectAddressNode.get(srcBase, srcOffset));
+        Word dstAddr = WordFactory.unsigned(ComputeObjectAddressNode.get(destBase, destOffset));
+        Word size = Word.signed(bytes);
+        HotSpotBackend.unsafeArraycopy(srcAddr, dstAddr, size);
+    }
+}
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/stubs/Stub.java	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.hotspot/src/org/graalvm/compiler/hotspot/stubs/Stub.java	Tue Sep 26 13:09:56 2017 +0200
@@ -216,8 +216,9 @@
 
     @SuppressWarnings("try")
     private CompilationResult buildCompilationResult(DebugContext debug, final Backend backend) {
-        CompilationResult compResult = new CompilationResult(toString(), GeneratePIC.getValue(options));
-        final StructuredGraph graph = getGraph(debug, getStubCompilationId());
+        CompilationIdentifier compilationId = getStubCompilationId();
+        final StructuredGraph graph = getGraph(debug, compilationId);
+        CompilationResult compResult = new CompilationResult(compilationId, toString(), GeneratePIC.getValue(options));
 
         // Stubs cannot be recompiled so they cannot be compiled with assumptions
         assert graph.getAssumptions() == null;
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/alloc/trace/lsra/TraceInterval.java	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/alloc/trace/lsra/TraceInterval.java	Tue Sep 26 13:09:56 2017 +0200
@@ -365,10 +365,6 @@
         return intTo;
     }
 
-    int numUsePositions() {
-        return numUsePos();
-    }
-
     public void setLocationHint(IntervalHint interval) {
         locationHint = interval;
     }
@@ -452,6 +448,10 @@
         return spillSt == SpillState.StartInMemory || (spillSt == SpillState.SpillStore && opId > spillDefinitionPos() && !canMaterialize());
     }
 
+    public boolean preSpilledAllocated() {
+        return spillState() == SpillState.StartInMemory && numUsePos() == 0 && !hasHint();
+    }
+
     // test intersection
     boolean intersects(TraceInterval i) {
         return intersectsAt(i) != -1;
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/alloc/trace/lsra/TraceLinearScanLifetimeAnalysisPhase.java	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/alloc/trace/lsra/TraceLinearScanLifetimeAnalysisPhase.java	Tue Sep 26 13:09:56 2017 +0200
@@ -541,16 +541,23 @@
                 assert instructionIndex == 0 : "not at start?" + instructionIndex;
                 handleTraceBegin(blocks[0]);
 
-                // fix spill state for phi/incoming intervals
-                for (TraceInterval interval : allocator.intervals()) {
-                    if (interval != null && interval.spillState().equals(SpillState.NoDefinitionFound) && interval.spillDefinitionPos() != -1) {
-                        // there was a definition in a phi/incoming
-                        interval.setSpillState(SpillState.NoSpillStore);
-                    }
-                }
                 if (TraceRAuseInterTraceHints.getValue(allocator.getLIR().getOptions())) {
                     addInterTraceHints();
                 }
+                // fix spill state for phi/incoming intervals
+                for (TraceInterval interval : allocator.intervals()) {
+                    if (interval != null) {
+                        if (interval.spillState().equals(SpillState.NoDefinitionFound) && interval.spillDefinitionPos() != -1) {
+                            // there was a definition in a phi/incoming
+                            interval.setSpillState(SpillState.NoSpillStore);
+                        }
+                        if (interval.preSpilledAllocated()) {
+                            // pre-spill unused, start in memory intervals
+                            allocator.assignSpillSlot(interval);
+                        }
+                    }
+                }
+
                 for (FixedInterval interval1 : allocator.fixedIntervals()) {
                     if (interval1 != null) {
                         /* We use [-1, 0] to avoid intersection with incoming values. */
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/alloc/trace/lsra/TraceLinearScanPhase.java	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.lir/src/org/graalvm/compiler/lir/alloc/trace/lsra/TraceLinearScanPhase.java	Tue Sep 26 13:09:56 2017 +0200
@@ -153,7 +153,7 @@
         @Override
         public boolean apply(TraceInterval i) {
             // all TraceIntervals are variable intervals
-            return true;
+            return !i.preSpilledAllocated();
         }
     };
     private static final Comparator<TraceInterval> SORT_BY_FROM_COMP = new Comparator<TraceInterval>() {
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.microbenchmarks/src/org/graalvm/compiler/microbenchmarks/lir/GraalCompilerState.java	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.microbenchmarks/src/org/graalvm/compiler/microbenchmarks/lir/GraalCompilerState.java	Tue Sep 26 13:09:56 2017 +0200
@@ -319,7 +319,7 @@
         assert !graph.isFrozen();
         ResolvedJavaMethod installedCodeOwner = graph.method();
         request = new Request<>(graph, installedCodeOwner, getProviders(), getBackend(), getDefaultGraphBuilderSuite(), OptimisticOptimizations.ALL,
-                        graph.getProfilingInfo(), createSuites(getOptions()), createLIRSuites(getOptions()), new CompilationResult(), CompilationResultBuilderFactory.Default);
+                        graph.getProfilingInfo(), createSuites(getOptions()), createLIRSuites(getOptions()), new CompilationResult(graph.compilationId()), CompilationResultBuilderFactory.Default);
     }
 
     /**
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/GraphEncoder.java	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.nodes/src/org/graalvm/compiler/nodes/GraphEncoder.java	Tue Sep 26 13:09:56 2017 +0200
@@ -209,7 +209,6 @@
         int nodeCount = nodeOrder.nextOrderId;
         assert nodeOrder.orderIds.get(graph.start()) == START_NODE_ORDER_ID;
         assert nodeOrder.orderIds.get(graph.start().next()) == FIRST_NODE_ORDER_ID;
-        assert nodeCount == graph.getNodeCount() + 1;
 
         long[] nodeStartOffsets = new long[nodeCount];
         UnmodifiableMapCursor<Node, Integer> cursor = nodeOrder.orderIds.getEntries();
@@ -218,6 +217,7 @@
             Integer orderId = cursor.getValue();
 
             assert !(node instanceof AbstractBeginNode) || nodeOrder.orderIds.get(((AbstractBeginNode) node).next()) == orderId + BEGIN_NEXT_ORDER_ID_OFFSET;
+            assert nodeStartOffsets[orderId] == 0;
             nodeStartOffsets[orderId] = writer.getBytesWritten();
 
             /* Write out the type, properties, and edges. */
@@ -284,7 +284,6 @@
         writer.putUV(nodeOrder.maxFixedNodeOrderId);
         writer.putUV(nodeCount);
         for (int i = 0; i < nodeCount; i++) {
-            assert i == NULL_ORDER_ID || i == START_NODE_ORDER_ID || nodeStartOffsets[i] > 0;
             writer.putUV(metadataStart - nodeStartOffsets[i]);
         }
 
@@ -344,8 +343,25 @@
             } while (current != null);
 
             maxFixedNodeOrderId = nextOrderId - 1;
+
+            /*
+             * Emit all parameters consecutively at a known location (after all fixed nodes). This
+             * allows substituting parameters when inlining during decoding by pre-initializing the
+             * decoded node list.
+             *
+             * Note that not all parameters must be present (unused parameters are deleted after
+             * parsing). This leads to holes in the orderId, i.e., unused orderIds.
+             */
+            int parameterCount = graph.method().getSignature().getParameterCount(!graph.method().isStatic());
+            for (ParameterNode node : graph.getNodes(ParameterNode.TYPE)) {
+                assert orderIds.get(node) == null : "Parameter node must not be ordered yet";
+                assert node.index() < parameterCount : "Parameter index out of range";
+                orderIds.set(node, nextOrderId + node.index());
+            }
+            nextOrderId += parameterCount;
+
             for (Node node : graph.getNodes()) {
-                assert (node instanceof FixedNode) == (orderIds.get(node) != null) : "all fixed nodes must be ordered: " + node;
+                assert (node instanceof FixedNode || node instanceof ParameterNode) == (orderIds.get(node) != null) : "all fixed nodes and ParameterNodes must be ordered: " + node;
                 add(node);
             }
         }
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.printer/src/org/graalvm/compiler/printer/CFGPrinterObserver.java	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.printer/src/org/graalvm/compiler/printer/CFGPrinterObserver.java	Tue Sep 26 13:09:56 2017 +0200
@@ -37,6 +37,7 @@
 import org.graalvm.compiler.bytecode.BytecodeDisassembler;
 import org.graalvm.compiler.code.CompilationResult;
 import org.graalvm.compiler.code.DisassemblerProvider;
+import org.graalvm.compiler.core.common.CompilationIdentifier;
 import org.graalvm.compiler.core.common.alloc.Trace;
 import org.graalvm.compiler.core.common.alloc.TraceBuilderResult;
 import org.graalvm.compiler.core.common.cfg.AbstractBlockBase;
@@ -72,6 +73,7 @@
     private CFGPrinter cfgPrinter;
     private File cfgFile;
     private JavaMethod curMethod;
+    private CompilationIdentifier curCompilation;
     private List<String> curDecorators = Collections.emptyList();
 
     @Override
@@ -92,6 +94,7 @@
      */
     private boolean checkMethodScope(DebugContext debug) {
         JavaMethod method = null;
+        CompilationIdentifier compilation = null;
         ArrayList<String> decorators = new ArrayList<>();
         for (Object o : debug.context()) {
             if (o instanceof JavaMethod) {
@@ -102,22 +105,33 @@
                 if (graph.method() != null) {
                     method = graph.method();
                     decorators.clear();
+                    compilation = graph.compilationId();
                 }
             } else if (o instanceof DebugDumpScope) {
                 DebugDumpScope debugDumpScope = (DebugDumpScope) o;
                 if (debugDumpScope.decorator) {
                     decorators.add(debugDumpScope.name);
                 }
+            } else if (o instanceof CompilationResult) {
+                CompilationResult compilationResult = (CompilationResult) o;
+                compilation = compilationResult.getCompilationId();
             }
         }
 
-        if (method == null) {
+        if (method == null && compilation == null) {
             return false;
         }
 
-        if (!method.equals(curMethod) || !curDecorators.equals(decorators)) {
-            cfgPrinter.printCompilation(method);
+        if (compilation != null) {
+            if (!compilation.equals(curCompilation) || !curDecorators.equals(decorators)) {
+                cfgPrinter.printCompilation(compilation);
+            }
+        } else {
+            if (!method.equals(curMethod) || !curDecorators.equals(decorators)) {
+                cfgPrinter.printCompilation(method);
+            }
         }
+        curCompilation = compilation;
         curMethod = method;
         curDecorators = decorators;
         return true;
@@ -277,6 +291,7 @@
             cfgPrinter = null;
             curDecorators = Collections.emptyList();
             curMethod = null;
+            curCompilation = null;
         }
     }
 
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.printer/src/org/graalvm/compiler/printer/CompilationPrinter.java	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.printer/src/org/graalvm/compiler/printer/CompilationPrinter.java	Tue Sep 26 13:09:56 2017 +0200
@@ -31,6 +31,7 @@
 import java.util.List;
 import java.util.Map;
 
+import org.graalvm.compiler.core.common.CompilationIdentifier;
 import org.graalvm.compiler.debug.LogStream;
 import org.graalvm.compiler.debug.TTY;
 import org.graalvm.compiler.lir.util.IndexedValueMap;
@@ -115,12 +116,25 @@
     /**
      * Prints a compilation timestamp for a given method.
      *
-     * @param method the method for which a timestamp will be printed
+     * @param javaMethod the method for which a timestamp will be printed
      */
-    public void printCompilation(JavaMethod method) {
+    public void printCompilation(JavaMethod javaMethod) {
+        printCompilation(javaMethod.format("%H::%n"), javaMethod.format("%f %r %H.%n(%p)"));
+    }
+
+    /**
+     * Prints a compilation id.
+     *
+     * @param compilationId the compilation method for which an id will be printed
+     */
+    public void printCompilation(CompilationIdentifier compilationId) {
+        printCompilation(compilationId.toString(CompilationIdentifier.Verbosity.DETAILED), compilationId.toString(CompilationIdentifier.Verbosity.DETAILED));
+    }
+
+    private void printCompilation(final String name, String method) {
         begin("compilation");
-        out.print("name \" ").print(method.format("%H::%n")).println('"');
-        out.print("method \"").print(method.format("%f %r %H.%n(%p)")).println('"');
+        out.print("name \" ").print(name).println('"');
+        out.print("method \"").print(method).println('"');
         out.print("date ").println(System.currentTimeMillis());
         end("compilation");
     }
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.verifier/src/org/graalvm/compiler/replacements/verifier/ClassSubstitutionVerifier.java	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements.verifier/src/org/graalvm/compiler/replacements/verifier/ClassSubstitutionVerifier.java	Tue Sep 26 13:09:56 2017 +0200
@@ -95,10 +95,13 @@
             TypeElement typeElement = null;
             for (String className : classNames) {
                 typeElement = env.getElementUtils().getTypeElement(className);
-                if (typeElement == null && !optional) {
-                    env.getMessager().printMessage(Kind.ERROR, String.format("The class '%s' was not found on the classpath.", stringValue), sourceElement, classSubstition, stringValue);
+                if (typeElement != null) {
+                    break;
                 }
             }
+            if (typeElement == null && !optional) {
+                env.getMessager().printMessage(Kind.ERROR, String.format("The class '%s' was not found on the classpath.", stringValue), sourceElement, classSubstition, stringValue);
+            }
 
             return typeElement;
         }
--- a/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/PEGraphDecoder.java	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/jdk.internal.vm.compiler/share/classes/org.graalvm.compiler.replacements/src/org/graalvm/compiler/replacements/PEGraphDecoder.java	Tue Sep 26 13:09:56 2017 +0200
@@ -707,10 +707,22 @@
             }
         }
 
+        LoopScope inlineLoopScope = createInitialLoopScope(inlineScope, predecessor);
+
+        /*
+         * The GraphEncoder assigns parameters a nodeId immediately after the fixed nodes.
+         * Initializing createdNodes here avoid decoding and immediately replacing the
+         * ParameterNodes.
+         */
+        int firstArgumentNodeId = inlineScope.maxFixedNodeOrderId + 1;
+        for (int i = 0; i < arguments.length; i++) {
+            inlineLoopScope.createdNodes[firstArgumentNodeId + i] = arguments[i];
+        }
+
         /*
          * Do the actual inlining by returning the initial loop scope for the inlined method scope.
          */
-        return createInitialLoopScope(inlineScope, predecessor);
+        return inlineLoopScope;
     }
 
     @Override
@@ -1028,9 +1040,7 @@
         if (node instanceof ParameterNode) {
             ParameterNode param = (ParameterNode) node;
             if (methodScope.isInlinedMethod()) {
-                Node result = methodScope.arguments[param.index()];
-                assert result != null;
-                return result;
+                throw GraalError.shouldNotReachHere("Parameter nodes are already registered when the inlined scope is created");
 
             } else if (parameterPlugin != null) {
                 assert !methodScope.isInlinedMethod();
--- a/src/os/aix/vm/os_aix.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/os/aix/vm/os_aix.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -1185,62 +1185,6 @@
 // directory not the java application's temp directory, ala java.io.tmpdir.
 const char* os::get_temp_directory() { return "/tmp"; }
 
-static bool file_exists(const char* filename) {
-  struct stat statbuf;
-  if (filename == NULL || strlen(filename) == 0) {
-    return false;
-  }
-  return os::stat(filename, &statbuf) == 0;
-}
-
-bool os::dll_build_name(char* buffer, size_t buflen,
-                        const char* pname, const char* fname) {
-  bool retval = false;
-  // Copied from libhpi
-  const size_t pnamelen = pname ? strlen(pname) : 0;
-
-  // Return error on buffer overflow.
-  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
-    *buffer = '\0';
-    return retval;
-  }
-
-  if (pnamelen == 0) {
-    snprintf(buffer, buflen, "lib%s.so", fname);
-    retval = true;
-  } else if (strchr(pname, *os::path_separator()) != NULL) {
-    int n;
-    char** pelements = split_path(pname, &n);
-    if (pelements == NULL) {
-      return false;
-    }
-    for (int i = 0; i < n; i++) {
-      // Really shouldn't be NULL, but check can't hurt
-      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
-        continue; // skip the empty path values
-      }
-      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
-      if (file_exists(buffer)) {
-        retval = true;
-        break;
-      }
-    }
-    // release the storage
-    for (int i = 0; i < n; i++) {
-      if (pelements[i] != NULL) {
-        FREE_C_HEAP_ARRAY(char, pelements[i]);
-      }
-    }
-    if (pelements != NULL) {
-      FREE_C_HEAP_ARRAY(char*, pelements);
-    }
-  } else {
-    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
-    retval = true;
-  }
-  return retval;
-}
-
 // Check if addr is inside libjvm.so.
 bool os::address_is_in_vm(address addr) {
 
@@ -1493,12 +1437,7 @@
 }
 
 void os::pd_print_cpu_info(outputStream* st, char* buf, size_t buflen) {
-  st->print("CPU:");
-  st->print("total %d", os::processor_count());
-  // It's not safe to query number of active processors after crash.
-  // st->print("(active %d)", os::active_processor_count());
-  st->print(" %s", VM_Version::features());
-  st->cr();
+  // Nothing to do beyond what os::print_cpu_info() does.
 }
 
 static void print_signal_handler(outputStream* st, int sig,
@@ -2668,11 +2607,10 @@
 ////////////////////////////////////////////////////////////////////////////////
 // suspend/resume support
 
-//  the low-level signal-based suspend/resume support is a remnant from the
+//  The low-level signal-based suspend/resume support is a remnant from the
 //  old VM-suspension that used to be for java-suspension, safepoints etc,
-//  within hotspot. Now there is a single use-case for this:
-//    - calling get_thread_pc() on the VMThread by the flat-profiler task
-//      that runs in the watcher thread.
+//  within hotspot. Currently used by JFR's OSThreadSampler
+//
 //  The remaining code is greatly simplified from the more general suspension
 //  code that used to be used.
 //
@@ -2688,7 +2626,13 @@
 //
 //  Note that the SR_lock plays no role in this suspend/resume protocol,
 //  but is checked for NULL in SR_handler as a thread termination indicator.
+//  The SR_lock is, however, used by JavaThread::java_suspend()/java_resume() APIs.
 //
+//  Note that resume_clear_context() and suspend_save_context() are needed
+//  by SR_handler(), so that fetch_frame_from_ucontext() works,
+//  which in part is used by:
+//    - Forte Analyzer: AsyncGetCallTrace()
+//    - StackBanging: get_frame_at_stack_banging_point()
 
 static void resume_clear_context(OSThread *osthread) {
   osthread->set_ucontext(NULL);
@@ -3695,44 +3639,6 @@
   }
 }
 
-class PcFetcher : public os::SuspendedThreadTask {
-public:
-  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
-  ExtendedPC result();
-protected:
-  void do_task(const os::SuspendedThreadTaskContext& context);
-private:
-  ExtendedPC _epc;
-};
-
-ExtendedPC PcFetcher::result() {
-  guarantee(is_done(), "task is not done yet.");
-  return _epc;
-}
-
-void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
-  Thread* thread = context.thread();
-  OSThread* osthread = thread->osthread();
-  if (osthread->ucontext() != NULL) {
-    _epc = os::Aix::ucontext_get_pc((const ucontext_t *) context.ucontext());
-  } else {
-    // NULL context is unexpected, double-check this is the VMThread.
-    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
-  }
-}
-
-// Suspends the target using the signal mechanism and then grabs the PC before
-// resuming the target. Used by the flat-profiler only
-ExtendedPC os::get_thread_pc(Thread* thread) {
-  // Make sure that it is called by the watcher for the VMThread.
-  assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
-  assert(thread->is_VM_thread(), "Can only be called for VMThread");
-
-  PcFetcher fetcher(thread);
-  fetcher.run();
-  return fetcher.result();
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 // debug support
 
--- a/src/os/bsd/vm/os_bsd.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/os/bsd/vm/os_bsd.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -1172,13 +1172,6 @@
 
 // DLL functions
 
-#define JNI_LIB_PREFIX "lib"
-#ifdef __APPLE__
-  #define JNI_LIB_SUFFIX ".dylib"
-#else
-  #define JNI_LIB_SUFFIX ".so"
-#endif
-
 const char* os::dll_file_extension() { return JNI_LIB_SUFFIX; }
 
 // This must be hard coded because it's the system's temporary
@@ -1201,62 +1194,6 @@
 const char* os::get_temp_directory() { return "/tmp"; }
 #endif // __APPLE__
 
-static bool file_exists(const char* filename) {
-  struct stat statbuf;
-  if (filename == NULL || strlen(filename) == 0) {
-    return false;
-  }
-  return os::stat(filename, &statbuf) == 0;
-}
-
-bool os::dll_build_name(char* buffer, size_t buflen,
-                        const char* pname, const char* fname) {
-  bool retval = false;
-  // Copied from libhpi
-  const size_t pnamelen = pname ? strlen(pname) : 0;
-
-  // Return error on buffer overflow.
-  if (pnamelen + strlen(fname) + strlen(JNI_LIB_PREFIX) + strlen(JNI_LIB_SUFFIX) + 2 > buflen) {
-    return retval;
-  }
-
-  if (pnamelen == 0) {
-    snprintf(buffer, buflen, JNI_LIB_PREFIX "%s" JNI_LIB_SUFFIX, fname);
-    retval = true;
-  } else if (strchr(pname, *os::path_separator()) != NULL) {
-    int n;
-    char** pelements = split_path(pname, &n);
-    if (pelements == NULL) {
-      return false;
-    }
-    for (int i = 0; i < n; i++) {
-      // Really shouldn't be NULL, but check can't hurt
-      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
-        continue; // skip the empty path values
-      }
-      snprintf(buffer, buflen, "%s/" JNI_LIB_PREFIX "%s" JNI_LIB_SUFFIX,
-               pelements[i], fname);
-      if (file_exists(buffer)) {
-        retval = true;
-        break;
-      }
-    }
-    // release the storage
-    for (int i = 0; i < n; i++) {
-      if (pelements[i] != NULL) {
-        FREE_C_HEAP_ARRAY(char, pelements[i]);
-      }
-    }
-    if (pelements != NULL) {
-      FREE_C_HEAP_ARRAY(char*, pelements);
-    }
-  } else {
-    snprintf(buffer, buflen, "%s/" JNI_LIB_PREFIX "%s" JNI_LIB_SUFFIX, pname, fname);
-    retval = true;
-  }
-  return retval;
-}
-
 // check if addr is inside libjvm.so
 bool os::address_is_in_vm(address addr) {
   static address libjvm_base_addr;
@@ -2666,11 +2603,10 @@
 ////////////////////////////////////////////////////////////////////////////////
 // suspend/resume support
 
-//  the low-level signal-based suspend/resume support is a remnant from the
+//  The low-level signal-based suspend/resume support is a remnant from the
 //  old VM-suspension that used to be for java-suspension, safepoints etc,
-//  within hotspot. Now there is a single use-case for this:
-//    - calling get_thread_pc() on the VMThread by the flat-profiler task
-//      that runs in the watcher thread.
+//  within hotspot. Currently used by JFR's OSThreadSampler
+//
 //  The remaining code is greatly simplified from the more general suspension
 //  code that used to be used.
 //
@@ -2686,6 +2622,13 @@
 //
 //  Note that the SR_lock plays no role in this suspend/resume protocol,
 //  but is checked for NULL in SR_handler as a thread termination indicator.
+//  The SR_lock is, however, used by JavaThread::java_suspend()/java_resume() APIs.
+//
+//  Note that resume_clear_context() and suspend_save_context() are needed
+//  by SR_handler(), so that fetch_frame_from_ucontext() works,
+//  which in part is used by:
+//    - Forte Analyzer: AsyncGetCallTrace()
+//    - StackBanging: get_frame_at_stack_banging_point()
 
 static void resume_clear_context(OSThread *osthread) {
   osthread->set_ucontext(NULL);
@@ -3584,45 +3527,6 @@
   }
 }
 
-///
-class PcFetcher : public os::SuspendedThreadTask {
- public:
-  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
-  ExtendedPC result();
- protected:
-  void do_task(const os::SuspendedThreadTaskContext& context);
- private:
-  ExtendedPC _epc;
-};
-
-ExtendedPC PcFetcher::result() {
-  guarantee(is_done(), "task is not done yet.");
-  return _epc;
-}
-
-void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
-  Thread* thread = context.thread();
-  OSThread* osthread = thread->osthread();
-  if (osthread->ucontext() != NULL) {
-    _epc = os::Bsd::ucontext_get_pc((const ucontext_t *) context.ucontext());
-  } else {
-    // NULL context is unexpected, double-check this is the VMThread
-    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
-  }
-}
-
-// Suspends the target using the signal mechanism and then grabs the PC before
-// resuming the target. Used by the flat-profiler only
-ExtendedPC os::get_thread_pc(Thread* thread) {
-  // Make sure that it is called by the watcher for the VMThread
-  assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
-  assert(thread->is_VM_thread(), "Can only be called for VMThread");
-
-  PcFetcher fetcher(thread);
-  fetcher.run();
-  return fetcher.result();
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 // debug support
 
--- a/src/os/linux/vm/os_linux.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/os/linux/vm/os_linux.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -1419,53 +1419,6 @@
   return os::stat(filename, &statbuf) == 0;
 }
 
-bool os::dll_build_name(char* buffer, size_t buflen,
-                        const char* pname, const char* fname) {
-  bool retval = false;
-  // Copied from libhpi
-  const size_t pnamelen = pname ? strlen(pname) : 0;
-
-  // Return error on buffer overflow.
-  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
-    return retval;
-  }
-
-  if (pnamelen == 0) {
-    snprintf(buffer, buflen, "lib%s.so", fname);
-    retval = true;
-  } else if (strchr(pname, *os::path_separator()) != NULL) {
-    int n;
-    char** pelements = split_path(pname, &n);
-    if (pelements == NULL) {
-      return false;
-    }
-    for (int i = 0; i < n; i++) {
-      // Really shouldn't be NULL, but check can't hurt
-      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
-        continue; // skip the empty path values
-      }
-      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
-      if (file_exists(buffer)) {
-        retval = true;
-        break;
-      }
-    }
-    // release the storage
-    for (int i = 0; i < n; i++) {
-      if (pelements[i] != NULL) {
-        FREE_C_HEAP_ARRAY(char, pelements[i]);
-      }
-    }
-    if (pelements != NULL) {
-      FREE_C_HEAP_ARRAY(char*, pelements);
-    }
-  } else {
-    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
-    retval = true;
-  }
-  return retval;
-}
-
 // check if addr is inside libjvm.so
 bool os::address_is_in_vm(address addr) {
   static address libjvm_base_addr;
@@ -1748,8 +1701,10 @@
     {EM_PPC,         EM_PPC,     ELFCLASS32, ELFDATA2MSB, (char*)"Power PC 32"},
 #if defined(VM_LITTLE_ENDIAN)
     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2LSB, (char*)"Power PC 64 LE"},
+    {EM_SH,          EM_SH,      ELFCLASS32, ELFDATA2LSB, (char*)"SuperH"},
 #else
     {EM_PPC64,       EM_PPC64,   ELFCLASS64, ELFDATA2MSB, (char*)"Power PC 64"},
+    {EM_SH,          EM_SH,      ELFCLASS32, ELFDATA2MSB, (char*)"SuperH BE"},
 #endif
     {EM_ARM,         EM_ARM,     ELFCLASS32,   ELFDATA2LSB, (char*)"ARM"},
     {EM_S390,        EM_S390,    ELFCLASSNONE, ELFDATA2MSB, (char*)"IBM System/390"},
@@ -1791,9 +1746,11 @@
   static  Elf32_Half running_arch_code=EM_MIPS;
 #elif  (defined M68K)
   static  Elf32_Half running_arch_code=EM_68K;
+#elif  (defined SH)
+  static  Elf32_Half running_arch_code=EM_SH;
 #else
     #error Method os::dll_load requires that one of following is defined:\
-        AARCH64, ALPHA, ARM, AMD64, IA32, IA64, M68K, MIPS, MIPSEL, PARISC, __powerpc__, __powerpc64__, S390, __sparc
+        AARCH64, ALPHA, ARM, AMD64, IA32, IA64, M68K, MIPS, MIPSEL, PARISC, __powerpc__, __powerpc64__, S390, SH, __sparc
 #endif
 
   // Identify compatability class for VM's architecture and library's architecture
@@ -4043,11 +4000,10 @@
 ////////////////////////////////////////////////////////////////////////////////
 // suspend/resume support
 
-//  the low-level signal-based suspend/resume support is a remnant from the
+//  The low-level signal-based suspend/resume support is a remnant from the
 //  old VM-suspension that used to be for java-suspension, safepoints etc,
-//  within hotspot. Now there is a single use-case for this:
-//    - calling get_thread_pc() on the VMThread by the flat-profiler task
-//      that runs in the watcher thread.
+//  within hotspot. Currently used by JFR's OSThreadSampler
+//
 //  The remaining code is greatly simplified from the more general suspension
 //  code that used to be used.
 //
@@ -4063,6 +4019,13 @@
 //
 //  Note that the SR_lock plays no role in this suspend/resume protocol,
 //  but is checked for NULL in SR_handler as a thread termination indicator.
+//  The SR_lock is, however, used by JavaThread::java_suspend()/java_resume() APIs.
+//
+//  Note that resume_clear_context() and suspend_save_context() are needed
+//  by SR_handler(), so that fetch_frame_from_ucontext() works,
+//  which in part is used by:
+//    - Forte Analyzer: AsyncGetCallTrace()
+//    - StackBanging: get_frame_at_stack_banging_point()
 
 static void resume_clear_context(OSThread *osthread) {
   osthread->set_ucontext(NULL);
@@ -5103,44 +5066,6 @@
   }
 }
 
-class PcFetcher : public os::SuspendedThreadTask {
- public:
-  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
-  ExtendedPC result();
- protected:
-  void do_task(const os::SuspendedThreadTaskContext& context);
- private:
-  ExtendedPC _epc;
-};
-
-ExtendedPC PcFetcher::result() {
-  guarantee(is_done(), "task is not done yet.");
-  return _epc;
-}
-
-void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
-  Thread* thread = context.thread();
-  OSThread* osthread = thread->osthread();
-  if (osthread->ucontext() != NULL) {
-    _epc = os::Linux::ucontext_get_pc((const ucontext_t *) context.ucontext());
-  } else {
-    // NULL context is unexpected, double-check this is the VMThread
-    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
-  }
-}
-
-// Suspends the target using the signal mechanism and then grabs the PC before
-// resuming the target. Used by the flat-profiler only
-ExtendedPC os::get_thread_pc(Thread* thread) {
-  // Make sure that it is called by the watcher for the VMThread
-  assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
-  assert(thread->is_VM_thread(), "Can only be called for VMThread");
-
-  PcFetcher fetcher(thread);
-  fetcher.run();
-  return fetcher.result();
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 // debug support
 
--- a/src/os/posix/vm/os_posix.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/os/posix/vm/os_posix.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -24,7 +24,6 @@
 
 #include "utilities/globalDefinitions.hpp"
 #include "prims/jvm.h"
-#include "semaphore_posix.hpp"
 #include "runtime/frame.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/os.hpp"
@@ -32,6 +31,11 @@
 #include "utilities/macros.hpp"
 #include "utilities/vmError.hpp"
 
+#ifndef __APPLE__
+// POSIX unamed semaphores are not supported on OS X.
+#include "semaphore_posix.hpp"
+#endif
+
 #include <dlfcn.h>
 #include <pthread.h>
 #include <semaphore.h>
--- a/src/os/solaris/vm/osThread_solaris.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/os/solaris/vm/osThread_solaris.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -65,12 +65,6 @@
   void set_lwp_id(uint id)           { _lwp_id = id; }
   void set_native_priority(int prio) { _native_priority = prio; }
 
- // ***************************************************************
- // interrupt support.  interrupts (using signals) are used to get
- // the thread context (get_thread_pc), to set the thread context
- // (set_thread_pc), and to implement java.lang.Thread.interrupt.
- // ***************************************************************
-
  public:
   os::SuspendResume sr;
 
--- a/src/os/solaris/vm/os_solaris.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/os/solaris/vm/os_solaris.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -1356,60 +1356,6 @@
 // directory not the java application's temp directory, ala java.io.tmpdir.
 const char* os::get_temp_directory() { return "/tmp"; }
 
-static bool file_exists(const char* filename) {
-  struct stat statbuf;
-  if (filename == NULL || strlen(filename) == 0) {
-    return false;
-  }
-  return os::stat(filename, &statbuf) == 0;
-}
-
-bool os::dll_build_name(char* buffer, size_t buflen,
-                        const char* pname, const char* fname) {
-  bool retval = false;
-  const size_t pnamelen = pname ? strlen(pname) : 0;
-
-  // Return error on buffer overflow.
-  if (pnamelen + strlen(fname) + 10 > (size_t) buflen) {
-    return retval;
-  }
-
-  if (pnamelen == 0) {
-    snprintf(buffer, buflen, "lib%s.so", fname);
-    retval = true;
-  } else if (strchr(pname, *os::path_separator()) != NULL) {
-    int n;
-    char** pelements = split_path(pname, &n);
-    if (pelements == NULL) {
-      return false;
-    }
-    for (int i = 0; i < n; i++) {
-      // really shouldn't be NULL but what the heck, check can't hurt
-      if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
-        continue; // skip the empty path values
-      }
-      snprintf(buffer, buflen, "%s/lib%s.so", pelements[i], fname);
-      if (file_exists(buffer)) {
-        retval = true;
-        break;
-      }
-    }
-    // release the storage
-    for (int i = 0; i < n; i++) {
-      if (pelements[i] != NULL) {
-        FREE_C_HEAP_ARRAY(char, pelements[i]);
-      }
-    }
-    if (pelements != NULL) {
-      FREE_C_HEAP_ARRAY(char*, pelements);
-    }
-  } else {
-    snprintf(buffer, buflen, "%s/lib%s.so", pname, fname);
-    retval = true;
-  }
-  return retval;
-}
-
 // check if addr is inside libjvm.so
 bool os::address_is_in_vm(address addr) {
   static address libjvm_base_addr;
@@ -3496,6 +3442,37 @@
   schedctl_start(schedctl_init());
 }
 
+////////////////////////////////////////////////////////////////////////////////
+// suspend/resume support
+
+//  The low-level signal-based suspend/resume support is a remnant from the
+//  old VM-suspension that used to be for java-suspension, safepoints etc,
+//  within hotspot. Currently used by JFR's OSThreadSampler
+//
+//  The remaining code is greatly simplified from the more general suspension
+//  code that used to be used.
+//
+//  The protocol is quite simple:
+//  - suspend:
+//      - sends a signal to the target thread
+//      - polls the suspend state of the osthread using a yield loop
+//      - target thread signal handler (SR_handler) sets suspend state
+//        and blocks in sigsuspend until continued
+//  - resume:
+//      - sets target osthread state to continue
+//      - sends signal to end the sigsuspend loop in the SR_handler
+//
+//  Note that the SR_lock plays no role in this suspend/resume protocol,
+//  but is checked for NULL in SR_handler as a thread termination indicator.
+//  The SR_lock is, however, used by JavaThread::java_suspend()/java_resume() APIs.
+//
+//  Note that resume_clear_context() and suspend_save_context() are needed
+//  by SR_handler(), so that fetch_frame_from_ucontext() works,
+//  which in part is used by:
+//    - Forte Analyzer: AsyncGetCallTrace()
+//    - StackBanging: get_frame_at_stack_banging_point()
+//    - JFR: get_topframe()-->....-->get_valid_uc_in_signal_handler()
+
 static void resume_clear_context(OSThread *osthread) {
   osthread->set_ucontext(NULL);
 }
@@ -3506,7 +3483,7 @@
 
 static PosixSemaphore sr_semaphore;
 
-void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) {
+void os::Solaris::SR_handler(Thread* thread, ucontext_t* context) {
   // Save and restore errno to avoid confusing native code with EINTR
   // after sigsuspend.
   int old_errno = errno;
@@ -3516,7 +3493,7 @@
 
   os::SuspendResume::State current = osthread->sr.state();
   if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
-    suspend_save_context(osthread, uc);
+    suspend_save_context(osthread, context);
 
     // attempt to switch the state, we assume we had a SUSPEND_REQUEST
     os::SuspendResume::State state = osthread->sr.suspended();
@@ -3663,45 +3640,6 @@
   }
 }
 
-class PcFetcher : public os::SuspendedThreadTask {
- public:
-  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
-  ExtendedPC result();
- protected:
-  void do_task(const os::SuspendedThreadTaskContext& context);
- private:
-  ExtendedPC _epc;
-};
-
-ExtendedPC PcFetcher::result() {
-  guarantee(is_done(), "task is not done yet.");
-  return _epc;
-}
-
-void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
-  Thread* thread = context.thread();
-  OSThread* osthread = thread->osthread();
-  if (osthread->ucontext() != NULL) {
-    _epc = os::Solaris::ucontext_get_pc((const ucontext_t *) context.ucontext());
-  } else {
-    // NULL context is unexpected, double-check this is the VMThread
-    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
-  }
-}
-
-// A lightweight implementation that does not suspend the target thread and
-// thus returns only a hint. Used for profiling only!
-ExtendedPC os::get_thread_pc(Thread* thread) {
-  // Make sure that it is called by the watcher and the Threads lock is owned.
-  assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
-  // For now, is only used to profile the VM Thread
-  assert(thread->is_VM_thread(), "Can only be called for VMThread");
-  PcFetcher fetcher(thread);
-  fetcher.run();
-  return fetcher.result();
-}
-
-
 // This does not do anything on Solaris. This is basically a hook for being
 // able to use structured exception handling (thread-local exception filters) on, e.g., Win32.
 void os::os_exception_wrapper(java_call_t f, JavaValue* value,
--- a/src/os/windows/vm/decoder_windows.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/os/windows/vm/decoder_windows.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,160 +27,99 @@
 #include "runtime/arguments.hpp"
 #include "runtime/os.hpp"
 #include "decoder_windows.hpp"
+#include "windbghelp.hpp"
 
 WindowsDecoder::WindowsDecoder() {
-  _dbghelp_handle = NULL;
-  _can_decode_in_vm = false;
-  _pfnSymGetSymFromAddr64 = NULL;
-  _pfnUndecorateSymbolName = NULL;
-#ifdef AMD64
-  _pfnStackWalk64 = NULL;
-  _pfnSymFunctionTableAccess64 = NULL;
-  _pfnSymGetModuleBase64 = NULL;
-#endif
+  _can_decode_in_vm = true;
   _decoder_status = no_error;
   initialize();
 }
 
 void WindowsDecoder::initialize() {
-  if (!has_error() && _dbghelp_handle == NULL) {
-    HMODULE handle = ::LoadLibrary("dbghelp.dll");
-    if (!handle) {
-      _decoder_status = helper_not_found;
-      return;
-    }
-
-    _dbghelp_handle = handle;
-
-    pfn_SymSetOptions _pfnSymSetOptions = (pfn_SymSetOptions)::GetProcAddress(handle, "SymSetOptions");
-    pfn_SymInitialize _pfnSymInitialize = (pfn_SymInitialize)::GetProcAddress(handle, "SymInitialize");
-    _pfnSymGetSymFromAddr64 = (pfn_SymGetSymFromAddr64)::GetProcAddress(handle, "SymGetSymFromAddr64");
-    _pfnUndecorateSymbolName = (pfn_UndecorateSymbolName)::GetProcAddress(handle, "UnDecorateSymbolName");
-
-    if (_pfnSymSetOptions == NULL || _pfnSymInitialize == NULL || _pfnSymGetSymFromAddr64 == NULL) {
-      uninitialize();
-      _decoder_status = helper_func_error;
-      return;
-    }
-
-#ifdef AMD64
-    _pfnStackWalk64 = (pfn_StackWalk64)::GetProcAddress(handle, "StackWalk64");
-    _pfnSymFunctionTableAccess64 = (pfn_SymFunctionTableAccess64)::GetProcAddress(handle, "SymFunctionTableAccess64");
-    _pfnSymGetModuleBase64 = (pfn_SymGetModuleBase64)::GetProcAddress(handle, "SymGetModuleBase64");
-    if (_pfnStackWalk64 == NULL || _pfnSymFunctionTableAccess64 == NULL || _pfnSymGetModuleBase64 == NULL) {
-      // We can't call StackWalk64 to walk the stack, but we are still
-      // able to decode the symbols. Let's limp on.
-      _pfnStackWalk64 = NULL;
-      _pfnSymFunctionTableAccess64 = NULL;
-      _pfnSymGetModuleBase64 = NULL;
-    }
-#endif
-
+  if (!has_error()) {
     HANDLE hProcess = ::GetCurrentProcess();
-    _pfnSymSetOptions(SYMOPT_UNDNAME | SYMOPT_DEFERRED_LOADS | SYMOPT_EXACT_SYMBOLS);
-    if (!_pfnSymInitialize(hProcess, NULL, TRUE)) {
-      _pfnSymGetSymFromAddr64 = NULL;
-      _pfnUndecorateSymbolName = NULL;
-      ::FreeLibrary(handle);
-      _dbghelp_handle = NULL;
+    WindowsDbgHelp::symSetOptions(SYMOPT_UNDNAME | SYMOPT_DEFERRED_LOADS | SYMOPT_EXACT_SYMBOLS);
+    if (!WindowsDbgHelp::symInitialize(hProcess, NULL, TRUE)) {
       _decoder_status = helper_init_error;
       return;
     }
 
     // set pdb search paths
-    pfn_SymSetSearchPath  _pfn_SymSetSearchPath =
-      (pfn_SymSetSearchPath)::GetProcAddress(handle, "SymSetSearchPath");
-    pfn_SymGetSearchPath  _pfn_SymGetSearchPath =
-      (pfn_SymGetSearchPath)::GetProcAddress(handle, "SymGetSearchPath");
-    if (_pfn_SymSetSearchPath != NULL && _pfn_SymGetSearchPath != NULL) {
-      char paths[MAX_PATH];
-      int  len = sizeof(paths);
-      if (!_pfn_SymGetSearchPath(hProcess, paths, len)) {
-        paths[0] = '\0';
-      } else {
-        // available spaces in path buffer
-        len -= (int)strlen(paths);
+    char paths[MAX_PATH];
+    int  len = sizeof(paths);
+    if (!WindowsDbgHelp::symGetSearchPath(hProcess, paths, len)) {
+      paths[0] = '\0';
+    } else {
+      // available spaces in path buffer
+      len -= (int)strlen(paths);
+    }
+
+    char tmp_path[MAX_PATH];
+    DWORD dwSize;
+    HMODULE hJVM = ::GetModuleHandle("jvm.dll");
+    tmp_path[0] = '\0';
+    // append the path where jvm.dll is located
+    if (hJVM != NULL && (dwSize = ::GetModuleFileName(hJVM, tmp_path, sizeof(tmp_path))) > 0) {
+      while (dwSize > 0 && tmp_path[dwSize] != '\\') {
+        dwSize --;
       }
 
-      char tmp_path[MAX_PATH];
-      DWORD dwSize;
-      HMODULE hJVM = ::GetModuleHandle("jvm.dll");
-      tmp_path[0] = '\0';
-      // append the path where jvm.dll is located
-      if (hJVM != NULL && (dwSize = ::GetModuleFileName(hJVM, tmp_path, sizeof(tmp_path))) > 0) {
-        while (dwSize > 0 && tmp_path[dwSize] != '\\') {
-          dwSize --;
-        }
+      tmp_path[dwSize] = '\0';
 
-        tmp_path[dwSize] = '\0';
+      if (dwSize > 0 && len > (int)dwSize + 1) {
+        strncat(paths, os::path_separator(), 1);
+        strncat(paths, tmp_path, dwSize);
+        len -= dwSize + 1;
+      }
+    }
 
-        if (dwSize > 0 && len > (int)dwSize + 1) {
+    // append $JRE/bin. Arguments::get_java_home actually returns $JRE
+    // path
+    char *p = Arguments::get_java_home();
+    assert(p != NULL, "empty java home");
+    size_t java_home_len = strlen(p);
+    if (len > (int)java_home_len + 5) {
+      strncat(paths, os::path_separator(), 1);
+      strncat(paths, p, java_home_len);
+      strncat(paths, "\\bin", 4);
+      len -= (int)(java_home_len + 5);
+    }
+
+    // append $JDK/bin path if it exists
+    assert(java_home_len < MAX_PATH, "Invalid path length");
+    // assume $JRE is under $JDK, construct $JDK/bin path and
+    // see if it exists or not
+    if (strncmp(&p[java_home_len - 3], "jre", 3) == 0) {
+      strncpy(tmp_path, p, java_home_len - 3);
+      tmp_path[java_home_len - 3] = '\0';
+      strncat(tmp_path, "bin", 3);
+
+      // if the directory exists
+      DWORD dwAttrib = GetFileAttributes(tmp_path);
+      if (dwAttrib != INVALID_FILE_ATTRIBUTES &&
+          (dwAttrib & FILE_ATTRIBUTE_DIRECTORY)) {
+        // tmp_path should have the same length as java_home_len, since we only
+        // replaced 'jre' with 'bin'
+        if (len > (int)java_home_len + 1) {
           strncat(paths, os::path_separator(), 1);
-          strncat(paths, tmp_path, dwSize);
-          len -= dwSize + 1;
+          strncat(paths, tmp_path, java_home_len);
         }
       }
-
-      // append $JRE/bin. Arguments::get_java_home actually returns $JRE
-      // path
-      char *p = Arguments::get_java_home();
-      assert(p != NULL, "empty java home");
-      size_t java_home_len = strlen(p);
-      if (len > (int)java_home_len + 5) {
-        strncat(paths, os::path_separator(), 1);
-        strncat(paths, p, java_home_len);
-        strncat(paths, "\\bin", 4);
-        len -= (int)(java_home_len + 5);
-      }
-
-      // append $JDK/bin path if it exists
-      assert(java_home_len < MAX_PATH, "Invalid path length");
-      // assume $JRE is under $JDK, construct $JDK/bin path and
-      // see if it exists or not
-      if (strncmp(&p[java_home_len - 3], "jre", 3) == 0) {
-        strncpy(tmp_path, p, java_home_len - 3);
-        tmp_path[java_home_len - 3] = '\0';
-        strncat(tmp_path, "bin", 3);
-
-        // if the directory exists
-        DWORD dwAttrib = GetFileAttributes(tmp_path);
-        if (dwAttrib != INVALID_FILE_ATTRIBUTES &&
-            (dwAttrib & FILE_ATTRIBUTE_DIRECTORY)) {
-          // tmp_path should have the same length as java_home_len, since we only
-          // replaced 'jre' with 'bin'
-          if (len > (int)java_home_len + 1) {
-            strncat(paths, os::path_separator(), 1);
-            strncat(paths, tmp_path, java_home_len);
-          }
-        }
-      }
-
-      _pfn_SymSetSearchPath(hProcess, paths);
     }
 
-     // find out if jvm.dll contains private symbols, by decoding
-     // current function and comparing the result
-     address addr = (address)Decoder::demangle;
-     char buf[MAX_PATH];
-     if (decode(addr, buf, sizeof(buf), NULL, NULL, true /* demangle */)) {
-       _can_decode_in_vm = !strcmp(buf, "Decoder::demangle");
-     }
+    WindowsDbgHelp::symSetSearchPath(hProcess, paths);
+
+    // find out if jvm.dll contains private symbols, by decoding
+    // current function and comparing the result
+    address addr = (address)Decoder::demangle;
+    char buf[MAX_PATH];
+    if (decode(addr, buf, sizeof(buf), NULL, NULL, true /* demangle */)) {
+      _can_decode_in_vm = !strcmp(buf, "Decoder::demangle");
+    }
   }
 }
 
-void WindowsDecoder::uninitialize() {
-  _pfnSymGetSymFromAddr64 = NULL;
-  _pfnUndecorateSymbolName = NULL;
-#ifdef AMD64
-  _pfnStackWalk64 = NULL;
-  _pfnSymFunctionTableAccess64 = NULL;
-  _pfnSymGetModuleBase64 = NULL;
-#endif
-  if (_dbghelp_handle != NULL) {
-    ::FreeLibrary(_dbghelp_handle);
-  }
-  _dbghelp_handle = NULL;
-}
+void WindowsDecoder::uninitialize() {}
 
 bool WindowsDecoder::can_decode_C_frame_in_vm() const {
   return  (!has_error() && _can_decode_in_vm);
@@ -188,14 +127,14 @@
 
 
 bool WindowsDecoder::decode(address addr, char *buf, int buflen, int* offset, const char* modulepath, bool demangle_name)  {
-  if (_pfnSymGetSymFromAddr64 != NULL) {
+  if (!has_error()) {
     PIMAGEHLP_SYMBOL64 pSymbol;
     char symbolInfo[MAX_PATH + sizeof(IMAGEHLP_SYMBOL64)];
     pSymbol = (PIMAGEHLP_SYMBOL64)symbolInfo;
     pSymbol->MaxNameLength = MAX_PATH;
     pSymbol->SizeOfStruct = sizeof(IMAGEHLP_SYMBOL64);
     DWORD64 displacement;
-    if (_pfnSymGetSymFromAddr64(::GetCurrentProcess(), (DWORD64)addr, &displacement, pSymbol)) {
+    if (WindowsDbgHelp::symGetSymFromAddr64(::GetCurrentProcess(), (DWORD64)addr, &displacement, pSymbol)) {
       if (buf != NULL) {
         if (!(demangle_name && demangle(pSymbol->Name, buf, buflen))) {
           jio_snprintf(buf, buflen, "%s", pSymbol->Name);
@@ -211,69 +150,9 @@
 }
 
 bool WindowsDecoder::demangle(const char* symbol, char *buf, int buflen) {
-  return _pfnUndecorateSymbolName != NULL &&
-         _pfnUndecorateSymbolName(symbol, buf, buflen, UNDNAME_COMPLETE);
+  if (!has_error()) {
+    return WindowsDbgHelp::unDecorateSymbolName(symbol, buf, buflen, UNDNAME_COMPLETE) > 0;
+  }
+  return false;
 }
 
-#ifdef AMD64
-BOOL WindowsDbgHelp::StackWalk64(DWORD MachineType,
-                                 HANDLE hProcess,
-                                 HANDLE hThread,
-                                 LPSTACKFRAME64 StackFrame,
-                                 PVOID ContextRecord,
-                                 PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
-                                 PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
-                                 PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
-                                 PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress) {
-  DecoderLocker locker;
-  WindowsDecoder* wd = (WindowsDecoder*)locker.decoder();
-
-  if (!wd->has_error() && wd->_pfnStackWalk64) {
-    return wd->_pfnStackWalk64(MachineType,
-                               hProcess,
-                               hThread,
-                               StackFrame,
-                               ContextRecord,
-                               ReadMemoryRoutine,
-                               FunctionTableAccessRoutine,
-                               GetModuleBaseRoutine,
-                               TranslateAddress);
-  } else {
-    return false;
-  }
-}
-
-PVOID WindowsDbgHelp::SymFunctionTableAccess64(HANDLE hProcess, DWORD64 AddrBase) {
-  DecoderLocker locker;
-  WindowsDecoder* wd = (WindowsDecoder*)locker.decoder();
-
-  if (!wd->has_error() && wd->_pfnSymFunctionTableAccess64) {
-    return wd->_pfnSymFunctionTableAccess64(hProcess, AddrBase);
-  } else {
-    return NULL;
-  }
-}
-
-pfn_SymFunctionTableAccess64 WindowsDbgHelp::pfnSymFunctionTableAccess64() {
-  DecoderLocker locker;
-  WindowsDecoder* wd = (WindowsDecoder*)locker.decoder();
-
-  if (!wd->has_error()) {
-    return wd->_pfnSymFunctionTableAccess64;
-  } else {
-    return NULL;
-  }
-}
-
-pfn_SymGetModuleBase64 WindowsDbgHelp::pfnSymGetModuleBase64() {
-  DecoderLocker locker;
-  WindowsDecoder* wd = (WindowsDecoder*)locker.decoder();
-
-  if (!wd->has_error()) {
-    return wd->_pfnSymGetModuleBase64;
-  } else {
-    return NULL;
-  }
-}
-
-#endif // AMD64
--- a/src/os/windows/vm/decoder_windows.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/os/windows/vm/decoder_windows.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -25,33 +25,8 @@
 #ifndef OS_WINDOWS_VM_DECODER_WINDOWS_HPP
 #define OS_WINDOWS_VM_DECIDER_WINDOWS_HPP
 
-#include <windows.h>
-#include <imagehlp.h>
-
 #include "utilities/decoder.hpp"
 
-// functions needed for decoding symbols
-typedef DWORD (WINAPI *pfn_SymSetOptions)(DWORD);
-typedef BOOL  (WINAPI *pfn_SymInitialize)(HANDLE, PCTSTR, BOOL);
-typedef BOOL  (WINAPI *pfn_SymGetSymFromAddr64)(HANDLE, DWORD64, PDWORD64, PIMAGEHLP_SYMBOL64);
-typedef DWORD (WINAPI *pfn_UndecorateSymbolName)(const char*, char*, DWORD, DWORD);
-typedef BOOL  (WINAPI *pfn_SymSetSearchPath)(HANDLE, PCTSTR);
-typedef BOOL  (WINAPI *pfn_SymGetSearchPath)(HANDLE, PTSTR, int);
-
-#ifdef AMD64
-typedef BOOL  (WINAPI *pfn_StackWalk64)(DWORD MachineType,
-                                        HANDLE hProcess,
-                                        HANDLE hThread,
-                                        LPSTACKFRAME64 StackFrame,
-                                        PVOID ContextRecord,
-                                        PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
-                                        PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
-                                        PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
-                                        PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress);
-typedef PVOID (WINAPI *pfn_SymFunctionTableAccess64)(HANDLE hProcess, DWORD64 AddrBase);
-typedef DWORD64 (WINAPI *pfn_SymGetModuleBase64)(HANDLE hProcess, DWORD64 dwAddr);
-#endif
-
 class WindowsDecoder : public AbstractDecoder {
 
 public:
@@ -70,38 +45,8 @@
   void initialize();
   void uninitialize();
 
-private:
-  HMODULE                   _dbghelp_handle;
   bool                      _can_decode_in_vm;
-  pfn_SymGetSymFromAddr64   _pfnSymGetSymFromAddr64;
-  pfn_UndecorateSymbolName  _pfnUndecorateSymbolName;
-#ifdef AMD64
-  pfn_StackWalk64              _pfnStackWalk64;
-  pfn_SymFunctionTableAccess64 _pfnSymFunctionTableAccess64;
-  pfn_SymGetModuleBase64       _pfnSymGetModuleBase64;
 
-  friend class WindowsDbgHelp;
-#endif
 };
 
-#ifdef AMD64
-// TODO: refactor and move the handling of dbghelp.dll outside of Decoder
-class WindowsDbgHelp : public Decoder {
-public:
-  static BOOL StackWalk64(DWORD MachineType,
-                          HANDLE hProcess,
-                          HANDLE hThread,
-                          LPSTACKFRAME64 StackFrame,
-                          PVOID ContextRecord,
-                          PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
-                          PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
-                          PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
-                          PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress);
-  static PVOID SymFunctionTableAccess64(HANDLE hProcess, DWORD64 AddrBase);
-
-  static pfn_SymFunctionTableAccess64 pfnSymFunctionTableAccess64();
-  static pfn_SymGetModuleBase64       pfnSymGetModuleBase64();
-};
-#endif
-
 #endif // OS_WINDOWS_VM_DECODER_WINDOWS_HPP
--- a/src/os/windows/vm/os_windows.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/os/windows/vm/os_windows.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -74,6 +74,8 @@
 #include "utilities/growableArray.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/vmError.hpp"
+#include "windbghelp.hpp"
+
 
 #ifdef _DEBUG
 #include <crtdbg.h>
@@ -1009,7 +1011,6 @@
 }
 
 void os::abort(bool dump_core, void* siginfo, const void* context) {
-  HINSTANCE dbghelp;
   EXCEPTION_POINTERS ep;
   MINIDUMP_EXCEPTION_INFORMATION mei;
   MINIDUMP_EXCEPTION_INFORMATION* pmei;
@@ -1026,28 +1027,6 @@
     win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
   }
 
-  dbghelp = os::win32::load_Windows_dll("DBGHELP.DLL", NULL, 0);
-
-  if (dbghelp == NULL) {
-    jio_fprintf(stderr, "Failed to load dbghelp.dll\n");
-    CloseHandle(dumpFile);
-    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
-  }
-
-  _MiniDumpWriteDump =
-      CAST_TO_FN_PTR(BOOL(WINAPI *)(HANDLE, DWORD, HANDLE, MINIDUMP_TYPE,
-                                    PMINIDUMP_EXCEPTION_INFORMATION,
-                                    PMINIDUMP_USER_STREAM_INFORMATION,
-                                    PMINIDUMP_CALLBACK_INFORMATION),
-                                    GetProcAddress(dbghelp,
-                                    "MiniDumpWriteDump"));
-
-  if (_MiniDumpWriteDump == NULL) {
-    jio_fprintf(stderr, "Failed to find MiniDumpWriteDump() in module dbghelp.dll.\n");
-    CloseHandle(dumpFile);
-    win32::exit_process_or_thread(win32::EPT_PROCESS, 1);
-  }
-
   dumpType = (MINIDUMP_TYPE)(MiniDumpWithFullMemory | MiniDumpWithHandleData |
     MiniDumpWithFullMemoryInfo | MiniDumpWithThreadInfo | MiniDumpWithUnloadedModules);
 
@@ -1064,8 +1043,8 @@
 
   // Older versions of dbghelp.dll (the one shipped with Win2003 for example) may not support all
   // the dump types we really want. If first call fails, lets fall back to just use MiniDumpWithFullMemory then.
-  if (_MiniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) == false &&
-      _MiniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL) == false) {
+  if (!WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, dumpType, pmei, NULL, NULL) &&
+      !WindowsDbgHelp::miniDumpWriteDump(hProcess, processId, dumpFile, (MINIDUMP_TYPE)MiniDumpWithFullMemory, pmei, NULL, NULL)) {
     jio_fprintf(stderr, "Call to MiniDumpWriteDump() failed (Error 0x%x)\n", GetLastError());
   }
   CloseHandle(dumpFile);
@@ -1198,70 +1177,6 @@
   }
 }
 
-static bool file_exists(const char* filename) {
-  if (filename == NULL || strlen(filename) == 0) {
-    return false;
-  }
-  return GetFileAttributes(filename) != INVALID_FILE_ATTRIBUTES;
-}
-
-bool os::dll_build_name(char *buffer, size_t buflen,
-                        const char* pname, const char* fname) {
-  bool retval = false;
-  const size_t pnamelen = pname ? strlen(pname) : 0;
-  const char c = (pnamelen > 0) ? pname[pnamelen-1] : 0;
-
-  // Return error on buffer overflow.
-  if (pnamelen + strlen(fname) + 10 > buflen) {
-    return retval;
-  }
-
-  if (pnamelen == 0) {
-    jio_snprintf(buffer, buflen, "%s.dll", fname);
-    retval = true;
-  } else if (c == ':' || c == '\\') {
-    jio_snprintf(buffer, buflen, "%s%s.dll", pname, fname);
-    retval = true;
-  } else if (strchr(pname, *os::path_separator()) != NULL) {
-    int n;
-    char** pelements = split_path(pname, &n);
-    if (pelements == NULL) {
-      return false;
-    }
-    for (int i = 0; i < n; i++) {
-      char* path = pelements[i];
-      // Really shouldn't be NULL, but check can't hurt
-      size_t plen = (path == NULL) ? 0 : strlen(path);
-      if (plen == 0) {
-        continue; // skip the empty path values
-      }
-      const char lastchar = path[plen - 1];
-      if (lastchar == ':' || lastchar == '\\') {
-        jio_snprintf(buffer, buflen, "%s%s.dll", path, fname);
-      } else {
-        jio_snprintf(buffer, buflen, "%s\\%s.dll", path, fname);
-      }
-      if (file_exists(buffer)) {
-        retval = true;
-        break;
-      }
-    }
-    // release the storage
-    for (int i = 0; i < n; i++) {
-      if (pelements[i] != NULL) {
-        FREE_C_HEAP_ARRAY(char, pelements[i]);
-      }
-    }
-    if (pelements != NULL) {
-      FREE_C_HEAP_ARRAY(char*, pelements);
-    }
-  } else {
-    jio_snprintf(buffer, buflen, "%s\\%s.dll", pname, fname);
-    retval = true;
-  }
-  return retval;
-}
-
 // Needs to be in os specific directory because windows requires another
 // header file <direct.h>
 const char* os::get_current_directory(char *buf, size_t buflen) {
@@ -3591,22 +3506,6 @@
   return interrupted;
 }
 
-// Get's a pc (hint) for a running thread. Currently used only for profiling.
-ExtendedPC os::get_thread_pc(Thread* thread) {
-  CONTEXT context;
-  context.ContextFlags = CONTEXT_CONTROL;
-  HANDLE handle = thread->osthread()->thread_handle();
-  if (GetThreadContext(handle, &context)) {
-#ifdef _M_AMD64
-    return ExtendedPC((address) context.Rip);
-#else
-    return ExtendedPC((address) context.Eip);
-#endif
-  } else {
-    return ExtendedPC(NULL);
-  }
-}
-
 // GetCurrentThreadId() returns DWORD
 intx os::current_thread_id()  { return GetCurrentThreadId(); }
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/windows/vm/windbghelp.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "utilities/ostream.hpp"
+#include "windbghelp.hpp"
+
+#include <windows.h>
+
+typedef DWORD (WINAPI *pfn_SymSetOptions)(DWORD);
+typedef DWORD (WINAPI *pfn_SymGetOptions)(void);
+typedef BOOL  (WINAPI *pfn_SymInitialize)(HANDLE, PCTSTR, BOOL);
+typedef BOOL  (WINAPI *pfn_SymGetSymFromAddr64)(HANDLE, DWORD64, PDWORD64, PIMAGEHLP_SYMBOL64);
+typedef DWORD (WINAPI *pfn_UnDecorateSymbolName)(const char*, char*, DWORD, DWORD);
+typedef BOOL  (WINAPI *pfn_SymSetSearchPath)(HANDLE, PCTSTR);
+typedef BOOL  (WINAPI *pfn_SymGetSearchPath)(HANDLE, PTSTR, int);
+typedef BOOL  (WINAPI *pfn_StackWalk64)(DWORD MachineType,
+                                        HANDLE hProcess,
+                                        HANDLE hThread,
+                                        LPSTACKFRAME64 StackFrame,
+                                        PVOID ContextRecord,
+                                        PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
+                                        PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
+                                        PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
+                                        PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress);
+typedef PVOID (WINAPI *pfn_SymFunctionTableAccess64)(HANDLE hProcess, DWORD64 AddrBase);
+typedef DWORD64 (WINAPI *pfn_SymGetModuleBase64)(HANDLE hProcess, DWORD64 dwAddr);
+typedef BOOL (WINAPI *pfn_MiniDumpWriteDump) (HANDLE hProcess, DWORD ProcessId, HANDLE hFile,
+                                              MINIDUMP_TYPE DumpType, PMINIDUMP_EXCEPTION_INFORMATION ExceptionParam,
+                                              PMINIDUMP_USER_STREAM_INFORMATION UserStreamParam,
+                                              PMINIDUMP_CALLBACK_INFORMATION    CallbackParam);
+typedef BOOL (WINAPI *pfn_SymGetLineFromAddr64) (HANDLE hProcess, DWORD64 dwAddr,
+                                                 PDWORD pdwDisplacement, PIMAGEHLP_LINE64 Line);
+typedef LPAPI_VERSION (WINAPI *pfn_ImagehlpApiVersion)(void);
+
+// Add functions as needed.
+#define FOR_ALL_FUNCTIONS(DO) \
+ DO(ImagehlpApiVersion) \
+ DO(SymGetOptions) \
+ DO(SymSetOptions) \
+ DO(SymInitialize) \
+ DO(SymGetSymFromAddr64) \
+ DO(UnDecorateSymbolName) \
+ DO(SymSetSearchPath) \
+ DO(SymGetSearchPath) \
+ DO(StackWalk64) \
+ DO(SymFunctionTableAccess64) \
+ DO(SymGetModuleBase64) \
+ DO(MiniDumpWriteDump) \
+ DO(SymGetLineFromAddr64)
+
+
+#define DECLARE_FUNCTION_POINTER(functionname) \
+static pfn_##functionname g_pfn_##functionname;
+
+FOR_ALL_FUNCTIONS(DECLARE_FUNCTION_POINTER)
+
+
+static HMODULE g_dll_handle = NULL;
+static DWORD g_dll_load_error = 0;
+static API_VERSION g_version = { 0, 0, 0, 0 };
+
+static enum {
+  state_uninitialized = 0,
+  state_ready = 1,
+  state_error = 2
+} g_state = state_uninitialized;
+
+static void initialize() {
+
+  assert(g_state == state_uninitialized, "wrong sequence");
+  g_state = state_error;
+
+  g_dll_handle = ::LoadLibrary("DBGHELP.DLL");
+  if (g_dll_handle == NULL) {
+    g_dll_load_error = ::GetLastError();
+  } else {
+    // Note: We loaded the DLL successfully. From here on we count
+    // initialization as success. We still may fail to load all of the
+    // desired function pointers successfully, but DLL may still be usable
+    // enough for our purposes.
+    g_state = state_ready;
+
+#define DO_RESOLVE(functionname) \
+      g_pfn_##functionname = (pfn_##functionname) ::GetProcAddress(g_dll_handle, #functionname);
+
+    FOR_ALL_FUNCTIONS(DO_RESOLVE)
+
+    // Retrieve version information.
+    if (g_pfn_ImagehlpApiVersion) {
+      const API_VERSION* p = g_pfn_ImagehlpApiVersion();
+      memcpy(&g_version, p, sizeof(API_VERSION));
+    }
+  }
+
+}
+
+///////////////////// External functions //////////////////////////
+
+// All outside facing functions are synchronized. Also, we run
+// initialization on first touch.
+
+
+// Call InitializeCriticalSection as early as possible.
+class CritSect {
+  CRITICAL_SECTION cs;
+public:
+  CritSect() { ::InitializeCriticalSection(&cs); }
+  void enter() { ::EnterCriticalSection(&cs); }
+  void leave() { ::LeaveCriticalSection(&cs); }
+};
+
+static CritSect g_cs;
+
+class EntryGuard {
+public:
+  EntryGuard() {
+    g_cs.enter();
+    if (g_state == state_uninitialized) {
+      initialize();
+    }
+  }
+  ~EntryGuard() {
+    g_cs.leave();
+  }
+};
+
+DWORD WindowsDbgHelp::symSetOptions(DWORD arg) {
+  EntryGuard entry_guard;
+  if (g_pfn_SymSetOptions != NULL) {
+    return g_pfn_SymSetOptions(arg);
+  }
+  return 0;
+}
+
+DWORD WindowsDbgHelp::symGetOptions(void) {
+  EntryGuard entry_guard;
+  if (g_pfn_SymGetOptions != NULL) {
+    return g_pfn_SymGetOptions();
+  }
+  return 0;
+}
+
+BOOL WindowsDbgHelp::symInitialize(HANDLE hProcess, PCTSTR UserSearchPath, BOOL fInvadeProcess) {
+  EntryGuard entry_guard;
+  if (g_pfn_SymInitialize != NULL) {
+    return g_pfn_SymInitialize(hProcess, UserSearchPath, fInvadeProcess);
+  }
+  return FALSE;
+}
+
+BOOL WindowsDbgHelp::symGetSymFromAddr64(HANDLE hProcess, DWORD64 the_address,
+                                         PDWORD64 Displacement, PIMAGEHLP_SYMBOL64 Symbol) {
+  EntryGuard entry_guard;
+  if (g_pfn_SymGetSymFromAddr64 != NULL) {
+    return g_pfn_SymGetSymFromAddr64(hProcess, the_address, Displacement, Symbol);
+  }
+  return FALSE;
+}
+
+DWORD WindowsDbgHelp::unDecorateSymbolName(const char* DecoratedName, char* UnDecoratedName,
+                                           DWORD UndecoratedLength, DWORD Flags) {
+  EntryGuard entry_guard;
+  if (g_pfn_UnDecorateSymbolName != NULL) {
+    return g_pfn_UnDecorateSymbolName(DecoratedName, UnDecoratedName, UndecoratedLength, Flags);
+  }
+  if (UnDecoratedName != NULL && UndecoratedLength > 0) {
+    UnDecoratedName[0] = '\0';
+  }
+  return 0;
+}
+
+BOOL WindowsDbgHelp::symSetSearchPath(HANDLE hProcess, PCTSTR SearchPath) {
+  EntryGuard entry_guard;
+  if (g_pfn_SymSetSearchPath != NULL) {
+    return g_pfn_SymSetSearchPath(hProcess, SearchPath);
+  }
+  return FALSE;
+}
+
+BOOL WindowsDbgHelp::symGetSearchPath(HANDLE hProcess, PTSTR SearchPath, int SearchPathLength) {
+  EntryGuard entry_guard;
+  if (g_pfn_SymGetSearchPath != NULL) {
+    return g_pfn_SymGetSearchPath(hProcess, SearchPath, SearchPathLength);
+  }
+  return FALSE;
+}
+
+BOOL WindowsDbgHelp::stackWalk64(DWORD MachineType,
+                                 HANDLE hProcess,
+                                 HANDLE hThread,
+                                 LPSTACKFRAME64 StackFrame,
+                                 PVOID ContextRecord) {
+  EntryGuard entry_guard;
+  if (g_pfn_StackWalk64 != NULL) {
+    return g_pfn_StackWalk64(MachineType, hProcess, hThread, StackFrame,
+                             ContextRecord,
+                             NULL, // ReadMemoryRoutine
+                             g_pfn_SymFunctionTableAccess64, // FunctionTableAccessRoutine,
+                             g_pfn_SymGetModuleBase64, // GetModuleBaseRoutine
+                             NULL // TranslateAddressRoutine
+                             );
+  }
+  return FALSE;
+}
+
+PVOID WindowsDbgHelp::symFunctionTableAccess64(HANDLE hProcess, DWORD64 AddrBase) {
+  EntryGuard entry_guard;
+  if (g_pfn_SymFunctionTableAccess64 != NULL) {
+    return g_pfn_SymFunctionTableAccess64(hProcess, AddrBase);
+  }
+  return NULL;
+}
+
+DWORD64 WindowsDbgHelp::symGetModuleBase64(HANDLE hProcess, DWORD64 dwAddr) {
+  EntryGuard entry_guard;
+  if (g_pfn_SymGetModuleBase64 != NULL) {
+    return g_pfn_SymGetModuleBase64(hProcess, dwAddr);
+  }
+  return 0;
+}
+
+BOOL WindowsDbgHelp::miniDumpWriteDump(HANDLE hProcess, DWORD ProcessId, HANDLE hFile,
+                                       MINIDUMP_TYPE DumpType, PMINIDUMP_EXCEPTION_INFORMATION ExceptionParam,
+                                       PMINIDUMP_USER_STREAM_INFORMATION UserStreamParam,
+                                       PMINIDUMP_CALLBACK_INFORMATION CallbackParam) {
+  EntryGuard entry_guard;
+  if (g_pfn_MiniDumpWriteDump != NULL) {
+    return g_pfn_MiniDumpWriteDump(hProcess, ProcessId, hFile, DumpType,
+                                   ExceptionParam, UserStreamParam, CallbackParam);
+  }
+  return FALSE;
+}
+
+BOOL WindowsDbgHelp::symGetLineFromAddr64(HANDLE hProcess, DWORD64 dwAddr,
+                          PDWORD pdwDisplacement, PIMAGEHLP_LINE64 Line) {
+  EntryGuard entry_guard;
+  if (g_pfn_SymGetLineFromAddr64 != NULL) {
+    return g_pfn_SymGetLineFromAddr64(hProcess, dwAddr, pdwDisplacement, Line);
+  }
+  return FALSE;
+}
+
+// Print one liner describing state (if library loaded, which functions are
+// missing - if any, and the dbhelp API version)
+void WindowsDbgHelp::print_state_on(outputStream* st) {
+  // Note: We should not lock while printing, but this should be
+  // safe to do without lock anyway.
+  st->print("dbghelp: ");
+
+  if (g_state == state_uninitialized) {
+    st->print("uninitialized.");
+  } else if (g_state == state_error) {
+    st->print("loading error: %u", g_dll_load_error);
+  } else {
+    st->print("loaded successfully ");
+
+    // We may want to print dll file name here - which may be interesting for
+    // cases where more than one version exists on the system, e.g. with a
+    // debugging sdk separately installed. But we get the file name in the DLL
+    // section of the hs-err file too, so this may be redundant.
+
+    // Print version.
+    st->print("- version: %u.%u.%u",
+              g_version.MajorVersion, g_version.MinorVersion, g_version.Revision);
+
+    // Print any functions which failed to load.
+    int num_missing = 0;
+    st->print(" - missing functions: ");
+
+    #define CHECK_AND_PRINT_IF_NULL(functionname) \
+    if (g_pfn_##functionname == NULL) { \
+      st->print("%s" #functionname, ((num_missing > 0) ? ", " : "")); \
+      num_missing ++; \
+    }
+
+    FOR_ALL_FUNCTIONS(CHECK_AND_PRINT_IF_NULL)
+
+    if (num_missing == 0) {
+      st->print("none");
+    }
+  }
+  st->cr();
+}
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/windows/vm/windbghelp.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_WINDOWS_VM_DBGHELPLOADER_HPP
+#define OS_WINDOWS_VM_DBGHELPLOADER_HPP
+
+#include <windows.h>
+#include <imagehlp.h>
+
+// This is a very plain wrapper for loading dbghelp.dll. It does not offer
+//  any additional functionality. It takes care of locking.
+
+class outputStream;
+
+// Please note: dbghelp.dll may not have been loaded, or it may have been loaded but not
+//  all functions may be available (because on the target system dbghelp.dll is of an
+//  older version).
+// In all these cases we return an error from the WindowsDbgHelp::symXXXX() wrapper. We never
+//  assert. It should always be safe to call these functions, but caller has to process the
+//  return code (which he would have to do anyway).
+namespace WindowsDbgHelp {
+
+  DWORD symSetOptions(DWORD);
+  DWORD symGetOptions(void);
+  BOOL symInitialize(HANDLE, PCTSTR, BOOL);
+  BOOL symGetSymFromAddr64(HANDLE, DWORD64, PDWORD64, PIMAGEHLP_SYMBOL64);
+  DWORD unDecorateSymbolName(const char*, char*, DWORD, DWORD);
+  BOOL symSetSearchPath(HANDLE, PCTSTR);
+  BOOL symGetSearchPath(HANDLE, PTSTR, int);
+  BOOL stackWalk64(DWORD MachineType,
+                   HANDLE hProcess,
+                   HANDLE hThread,
+                   LPSTACKFRAME64 StackFrame,
+                   PVOID ContextRecord);
+  PVOID symFunctionTableAccess64(HANDLE hProcess, DWORD64 AddrBase);
+  DWORD64 symGetModuleBase64(HANDLE hProcess, DWORD64 dwAddr);
+  BOOL miniDumpWriteDump(HANDLE hProcess, DWORD ProcessId, HANDLE hFile,
+                         MINIDUMP_TYPE DumpType, PMINIDUMP_EXCEPTION_INFORMATION ExceptionParam,
+                         PMINIDUMP_USER_STREAM_INFORMATION UserStreamParam,
+                         PMINIDUMP_CALLBACK_INFORMATION CallbackParam);
+  BOOL symGetLineFromAddr64 (HANDLE hProcess, DWORD64 dwAddr,
+                             PDWORD pdwDisplacement, PIMAGEHLP_LINE64 Line);
+
+  // Print one liner describing state (if library loaded, which functions are
+  // missing - if any, and the dbhelp API version)
+  void print_state_on(outputStream* st);
+
+};
+
+
+#endif // OS_WINDOWS_VM_DBGHELPLOADER_HPP
+
--- a/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/os_cpu/aix_ppc/vm/atomic_aix_ppc.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -30,6 +30,8 @@
 #error "Atomic currently only implemented for PPC64"
 #endif
 
+#include "utilities/debug.hpp"
+
 // Implementation of class atomic
 
 inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
@@ -93,9 +95,21 @@
 #define strasm_nobarrier                  ""
 #define strasm_nobarrier_clobber_memory   ""
 
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D add_and_fetch(I add_value, D volatile* dest) const;
+};
 
-  unsigned int result;
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
+
+  D result;
 
   __asm__ __volatile__ (
     strasm_lwsync
@@ -108,13 +122,17 @@
     : /*%1*/"r" (add_value), /*%2*/"r" (dest)
     : "cc", "memory" );
 
-  return (jint) result;
+  return result;
 }
 
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
 
-  long result;
+  D result;
 
   __asm__ __volatile__ (
     strasm_lwsync
@@ -127,11 +145,7 @@
     : /*%1*/"r" (add_value), /*%2*/"r" (dest)
     : "cc", "memory" );
 
-  return (intptr_t) result;
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
+  return result;
 }
 
 
@@ -312,7 +326,7 @@
                                                 T volatile* dest,
                                                 T compare_value,
                                                 cmpxchg_memory_order order) const {
-  STATIC_CAST(1 == sizeof(T));
+  STATIC_ASSERT(1 == sizeof(T));
 
   // Note that cmpxchg guarantees a two-way memory barrier across
   // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not
@@ -382,7 +396,7 @@
                                                 T volatile* dest,
                                                 T compare_value,
                                                 cmpxchg_memory_order order) const {
-  STATIC_CAST(4 == sizeof(T));
+  STATIC_ASSERT(4 == sizeof(T));
 
   // Note that cmpxchg guarantees a two-way memory barrier across
   // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not
@@ -432,7 +446,7 @@
                                                 T volatile* dest,
                                                 T compare_value,
                                                 cmpxchg_memory_order order) const {
-  STATIC_CAST(8 == sizeof(T));
+  STATIC_ASSERT(8 == sizeof(T));
 
   // Note that cmpxchg guarantees a two-way memory barrier across
   // the cmpxchg, so it's really a a 'fence_cmpxchg_fence' if not
--- a/src/os_cpu/aix_ppc/vm/thread_aix_ppc.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/os_cpu/aix_ppc/vm/thread_aix_ppc.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2013 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -29,7 +29,6 @@
  private:
   void pd_initialize() {
     _anchor.clear();
-    _last_interpreter_fp = NULL;
   }
 
   // The `last' frame is the youngest Java frame on the thread's stack.
@@ -60,20 +59,4 @@
   bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
                                            bool isInJava);
 
-  // -Xprof support
-  //
-  // In order to find the last Java fp from an async profile
-  // tick, we store the current interpreter fp in the thread.
-  // This value is only valid while we are in the C++ interpreter
-  // and profiling.
- protected:
-  intptr_t *_last_interpreter_fp;
-
- public:
-  static ByteSize last_interpreter_fp_offset() {
-    return byte_offset_of(JavaThread, _last_interpreter_fp);
-  }
-
-  intptr_t* last_interpreter_fp() { return _last_interpreter_fp; }
-
 #endif // OS_CPU_AIX_PPC_VM_THREAD_AIX_PPC_HPP
--- a/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/os_cpu/bsd_x86/vm/atomic_bsd_x86.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -40,13 +40,25 @@
 inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
 
 
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  jint addend = add_value;
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D fetch_and_add(I add_value, D volatile* dest) const;
+};
+
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
+  D old_value;
   __asm__ volatile (  "lock xaddl %0,(%2)"
-                    : "=r" (addend)
-                    : "0" (addend), "r" (dest)
+                    : "=r" (old_value)
+                    : "0" (add_value), "r" (dest)
                     : "cc", "memory");
-  return addend + add_value;
+  return old_value;
 }
 
 inline void Atomic::inc    (volatile jint*     dest) {
@@ -111,17 +123,17 @@
 inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
 inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  intptr_t addend = add_value;
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
+  D old_value;
   __asm__ __volatile__ (  "lock xaddq %0,(%2)"
-                        : "=r" (addend)
-                        : "0" (addend), "r" (dest)
+                        : "=r" (old_value)
+                        : "0" (add_value), "r" (dest)
                         : "cc", "memory");
-  return addend + add_value;
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
+  return old_value;
 }
 
 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
@@ -164,15 +176,6 @@
 
 #else // !AMD64
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)Atomic::add((jint)add_value, (volatile jint*)dest);
-}
-
-
 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
   inc((volatile jint*)dest);
 }
--- a/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -908,6 +908,12 @@
   // workaround for OS X 10.9.0 (Mavericks)
   // pthread_get_stacksize_np returns 128 pages even though the actual size is 2048 pages
   if (pthread_main_np() == 1) {
+    // At least on Mac OS 10.12 we have observed stack sizes not aligned
+    // to pages boundaries. This can be provoked by e.g. setrlimit() (ulimit -s xxxx in the
+    // shell). Apparently Mac OS actually rounds upwards to next multiple of page size,
+    // however, we round downwards here to be on the safe side.
+    *size = align_down(*size, getpagesize());
+
     if ((*size) < (DEFAULT_MAIN_THREAD_STACK_PAGES * (size_t)getpagesize())) {
       char kern_osrelease[256];
       size_t kern_osrelease_size = sizeof(kern_osrelease);
--- a/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/os_cpu/bsd_zero/vm/atomic_bsd_zero.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -74,7 +74,7 @@
 }
 
 /* Atomically add an int to memory.  */
-static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
+static inline int m68k_add_and_fetch(int add_value, volatile int *ptr) {
   for (;;) {
       // Loop until success.
 
@@ -135,7 +135,7 @@
 }
 
 /* Atomically add an int to memory.  */
-static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
+static inline int arm_add_and_fetch(int add_value, volatile int *ptr) {
   for (;;) {
       // Loop until a __kernel_cmpxchg succeeds.
 
@@ -173,32 +173,38 @@
   *dest = store_value;
 }
 
-inline jint Atomic::add(jint add_value, volatile jint* dest) {
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D add_and_fetch(I add_value, D volatile* dest) const;
+};
+
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
+
 #ifdef ARM
-  return arm_add_and_fetch(dest, add_value);
+  return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
 #else
 #ifdef M68K
-  return m68k_add_and_fetch(dest, add_value);
+  return add_using_helper<int>(m68k_add_and_fetch, add_value, dest);
 #else
   return __sync_add_and_fetch(dest, add_value);
 #endif // M68K
 #endif // ARM
 }
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-#ifdef ARM
-  return arm_add_and_fetch(dest, add_value);
-#else
-#ifdef M68K
-  return m68k_add_and_fetch(dest, add_value);
-#else
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
+
   return __sync_add_and_fetch(dest, add_value);
-#endif // M68K
-#endif // ARM
-}
-
-inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
-  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
 }
 
 inline void Atomic::inc(volatile jint* dest) {
@@ -277,7 +283,7 @@
                                                 T volatile* dest,
                                                 T compare_value,
                                                 cmpxchg_memory_order order) const {
-  STATIC_CAST(4 == sizeof(T));
+  STATIC_ASSERT(4 == sizeof(T));
 #ifdef ARM
   return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
 #else
@@ -295,7 +301,7 @@
                                                 T volatile* dest,
                                                 T compare_value,
                                                 cmpxchg_memory_order order) const {
-  STATIC_CAST(8 == sizeof(T));
+  STATIC_ASSERT(8 == sizeof(T));
   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 }
 
--- a/src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/os_cpu/linux_aarch64/vm/atomic_linux_aarch64.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -47,10 +47,15 @@
 inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
 
 
-inline jint Atomic::add(jint add_value, volatile jint* dest)
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
 {
- return __sync_add_and_fetch(dest, add_value);
-}
+  template<typename I, typename D>
+  D add_and_fetch(I add_value, D volatile* dest) const {
+    return __sync_add_and_fetch(dest, add_value);
+  }
+};
 
 inline void Atomic::inc(volatile jint* dest)
 {
@@ -105,16 +110,6 @@
 inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
 inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
-{
- return __sync_add_and_fetch(dest, add_value);
-}
-
-inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest)
-{
-  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
-}
-
 inline void Atomic::inc_ptr(volatile intptr_t* dest)
 {
  add_ptr(1, dest);
--- a/src/os_cpu/linux_arm/vm/atomic_linux_arm.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/os_cpu/linux_arm/vm/atomic_linux_arm.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -91,9 +91,21 @@
 //
 // For ARMv7 we add explicit barriers in the stubs.
 
-inline jint Atomic::add(jint add_value, volatile jint* dest) {
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D add_and_fetch(I add_value, D volatile* dest) const;
+};
+
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
 #ifdef AARCH64
-  jint val;
+  D val;
   int tmp;
   __asm__ volatile(
     "1:\n\t"
@@ -106,7 +118,7 @@
     : "memory");
   return val;
 #else
-  return (*os::atomic_add_func)(add_value, dest);
+  return add_using_helper<jint>(os::atomic_add_func, add_value, dest);
 #endif
 }
 
@@ -118,9 +130,13 @@
   Atomic::add(-1, (volatile jint *)dest);
 }
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
 #ifdef AARCH64
-  intptr_t val;
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
+  D val;
   int tmp;
   __asm__ volatile(
     "1:\n\t"
@@ -132,14 +148,8 @@
     : [add_val] "r" (add_value), [dest] "r" (dest)
     : "memory");
   return val;
-#else
-  return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
-#endif
 }
-
-inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
-}
+#endif // AARCH64
 
 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
   Atomic::add_ptr(1, dest);
--- a/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/os_cpu/linux_ppc/vm/atomic_linux_ppc.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -93,9 +93,21 @@
 #define strasm_nobarrier                  ""
 #define strasm_nobarrier_clobber_memory   ""
 
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D add_and_fetch(I add_value, D volatile* dest) const;
+};
 
-  unsigned int result;
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
+
+  D result;
 
   __asm__ __volatile__ (
     strasm_lwsync
@@ -108,13 +120,17 @@
     : /*%1*/"r" (add_value), /*%2*/"r" (dest)
     : "cc", "memory" );
 
-  return (jint) result;
+  return result;
 }
 
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
 
-  long result;
+  D result;
 
   __asm__ __volatile__ (
     strasm_lwsync
@@ -127,11 +143,7 @@
     : /*%1*/"r" (add_value), /*%2*/"r" (dest)
     : "cc", "memory" );
 
-  return (intptr_t) result;
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
+  return result;
 }
 
 
--- a/src/os_cpu/linux_ppc/vm/thread_linux_ppc.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/os_cpu/linux_ppc/vm/thread_linux_ppc.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2013 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -30,7 +30,6 @@
 
   void pd_initialize() {
     _anchor.clear();
-    _last_interpreter_fp = NULL;
   }
 
   // The `last' frame is the youngest Java frame on the thread's stack.
@@ -62,22 +61,4 @@
 
   bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava);
 
- protected:
-
-  // -Xprof support
-  //
-  // In order to find the last Java fp from an async profile
-  // tick, we store the current interpreter fp in the thread.
-  // This value is only valid while we are in the C++ interpreter
-  // and profiling.
-  intptr_t *_last_interpreter_fp;
-
- public:
-
-  static ByteSize last_interpreter_fp_offset() {
-    return byte_offset_of(JavaThread, _last_interpreter_fp);
-  }
-
-  intptr_t* last_interpreter_fp() { return _last_interpreter_fp; }
-
 #endif // OS_CPU_LINUX_PPC_VM_THREAD_LINUX_PPC_HPP
--- a/src/os_cpu/linux_s390/vm/atomic_linux_s390.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/os_cpu/linux_s390/vm/atomic_linux_s390.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -82,8 +82,21 @@
 // The return value of the method is the value that was successfully stored. At the
 // time the caller receives back control, the value in memory may have changed already.
 
-inline jint Atomic::add(jint inc, volatile jint*dest) {
-  unsigned int old, upd;
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D add_and_fetch(I add_value, D volatile* dest) const;
+};
+
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I inc, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
+
+  D old, upd;
 
   if (VM_Version::has_LoadAndALUAtomicV1()) {
     __asm__ __volatile__ (
@@ -124,12 +137,17 @@
     );
   }
 
-  return (jint)upd;
+  return upd;
 }
 
 
-inline intptr_t Atomic::add_ptr(intptr_t inc, volatile intptr_t* dest) {
-  unsigned long old, upd;
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I inc, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
+
+  D old, upd;
 
   if (VM_Version::has_LoadAndALUAtomicV1()) {
     __asm__ __volatile__ (
@@ -170,11 +188,7 @@
     );
   }
 
-  return (intptr_t)upd;
-}
-
-inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
+  return upd;
 }
 
 
--- a/src/os_cpu/linux_s390/vm/thread_linux_s390.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/os_cpu/linux_s390/vm/thread_linux_s390.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2016 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -30,7 +30,6 @@
 
   void pd_initialize() {
     _anchor.clear();
-    _last_interpreter_fp = NULL;
   }
 
   // The `last' frame is the youngest Java frame on the thread's stack.
@@ -61,22 +60,4 @@
 
   bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava);
 
- protected:
-
-  // -Xprof support
-  //
-  // In order to find the last Java fp from an async profile
-  // tick, we store the current interpreter fp in the thread.
-  // This value is only valid while we are in the C++ interpreter
-  // and profiling.
-  intptr_t *_last_interpreter_fp;
-
- public:
-
-  static ByteSize last_interpreter_fp_offset() {
-    return byte_offset_of(JavaThread, _last_interpreter_fp);
-  }
-
-  intptr_t* last_interpreter_fp() { return _last_interpreter_fp; }
-
 #endif // OS_CPU_LINUX_S390_VM_THREAD_LINUX_S390_HPP
--- a/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -51,8 +51,21 @@
 
 inline jlong Atomic::load(const volatile jlong* src) { return *src; }
 
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  intptr_t rv;
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D add_and_fetch(I add_value, D volatile* dest) const;
+};
+
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
+
+  D rv;
   __asm__ volatile(
     "1: \n\t"
     " ld     [%2], %%o2\n\t"
@@ -68,8 +81,13 @@
   return rv;
 }
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  intptr_t rv;
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
+
+  D rv;
   __asm__ volatile(
     "1: \n\t"
     " ldx    [%2], %%o2\n\t"
@@ -85,10 +103,6 @@
   return rv;
 }
 
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr((intptr_t)add_value, (volatile intptr_t*)dest);
-}
-
 
 inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
   intptr_t rv = exchange_value;
--- a/src/os_cpu/linux_x86/vm/atomic_linux_x86.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/os_cpu/linux_x86/vm/atomic_linux_x86.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -40,13 +40,25 @@
 inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
 
 
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  jint addend = add_value;
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D fetch_and_add(I add_value, D volatile* dest) const;
+};
+
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
+  D old_value;
   __asm__ volatile (  "lock xaddl %0,(%2)"
-                    : "=r" (addend)
-                    : "0" (addend), "r" (dest)
+                    : "=r" (old_value)
+                    : "0" (add_value), "r" (dest)
                     : "cc", "memory");
-  return addend + add_value;
+  return old_value;
 }
 
 inline void Atomic::inc    (volatile jint*     dest) {
@@ -111,17 +123,17 @@
 inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
 inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  intptr_t addend = add_value;
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
+  D old_value;
   __asm__ __volatile__ ("lock xaddq %0,(%2)"
-                        : "=r" (addend)
-                        : "0" (addend), "r" (dest)
+                        : "=r" (old_value)
+                        : "0" (add_value), "r" (dest)
                         : "cc", "memory");
-  return addend + add_value;
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
+  return old_value;
 }
 
 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
@@ -164,15 +176,6 @@
 
 #else // !AMD64
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)Atomic::add((jint)add_value, (volatile jint*)dest);
-}
-
-
 inline void Atomic::inc_ptr(volatile intptr_t* dest) {
   inc((volatile jint*)dest);
 }
--- a/src/os_cpu/linux_zero/vm/atomic_linux_zero.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/os_cpu/linux_zero/vm/atomic_linux_zero.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -74,7 +74,7 @@
 }
 
 /* Atomically add an int to memory.  */
-static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
+static inline int m68k_add_and_fetch(int add_value, volatile int *ptr) {
   for (;;) {
       // Loop until success.
 
@@ -135,7 +135,7 @@
 }
 
 /* Atomically add an int to memory.  */
-static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
+static inline int arm_add_and_fetch(int add_value, volatile int *ptr) {
   for (;;) {
       // Loop until a __kernel_cmpxchg succeeds.
 
@@ -167,32 +167,38 @@
   *dest = store_value;
 }
 
-inline jint Atomic::add(jint add_value, volatile jint* dest) {
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D add_and_fetch(I add_value, D volatile* dest) const;
+};
+
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
+
 #ifdef ARM
-  return arm_add_and_fetch(dest, add_value);
+  return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
 #else
 #ifdef M68K
-  return m68k_add_and_fetch(dest, add_value);
+  return add_using_helper<int>(m68k_add_and_fetch, add_value, dest);
 #else
   return __sync_add_and_fetch(dest, add_value);
 #endif // M68K
 #endif // ARM
 }
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-#ifdef ARM
-  return arm_add_and_fetch(dest, add_value);
-#else
-#ifdef M68K
-  return m68k_add_and_fetch(dest, add_value);
-#else
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
+
   return __sync_add_and_fetch(dest, add_value);
-#endif // M68K
-#endif // ARM
-}
-
-inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
-  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
 }
 
 inline void Atomic::inc(volatile jint* dest) {
--- a/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -62,22 +62,21 @@
 extern "C" jint     _Atomic_swap32(jint     exchange_value, volatile jint*     dest);
 extern "C" intptr_t _Atomic_swap64(intptr_t exchange_value, volatile intptr_t* dest);
 
-extern "C" jint     _Atomic_add32(jint     inc,       volatile jint*     dest);
-extern "C" intptr_t _Atomic_add64(intptr_t add_value, volatile intptr_t* dest);
-
-
-inline jint     Atomic::add     (jint    add_value, volatile jint*     dest) {
-  return _Atomic_add32(add_value, dest);
-}
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return _Atomic_add64(add_value, dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr((intptr_t)add_value, (volatile intptr_t*)dest);
-}
-
+// Implement ADD using a CAS loop.
+template<size_t byte_size>
+struct Atomic::PlatformAdd VALUE_OBJ_CLASS_SPEC {
+  template<typename I, typename D>
+  inline D operator()(I add_value, D volatile* dest) const {
+    D old_value = *dest;
+    while (true) {
+      D new_value = old_value + add_value;
+      D result = cmpxchg(new_value, dest, old_value);
+      if (result == old_value) break;
+      old_value = result;
+    }
+    return old_value + add_value;
+  }
+};
 
 inline jint     Atomic::xchg    (jint     exchange_value, volatile jint*     dest) {
   return _Atomic_swap32(exchange_value, dest);
--- a/src/os_cpu/solaris_sparc/vm/solaris_sparc.il	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/os_cpu/solaris_sparc/vm/solaris_sparc.il	Tue Sep 26 13:09:56 2017 +0200
@@ -90,58 +90,6 @@
         .nonvolatile
         .end
 
-  // Support for jint Atomic::add(jint add_value, volatile jint* dest).
-  //
-  // Arguments:
-  //      add_value: O0   (e.g., +1 or -1)
-  //      dest:      O1
-  //
-  // Results:
-  //     O0: the new value stored in dest
-  //
-  // Overwrites O3
-
-        .inline _Atomic_add32, 2
-        .volatile
-    2:
-        ld      [%o1], %o2
-        add     %o0, %o2, %o3
-        cas     [%o1], %o2, %o3
-        cmp     %o2, %o3
-        bne     2b
-         nop
-        add     %o0, %o2, %o0
-        .nonvolatile
-        .end
-
-
-  // Support for intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
-  //
-  // 64-bit
-  //
-  // Arguments:
-  //      add_value: O0   (e.g., +1 or -1)
-  //      dest:      O1
-  //
-  // Results:
-  //     O0: the new value stored in dest
-  //
-  // Overwrites O3
-
-        .inline _Atomic_add64, 2
-        .volatile
-    3:
-        ldx     [%o1], %o2
-        add     %o0, %o2, %o3
-        casx    [%o1], %o2, %o3
-        cmp     %o2, %o3
-        bne     %xcc, 3b
-         nop
-        add     %o0, %o2, %o0
-        .nonvolatile
-        .end
-
-
   // Support for void Prefetch::read(void *loc, intx interval)
   //
   // Prefetch for several reads.
--- a/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/os_cpu/solaris_x86/vm/atomic_solaris_x86.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -51,6 +51,8 @@
 
 extern "C" {
   jint _Atomic_add(jint add_value, volatile jint* dest);
+  jlong _Atomic_add_long(jlong add_value, volatile jlong* dest);
+
   jint _Atomic_xchg(jint exchange_value, volatile jint* dest);
   jbyte _Atomic_cmpxchg_byte(jbyte exchange_value, volatile jbyte* dest,
                              jbyte compare_value);
@@ -60,8 +62,34 @@
                              jlong compare_value);
 }
 
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  return _Atomic_add(add_value, dest);
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D add_and_fetch(I add_value, D volatile* dest) const;
+};
+
+// Not using add_using_helper; see comment for cmpxchg.
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
+  return PrimitiveConversions::cast<D>(
+    _Atomic_add(PrimitiveConversions::cast<jint>(add_value),
+                reinterpret_cast<jint volatile*>(dest)));
+}
+
+// Not using add_using_helper; see comment for cmpxchg.
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(8 == sizeof(I));
+  STATIC_ASSERT(8 == sizeof(D));
+  return PrimitiveConversions::cast<D>(
+    _Atomic_add_long(PrimitiveConversions::cast<jlong>(add_value),
+                     reinterpret_cast<jlong volatile*>(dest)));
 }
 
 inline jint     Atomic::xchg       (jint     exchange_value, volatile jint*     dest) {
@@ -115,17 +143,8 @@
 
 inline void Atomic::store    (jlong    store_value, jlong*             dest) { *dest = store_value; }
 inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
-extern "C" jlong _Atomic_add_long(jlong add_value, volatile jlong* dest);
 extern "C" jlong _Atomic_xchg_long(jlong exchange_value, volatile jlong* dest);
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)_Atomic_add_long((jlong)add_value, (volatile jlong*)dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)_Atomic_add_long((jlong)add_value, (volatile jlong*)dest);
-}
-
 inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
   return (intptr_t)_Atomic_xchg_long((jlong)exchange_value, (volatile jlong*)dest);
 }
--- a/src/os_cpu/windows_x86/vm/atomic_windows_x86.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/os_cpu/windows_x86/vm/atomic_windows_x86.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -57,20 +57,28 @@
 inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
 inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
 
+template<size_t byte_size>
+struct Atomic::PlatformAdd
+  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
+{
+  template<typename I, typename D>
+  D add_and_fetch(I add_value, D volatile* dest) const;
+};
+
 #ifdef AMD64
 inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
 inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
 
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-  return (jint)(*os::atomic_add_func)(add_value, dest);
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+  return add_using_helper<jint>(os::atomic_add_func, add_value, dest);
 }
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)(*os::atomic_add_ptr_func)(add_value, dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)(*os::atomic_add_ptr_func)(add_value, (volatile intptr_t*)dest);
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
+  return add_using_helper<intptr_t>(os::atomic_add_ptr_func, add_value, dest);
 }
 
 inline void Atomic::inc    (volatile jint*     dest) {
@@ -130,7 +138,11 @@
 
 #else // !AMD64
 
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
+template<>
+template<typename I, typename D>
+inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
+  STATIC_ASSERT(4 == sizeof(I));
+  STATIC_ASSERT(4 == sizeof(D));
   __asm {
     mov edx, dest;
     mov eax, add_value;
@@ -140,14 +152,6 @@
   }
 }
 
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-  return (intptr_t)add((jint)add_value, (volatile jint*)dest);
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add((jint)add_value, (volatile jint*)dest);
-}
-
 inline void Atomic::inc    (volatile jint*     dest) {
   // alternative for InterlockedIncrement
   __asm {
--- a/src/os_cpu/windows_x86/vm/os_windows_x86.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/os_cpu/windows_x86/vm/os_windows_x86.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -29,7 +29,6 @@
 #include "classfile/vmSymbols.hpp"
 #include "code/icBuffer.hpp"
 #include "code/vtableStubs.hpp"
-#include "decoder_windows.hpp"
 #include "interpreter/interpreter.hpp"
 #include "jvm_windows.h"
 #include "memory/allocation.inline.hpp"
@@ -51,10 +50,12 @@
 #include "runtime/stubRoutines.hpp"
 #include "runtime/thread.inline.hpp"
 #include "runtime/timer.hpp"
+#include "unwind_windows_x86.hpp"
 #include "utilities/events.hpp"
 #include "utilities/vmError.hpp"
+#include "windbghelp.hpp"
 
-# include "unwind_windows_x86.hpp"
+
 #undef REG_SP
 #undef REG_FP
 #undef REG_PC
@@ -401,24 +402,18 @@
       lastpc = pc;
     }
 
-    PVOID p = WindowsDbgHelp::SymFunctionTableAccess64(GetCurrentProcess(), stk.AddrPC.Offset);
+    PVOID p = WindowsDbgHelp::symFunctionTableAccess64(GetCurrentProcess(), stk.AddrPC.Offset);
     if (!p) {
       // StackWalk64() can't handle this PC. Calling StackWalk64 again may cause crash.
       break;
     }
 
-    BOOL result = WindowsDbgHelp::StackWalk64(
+    BOOL result = WindowsDbgHelp::stackWalk64(
         IMAGE_FILE_MACHINE_AMD64,  // __in      DWORD MachineType,
         GetCurrentProcess(),       // __in      HANDLE hProcess,
         GetCurrentThread(),        // __in      HANDLE hThread,
         &stk,                      // __inout   LP STACKFRAME64 StackFrame,
-        &ctx,                      // __inout   PVOID ContextRecord,
-        NULL,                      // __in_opt  PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
-        WindowsDbgHelp::pfnSymFunctionTableAccess64(),
-                                   // __in_opt  PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
-        WindowsDbgHelp::pfnSymGetModuleBase64(),
-                                   // __in_opt  PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
-        NULL);                     // __in_opt  PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress
+        &ctx);                     // __inout   PVOID ContextRecord
 
     if (!result) {
       break;
--- a/src/share/vm/Xusage.txt	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/Xusage.txt	Tue Sep 26 13:09:56 2017 +0200
@@ -12,7 +12,6 @@
     -Xms<size>        set initial Java heap size
     -Xmx<size>        set maximum Java heap size
     -Xss<size>        set java thread stack size
-    -Xprof            output cpu profiling data (deprecated)
     -Xfuture          enable strictest checks, anticipating future default
     -Xrs              reduce use of OS signals by Java/VM (see documentation)
     -Xcheck:jni       perform additional checks for JNI functions
--- a/src/share/vm/aot/aotCodeHeap.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/aot/aotCodeHeap.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -478,6 +478,8 @@
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_arrayof_oop_disjoint_arraycopy", address, StubRoutines::_arrayof_oop_disjoint_arraycopy);
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_arrayof_oop_disjoint_arraycopy_uninit", address, StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit);
 
+    SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_unsafe_arraycopy", address, StubRoutines::_unsafe_arraycopy);
+
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_checkcast_arraycopy", address, StubRoutines::_checkcast_arraycopy);
 
     SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_aescrypt_encryptBlock", address, StubRoutines::_aescrypt_encryptBlock);
--- a/src/share/vm/classfile/classLoader.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/classfile/classLoader.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -57,7 +57,6 @@
 #include "prims/jvm_misc.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/compilationPolicy.hpp"
-#include "runtime/fprofiler.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/init.hpp"
@@ -147,6 +146,7 @@
 ClassPathEntry* ClassLoader::_first_append_entry = NULL;
 ClassPathEntry* ClassLoader::_last_append_entry  = NULL;
 int             ClassLoader::_num_entries        = 0;
+int             ClassLoader::_num_boot_entries   = -1;
 #if INCLUDE_CDS
 GrowableArray<char*>* ClassLoader::_boot_modules_array = NULL;
 GrowableArray<char*>* ClassLoader::_platform_modules_array = NULL;
@@ -242,7 +242,7 @@
 
 // Given a fully qualified class name, find its defining package in the class loader's
 // package entry table.
-static PackageEntry* get_package_entry(const char* class_name, ClassLoaderData* loader_data, TRAPS) {
+PackageEntry* ClassLoader::get_package_entry(const char* class_name, ClassLoaderData* loader_data, TRAPS) {
   ResourceMark rm(THREAD);
   const char *pkg_name = ClassLoader::package_from_name(class_name);
   if (pkg_name == NULL) {
@@ -509,7 +509,7 @@
 #endif
 
       } else {
-        PackageEntry* package_entry = get_package_entry(name, ClassLoaderData::the_null_class_loader_data(), CHECK_NULL);
+        PackageEntry* package_entry = ClassLoader::get_package_entry(name, ClassLoaderData::the_null_class_loader_data(), CHECK_NULL);
         if (package_entry != NULL) {
           ResourceMark rm;
           // Get the module name
@@ -540,6 +540,13 @@
   return NULL;
 }
 
+JImageLocationRef ClassLoader::jimage_find_resource(JImageFile* jf,
+                                                    const char* module_name,
+                                                    const char* file_name,
+                                                    jlong &size) {
+  return ((*JImageFindResource)(jf, module_name, get_jimage_version_string(), file_name, &size));
+}
+
 #ifndef PRODUCT
 bool ctw_visitor(JImageFile* jimage,
         const char* module_name, const char* version, const char* package,
@@ -1066,7 +1073,7 @@
   char path[JVM_MAXPATHLEN];
   char ebuf[1024];
   void* handle = NULL;
-  if (os::dll_build_name(path, sizeof(path), Arguments::get_dll_dir(), "zip")) {
+  if (os::dll_locate_lib(path, sizeof(path), Arguments::get_dll_dir(), "zip")) {
     handle = os::dll_load(path, ebuf, sizeof ebuf);
   }
   if (handle == NULL) {
@@ -1104,7 +1111,7 @@
   char path[JVM_MAXPATHLEN];
   char ebuf[1024];
   void* handle = NULL;
-  if (os::dll_build_name(path, sizeof(path), Arguments::get_dll_dir(), "jimage")) {
+  if (os::dll_locate_lib(path, sizeof(path), Arguments::get_dll_dir(), "jimage")) {
     handle = os::dll_load(path, ebuf, sizeof ebuf);
   }
   if (handle == NULL) {
@@ -1434,7 +1441,6 @@
   const char* const class_name = name->as_C_string();
 
   EventMark m("loading class %s", class_name);
-  ThreadProfilerMark tpm(ThreadProfilerMark::classLoaderRegion);
 
   const char* const file_name = file_name_for_class_name(class_name,
                                                          name->utf8_length());
@@ -1459,9 +1465,6 @@
   // This would include:
   //   [--patch-module=<module>=<file>(<pathsep><file>)*]; [jimage | exploded module build]
   //
-  // DumpSharedSpaces and search_append_only are mutually exclusive and cannot
-  // be true at the same time.
-  assert(!(DumpSharedSpaces && search_append_only), "DumpSharedSpaces and search_append_only are both true");
 
   // Load Attempt #1: --patch-module
   // Determine the class' defining module.  If it appears in the _patch_mod_entries,
@@ -1507,6 +1510,11 @@
 
     e = _first_append_entry;
     while (e != NULL) {
+      if (DumpSharedSpaces && classpath_index >= _num_boot_entries) {
+        // Do not load any class from the app classpath using the boot loader. Let
+        // the built-in app class loader load them.
+        break;
+      }
       stream = e->open_stream(file_name, CHECK_NULL);
       if (!context.check(stream, classpath_index)) {
         return NULL;
@@ -1520,9 +1528,6 @@
   }
 
   if (NULL == stream) {
-    if (DumpSharedSpaces) {
-      tty->print_cr("Preload Warning: Cannot find %s", class_name);
-    }
     return NULL;
   }
 
@@ -1548,6 +1553,100 @@
   return context.record_result(name, e, classpath_index, result, THREAD);
 }
 
+#if INCLUDE_CDS
+static char* skip_uri_protocol(char* source) {
+  if (strncmp(source, "file:", 5) == 0) {
+    // file: protocol path could start with file:/ or file:///
+    // locate the char after all the forward slashes
+    int offset = 5;
+    while (*(source + offset) == '/') {
+        offset++;
+    }
+    source += offset;
+  // for non-windows platforms, move back one char as the path begins with a '/'
+#ifndef _WINDOWS
+    source -= 1;
+#endif
+  } else if (strncmp(source, "jrt:/", 5) == 0) {
+    source += 5;
+  }
+  return source;
+}
+
+void ClassLoader::record_shared_class_loader_type(InstanceKlass* ik, const ClassFileStream* stream) {
+  assert(DumpSharedSpaces, "sanity");
+  assert(stream != NULL, "sanity");
+
+  if (ik->is_anonymous()) {
+    // We do not archive anonymous classes.
+    return;
+  }
+
+  if (stream->source() == NULL) {
+    if (ik->class_loader() == NULL) {
+      // JFR classes
+      ik->set_shared_classpath_index(0);
+      ik->set_class_loader_type(ClassLoader::BOOT_LOADER);
+    }
+    return;
+  }
+
+  assert(has_jrt_entry(), "CDS dumping does not support exploded JDK build");
+
+  ModuleEntry* module = ik->module();
+  ClassPathEntry* e = NULL;
+  int classpath_index = 0;
+
+  // Check if the class is from the runtime image
+  if (module != NULL && (module->location() != NULL) &&
+      (module->location()->starts_with("jrt:"))) {
+    e = _jrt_entry;
+    classpath_index = 0;
+  } else {
+    classpath_index = 1;
+    ResourceMark rm;
+    char* canonical_path = NEW_RESOURCE_ARRAY(char, JVM_MAXPATHLEN);
+    for (e = _first_append_entry; e != NULL; e = e->next()) {
+      if (get_canonical_path(e->name(), canonical_path, JVM_MAXPATHLEN)) {
+        char* src = (char*)stream->source();
+        // save the path from the file: protocol or the module name from the jrt: protocol
+        // if no protocol prefix is found, src is the same as stream->source() after the following call
+        src = skip_uri_protocol(src);
+        if (strcmp(canonical_path, os::native_path((char*)src)) == 0) {
+          break;
+        }
+        classpath_index ++;
+      }
+    }
+    if (e == NULL) {
+      assert(ik->shared_classpath_index() < 0,
+        "must be a class from a custom jar which isn't in the class path or boot class path");
+      return;
+    }
+  }
+
+  if (classpath_index < _num_boot_entries) {
+    // ik is either:
+    // 1) a boot class loaded from the runtime image during vm initialization (classpath_index = 0); or
+    // 2) a user's class from -Xbootclasspath/a (classpath_index > 0)
+    // In the second case, the classpath_index, classloader_type will be recorded via
+    // context.record_result() in ClassLoader::load_class(Symbol* name, bool search_append_only, TRAPS).
+    if (classpath_index > 0) {
+      return;
+    }
+  }
+
+  ResourceMark rm;
+  const char* const class_name = ik->name()->as_C_string();
+  const char* const file_name = file_name_for_class_name(class_name,
+                                                         ik->name()->utf8_length());
+  assert(file_name != NULL, "invariant");
+  Thread* THREAD = Thread::current();
+  ClassLoaderExt::Context context(class_name, file_name, CATCH);
+  context.record_result(ik->name(), e, classpath_index, ik, THREAD);
+}
+#endif // INCLUDE_CDS
+
 // Initialize the class loader's access to methods in libzip.  Parse and
 // process the boot classpath into a list ClassPathEntry objects.  Once
 // this list has been created, it must not change order (see class PackageInfo)
@@ -1632,6 +1731,7 @@
 #if INCLUDE_CDS
 void ClassLoader::initialize_shared_path() {
   if (DumpSharedSpaces) {
+    _num_boot_entries = _num_entries;
     ClassLoaderExt::setup_search_paths();
     _shared_paths_misc_info->write_jint(0); // see comments in SharedPathsMiscInfo::check()
   }
--- a/src/share/vm/classfile/classLoader.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/classfile/classLoader.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -25,6 +25,7 @@
 #ifndef SHARE_VM_CLASSFILE_CLASSLOADER_HPP
 #define SHARE_VM_CLASSFILE_CLASSLOADER_HPP
 
+#include "classfile/jimage.hpp"
 #include "runtime/orderAccess.hpp"
 #include "runtime/perfData.hpp"
 #include "utilities/exceptions.hpp"
@@ -47,6 +48,7 @@
 
 class JImageFile;
 class ClassFileStream;
+class PackageEntry;
 
 class ClassPathEntry : public CHeapObj<mtClass> {
 private:
@@ -103,7 +105,6 @@
   jlong pos;                    /* position of LOC header (if negative) or data */
 } jzentry;
 
-
 class ClassPathZipEntry: public ClassPathEntry {
  enum {
    _unknown = 0,
@@ -249,6 +250,10 @@
   //       the entries on the _first_append_entry linked list.
   static int _num_entries;
 
+  // number of entries in the boot class path including the
+  // java runtime image
+  static int _num_boot_entries;
+
   // Array of module names associated with the boot class loader
   CDS_ONLY(static GrowableArray<char*>* _boot_modules_array;)
 
@@ -289,6 +294,7 @@
   static bool get_canonical_path(const char* orig, char* out, int len);
   static const char* file_name_for_class_name(const char* class_name,
                                               int class_name_len);
+  static PackageEntry* get_package_entry(const char* class_name, ClassLoaderData* loader_data, TRAPS);
 
  public:
   static jboolean decompress(void *in, u8 inSize, void *out, u8 outSize, char **pmsg);
@@ -436,7 +442,10 @@
   static void initialize_module_loader_map(JImageFile* jimage);
   static s2 classloader_type(Symbol* class_name, ClassPathEntry* e,
                              int classpath_index, TRAPS);
+  static void record_shared_class_loader_type(InstanceKlass* ik, const ClassFileStream* stream);
 #endif
+  static JImageLocationRef jimage_find_resource(JImageFile* jf, const char* module_name,
+                                                const char* file_name, jlong &size);
 
   static void  trace_class_path(const char* msg, const char* name = NULL);
 
--- a/src/share/vm/classfile/classLoaderData.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/classfile/classLoaderData.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -76,6 +76,9 @@
 #include "utilities/growableArray.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/ostream.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc/g1/g1SATBCardTableModRefBS.hpp"
+#endif // INCLUDE_ALL_GCS
 #if INCLUDE_TRACE
 #include "trace/tracing.hpp"
 #endif
@@ -776,6 +779,25 @@
   return OopHandle(_handles.add(h()));
 }
 
+void ClassLoaderData::remove_handle(OopHandle h) {
+  oop* ptr = h.ptr_raw();
+  if (ptr != NULL) {
+    assert(_handles.contains(ptr), "Got unexpected handle " PTR_FORMAT, p2i(ptr));
+#if INCLUDE_ALL_GCS
+    // This barrier is used by G1 to remember the old oop values, so
+    // that we don't forget any objects that were live at the snapshot at
+    // the beginning.
+    if (UseG1GC) {
+      oop obj = *ptr;
+      if (obj != NULL) {
+        G1SATBCardTableModRefBS::enqueue(obj);
+      }
+    }
+#endif
+    *ptr = NULL;
+  }
+}
+
 void ClassLoaderData::init_handle_locked(OopHandle& dest, Handle h) {
   MutexLockerEx ml(metaspace_lock(),  Mutex::_no_safepoint_check_flag);
   if (dest.resolve() != NULL) {
--- a/src/share/vm/classfile/classLoaderData.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/classfile/classLoaderData.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -365,6 +365,7 @@
   const char* loader_name();
 
   OopHandle add_handle(Handle h);
+  void remove_handle(OopHandle h);
   void init_handle_locked(OopHandle& pd, Handle h);  // used for concurrent access to ModuleEntry::_pd field
   void add_class(Klass* k, bool publicize = true);
   void remove_class(Klass* k);
--- a/src/share/vm/classfile/classLoaderExt.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/classfile/classLoaderExt.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -26,6 +26,7 @@
 #define SHARE_VM_CLASSFILE_CLASSLOADEREXT_HPP
 
 #include "classfile/classLoader.hpp"
+#include "classfile/systemDictionary.hpp"
 #include "oops/instanceKlass.hpp"
 #include "runtime/handles.hpp"
 
@@ -56,8 +57,15 @@
       if (ClassLoader::add_package(_file_name, classpath_index, THREAD)) {
 #if INCLUDE_CDS
         if (DumpSharedSpaces) {
-          s2 classloader_type = ClassLoader::classloader_type(
-                          class_name, e, classpath_index, CHECK_(result));
+          oop loader = result->class_loader();
+          s2 classloader_type = ClassLoader::BOOT_LOADER;
+          if (SystemDictionary::is_system_class_loader(loader)) {
+            classloader_type = ClassLoader::APP_LOADER;
+            ClassLoaderExt::set_has_app_classes();
+          } else if (SystemDictionary::is_platform_class_loader(loader)) {
+            classloader_type = ClassLoader::PLATFORM_LOADER;
+            ClassLoaderExt::set_has_platform_classes();
+          }
           result->set_shared_classpath_index(classpath_index);
           result->set_class_loader_type(classloader_type);
         }
@@ -82,6 +90,13 @@
    return true;
  }
   static Klass* load_one_class(ClassListParser* parser, TRAPS);
+#if INCLUDE_CDS
+  static void set_has_app_classes() {}
+  static void set_has_platform_classes() {}
+  static char* read_manifest(ClassPathEntry* entry, jint *manifest_size, TRAPS) {
+    return NULL;
+  }
+#endif
 };
 
 #endif // SHARE_VM_CLASSFILE_CLASSLOADEREXT_HPP
--- a/src/share/vm/classfile/dictionary.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/classfile/dictionary.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -85,6 +85,7 @@
 
 void Dictionary::free_entry(DictionaryEntry* entry) {
   // avoid recursion when deleting linked list
+  // pd_set is accessed during a safepoint.
   while (entry->pd_set() != NULL) {
     ProtectionDomainEntry* to_delete = entry->pd_set();
     entry->set_pd_set(to_delete->next());
@@ -101,7 +102,7 @@
   if (protection_domain == instance_klass()->protection_domain()) {
     // Ensure this doesn't show up in the pd_set (invariant)
     bool in_pd_set = false;
-    for (ProtectionDomainEntry* current = _pd_set;
+    for (ProtectionDomainEntry* current = pd_set_acquire();
                                 current != NULL;
                                 current = current->next()) {
       if (current->protection_domain() == protection_domain) {
@@ -121,7 +122,7 @@
     return true;
   }
 
-  for (ProtectionDomainEntry* current = _pd_set;
+  for (ProtectionDomainEntry* current = pd_set_acquire();
                               current != NULL;
                               current = current->next()) {
     if (current->protection_domain() == protection_domain) return true;
@@ -135,12 +136,12 @@
   if (!contains_protection_domain(protection_domain())) {
     ProtectionDomainCacheEntry* entry = SystemDictionary::cache_get(protection_domain);
     ProtectionDomainEntry* new_head =
-                new ProtectionDomainEntry(entry, _pd_set);
+                new ProtectionDomainEntry(entry, pd_set());
     // Warning: Preserve store ordering.  The SystemDictionary is read
     //          without locks.  The new ProtectionDomainEntry must be
     //          complete before other threads can be allowed to see it
     //          via a store to _pd_set.
-    OrderAccess::release_store_ptr(&_pd_set, new_head);
+    release_set_pd_set(new_head);
   }
   LogTarget(Trace, protectiondomain) lt;
   if (lt.is_enabled()) {
@@ -365,11 +366,21 @@
   for (int i = 0; i < table_size(); ++i) {
     DictionaryEntry* p = bucket(i);
     while (p != NULL) {
-      DictionaryEntry* tmp;
-      tmp = p->next();
-      p->set_next(master_list);
-      master_list = p;
-      p = tmp;
+      DictionaryEntry* next = p->next();
+      InstanceKlass*ik = p->instance_klass();
+      // we cannot include signed classes in the archive because the certificates
+      // used during dump time may be different from those used during
+      // runtime (due to expiration, etc).
+      if (ik->signers() != NULL) {
+        ResourceMark rm;
+        tty->print_cr("Preload Warning: Skipping %s from signed JAR",
+                       ik->name()->as_C_string());
+        free_entry(p);
+      } else {
+        p->set_next(master_list);
+        master_list = p;
+      }
+      p = next;
     }
     set_entry(i, NULL);
   }
--- a/src/share/vm/classfile/dictionary.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/classfile/dictionary.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -29,6 +29,7 @@
 #include "classfile/systemDictionary.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/oop.hpp"
+#include "runtime/orderAccess.hpp"
 #include "utilities/hashtable.hpp"
 #include "utilities/ostream.hpp"
 
@@ -48,21 +49,6 @@
   DictionaryEntry* get_entry(int index, unsigned int hash, Symbol* name);
 
 protected:
-  DictionaryEntry* bucket(int i) const {
-    return (DictionaryEntry*)Hashtable<InstanceKlass*, mtClass>::bucket(i);
-  }
-
-  // The following method is not MT-safe and must be done under lock.
-  DictionaryEntry** bucket_addr(int i) {
-    return (DictionaryEntry**)Hashtable<InstanceKlass*, mtClass>::bucket_addr(i);
-  }
-
-  void add_entry(int index, DictionaryEntry* new_entry) {
-    Hashtable<InstanceKlass*, mtClass>::add_entry(index, (HashtableEntry<InstanceKlass*, mtClass>*)new_entry);
-  }
-
-  void free_entry(DictionaryEntry* entry);
-
   static size_t entry_size();
 public:
   Dictionary(ClassLoaderData* loader_data, int table_size);
@@ -106,6 +92,24 @@
 
   void print_on(outputStream* st) const;
   void verify();
+  DictionaryEntry* bucket(int i) const {
+    return (DictionaryEntry*)Hashtable<InstanceKlass*, mtClass>::bucket(i);
+  }
+
+  // The following method is not MT-safe and must be done under lock.
+  DictionaryEntry** bucket_addr(int i) {
+    return (DictionaryEntry**)Hashtable<InstanceKlass*, mtClass>::bucket_addr(i);
+  }
+
+  void add_entry(int index, DictionaryEntry* new_entry) {
+    Hashtable<InstanceKlass*, mtClass>::add_entry(index, (HashtableEntry<InstanceKlass*, mtClass>*)new_entry);
+  }
+
+  void unlink_entry(DictionaryEntry* entry) {
+    Hashtable<InstanceKlass*, mtClass>::unlink_entry((HashtableEntry<InstanceKlass*, mtClass>*)entry);
+  }
+
+  void free_entry(DictionaryEntry* entry);
 };
 
 // An entry in the class loader data dictionaries, this describes a class as
@@ -134,7 +138,7 @@
   // It is essentially a cache to avoid repeated Java up-calls to
   // ClassLoader.checkPackageAccess().
   //
-  ProtectionDomainEntry* _pd_set;
+  ProtectionDomainEntry* volatile _pd_set;
 
  public:
   // Tells whether a protection is in the approved set.
@@ -153,8 +157,15 @@
     return (DictionaryEntry**)HashtableEntry<InstanceKlass*, mtClass>::next_addr();
   }
 
-  ProtectionDomainEntry* pd_set() const { return _pd_set; }
-  void set_pd_set(ProtectionDomainEntry* pd_set) { _pd_set = pd_set; }
+  ProtectionDomainEntry* pd_set() const            { return _pd_set; }
+  void set_pd_set(ProtectionDomainEntry* new_head) {  _pd_set = new_head; }
+
+  ProtectionDomainEntry* pd_set_acquire() const    {
+    return (ProtectionDomainEntry*)OrderAccess::load_ptr_acquire(&_pd_set);
+  }
+  void release_set_pd_set(ProtectionDomainEntry* new_head) {
+    OrderAccess::release_store_ptr(&_pd_set, new_head);
+  }
 
   // Tells whether the initiating class' protection domain can access the klass in this entry
   bool is_valid_protection_domain(Handle protection_domain) {
@@ -167,7 +178,7 @@
   }
 
   void verify_protection_domain_set() {
-    for (ProtectionDomainEntry* current = _pd_set;
+    for (ProtectionDomainEntry* current = pd_set(); // accessed at a safepoint
                                 current != NULL;
                                 current = current->_next) {
       current->_pd_cache->protection_domain()->verify();
@@ -181,7 +192,7 @@
 
   void print_count(outputStream *st) {
     int count = 0;
-    for (ProtectionDomainEntry* current = _pd_set;
+    for (ProtectionDomainEntry* current = pd_set();  // accessed inside SD lock
                                 current != NULL;
                                 current = current->_next) {
       count++;
@@ -246,10 +257,6 @@
 class SymbolPropertyTable : public Hashtable<Symbol*, mtSymbol> {
   friend class VMStructs;
 private:
-  SymbolPropertyEntry* bucket(int i) {
-    return (SymbolPropertyEntry*) Hashtable<Symbol*, mtSymbol>::bucket(i);
-  }
-
   // The following method is not MT-safe and must be done under lock.
   SymbolPropertyEntry** bucket_addr(int i) {
     return (SymbolPropertyEntry**) Hashtable<Symbol*, mtSymbol>::bucket_addr(i);
@@ -303,5 +310,9 @@
   void methods_do(void f(Method*));
 
   void verify();
+
+  SymbolPropertyEntry* bucket(int i) {
+    return (SymbolPropertyEntry*) Hashtable<Symbol*, mtSymbol>::bucket(i);
+  }
 };
 #endif // SHARE_VM_CLASSFILE_DICTIONARY_HPP
--- a/src/share/vm/classfile/klassFactory.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/classfile/klassFactory.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -70,11 +70,25 @@
       ClassLoaderData* loader_data =
         ClassLoaderData::class_loader_data(class_loader());
       int path_index = ik->shared_classpath_index();
-      SharedClassPathEntry* ent =
-        (SharedClassPathEntry*)FileMapInfo::shared_classpath(path_index);
+      const char* pathname;
+      if (path_index < 0) {
+        // shared classes loaded by user defined class loader
+        // do not have shared_classpath_index
+        ModuleEntry* mod_entry = ik->module();
+        if (mod_entry != NULL && (mod_entry->location() != NULL)) {
+          ResourceMark rm;
+          pathname = (const char*)(mod_entry->location()->as_C_string());
+        } else {
+          pathname = "";
+        }
+      } else {
+        SharedClassPathEntry* ent =
+          (SharedClassPathEntry*)FileMapInfo::shared_classpath(path_index);
+        pathname = ent == NULL ? NULL : ent->name();
+      }
       ClassFileStream* stream = new ClassFileStream(ptr,
                                                     end_ptr - ptr,
-                                                    ent == NULL ? NULL : ent->name(),
+                                                    pathname,
                                                     ClassFileStream::verify);
       ClassFileParser parser(stream,
                              class_name,
@@ -215,8 +229,10 @@
 
   TRACE_KLASS_CREATION(result, parser, THREAD);
 
-#if INCLUDE_CDS && INCLUDE_JVMTI
+#if INCLUDE_CDS
   if (DumpSharedSpaces) {
+    ClassLoader::record_shared_class_loader_type(result, stream);
+#if INCLUDE_JVMTI
     assert(cached_class_file == NULL, "Sanity");
     // Archive the class stream data into the optional data section
     JvmtiCachedClassFileData *p;
@@ -233,8 +249,9 @@
     p->length = len;
     memcpy(p->data, bytes, len);
     result->set_archived_class_data(p);
+#endif // INCLUDE_JVMTI
   }
-#endif
+#endif // INCLUDE_CDS
 
   return result;
 }
--- a/src/share/vm/classfile/stringTable.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/classfile/stringTable.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -729,7 +729,6 @@
   }
 
   G1CollectedHeap::heap()->end_archive_alloc_range(string_space, os::vm_allocation_granularity());
-  assert(string_space->length() <= 2, "sanity");
   return true;
 }
 
--- a/src/share/vm/classfile/systemDictionary.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/classfile/systemDictionary.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -67,6 +67,7 @@
 #include "prims/resolvedMethodTable.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/arguments.hpp"
+#include "runtime/arguments_ext.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/fieldType.hpp"
 #include "runtime/handles.inline.hpp"
@@ -79,9 +80,8 @@
 #include "services/classLoadingService.hpp"
 #include "services/diagnosticCommand.hpp"
 #include "services/threadService.hpp"
-#include "trace/traceMacros.hpp"
+#include "trace/tracing.hpp"
 #include "utilities/macros.hpp"
-#include "utilities/ticks.hpp"
 #if INCLUDE_CDS
 #include "classfile/sharedClassUtil.hpp"
 #include "classfile/systemDictionaryShared.hpp"
@@ -89,9 +89,6 @@
 #if INCLUDE_JVMCI
 #include "jvmci/jvmciRuntime.hpp"
 #endif
-#if INCLUDE_TRACE
-#include "trace/tracing.hpp"
-#endif
 
 PlaceholderTable*      SystemDictionary::_placeholders        = NULL;
 Dictionary*            SystemDictionary::_shared_dictionary   = NULL;
@@ -645,17 +642,17 @@
   return NULL;
 }
 
-static void post_class_load_event(const Ticks& start_time,
-                                  InstanceKlass* k,
+static void post_class_load_event(EventClassLoad* event,
+                                  const InstanceKlass* k,
                                   const ClassLoaderData* init_cld) {
 #if INCLUDE_TRACE
-  EventClassLoad event(UNTIMED);
-  if (event.should_commit()) {
-    event.set_starttime(start_time);
-    event.set_loadedClass(k);
-    event.set_definingClassLoader(k->class_loader_data());
-    event.set_initiatingClassLoader(init_cld);
-    event.commit();
+  assert(event != NULL, "invariant");
+  assert(k != NULL, "invariant");
+  if (event->should_commit()) {
+    event->set_loadedClass(k);
+    event->set_definingClassLoader(k->class_loader_data());
+    event->set_initiatingClassLoader(init_cld);
+    event->commit();
   }
 #endif // INCLUDE_TRACE
 }
@@ -694,7 +691,7 @@
   assert(name != NULL && !FieldType::is_array(name) &&
          !FieldType::is_obj(name)  && !FieldType::is_valuetype(name), "invalid class name");
 
-  Ticks class_load_start_time = Ticks::now();
+  EventClassLoad class_load_start_event;
 
   HandleMark hm(THREAD);
 
@@ -914,7 +911,7 @@
             // during compilations.
             MutexLocker mu(Compile_lock, THREAD);
             update_dictionary(d_index, d_hash, p_index, p_hash,
-                              k, class_loader, THREAD);
+              k, class_loader, THREAD);
           }
 
           if (JvmtiExport::should_post_class_load()) {
@@ -940,7 +937,7 @@
     return NULL;
   }
 
-  post_class_load_event(class_load_start_time, k, loader_data);
+  post_class_load_event(&class_load_start_event, k, loader_data);
 
 #ifdef ASSERT
   {
@@ -955,12 +952,9 @@
   if (protection_domain() == NULL) return k;
 
   // Check the protection domain has the right access
-  {
-    MutexLocker mu(SystemDictionary_lock, THREAD);
-    if (dictionary->is_valid_protection_domain(d_index, d_hash, name,
-                                               protection_domain)) {
-      return k;
-    }
+  if (dictionary->is_valid_protection_domain(d_index, d_hash, name,
+                                             protection_domain)) {
+    return k;
   }
 
   // Verify protection domain. If it fails an exception is thrown
@@ -1047,14 +1041,13 @@
                                               GrowableArray<Handle>* cp_patches,
                                               TRAPS) {
 
-  Ticks class_load_start_time = Ticks::now();
+  EventClassLoad class_load_start_event;
 
   ClassLoaderData* loader_data;
   if (host_klass != NULL) {
     // Create a new CLD for anonymous class, that uses the same class loader
     // as the host_klass
     guarantee(host_klass->class_loader() == class_loader(), "should be the same");
-    guarantee(!DumpSharedSpaces, "must not create anonymous classes when dumping");
     loader_data = ClassLoaderData::anonymous_class_loader_data(class_loader(), CHECK_NULL);
   } else {
     loader_data = ClassLoaderData::class_loader_data(class_loader());
@@ -1105,7 +1098,7 @@
         JvmtiExport::post_class_load((JavaThread *) THREAD, k);
     }
 
-    post_class_load_event(class_load_start_time, k, loader_data);
+    post_class_load_event(&class_load_start_event, k, loader_data);
   }
   assert(host_klass != NULL || NULL == cp_patches,
          "cp_patches only found with host_klass");
@@ -1123,6 +1116,15 @@
                                                      Handle protection_domain,
                                                      ClassFileStream* st,
                                                      TRAPS) {
+#if INCLUDE_CDS
+  ResourceMark rm(THREAD);
+  if (DumpSharedSpaces && !class_loader.is_null() &&
+      !ArgumentsExt::using_AppCDS() && strcmp(class_name->as_C_string(), "Unnamed") != 0) {
+    // If AppCDS is not enabled, don't define the class at dump time (except for the "Unnamed"
+    // class, which is used by MethodHandles).
+    THROW_MSG_NULL(vmSymbols::java_lang_ClassNotFoundException(), class_name->as_C_string());
+  }
+#endif
 
   HandleMark hm(THREAD);
 
@@ -1149,11 +1151,13 @@
  InstanceKlass* k = NULL;
 
 #if INCLUDE_CDS
-  k = SystemDictionaryShared::lookup_from_stream(class_name,
-                                                 class_loader,
-                                                 protection_domain,
-                                                 st,
-                                                 CHECK_NULL);
+  if (!DumpSharedSpaces) {
+    k = SystemDictionaryShared::lookup_from_stream(class_name,
+                                                   class_loader,
+                                                   protection_domain,
+                                                   st,
+                                                   CHECK_NULL);
+  }
 #endif
 
   if (k == NULL) {
@@ -1262,6 +1266,16 @@
          "Cannot use sharing if java.base is patched");
   ResourceMark rm;
   int path_index = ik->shared_classpath_index();
+  ClassLoaderData* loader_data = class_loader_data(class_loader);
+  if (path_index < 0) {
+    // path_index < 0 indicates that the class is intended for a custom loader
+    // and should not be loaded by boot/platform/app loaders
+    if (loader_data->is_builtin_class_loader_data()) {
+      return false;
+    } else {
+      return true;
+    }
+  }
   SharedClassPathEntry* ent =
             (SharedClassPathEntry*)FileMapInfo::shared_classpath(path_index);
   if (!Universe::is_module_initialized()) {
@@ -1275,7 +1289,6 @@
   PackageEntry* pkg_entry = NULL;
   ModuleEntry* mod_entry = NULL;
   const char* pkg_string = NULL;
-  ClassLoaderData* loader_data = class_loader_data(class_loader);
   pkg_name = InstanceKlass::package_from_name(class_name, CHECK_false);
   if (pkg_name != NULL) {
     pkg_string = pkg_name->as_C_string();
@@ -1448,6 +1461,18 @@
   }
   return ik;
 }
+
+void SystemDictionary::clear_invoke_method_table() {
+  SymbolPropertyEntry* spe = NULL;
+  for (int index = 0; index < _invoke_method_table->table_size(); index++) {
+    SymbolPropertyEntry* p = _invoke_method_table->bucket(index);
+    while (p != NULL) {
+      spe = p;
+      p = p->next();
+      _invoke_method_table->free_entry(spe);
+    }
+  }
+}
 #endif // INCLUDE_CDS
 
 InstanceKlass* SystemDictionary::load_instance_class(Symbol* class_name, Handle class_loader, TRAPS) {
@@ -1494,7 +1519,6 @@
         }
       }
     } else {
-      assert(!DumpSharedSpaces, "Archive dumped after module system initialization");
       // After the module system has been initialized, check if the class'
       // package is in a module defined to the boot loader.
       if (pkg_name == NULL || pkg_entry == NULL || pkg_entry->in_unnamed_module()) {
@@ -2013,8 +2037,19 @@
   invoke_method_table()->methods_do(f);
 }
 
+class RemoveClassesClosure : public CLDClosure {
+  public:
+    void do_cld(ClassLoaderData* cld) {
+      if (cld->is_system_class_loader_data() || cld->is_platform_class_loader_data()) {
+        cld->dictionary()->remove_classes_in_error_state();
+      }
+    }
+};
+
 void SystemDictionary::remove_classes_in_error_state() {
   ClassLoaderData::the_null_class_loader_data()->dictionary()->remove_classes_in_error_state();
+  RemoveClassesClosure rcc;
+  ClassLoaderDataGraph::cld_do(&rcc);
 }
 
 // ----------------------------------------------------------------------------
@@ -2969,6 +3004,56 @@
   }
 }
 
+class CombineDictionariesClosure : public CLDClosure {
+  private:
+    Dictionary* _master_dictionary;
+  public:
+    CombineDictionariesClosure(Dictionary* master_dictionary) :
+      _master_dictionary(master_dictionary) {}
+    void do_cld(ClassLoaderData* cld) {
+      ResourceMark rm;
+      if (cld->is_system_class_loader_data() || cld->is_platform_class_loader_data()) {
+        for (int i = 0; i < cld->dictionary()->table_size(); ++i) {
+          Dictionary* curr_dictionary = cld->dictionary();
+          DictionaryEntry* p = curr_dictionary->bucket(i);
+          while (p != NULL) {
+            Symbol* name = p->instance_klass()->name();
+            unsigned int d_hash = _master_dictionary->compute_hash(name);
+            int d_index = _master_dictionary->hash_to_index(d_hash);
+            DictionaryEntry* next = p->next();
+            if (p->literal()->class_loader_data() != cld) {
+              // This is an initiating class loader entry; don't use it
+              log_trace(cds)("Skipping initiating cl entry: %s", name->as_C_string());
+              curr_dictionary->free_entry(p);
+            } else {
+              log_trace(cds)("Moved to boot dictionary: %s", name->as_C_string());
+              curr_dictionary->unlink_entry(p);
+              p->set_pd_set(NULL); // pd_set is runtime only information and will be reconstructed.
+              _master_dictionary->add_entry(d_index, p);
+            }
+            p = next;
+          }
+          *curr_dictionary->bucket_addr(i) = NULL;
+        }
+      }
+    }
+};
+
+// Combining platform and system loader dictionaries into boot loader dictionaries.
+// During run time, we only have one shared dictionary.
+void SystemDictionary::combine_shared_dictionaries() {
+  assert(DumpSharedSpaces, "dump time only");
+  Dictionary* master_dictionary = ClassLoaderData::the_null_class_loader_data()->dictionary();
+  CombineDictionariesClosure cdc(master_dictionary);
+  ClassLoaderDataGraph::cld_do(&cdc);
+
+  // These tables are no longer valid or necessary. Keeping them around will
+  // cause SystemDictionary::verify() to fail. Let's empty them.
+  _placeholders        = new PlaceholderTable(_placeholder_table_size);
+  _loader_constraints  = new LoaderConstraintTable(_loader_constraint_size);
+
+  NOT_PRODUCT(SystemDictionary::verify());
+}
 
 // caller needs ResourceMark
 const char* SystemDictionary::loader_name(const oop loader) {
--- a/src/share/vm/classfile/systemDictionary.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/classfile/systemDictionary.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -392,6 +392,7 @@
 public:
   // Sharing support.
   static void reorder_dictionary_for_sharing();
+  static void combine_shared_dictionaries();
   static size_t count_bytes_for_buckets();
   static size_t count_bytes_for_table();
   static void copy_buckets(char* top, char* end);
@@ -654,6 +655,7 @@
                                           TRAPS);
   static bool is_system_class_loader(oop class_loader);
   static bool is_platform_class_loader(oop class_loader);
+  static void clear_invoke_method_table();
 
 protected:
   static InstanceKlass* find_shared_class(Symbol* class_name);
--- a/src/share/vm/code/nmethod.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/code/nmethod.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -1220,7 +1220,7 @@
     // for stack scanning.
     if (state == not_entrant) {
       mark_as_seen_on_stack();
-      OrderAccess::storestore();
+      OrderAccess::storestore(); // _stack_traversal_mark and _state
     }
 
     // Change state
--- a/src/share/vm/code/nmethod.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/code/nmethod.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -136,7 +136,7 @@
   // stack.  An not_entrant method can be removed when there are no
   // more activations, i.e., when the _stack_traversal_mark is less than
   // current sweep traversal index.
-  volatile jlong _stack_traversal_mark;
+  volatile long _stack_traversal_mark;
 
   // The _hotness_counter indicates the hotness of a method. The higher
   // the value the hotter the method. The hotness counter of a nmethod is
@@ -396,8 +396,8 @@
  public:
 
   // Sweeper support
-  jlong  stack_traversal_mark()                    { return OrderAccess::load_acquire(&_stack_traversal_mark); }
-  void  set_stack_traversal_mark(jlong l)          { OrderAccess::release_store(&_stack_traversal_mark, l); }
+  long  stack_traversal_mark()                    { return _stack_traversal_mark; }
+  void  set_stack_traversal_mark(long l)          { _stack_traversal_mark = l; }
 
   // implicit exceptions support
   address continuation_for_implicit_exception(address pc);
--- a/src/share/vm/compiler/compileBroker.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/compiler/compileBroker.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -720,44 +720,49 @@
     // At this point it may be possible that no osthread was created for the
     // JavaThread due to lack of memory. We would have to throw an exception
     // in that case. However, since this must work and we do not allow
-    // exceptions anyway, check and abort if this fails.
+    // exceptions anyway, check and abort if this fails. But first release the
+    // lock.
 
-    if (thread == NULL || thread->osthread() == NULL) {
-      vm_exit_during_initialization("java.lang.OutOfMemoryError",
-                                    os::native_thread_creation_failed_msg());
+    if (thread != NULL && thread->osthread() != NULL) {
+
+      java_lang_Thread::set_thread(thread_oop(), thread);
+
+      // Note that this only sets the JavaThread _priority field, which by
+      // definition is limited to Java priorities and not OS priorities.
+      // The os-priority is set in the CompilerThread startup code itself
+
+      java_lang_Thread::set_priority(thread_oop(), NearMaxPriority);
+
+      // Note that we cannot call os::set_priority because it expects Java
+      // priorities and we are *explicitly* using OS priorities so that it's
+      // possible to set the compiler thread priority higher than any Java
+      // thread.
+
+      int native_prio = CompilerThreadPriority;
+      if (native_prio == -1) {
+        if (UseCriticalCompilerThreadPriority) {
+          native_prio = os::java_to_os_priority[CriticalPriority];
+        } else {
+          native_prio = os::java_to_os_priority[NearMaxPriority];
+        }
+      }
+      os::set_native_priority(thread, native_prio);
+
+      java_lang_Thread::set_daemon(thread_oop());
+
+      thread->set_threadObj(thread_oop());
+      if (compiler_thread) {
+        thread->as_CompilerThread()->set_compiler(comp);
+      }
+      Threads::add(thread);
+      Thread::start(thread);
     }
+  }
 
-    java_lang_Thread::set_thread(thread_oop(), thread);
-
-    // Note that this only sets the JavaThread _priority field, which by
-    // definition is limited to Java priorities and not OS priorities.
-    // The os-priority is set in the CompilerThread startup code itself
-
-    java_lang_Thread::set_priority(thread_oop(), NearMaxPriority);
-
-    // Note that we cannot call os::set_priority because it expects Java
-    // priorities and we are *explicitly* using OS priorities so that it's
-    // possible to set the compiler thread priority higher than any Java
-    // thread.
-
-    int native_prio = CompilerThreadPriority;
-    if (native_prio == -1) {
-      if (UseCriticalCompilerThreadPriority) {
-        native_prio = os::java_to_os_priority[CriticalPriority];
-      } else {
-        native_prio = os::java_to_os_priority[NearMaxPriority];
-      }
-    }
-    os::set_native_priority(thread, native_prio);
-
-    java_lang_Thread::set_daemon(thread_oop());
-
-    thread->set_threadObj(thread_oop());
-    if (compiler_thread) {
-      thread->as_CompilerThread()->set_compiler(comp);
-    }
-    Threads::add(thread);
-    Thread::start(thread);
+  // First release lock before aborting VM.
+  if (thread == NULL || thread->osthread() == NULL) {
+    vm_exit_during_initialization("java.lang.OutOfMemoryError",
+                                  os::native_thread_creation_failed_msg());
   }
 
   // Let go of Threads_lock before yielding
--- a/src/share/vm/compiler/disassembler.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/compiler/disassembler.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,6 @@
 #include "gc/shared/collectedHeap.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/fprofiler.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/os.hpp"
 #include "runtime/stubCodeGenerator.hpp"
@@ -163,7 +162,6 @@
   bool          _print_pc;
   bool          _print_bytes;
   address       _cur_insn;
-  int           _total_ticks;
   int           _bytes_per_line; // arch-specific formatting option
 
   static bool match(const char* event, const char* tag) {
@@ -213,18 +211,6 @@
       _nm->print_code_comment_on(st, COMMENT_COLUMN, pc0, pc);
       // this calls reloc_string_for which calls oop::print_value_on
     }
-
-    // Output pc bucket ticks if we have any
-    if (total_ticks() != 0) {
-      address bucket_pc = FlatProfiler::bucket_start_for(pc);
-      if (bucket_pc != NULL && bucket_pc > pc0 && bucket_pc <= pc) {
-        int bucket_count = FlatProfiler::bucket_count_for(pc0);
-        if (bucket_count != 0) {
-          st->bol();
-          st->print_cr("%3.1f%% [%d]", bucket_count*100.0/total_ticks(), bucket_count);
-        }
-      }
-    }
     // follow each complete insn by a nice newline
     st->cr();
   }
@@ -233,8 +219,6 @@
 
   outputStream* output() { return _output; }
   address cur_insn() { return _cur_insn; }
-  int total_ticks() { return _total_ticks; }
-  void set_total_ticks(int n) { _total_ticks = n; }
   const char* options() { return _option_buf; }
 };
 
@@ -561,20 +545,6 @@
 #endif
   env.output()->print_cr("  [" PTR_FORMAT ", " PTR_FORMAT "]  " JLONG_FORMAT " bytes", p2i(p), p2i(end), ((jlong)(end - p)));
 
-  // If there has been profiling, print the buckets.
-  if (FlatProfiler::bucket_start_for(p) != NULL) {
-    unsigned char* p1 = p;
-    int total_bucket_count = 0;
-    while (p1 < end) {
-      unsigned char* p0 = p1;
-      p1 += pd_instruction_alignment();
-      address bucket_pc = FlatProfiler::bucket_start_for(p1);
-      if (bucket_pc != NULL && bucket_pc > p0 && bucket_pc <= p1)
-        total_bucket_count += FlatProfiler::bucket_count_for(p0);
-    }
-    env.set_total_ticks(total_bucket_count);
-  }
-
   // Print constant table.
   if (nm->consts_size() > 0) {
     nm->print_nmethod_labels(env.output(), nm->consts_begin());
--- a/src/share/vm/gc/g1/g1CardLiveData.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/gc/g1/g1CardLiveData.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -409,7 +409,7 @@
 
   virtual void work(uint worker_id) {
     while (true) {
-      size_t to_process = Atomic::add(1, &_cur_chunk) - 1;
+      size_t to_process = Atomic::add(1u, &_cur_chunk) - 1;
       if (to_process >= _num_chunks) {
         break;
       }
--- a/src/share/vm/gc/g1/g1CollectedHeap.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/gc/g1/g1CollectedHeap.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -1719,7 +1719,6 @@
                              G1BlockOffsetTable::compute_size(g1_rs.size() / HeapWordSize),
                              G1BlockOffsetTable::heap_map_factor());
 
-  ReservedSpace cardtable_rs(G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize));
   G1RegionToSpaceMapper* cardtable_storage =
     create_aux_memory_mapper("Card Table",
                              G1SATBCardTableLoggingModRefBS::compute_size(g1_rs.size() / HeapWordSize),
--- a/src/share/vm/gc/g1/g1ConcurrentMark.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/gc/g1/g1ConcurrentMark.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -200,7 +200,7 @@
     return NULL;
   }
 
-  size_t cur_idx = Atomic::add(1, &_hwm) - 1;
+  size_t cur_idx = Atomic::add(1u, &_hwm) - 1;
   if (cur_idx >= _chunk_capacity) {
     return NULL;
   }
--- a/src/share/vm/gc/g1/g1GCPhaseTimes.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/gc/g1/g1GCPhaseTimes.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -54,7 +54,6 @@
   _gc_par_phases[UniverseRoots] = new WorkerDataArray<double>(max_gc_threads, "Universe Roots (ms):");
   _gc_par_phases[JNIRoots] = new WorkerDataArray<double>(max_gc_threads, "JNI Handles Roots (ms):");
   _gc_par_phases[ObjectSynchronizerRoots] = new WorkerDataArray<double>(max_gc_threads, "ObjectSynchronizer Roots (ms):");
-  _gc_par_phases[FlatProfilerRoots] = new WorkerDataArray<double>(max_gc_threads, "FlatProfiler Roots (ms):");
   _gc_par_phases[ManagementRoots] = new WorkerDataArray<double>(max_gc_threads, "Management Roots (ms):");
   _gc_par_phases[SystemDictionaryRoots] = new WorkerDataArray<double>(max_gc_threads, "SystemDictionary Roots (ms):");
   _gc_par_phases[CLDGRoots] = new WorkerDataArray<double>(max_gc_threads, "CLDG Roots (ms):");
--- a/src/share/vm/gc/g1/g1GCPhaseTimes.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/gc/g1/g1GCPhaseTimes.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -49,7 +49,6 @@
     UniverseRoots,
     JNIRoots,
     ObjectSynchronizerRoots,
-    FlatProfilerRoots,
     ManagementRoots,
     SystemDictionaryRoots,
     CLDGRoots,
--- a/src/share/vm/gc/g1/g1HeapVerifier.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/gc/g1/g1HeapVerifier.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -62,7 +62,7 @@
       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
       if (_g1h->is_obj_dead_cond(obj, _vo)) {
         Log(gc, verify) log;
-        log.info("Root location " PTR_FORMAT " points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
+        log.error("Root location " PTR_FORMAT " points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
         if (_vo == VerifyOption_G1UseMarkWord) {
           log.error("  Mark word: " PTR_FORMAT, p2i(obj->mark()));
         }
--- a/src/share/vm/gc/g1/g1HotCardCache.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/gc/g1/g1HotCardCache.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -64,7 +64,7 @@
     return card_ptr;
   }
   // Otherwise, the card is hot.
-  size_t index = Atomic::add(1, &_hot_cache_idx) - 1;
+  size_t index = Atomic::add(1u, &_hot_cache_idx) - 1;
   size_t masked_index = index & (_hot_cache_size - 1);
   jbyte* current_ptr = _hot_cache[masked_index];
 
--- a/src/share/vm/gc/g1/g1HotCardCache.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/gc/g1/g1HotCardCache.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -67,7 +67,7 @@
 
   size_t            _hot_cache_size;
 
-  int               _hot_cache_par_chunk_size;
+  size_t            _hot_cache_par_chunk_size;
 
   // Avoids false sharing when concurrently updating _hot_cache_idx or
   // _hot_cache_par_claimed_idx. These are never updated at the same time
--- a/src/share/vm/gc/g1/g1MarkSweep.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/gc/g1/g1MarkSweep.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -48,7 +48,6 @@
 #include "prims/jvmtiExport.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/biasedLocking.hpp"
-#include "runtime/fprofiler.hpp"
 #include "runtime/synchronizer.hpp"
 #include "runtime/thread.hpp"
 #include "runtime/vmThread.hpp"
--- a/src/share/vm/gc/g1/g1RemSet.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/gc/g1/g1RemSet.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -243,7 +243,7 @@
 
     bool marked_as_dirty = Atomic::cmpxchg(Dirty, &_in_dirty_region_buffer[region], Clean) == Clean;
     if (marked_as_dirty) {
-      size_t allocated = Atomic::add(1, &_cur_dirty_region) - 1;
+      size_t allocated = Atomic::add(1u, &_cur_dirty_region) - 1;
       _dirty_region_buffer[allocated] = region;
     }
   }
--- a/src/share/vm/gc/g1/g1RootProcessor.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/gc/g1/g1RootProcessor.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,6 @@
 #include "gc/g1/g1RootProcessor.hpp"
 #include "gc/g1/heapRegion.inline.hpp"
 #include "memory/allocation.inline.hpp"
-#include "runtime/fprofiler.hpp"
 #include "runtime/mutex.hpp"
 #include "services/management.hpp"
 #include "utilities/macros.hpp"
@@ -272,13 +271,6 @@
   }
 
   {
-    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::FlatProfilerRoots, worker_i);
-    if (!_process_strong_tasks.is_task_claimed(G1RP_PS_FlatProfiler_oops_do)) {
-      FlatProfiler::oops_do(strong_roots);
-    }
-  }
-
-  {
     G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::ManagementRoots, worker_i);
     if (!_process_strong_tasks.is_task_claimed(G1RP_PS_Management_oops_do)) {
       Management::oops_do(strong_roots);
--- a/src/share/vm/gc/g1/g1RootProcessor.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/gc/g1/g1RootProcessor.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -57,7 +57,6 @@
     G1RP_PS_Universe_oops_do,
     G1RP_PS_JNIHandles_oops_do,
     G1RP_PS_ObjectSynchronizer_oops_do,
-    G1RP_PS_FlatProfiler_oops_do,
     G1RP_PS_Management_oops_do,
     G1RP_PS_SystemDictionary_oops_do,
     G1RP_PS_ClassLoaderDataGraph_oops_do,
--- a/src/share/vm/gc/parallel/pcTasks.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/gc/parallel/pcTasks.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,7 +39,6 @@
 #include "oops/objArrayKlass.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
-#include "runtime/fprofiler.hpp"
 #include "runtime/jniHandles.hpp"
 #include "runtime/thread.hpp"
 #include "runtime/vmThread.hpp"
@@ -105,10 +104,6 @@
       ObjectSynchronizer::oops_do(&mark_and_push_closure);
       break;
 
-    case flat_profiler:
-      FlatProfiler::oops_do(&mark_and_push_closure);
-      break;
-
     case management:
       Management::oops_do(&mark_and_push_closure);
       break;
--- a/src/share/vm/gc/parallel/pcTasks.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/gc/parallel/pcTasks.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -94,12 +94,11 @@
     jni_handles           = 2,
     threads               = 3,
     object_synchronizer   = 4,
-    flat_profiler         = 5,
-    management            = 6,
-    jvmti                 = 7,
-    system_dictionary     = 8,
-    class_loader_data     = 9,
-    code_cache            = 10
+    management            = 5,
+    jvmti                 = 6,
+    system_dictionary     = 7,
+    class_loader_data     = 8,
+    code_cache            = 9
   };
  private:
   RootType _root_type;
--- a/src/share/vm/gc/parallel/psMarkSweep.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/gc/parallel/psMarkSweep.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -50,7 +50,6 @@
 #include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/biasedLocking.hpp"
-#include "runtime/fprofiler.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/vmThread.hpp"
 #include "services/management.hpp"
@@ -514,7 +513,6 @@
     MarkingCodeBlobClosure each_active_code_blob(mark_and_push_closure(), !CodeBlobToOopClosure::FixRelocations);
     Threads::oops_do(mark_and_push_closure(), &each_active_code_blob);
     ObjectSynchronizer::oops_do(mark_and_push_closure());
-    FlatProfiler::oops_do(mark_and_push_closure());
     Management::oops_do(mark_and_push_closure());
     JvmtiExport::oops_do(mark_and_push_closure());
     SystemDictionary::always_strong_oops_do(mark_and_push_closure());
@@ -607,7 +605,6 @@
   JNIHandles::oops_do(adjust_pointer_closure());   // Global (strong) JNI handles
   Threads::oops_do(adjust_pointer_closure(), NULL);
   ObjectSynchronizer::oops_do(adjust_pointer_closure());
-  FlatProfiler::oops_do(adjust_pointer_closure());
   Management::oops_do(adjust_pointer_closure());
   JvmtiExport::oops_do(adjust_pointer_closure());
   SystemDictionary::oops_do(adjust_pointer_closure());
--- a/src/share/vm/gc/parallel/psParallelCompact.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/gc/parallel/psParallelCompact.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -61,7 +61,6 @@
 #include "oops/oop.inline.hpp"
 #include "oops/valueArrayKlass.inline.hpp"
 #include "runtime/atomic.hpp"
-#include "runtime/fprofiler.hpp"
 #include "runtime/safepoint.hpp"
 #include "runtime/vmThread.hpp"
 #include "services/management.hpp"
@@ -2087,7 +2086,6 @@
     // We scan the thread roots in parallel
     Threads::create_thread_roots_marking_tasks(q);
     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::object_synchronizer));
-    q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::flat_profiler));
     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::management));
     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::system_dictionary));
     q->enqueue(new MarkFromRootsTask(MarkFromRootsTask::class_loader_data));
@@ -2170,7 +2168,6 @@
   JNIHandles::oops_do(&oop_closure);   // Global (strong) JNI handles
   Threads::oops_do(&oop_closure, NULL);
   ObjectSynchronizer::oops_do(&oop_closure);
-  FlatProfiler::oops_do(&oop_closure);
   Management::oops_do(&oop_closure);
   JvmtiExport::oops_do(&oop_closure);
   SystemDictionary::oops_do(&oop_closure);
--- a/src/share/vm/gc/parallel/psScavenge.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/gc/parallel/psScavenge.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -49,7 +49,6 @@
 #include "logging/log.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/biasedLocking.hpp"
-#include "runtime/fprofiler.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/threadCritical.hpp"
 #include "runtime/vmThread.hpp"
@@ -381,7 +380,6 @@
       // We scan the thread roots in parallel
       Threads::create_thread_roots_tasks(q);
       q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
-      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
       q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
       q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
       q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data));
--- a/src/share/vm/gc/parallel/psTasks.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/gc/parallel/psTasks.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,7 +38,6 @@
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
-#include "runtime/fprofiler.hpp"
 #include "runtime/thread.hpp"
 #include "runtime/vmThread.hpp"
 #include "services/management.hpp"
@@ -74,10 +73,6 @@
       ObjectSynchronizer::oops_do(&roots_closure);
       break;
 
-    case flat_profiler:
-      FlatProfiler::oops_do(&roots_closure);
-      break;
-
     case system_dictionary:
       SystemDictionary::oops_do(&roots_closure);
       break;
--- a/src/share/vm/gc/parallel/psTasks.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/gc/parallel/psTasks.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -57,12 +57,11 @@
     jni_handles           = 2,
     threads               = 3,
     object_synchronizer   = 4,
-    flat_profiler         = 5,
-    system_dictionary     = 6,
-    class_loader_data     = 7,
-    management            = 8,
-    jvmti                 = 9,
-    code_cache            = 10
+    system_dictionary     = 5,
+    class_loader_data     = 6,
+    management            = 7,
+    jvmti                 = 8,
+    code_cache            = 9
   };
  private:
   RootType _root_type;
--- a/src/share/vm/gc/serial/genMarkSweep.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/gc/serial/genMarkSweep.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -46,7 +46,6 @@
 #include "oops/instanceRefKlass.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
-#include "runtime/fprofiler.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/synchronizer.hpp"
 #include "runtime/thread.inline.hpp"
--- a/src/share/vm/gc/shared/genCollectedHeap.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/gc/shared/genCollectedHeap.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -47,7 +47,6 @@
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/biasedLocking.hpp"
-#include "runtime/fprofiler.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
@@ -71,7 +70,6 @@
   GCH_PS_Universe_oops_do,
   GCH_PS_JNIHandles_oops_do,
   GCH_PS_ObjectSynchronizer_oops_do,
-  GCH_PS_FlatProfiler_oops_do,
   GCH_PS_Management_oops_do,
   GCH_PS_SystemDictionary_oops_do,
   GCH_PS_ClassLoaderDataGraph_oops_do,
@@ -606,9 +604,6 @@
   if (!_process_strong_tasks->is_task_claimed(GCH_PS_ObjectSynchronizer_oops_do)) {
     ObjectSynchronizer::oops_do(strong_roots);
   }
-  if (!_process_strong_tasks->is_task_claimed(GCH_PS_FlatProfiler_oops_do)) {
-    FlatProfiler::oops_do(strong_roots);
-  }
   if (!_process_strong_tasks->is_task_claimed(GCH_PS_Management_oops_do)) {
     Management::oops_do(strong_roots);
   }
--- a/src/share/vm/gc/shared/vmGCOperations.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/gc/shared/vmGCOperations.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,7 @@
 #include "gc/shared/gcLocker.inline.hpp"
 #include "gc/shared/genCollectedHeap.hpp"
 #include "gc/shared/vmGCOperations.hpp"
+#include "interpreter/oopMapCache.hpp"
 #include "logging/log.hpp"
 #include "memory/oopFactory.hpp"
 #include "runtime/handles.inline.hpp"
@@ -111,6 +112,9 @@
 
 void VM_GC_Operation::doit_epilogue() {
   assert(Thread::current()->is_Java_thread(), "just checking");
+  // Clean up old interpreter OopMap entries that were replaced
+  // during the GC thread root traversal.
+  OopMapCache::cleanup_old_entries();
   if (Universe::has_reference_pending_list()) {
     Heap_lock->notify_all();
   }
--- a/src/share/vm/interpreter/oopMapCache.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/interpreter/oopMapCache.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "interpreter/oopMapCache.hpp"
 #include "logging/log.hpp"
+#include "logging/logStream.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
@@ -37,6 +38,9 @@
   friend class OopMapCache;
   friend class VerifyClosure;
 
+ private:
+  OopMapCacheEntry* _next;
+
  protected:
   // Initialization
   void fill(const methodHandle& method, int bci);
@@ -54,8 +58,9 @@
 
  public:
   OopMapCacheEntry() : InterpreterOopMap() {
+    _next = NULL;
 #ifdef ASSERT
-     _resource_allocate_bit_mask = false;
+    _resource_allocate_bit_mask = false;
 #endif
   }
 };
@@ -264,23 +269,26 @@
 
   // Check if map is generated correctly
   // (Use ?: operator to make sure all 'true' & 'false' are represented exactly the same so we can use == afterwards)
-  if (TraceOopMapGeneration && Verbose) tty->print("Locals (%d): ", max_locals);
+  Log(interpreter, oopmap) logv;
+  LogStream st(logv.trace());
 
+  st.print("Locals (%d): ", max_locals);
   for(int i = 0; i < max_locals; i++) {
     bool v1 = is_oop(i)               ? true : false;
     bool v2 = vars[i].is_reference() || vars[i].is_valuetype() ? true : false;
     assert(v1 == v2, "locals oop mask generation error");
-    if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0);
+    st.print("%d", v1 ? 1 : 0);
   }
+  st.cr();
 
-  if (TraceOopMapGeneration && Verbose) { tty->cr(); tty->print("Stack (%d): ", stack_top); }
+  st.print("Stack (%d): ", stack_top);
   for(int j = 0; j < stack_top; j++) {
     bool v1 = is_oop(max_locals + j)  ? true : false;
     bool v2 = stack[j].is_reference() || stack[j].is_valuetype( )? true : false;
     assert(v1 == v2, "stack oop mask generation error");
-    if (TraceOopMapGeneration && Verbose) tty->print("%d", v1 ? 1 : 0);
+    st.print("%d", v1 ? 1 : 0);
   }
-  if (TraceOopMapGeneration && Verbose) tty->cr();
+  st.cr();
   return true;
 }
 
@@ -375,8 +383,6 @@
 
   // verify bit mask
   assert(verify_mask(vars, stack, max_locals, stack_top), "mask could not be verified");
-
-
 }
 
 void OopMapCacheEntry::flush() {
@@ -387,16 +393,6 @@
 
 // Implementation of OopMapCache
 
-#ifndef PRODUCT
-
-static long _total_memory_usage = 0;
-
-long OopMapCache::memory_usage() {
-  return _total_memory_usage;
-}
-
-#endif
-
 void InterpreterOopMap::resource_copy(OopMapCacheEntry* from) {
   assert(_resource_allocate_bit_mask,
     "Should not resource allocate the _bit_mask");
@@ -437,15 +433,11 @@
          ^ ((unsigned int) method->size_of_parameters() << 6);
 }
 
+OopMapCacheEntry* volatile OopMapCache::_old_entries = NULL;
 
-OopMapCache::OopMapCache() :
-  _mut(Mutex::leaf, "An OopMapCache lock", true, Monitor::_safepoint_check_never)
-{
-  _array  = NEW_C_HEAP_ARRAY(OopMapCacheEntry, _size, mtClass);
-  // Cannot call flush for initialization, since flush
-  // will check if memory should be deallocated
-  for(int i = 0; i < _size; i++) _array[i].initialize();
-  NOT_PRODUCT(_total_memory_usage += sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
+OopMapCache::OopMapCache() {
+  _array  = NEW_C_HEAP_ARRAY(OopMapCacheEntry*, _size, mtClass);
+  for(int i = 0; i < _size; i++) _array[i] = NULL;
 }
 
 
@@ -454,112 +446,152 @@
   // Deallocate oop maps that are allocated out-of-line
   flush();
   // Deallocate array
-  NOT_PRODUCT(_total_memory_usage -= sizeof(OopMapCache) + (sizeof(OopMapCacheEntry) * _size);)
-  FREE_C_HEAP_ARRAY(OopMapCacheEntry, _array);
+  FREE_C_HEAP_ARRAY(OopMapCacheEntry*, _array);
 }
 
 OopMapCacheEntry* OopMapCache::entry_at(int i) const {
-  return &_array[i % _size];
+  return (OopMapCacheEntry*)OrderAccess::load_ptr_acquire(&(_array[i % _size]));
+}
+
+bool OopMapCache::put_at(int i, OopMapCacheEntry* entry, OopMapCacheEntry* old) {
+  return Atomic::cmpxchg_ptr (entry, &_array[i % _size], old) == old;
 }
 
 void OopMapCache::flush() {
-  for (int i = 0; i < _size; i++) _array[i].flush();
+  for (int i = 0; i < _size; i++) {
+    OopMapCacheEntry* entry = _array[i];
+    if (entry != NULL) {
+      _array[i] = NULL;  // no barrier, only called in OopMapCache destructor
+      entry->flush();
+      FREE_C_HEAP_OBJ(entry);
+    }
+  }
 }
 
 void OopMapCache::flush_obsolete_entries() {
-  for (int i = 0; i < _size; i++)
-    if (!_array[i].is_empty() && _array[i].method()->is_old()) {
+  assert(SafepointSynchronize::is_at_safepoint(), "called by RedefineClasses in a safepoint");
+  for (int i = 0; i < _size; i++) {
+    OopMapCacheEntry* entry = _array[i];
+    if (entry != NULL && !entry->is_empty() && entry->method()->is_old()) {
       // Cache entry is occupied by an old redefined method and we don't want
       // to pin it down so flush the entry.
       if (log_is_enabled(Debug, redefine, class, oopmap)) {
         ResourceMark rm;
-        log_debug(redefine, class, oopmap)
+        log_debug(redefine, class, interpreter, oopmap)
           ("flush: %s(%s): cached entry @%d",
-           _array[i].method()->name()->as_C_string(), _array[i].method()->signature()->as_C_string(), i);
+           entry->method()->name()->as_C_string(), entry->method()->signature()->as_C_string(), i);
       }
-      _array[i].flush();
+      _array[i] = NULL;
+      entry->flush();
+      FREE_C_HEAP_OBJ(entry);
     }
+  }
 }
 
+// Called by GC for thread root scan during a safepoint only.  The other interpreted frame oopmaps
+// are generated locally and not cached.
 void OopMapCache::lookup(const methodHandle& method,
                          int bci,
-                         InterpreterOopMap* entry_for) const {
-  MutexLockerEx x(&_mut, Mutex::_no_safepoint_check_flag);
+                         InterpreterOopMap* entry_for) {
+  assert(SafepointSynchronize::is_at_safepoint(), "called by GC in a safepoint");
+  int probe = hash_value_for(method, bci);
+  int i;
+  OopMapCacheEntry* entry = NULL;
 
-  OopMapCacheEntry* entry = NULL;
-  int probe = hash_value_for(method, bci);
+  if (log_is_enabled(Debug, interpreter, oopmap)) {
+    static int count = 0;
+    ResourceMark rm;
+    log_debug(interpreter, oopmap)
+          ("%d - Computing oopmap at bci %d for %s at hash %d", ++count, bci,
+           method()->name_and_sig_as_C_string(), probe);
+  }
 
   // Search hashtable for match
-  int i;
   for(i = 0; i < _probe_depth; i++) {
     entry = entry_at(probe + i);
-    if (entry->match(method, bci)) {
+    if (entry != NULL && !entry->is_empty() && entry->match(method, bci)) {
       entry_for->resource_copy(entry);
       assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
+      log_debug(interpreter, oopmap)("- found at hash %d", probe + i);
       return;
     }
   }
 
-  if (TraceOopMapGeneration) {
-    static int count = 0;
-    ResourceMark rm;
-    tty->print("%d - Computing oopmap at bci %d for ", ++count, bci);
-    method->print_value(); tty->cr();
-  }
+  // Entry is not in hashtable.
+  // Compute entry
 
-  // Entry is not in hashtable.
-  // Compute entry and return it
+  OopMapCacheEntry* tmp = NEW_C_HEAP_OBJ(OopMapCacheEntry, mtClass);
+  tmp->initialize();
+  tmp->fill(method, bci);
+  entry_for->resource_copy(tmp);
 
   if (method->should_not_be_cached()) {
     // It is either not safe or not a good idea to cache this Method*
     // at this time. We give the caller of lookup() a copy of the
     // interesting info via parameter entry_for, but we don't add it to
     // the cache. See the gory details in Method*.cpp.
-    compute_one_oop_map(method, bci, entry_for);
+    FREE_C_HEAP_OBJ(tmp);
     return;
   }
 
   // First search for an empty slot
   for(i = 0; i < _probe_depth; i++) {
-    entry  = entry_at(probe + i);
-    if (entry->is_empty()) {
-      entry->fill(method, bci);
-      entry_for->resource_copy(entry);
-      assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
-      return;
+    entry = entry_at(probe + i);
+    if (entry == NULL) {
+      if (put_at(probe + i, tmp, NULL)) {
+        assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
+        return;
+      }
     }
   }
 
-  if (TraceOopMapGeneration) {
-    ResourceMark rm;
-    tty->print_cr("*** collision in oopmap cache - flushing item ***");
+  log_debug(interpreter, oopmap)("*** collision in oopmap cache - flushing item ***");
+
+  // No empty slot (uncommon case). Use (some approximation of a) LRU algorithm
+  // where the first entry in the collision array is replaced with the new one.
+  OopMapCacheEntry* old = entry_at(probe + 0);
+  if (put_at(probe + 0, tmp, old)) {
+    enqueue_for_cleanup(old);
+  } else {
+    enqueue_for_cleanup(tmp);
   }
 
-  // No empty slot (uncommon case). Use (some approximation of a) LRU algorithm
-  //entry_at(probe + _probe_depth - 1)->flush();
-  //for(i = _probe_depth - 1; i > 0; i--) {
-  //  // Coping entry[i] = entry[i-1];
-  //  OopMapCacheEntry *to   = entry_at(probe + i);
-  //  OopMapCacheEntry *from = entry_at(probe + i - 1);
-  //  to->copy(from);
-  // }
+  assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
+  return;
+}
 
-  assert(method->is_method(), "gaga");
+void OopMapCache::enqueue_for_cleanup(OopMapCacheEntry* entry) {
+  bool success = false;
+  OopMapCacheEntry* head;
+  do {
+    head = _old_entries;
+    entry->_next = head;
+    success = Atomic::cmpxchg_ptr (entry, &_old_entries, head) == head;
+  } while (!success);
 
-  entry = entry_at(probe + 0);
-  entry->fill(method, bci);
+  if (log_is_enabled(Debug, interpreter, oopmap)) {
+    ResourceMark rm;
+    log_debug(interpreter, oopmap)("enqueue %s at bci %d for cleanup",
+                          entry->method()->name_and_sig_as_C_string(), entry->bci());
+  }
+}
 
-  // Copy the  newly cached entry to input parameter
-  entry_for->resource_copy(entry);
-
-  if (TraceOopMapGeneration) {
-    ResourceMark rm;
-    tty->print("Done with ");
-    method->print_value(); tty->cr();
+// This is called after GC threads are done and nothing is accessing the old_entries
+// list, so no synchronization needed.
+void OopMapCache::cleanup_old_entries() {
+  OopMapCacheEntry* entry = _old_entries;
+  _old_entries = NULL;
+  while (entry != NULL) {
+    if (log_is_enabled(Debug, interpreter, oopmap)) {
+      ResourceMark rm;
+      log_debug(interpreter, oopmap)("cleanup entry %s at bci %d",
+                          entry->method()->name_and_sig_as_C_string(), entry->bci());
+    }
+    OopMapCacheEntry* next = entry->_next;
+    entry->flush();
+    FREE_C_HEAP_OBJ(entry);
+    entry = next;
   }
-  assert(!entry_for->is_empty(), "A non-empty oop map should be returned");
-
-  return;
 }
 
 void OopMapCache::compute_one_oop_map(const methodHandle& method, int bci, InterpreterOopMap* entry) {
--- a/src/share/vm/interpreter/oopMapCache.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/interpreter/oopMapCache.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -144,17 +144,19 @@
 };
 
 class OopMapCache : public CHeapObj<mtClass> {
+ static OopMapCacheEntry* volatile _old_entries;
  private:
   enum { _size        = 32,     // Use fixed size for now
          _probe_depth = 3       // probe depth in case of collisions
   };
 
-  OopMapCacheEntry* _array;
+  OopMapCacheEntry* volatile * _array;
 
   unsigned int hash_value_for(const methodHandle& method, int bci) const;
   OopMapCacheEntry* entry_at(int i) const;
+  bool put_at(int i, OopMapCacheEntry* entry, OopMapCacheEntry* old);
 
-  mutable Mutex _mut;
+  static void enqueue_for_cleanup(OopMapCacheEntry* entry);
 
   void flush();
 
@@ -167,13 +169,11 @@
 
   // Returns the oopMap for (method, bci) in parameter "entry".
   // Returns false if an oop map was not found.
-  void lookup(const methodHandle& method, int bci, InterpreterOopMap* entry) const;
+  void lookup(const methodHandle& method, int bci, InterpreterOopMap* entry);
 
   // Compute an oop map without updating the cache or grabbing any locks (for debugging)
   static void compute_one_oop_map(const methodHandle& method, int bci, InterpreterOopMap* entry);
-
-  // Returns total no. of bytes allocated as part of OopMapCache's
-  static long memory_usage()                     PRODUCT_RETURN0;
+  static void cleanup_old_entries();
 };
 
 #endif // SHARE_VM_INTERPRETER_OOPMAPCACHE_HPP
--- a/src/share/vm/interpreter/rewriter.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/interpreter/rewriter.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -109,6 +109,11 @@
     MetadataFactory::free_metadata(loader_data, cache);
     _pool->set_cache(NULL);  // so the verifier isn't confused
   }
+
+  DEBUG_ONLY(
+  if (DumpSharedSpaces) {
+    cache->verify_just_initialized();
+  })
 }
 
 
--- a/src/share/vm/logging/logTag.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/logging/logTag.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -74,6 +74,7 @@
   LOG_TAG(iklass) \
   LOG_TAG(init) \
   LOG_TAG(inlining) \
+  LOG_TAG(interpreter) \
   LOG_TAG(itables) \
   LOG_TAG(jit) \
   LOG_TAG(jni) \
@@ -139,6 +140,7 @@
   LOG_TAG(timer) \
   LOG_TAG(update) \
   LOG_TAG(unload) /* Trace unloading of classes */ \
+  LOG_TAG(unshareable) \
   LOG_TAG(verification) \
   LOG_TAG(verify) \
   LOG_TAG(vmoperation) \
--- a/src/share/vm/memory/metaspaceClosure.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/memory/metaspaceClosure.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -275,7 +275,8 @@
       address, bool,
       UniqueMetaspaceClosure::my_hash,   // solaris compiler doesn't like: primitive_hash<address>
       UniqueMetaspaceClosure::my_equals, // solaris compiler doesn't like: primitive_equals<address>
-    16384> _has_been_visited;
+      15889,                             // prime number
+      ResourceObj::C_HEAP> _has_been_visited;
 };
 
 #endif // SHARE_VM_MEMORY_METASPACE_ITERATOR_HPP
--- a/src/share/vm/memory/metaspaceShared.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/memory/metaspaceShared.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -374,25 +374,63 @@
 // Global object for holding classes that have been loaded.  Since this
 // is run at a safepoint just before exit, this is the entire set of classes.
 static GrowableArray<Klass*>* _global_klass_objects;
+
+static void collect_array_classes(Klass* k) {
+  _global_klass_objects->append_if_missing(k);
+  if (k->is_array_klass()) {
+    // Add in the array classes too
+    ArrayKlass* ak = ArrayKlass::cast(k);
+    Klass* h = ak->higher_dimension();
+    if (h != NULL) {
+      h->array_klasses_do(collect_array_classes);
+    }
+  }
+}
+
 class CollectClassesClosure : public KlassClosure {
   void do_klass(Klass* k) {
     if (!(k->is_instance_klass() && InstanceKlass::cast(k)->is_in_error_state())) {
       _global_klass_objects->append_if_missing(k);
     }
+    if (k->is_array_klass()) {
+      // Add in the array classes too
+      ArrayKlass* ak = ArrayKlass::cast(k);
+      Klass* h = ak->higher_dimension();
+      if (h != NULL) {
+        h->array_klasses_do(collect_array_classes);
+      }
+    }
   }
 };
 
 static void remove_unshareable_in_classes() {
   for (int i = 0; i < _global_klass_objects->length(); i++) {
     Klass* k = _global_klass_objects->at(i);
-    k->remove_unshareable_info();
+    if (!k->is_objArray_klass()) {
+      // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
+      // on their array classes.
+      assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
+      k->remove_unshareable_info();
+    }
+  }
+}
+
+static void remove_java_mirror_in_classes() {
+  for (int i = 0; i < _global_klass_objects->length(); i++) {
+    Klass* k = _global_klass_objects->at(i);
+    if (!k->is_objArray_klass()) {
+      // InstanceKlass and TypeArrayKlass will in turn call remove_unshareable_info
+      // on their array classes.
+      assert(k->is_instance_klass() || k->is_typeArray_klass(), "must be");
+      k->remove_java_mirror();
+    }
   }
 }
 
 static void rewrite_nofast_bytecode(Method* method) {
-  RawBytecodeStream bcs(method);
+  BytecodeStream bcs(method);
   while (!bcs.is_last_bytecode()) {
-    Bytecodes::Code opcode = bcs.raw_next();
+    Bytecodes::Code opcode = bcs.next();
     switch (opcode) {
     case Bytecodes::_getfield:      *bcs.bcp() = Bytecodes::_nofast_getfield;      break;
     case Bytecodes::_putfield:      *bcs.bcp() = Bytecodes::_nofast_putfield;      break;
@@ -446,6 +484,17 @@
   }
 }
 
+NOT_PRODUCT(
+static void assert_not_anonymous_class(InstanceKlass* k) {
+  assert(!(k->is_anonymous()), "cannot archive anonymous classes");
+}
+
+// Anonymous classes are not stored inside any dictionaries. They are created by
+// SystemDictionary::parse_stream() with a non-null host_klass.
+static void assert_no_anonymoys_classes_in_dictionaries() {
+  ClassLoaderDataGraph::dictionary_classes_do(assert_not_anonymous_class);
+})
+
 // Objects of the Metadata types (such as Klass and ConstantPool) have C++ vtables.
 // (In GCC this is the field <Type>::_vptr, i.e., first word in the object.)
 //
@@ -957,8 +1006,8 @@
     }
     memcpy(p, obj, bytes);
     bool isnew = _new_loc_table->put(obj, (address)p);
+    log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(obj), p2i(p), bytes);
     assert(isnew, "must be");
-    log_trace(cds)("Copy: " PTR_FORMAT " ==> " PTR_FORMAT " %d", p2i(obj), p2i(p), bytes);
 
     _alloc_stats->record(ref->msotype(), int(newtop - oldtop), read_only);
     if (ref->msotype() == MetaspaceObj::SymbolType) {
@@ -1151,6 +1200,9 @@
   // Reorder the system dictionary. Moving the symbols affects
   // how the hash table indices are calculated.
   SystemDictionary::reorder_dictionary_for_sharing();
+  tty->print("Removing java_mirror ... ");
+  remove_java_mirror_in_classes();
+  tty->print_cr("done. ");
   NOT_PRODUCT(SystemDictionary::verify();)
 
   size_t buckets_bytes = SystemDictionary::count_bytes_for_buckets();
@@ -1218,11 +1270,20 @@
   rewrite_nofast_bytecodes_and_calculate_fingerprints();
   tty->print_cr("done. ");
 
+  // Move classes from platform/system dictionaries into the boot dictionary
+  SystemDictionary::combine_shared_dictionaries();
+
   // Remove all references outside the metadata
   tty->print("Removing unshareable information ... ");
   remove_unshareable_in_classes();
   tty->print_cr("done. ");
 
+  // We don't support archiving anonymous classes. Verify that they are not stored in
+  // the any dictionaries.
+  NOT_PRODUCT(assert_no_anonymoys_classes_in_dictionaries());
+
+  SystemDictionaryShared::finalize_verification_constraints();
+
   ArchiveCompactor::initialize();
   ArchiveCompactor::copy_and_compact();
 
@@ -1312,6 +1373,14 @@
     ArchiveCompactor::alloc_stats()->print_stats(int(_ro_region.used()), int(_rw_region.used()),
                                                  int(_mc_region.used()), int(_md_region.used()));
   }
+
+  if (PrintSystemDictionaryAtExit) {
+    SystemDictionary::print();
+  }
+  // There may be other pending VM operations that operate on the InstanceKlasses,
+  // which will fail because InstanceKlasses::remove_unshareable_info()
+  // has been called. Forget these operations and exit the VM directly.
+  vm_direct_exit(0);
 }
 
 void VM_PopulateDumpSharedSpace::print_region_stats() {
@@ -1438,10 +1507,6 @@
       exit(1);
     }
   }
-
-  // Copy the verification constraints from C_HEAP-alloced GrowableArrays to RO-alloced
-  // Arrays
-  SystemDictionaryShared::finalize_verification_constraints();
 }
 
 void MetaspaceShared::prepare_for_dumping() {
@@ -1509,17 +1574,11 @@
     link_and_cleanup_shared_classes(CATCH);
     tty->print_cr("Rewriting and linking classes: done");
 
+    SystemDictionary::clear_invoke_method_table();
+
     VM_PopulateDumpSharedSpace op;
     VMThread::execute(&op);
   }
-
-  if (PrintSystemDictionaryAtExit) {
-    SystemDictionary::print();
-  }
-
-  // Since various initialization steps have been undone by this process,
-  // it is not reasonable to continue running a java process.
-  exit(0);
 }
 
 
@@ -1529,8 +1588,14 @@
 
     while (parser.parse_one_line()) {
       Klass* klass = ClassLoaderExt::load_one_class(&parser, THREAD);
-
-      CLEAR_PENDING_EXCEPTION;
+      if (HAS_PENDING_EXCEPTION) {
+        if (klass == NULL &&
+             (PENDING_EXCEPTION->klass()->name() == vmSymbols::java_lang_ClassNotFoundException())) {
+          // print a warning only when the pending exception is class not found
+          tty->print_cr("Preload Warning: Cannot find %s", parser.current_class_name());
+        }
+        CLEAR_PENDING_EXCEPTION;
+      }
       if (klass != NULL) {
         if (log_is_enabled(Trace, cds)) {
           ResourceMark rm;
@@ -1613,6 +1678,8 @@
     tty->print_cr("Dumping objects to open archive heap region ...");
     _open_archive_heap_regions = new GrowableArray<MemRegion>(2);
     MetaspaceShared::dump_open_archive_heap_objects(_open_archive_heap_regions);
+
+    MetaspaceShared::destroy_archive_object_cache();
   }
 
   G1HeapVerifier::verify_archive_regions();
--- a/src/share/vm/memory/metaspaceShared.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/memory/metaspaceShared.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -29,6 +29,7 @@
 #include "memory/allocation.hpp"
 #include "memory/memRegion.hpp"
 #include "memory/virtualspace.hpp"
+#include "oops/oop.inline.hpp"
 #include "utilities/exceptions.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/resourceHash.hpp"
@@ -96,11 +97,16 @@
     return p1 == p2;
   }
   static unsigned obj_hash(oop const& p) {
-    unsigned hash = (unsigned)((uintptr_t)&p);
-    return hash ^ (hash >> LogMinObjAlignment);
+    assert(!p->mark()->has_bias_pattern(),
+           "this object should never have been locked");  // so identity_hash won't safepoin
+    unsigned hash = (unsigned)p->identity_hash();
+    return hash;
   }
   typedef ResourceHashtable<oop, oop,
-      MetaspaceShared::obj_hash, MetaspaceShared::obj_equals> ArchivedObjectCache;
+      MetaspaceShared::obj_hash,
+      MetaspaceShared::obj_equals,
+      15889, // prime number
+      ResourceObj::C_HEAP> ArchivedObjectCache;
   static ArchivedObjectCache* _archive_object_cache;
 
  public:
@@ -115,7 +121,10 @@
     NOT_CDS_JAVA_HEAP(return false;)
   }
   static void create_archive_object_cache() {
-    CDS_JAVA_HEAP_ONLY(_archive_object_cache = new ArchivedObjectCache(););
+    CDS_JAVA_HEAP_ONLY(_archive_object_cache = new (ResourceObj::C_HEAP, mtClass)ArchivedObjectCache(););
+  }
+  static void destroy_archive_object_cache() {
+    CDS_JAVA_HEAP_ONLY(delete _archive_object_cache; _archive_object_cache = NULL;);
   }
   static void fixup_mapped_heap_regions() NOT_CDS_JAVA_HEAP_RETURN;
 
--- a/src/share/vm/memory/universe.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/memory/universe.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -63,7 +63,6 @@
 #include "runtime/atomic.hpp"
 #include "runtime/commandLineFlagConstraintList.hpp"
 #include "runtime/deoptimization.hpp"
-#include "runtime/fprofiler.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/init.hpp"
 #include "runtime/java.hpp"
--- a/src/share/vm/memory/virtualspace.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/memory/virtualspace.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -582,7 +582,7 @@
   assert(markOopDesc::encode_pointer_as_mark(&_base[size])->decode_pointer() == &_base[size],
          "area must be distinguishable from marks for mark-sweep");
 
-  if (base() > 0) {
+  if (base() != NULL) {
     MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap);
   }
 }
--- a/src/share/vm/oops/arrayKlass.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/oops/arrayKlass.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -224,12 +224,29 @@
 
 void ArrayKlass::remove_unshareable_info() {
   Klass::remove_unshareable_info();
+  if (_higher_dimension != NULL) {
+    ArrayKlass *ak = ArrayKlass::cast(higher_dimension());
+    ak->remove_unshareable_info();
+  }
+}
+
+void ArrayKlass::remove_java_mirror() {
+  Klass::remove_java_mirror();
+  if (_higher_dimension != NULL) {
+    ArrayKlass *ak = ArrayKlass::cast(higher_dimension());
+    ak->remove_java_mirror();
+  }
 }
 
 void ArrayKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS) {
   assert(loader_data == ClassLoaderData::the_null_class_loader_data(), "array classes belong to null loader");
   Klass::restore_unshareable_info(loader_data, protection_domain, CHECK);
   // Klass recreates the component mirror also
+
+  if (_higher_dimension != NULL) {
+    ArrayKlass *ak = ArrayKlass::cast(higher_dimension());
+    ak->restore_unshareable_info(loader_data, protection_domain, CHECK);
+  }
 }
 
 // Printing
--- a/src/share/vm/oops/arrayKlass.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/oops/arrayKlass.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -147,6 +147,7 @@
 
   // CDS support - remove and restore oops from metadata. Oops are not shared.
   virtual void remove_unshareable_info();
+  virtual void remove_java_mirror();
   virtual void restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS);
 
   // Printing
--- a/src/share/vm/oops/constantPool.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/oops/constantPool.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -89,8 +89,6 @@
 
 void ConstantPool::deallocate_contents(ClassLoaderData* loader_data) {
   if (cache() != NULL) {
-    MetadataFactory::free_array<u2>(loader_data, reference_map());
-    set_reference_map(NULL);
     MetadataFactory::free_metadata(loader_data, cache());
     set_cache(NULL);
   }
@@ -269,10 +267,14 @@
   }
 
   objArrayOop rr = resolved_references();
+  Array<u2>* ref_map = reference_map();
   if (rr != NULL) {
-    for (int i = 0; i < rr->length(); i++) {
+    int ref_map_len = ref_map == NULL ? 0 : ref_map->length();
+    int rr_len = rr->length();
+    for (int i = 0; i < rr_len; i++) {
       oop p = rr->obj_at(i);
-      if (p != NULL) {
+      rr->obj_at_put(i, NULL);
+      if (p != NULL && i < ref_map_len) {
         int index = object_to_cp_index(i);
         // Skip the entry if the string hash code is 0 since the string
         // is not included in the shared string_table, see StringTable::copy_shared_string.
@@ -283,11 +285,10 @@
           // have a 'bad' reference in the archived resolved_reference
           // array.
           rr->obj_at_put(i, op);
-        } else {
-          rr->obj_at_put(i, NULL);
         }
       }
     }
+
     oop archived = MetaspaceShared::archive_heap_object(rr, THREAD);
     _cache->set_archived_references(archived);
     set_resolved_references(NULL);
@@ -358,6 +359,26 @@
   // class redefinition. Since shared ConstantPools cannot be deallocated anyway,
   // we always set _on_stack to true to avoid having to change _flags during runtime.
   _flags |= (_on_stack | _is_shared);
+  int num_klasses = 0;
+  for (int index = 1; index < length(); index++) { // Index 0 is unused
+    assert(!tag_at(index).is_unresolved_klass_in_error(), "This must not happen during dump time");
+    if (tag_at(index).is_klass()) {
+      // This class was resolved as a side effect of executing Java code
+      // during dump time. We need to restore it back to an UnresolvedClass,
+      // so that the proper class loading and initialization can happen
+      // at runtime.
+      CPKlassSlot kslot = klass_slot_at(index);
+      int resolved_klass_index = kslot.resolved_klass_index();
+      int name_index = kslot.name_index();
+      assert(tag_at(name_index).is_symbol(), "sanity");
+      resolved_klasses()->at_put(resolved_klass_index, NULL);
+      tag_at_put(index, JVM_CONSTANT_UnresolvedClass);
+      assert(klass_name_at(index) == symbol_at(name_index), "sanity");
+    }
+  }
+  if (cache() != NULL) {
+    cache()->remove_unshareable_info();
+  }
 }
 
 int ConstantPool::cp_to_object_index(int cp_index) {
--- a/src/share/vm/oops/cpCache.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/oops/cpCache.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -23,9 +23,12 @@
  */
 
 #include "precompiled.hpp"
+#include "interpreter/bytecodeStream.hpp"
+#include "interpreter/bytecodes.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/rewriter.hpp"
 #include "logging/log.hpp"
+#include "memory/metadataFactory.hpp"
 #include "memory/metaspaceClosure.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.inline.hpp"
@@ -48,6 +51,24 @@
   assert(constant_pool_index() == index, "");
 }
 
+void ConstantPoolCacheEntry::verify_just_initialized(bool f2_used) {
+  assert((_indices & (~cp_index_mask)) == 0, "sanity");
+  assert(_f1 == NULL, "sanity");
+  assert(_flags == 0, "sanity");
+  if (!f2_used) {
+    assert(_f2 == 0, "sanity");
+  }
+}
+
+void ConstantPoolCacheEntry::reinitialize(bool f2_used) {
+  _indices &= cp_index_mask;
+  _f1 = NULL;
+  _flags = 0;
+  if (!f2_used) {
+    _f2 = 0;
+  }
+}
+
 int ConstantPoolCacheEntry::make_flags(TosState state,
                                        int option_bits,
                                        int field_index_or_method_params) {
@@ -609,6 +630,74 @@
   }
 }
 
+void ConstantPoolCache::verify_just_initialized() {
+  DEBUG_ONLY(walk_entries_for_initialization(/*check_only = */ true));
+}
+
+void ConstantPoolCache::remove_unshareable_info() {
+  walk_entries_for_initialization(/*check_only = */ false);
+}
+
+void ConstantPoolCache::walk_entries_for_initialization(bool check_only) {
+  assert(DumpSharedSpaces, "sanity");
+  // When dumping the archive, we want to clean up the ConstantPoolCache
+  // to remove any effect of linking due to the execution of Java code --
+  // each ConstantPoolCacheEntry will have the same contents as if
+  // ConstantPoolCache::initialize has just returned:
+  //
+  // - We keep the ConstantPoolCache::constant_pool_index() bits for all entries.
+  // - We keep the "f2" field for entries used by invokedynamic and invokehandle
+  // - All other bits in the entries are cleared to zero.
+  ResourceMark rm;
+
+  InstanceKlass* ik = constant_pool()->pool_holder();
+  bool* f2_used = NEW_RESOURCE_ARRAY(bool, length());
+  memset(f2_used, 0, sizeof(bool) * length());
+
+  // Find all the slots that we need to preserve f2
+  for (int i = 0; i < ik->methods()->length(); i++) {
+    Method* m = ik->methods()->at(i);
+    RawBytecodeStream bcs(m);
+    while (!bcs.is_last_bytecode()) {
+      Bytecodes::Code opcode = bcs.raw_next();
+      switch (opcode) {
+      case Bytecodes::_invokedynamic: {
+          int index = Bytes::get_native_u4(bcs.bcp() + 1);
+          int cp_cache_index = constant_pool()->invokedynamic_cp_cache_index(index);
+          f2_used[cp_cache_index] = 1;
+        }
+        break;
+      case Bytecodes::_invokehandle: {
+          int cp_cache_index = Bytes::get_native_u2(bcs.bcp() + 1);
+          f2_used[cp_cache_index] = 1;
+        }
+        break;
+      default:
+        break;
+      }
+    }
+  }
+
+  if (check_only) {
+    DEBUG_ONLY(
+      for (int i=0; i<length(); i++) {
+        entry_at(i)->verify_just_initialized(f2_used[i]);
+      })
+  } else {
+    for (int i=0; i<length(); i++) {
+      entry_at(i)->reinitialize(f2_used[i]);
+    }
+  }
+}
+
+void ConstantPoolCache::deallocate_contents(ClassLoaderData* data) {
+  assert(!is_shared(), "shared caches are not deallocated");
+  data->remove_handle(_resolved_references);
+  set_resolved_references(NULL);
+  MetadataFactory::free_array<u2>(data, _reference_map);
+  set_reference_map(NULL);
+}
+
 #if INCLUDE_CDS_JAVA_HEAP
 oop ConstantPoolCache::archived_references() {
   assert(UseSharedSpaces, "UseSharedSpaces expected.");
--- a/src/share/vm/oops/cpCache.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/oops/cpCache.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -397,6 +397,9 @@
     // When shifting flags as a 32-bit int, make sure we don't need an extra mask for tos_state:
     assert((((u4)-1 >> tos_state_shift) & ~tos_state_mask) == 0, "no need for tos_state mask");
   }
+
+  void verify_just_initialized(bool f2_used);
+  void reinitialize(bool f2_used);
 };
 
 
@@ -468,7 +471,11 @@
   // Assembly code support
   static int resolved_references_offset_in_bytes() { return offset_of(ConstantPoolCache, _resolved_references); }
 
+  // CDS support
+  void remove_unshareable_info();
+  void verify_just_initialized();
  private:
+  void walk_entries_for_initialization(bool check_only);
   void set_length(int length)                    { _length = length; }
 
   static int header_size()                       { return sizeof(ConstantPoolCache) / wordSize; }
@@ -514,9 +521,9 @@
   void dump_cache();
 #endif // INCLUDE_JVMTI
 
-  // Deallocate - no fields to deallocate
+  // RedefineClasses support
   DEBUG_ONLY(bool on_stack() { return false; })
-  void deallocate_contents(ClassLoaderData* data) {}
+  void deallocate_contents(ClassLoaderData* data);
   bool is_klass() const { return false; }
 
   // Printing
--- a/src/share/vm/oops/instanceKlass.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/oops/instanceKlass.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -851,10 +851,10 @@
       char* message = NEW_RESOURCE_ARRAY(char, msglen);
       if (NULL == message) {
         // Out of memory: can't create detailed error message
-        THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), className);
+          THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), className);
       } else {
         jio_snprintf(message, msglen, "%s%s", desc, className);
-        THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), message);
+          THROW_MSG(vmSymbols::java_lang_NoClassDefFoundError(), message);
       }
     }
 
@@ -2171,14 +2171,14 @@
     m->remove_unshareable_info();
   }
 
+  // do array classes also.
+  if (array_klasses() != NULL) {
+    array_klasses()->remove_unshareable_info();
+  }
+
   // These are not allocated from metaspace, but they should should all be empty
-  // during dump time, so we don't need to worry about them in InstanceKlass::metaspace_pointers_do().
+  // during dump time, so we don't need to worry about them in InstanceKlass::iterate().
   guarantee(_source_debug_extension == NULL, "must be");
-  guarantee(_oop_map_cache == NULL, "must be");
-  guarantee(_init_thread == NULL, "must be");
-  guarantee(_oop_map_cache == NULL, "must be");
-  guarantee(_jni_ids == NULL, "must be");
-  guarantee(_methods_jmethod_ids == NULL, "must be");
   guarantee(_dep_context == DependencyContext::EMPTY, "must be");
   guarantee(_osr_nmethods_head == NULL, "must be");
 
@@ -2186,12 +2186,20 @@
   guarantee(_breakpoints == NULL, "must be");
   guarantee(_previous_versions == NULL, "must be");
 #endif
+
+ _init_thread = NULL;
+ _methods_jmethod_ids = NULL;
+ _jni_ids = NULL;
+ _oop_map_cache = NULL;
 }
 
-static void restore_unshareable_in_class(Klass* k, TRAPS) {
-  // Array classes have null protection domain.
-  // --> see ArrayKlass::complete_create_array_klass()
-  k->restore_unshareable_info(ClassLoaderData::the_null_class_loader_data(), Handle(), CHECK);
+void InstanceKlass::remove_java_mirror() {
+  Klass::remove_java_mirror();
+
+  // do array classes also.
+  if (array_klasses() != NULL) {
+    array_klasses()->remove_java_mirror();
+  }
 }
 
 void InstanceKlass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS) {
@@ -2218,7 +2226,11 @@
   // restore constant pool resolved references
   constants()->restore_unshareable_info(CHECK);
 
-  array_klasses_do(restore_unshareable_in_class, CHECK);
+  if (array_klasses() != NULL) {
+    // Array classes have null protection domain.
+    // --> see ArrayKlass::complete_create_array_klass()
+    array_klasses()->restore_unshareable_info(ClassLoaderData::the_null_class_loader_data(), Handle(), CHECK);
+  }
 }
 
 // returns true IFF is_in_error_state() has been changed as a result of this call.
--- a/src/share/vm/oops/instanceKlass.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/oops/instanceKlass.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -357,8 +357,6 @@
   }
 
   void set_class_loader_type(s2 loader_type) {
-    assert(( _misc_flags & loader_type_bits()) == 0,
-           "Should only be called once for each class.");
     switch (loader_type) {
     case ClassLoader::BOOT_LOADER:
       _misc_flags |= _misc_is_shared_boot_class;
@@ -1491,6 +1489,7 @@
 public:
   // CDS support - remove and restore oops from metadata. Oops are not shared.
   virtual void remove_unshareable_info();
+  virtual void remove_java_mirror();
   virtual void restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS);
 
   // jvm support
--- a/src/share/vm/oops/klass.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/oops/klass.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -512,11 +512,13 @@
 void Klass::remove_unshareable_info() {
   assert (DumpSharedSpaces, "only called for DumpSharedSpaces");
   TRACE_REMOVE_ID(this);
+  if (log_is_enabled(Trace, cds, unshareable)) {
+    ResourceMark rm;
+    log_trace(cds, unshareable)("remove: %s", external_name());
+  }
 
   set_subklass(NULL);
   set_next_sibling(NULL);
-  // Clear the java mirror
-  set_java_mirror(NULL);
   set_next_link(NULL);
 
   // Null out class_loader_data because we don't share that yet.
@@ -524,10 +526,23 @@
   set_is_shared();
 }
 
+void Klass::remove_java_mirror() {
+  assert (DumpSharedSpaces, "only called for DumpSharedSpaces");
+  if (log_is_enabled(Trace, cds, unshareable)) {
+    ResourceMark rm;
+    log_trace(cds, unshareable)("remove java_mirror: %s", external_name());
+  }
+  set_java_mirror(NULL);
+}
+
 void Klass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS) {
   assert(is_klass(), "ensure C++ vtable is restored");
   assert(is_shared(), "must be set");
   TRACE_RESTORE_ID(this);
+  if (log_is_enabled(Trace, cds, unshareable)) {
+    ResourceMark rm;
+    log_trace(cds, unshareable)("restore: %s", external_name());
+  }
 
   // If an exception happened during CDS restore, some of these fields may already be
   // set.  We leave the class on the CLD list, even if incomplete so that we don't
--- a/src/share/vm/oops/klass.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/oops/klass.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -480,6 +480,7 @@
 
   // CDS support - remove and restore oops from metadata. Oops are not shared.
   virtual void remove_unshareable_info();
+  virtual void remove_java_mirror();
   virtual void restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS);
 
  protected:
--- a/src/share/vm/oops/method.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/oops/method.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -218,26 +218,14 @@
 }
 
 void Method::mask_for(int bci, InterpreterOopMap* mask) {
-
-  Thread* myThread    = Thread::current();
-  methodHandle h_this(myThread, this);
-#if defined(ASSERT) && !INCLUDE_JVMCI
-  bool has_capability = myThread->is_VM_thread() ||
-                        myThread->is_ConcurrentGC_thread() ||
-                        myThread->is_GC_task_thread();
-
-  if (!has_capability) {
-    if (!VerifyStack && !VerifyLastFrame) {
-      // verify stack calls this outside VM thread
-      warning("oopmap should only be accessed by the "
-              "VM, GC task or CMS threads (or during debugging)");
-      InterpreterOopMap local_mask;
-      method_holder()->mask_for(h_this, bci, &local_mask);
-      local_mask.print();
-    }
+  methodHandle h_this(Thread::current(), this);
+  // Only GC uses the OopMapCache during thread stack root scanning
+  // any other uses generate an oopmap but do not save it in the cache.
+  if (Universe::heap()->is_gc_active()) {
+    method_holder()->mask_for(h_this, bci, mask);
+  } else {
+    OopMapCache::compute_one_oop_map(h_this, bci, mask);
   }
-#endif
-  method_holder()->mask_for(h_this, bci, mask);
   return;
 }
 
@@ -985,10 +973,6 @@
   _from_compiled_entry = cds_adapter->get_c2i_entry_trampoline();
   assert(*((int*)_from_compiled_entry) == 0, "must be NULL during dump time, to be initialized at run time");
 
-
-  // In case of DumpSharedSpaces, _method_data should always be NULL.
-  assert(_method_data == NULL, "unexpected method data?");
-
   set_method_data(NULL);
   clear_method_counters();
 }
--- a/src/share/vm/oops/oopHandle.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/oops/oopHandle.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -46,6 +46,9 @@
   OopHandle(oop* w) : _obj(w) {}
 
   oop resolve() const { return (_obj == NULL) ? (oop)NULL : *_obj; }
+
+  // Used only for removing handle.
+  oop* ptr_raw() { return _obj; }
 };
 
 #endif // SHARE_VM_OOPS_OOPHANDLE_HPP
--- a/src/share/vm/oops/symbol.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/oops/symbol.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -219,7 +219,7 @@
 
 void Symbol::decrement_refcount() {
   if (_refcount >= 0) { // not a permanent symbol
-    jshort new_value = Atomic::add(-1, &_refcount);
+    short new_value = Atomic::add(short(-1), &_refcount);
 #ifdef ASSERT
     if (new_value == -1) { // we have transitioned from 0 -> -1
       print();
--- a/src/share/vm/opto/c2compiler.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/opto/c2compiler.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -283,7 +283,7 @@
   case vmIntrinsics::_weakCompareAndSetIntAcquire:
   case vmIntrinsics::_weakCompareAndSetIntRelease:
   case vmIntrinsics::_weakCompareAndSetInt:
-    if (!Matcher::match_rule_supported(Op_WeakCompareAndSwapL)) return false;
+    if (!Matcher::match_rule_supported(Op_WeakCompareAndSwapI)) return false;
     break;
 
   /* CompareAndSet, Byte: */
--- a/src/share/vm/opto/lcm.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/opto/lcm.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -39,7 +39,7 @@
 // Check whether val is not-null-decoded compressed oop,
 // i.e. will grab into the base of the heap if it represents NULL.
 static bool accesses_heap_base_zone(Node *val) {
-  if (Universe::narrow_oop_base() > 0) { // Implies UseCompressedOops.
+  if (Universe::narrow_oop_base() != NULL) { // Implies UseCompressedOops.
     if (val && val->is_Mach()) {
       if (val->as_Mach()->ideal_Opcode() == Op_DecodeN) {
         // This assumes all Decodes with TypePtr::NotNull are matched to nodes that
--- a/src/share/vm/opto/loopPredicate.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/opto/loopPredicate.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -912,7 +912,7 @@
       Node*          idx    = cmp->in(1);
       assert(!invar.is_invariant(idx), "index is variant");
       Node* rng = cmp->in(2);
-      assert(rng->Opcode() == Op_LoadRange || _igvn.type(rng)->is_int() >= 0, "must be");
+      assert(rng->Opcode() == Op_LoadRange || iff->is_RangeCheck() || _igvn.type(rng)->is_int()->_lo >= 0, "must be");
       assert(invar.is_invariant(rng), "range must be invariant");
       int scale    = 1;
       Node* offset = zero;
--- a/src/share/vm/opto/runtime.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/opto/runtime.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -62,7 +62,6 @@
 #include "opto/runtime.hpp"
 #include "opto/subnode.hpp"
 #include "runtime/atomic.hpp"
-#include "runtime/fprofiler.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/javaCalls.hpp"
--- a/src/share/vm/prims/jni.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/prims/jni.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -62,7 +62,6 @@
 #include "runtime/atomic.hpp"
 #include "runtime/compilationPolicy.hpp"
 #include "runtime/fieldDescriptor.hpp"
-#include "runtime/fprofiler.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/interfaceSupport.hpp"
 #include "runtime/java.hpp"
--- a/src/share/vm/prims/jvmti.xml	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/prims/jvmti.xml	Tue Sep 26 13:09:56 2017 +0200
@@ -227,7 +227,8 @@
                        label CDATA #REQUIRED>
 
    <!ELEMENT basetype (definition?,description)>
-   <!ATTLIST basetype id CDATA #REQUIRED>
+   <!ATTLIST basetype id CDATA #REQUIRED
+                      name CDATA #IMPLIED>
 
    <!ELEMENT definition (#PCDATA|jvmti)*>
 
@@ -438,7 +439,7 @@
     The details of how this is initiated are implementation specific.
   </intro>
 
-    <intro id="entry point" label="Statically Linked Agents (since version 1.2.3)">
+    <intro id="entryPoint" label="Statically Linked Agents (since version 1.2.3)">
 
       A native JVMTI Agent may be <i>statically linked</i> with the VM.
       The manner in which the library and VM image are combined is
@@ -6647,7 +6648,7 @@
           If <paramlink id="module"></paramlink> is not a module object.
         </error>
         <error id="JVMTI_ERROR_INVALID_MODULE">
-          If <paramlink id="to_modules"></paramlink> is not a module object.
+          If <paramlink id="to_module"></paramlink> is not a module object.
         </error>
         <error id="JVMTI_ERROR_ILLEGAL_ARGUMENT">
           If the package <paramlink id="pkg_name"></paramlink>
@@ -6702,7 +6703,7 @@
           If <paramlink id="module"></paramlink> is not a module object.
         </error>
         <error id="JVMTI_ERROR_INVALID_MODULE">
-          If <paramlink id="to_modules"></paramlink> is not a module object.
+          If <paramlink id="to_module"></paramlink> is not a module object.
         </error>
         <error id="JVMTI_ERROR_ILLEGAL_ARGUMENT">
           If the package <paramlink id="pkg_name"></paramlink>
@@ -13786,7 +13787,7 @@
 </example>
       </description>
     </basetype>
-    <basetype id="jvmtiEventCallbacks">
+    <basetype id="jvmtiEventCallbacks" name="eventCallbacks">
       <description>
         The callbacks used for events.
 <example>
--- a/src/share/vm/prims/jvmti.xsl	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/prims/jvmti.xsl	Tue Sep 26 13:09:56 2017 +0200
@@ -905,8 +905,15 @@
         </td>
         <td>
           <a>
-            <xsl:attribute name="name">
-              <xsl:value-of select="@id"/>
+            <xsl:attribute name="id">
+              <xsl:choose>
+                <xsl:when test="count(@name)=1">
+                  <xsl:value-of select="@name"/>
+                </xsl:when>
+                <xsl:otherwise>
+                  <xsl:value-of select="@id"/>
+                </xsl:otherwise>
+              </xsl:choose>
             </xsl:attribute>
           </a>
           <xsl:apply-templates select="description" mode="brief"/>
@@ -922,7 +929,7 @@
         </td>
         <td>
           <a>
-            <xsl:attribute name="name">
+            <xsl:attribute name="id">
               <xsl:value-of select="@id"/>
             </xsl:attribute>
           </a>
--- a/src/share/vm/prims/jvmtiExport.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/prims/jvmtiExport.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -2497,14 +2497,13 @@
       library = os::dll_load(agent, ebuf, sizeof ebuf);
     } else {
       // Try to load the agent from the standard dll directory
-      if (os::dll_build_name(buffer, sizeof(buffer), Arguments::get_dll_dir(),
+      if (os::dll_locate_lib(buffer, sizeof(buffer), Arguments::get_dll_dir(),
                              agent)) {
         library = os::dll_load(buffer, ebuf, sizeof ebuf);
       }
       if (library == NULL) {
-        // not found - try local path
-        char ns[1] = {0};
-        if (os::dll_build_name(buffer, sizeof(buffer), ns, agent)) {
+        // not found - try OS default library path
+        if (os::dll_build_name(buffer, sizeof(buffer), agent)) {
           library = os::dll_load(buffer, ebuf, sizeof ebuf);
         }
       }
--- a/src/share/vm/runtime/arguments.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/runtime/arguments.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -78,7 +78,6 @@
 char*  Arguments::_java_command                 = NULL;
 SystemProperty* Arguments::_system_properties   = NULL;
 const char*  Arguments::_gc_log_filename        = NULL;
-bool   Arguments::_has_profile                  = false;
 size_t Arguments::_conservative_max_heap_alignment = 0;
 size_t Arguments::_min_heap_size                = 0;
 Arguments::Mode Arguments::_mode                = _mixed;
@@ -379,6 +378,9 @@
   { "MaxGCMinorPauseMillis",        JDK_Version::jdk(8), JDK_Version::undefined(), JDK_Version::undefined() },
   { "UseConcMarkSweepGC",           JDK_Version::jdk(9), JDK_Version::undefined(), JDK_Version::undefined() },
   { "MonitorInUseLists",            JDK_Version::jdk(10),JDK_Version::undefined(), JDK_Version::undefined() },
+  { "MaxRAMFraction",               JDK_Version::jdk(10),  JDK_Version::undefined(), JDK_Version::undefined() },
+  { "MinRAMFraction",               JDK_Version::jdk(10),  JDK_Version::undefined(), JDK_Version::undefined() },
+  { "InitialRAMFraction",           JDK_Version::jdk(10),  JDK_Version::undefined(), JDK_Version::undefined() },
 
   // --- Deprecated alias flags (see also aliased_jvm_flags) - sorted by obsolete_in then expired_in:
   { "DefaultMaxRAMFraction",        JDK_Version::jdk(8),  JDK_Version::undefined(), JDK_Version::undefined() },
@@ -1291,13 +1293,11 @@
                                            "jdk.module.limitmods",
                                            "jdk.module.path",
                                            "jdk.module.upgrade.path",
-                                           "jdk.module.addmods.0",
                                            "jdk.module.patch.0" };
   const char* unsupported_options[] = { "-m", // cannot use at dump time
                                         "--limit-modules", // ignored at dump time
                                         "--module-path", // ignored at dump time
                                         "--upgrade-module-path", // ignored at dump time
-                                        "--add-modules", // ignored at dump time
                                         "--patch-module" // ignored at dump time
                                       };
   assert(ARRAY_SIZE(unsupported_properties) == ARRAY_SIZE(unsupported_options), "must be");
@@ -2069,20 +2069,33 @@
     }
   }
 
+  // Convert deprecated flags
+  if (FLAG_IS_DEFAULT(MaxRAMPercentage) &&
+      !FLAG_IS_DEFAULT(MaxRAMFraction))
+    MaxRAMPercentage = 100.0 / MaxRAMFraction;
+
+  if (FLAG_IS_DEFAULT(MinRAMPercentage) &&
+      !FLAG_IS_DEFAULT(MinRAMFraction))
+    MinRAMPercentage = 100.0 / MinRAMFraction;
+
+  if (FLAG_IS_DEFAULT(InitialRAMPercentage) &&
+      !FLAG_IS_DEFAULT(InitialRAMFraction))
+    InitialRAMPercentage = 100.0 / InitialRAMFraction;
+
   // If the maximum heap size has not been set with -Xmx,
   // then set it as fraction of the size of physical memory,
   // respecting the maximum and minimum sizes of the heap.
   if (FLAG_IS_DEFAULT(MaxHeapSize)) {
-    julong reasonable_max = phys_mem / MaxRAMFraction;
-
-    if (phys_mem <= MaxHeapSize * MinRAMFraction) {
+    julong reasonable_max = (julong)((phys_mem * MaxRAMPercentage) / 100);
+    if (phys_mem <= (julong)((MaxHeapSize * MinRAMPercentage) / 100)) {
       // Small physical memory, so use a minimum fraction of it for the heap
-      reasonable_max = phys_mem / MinRAMFraction;
+      reasonable_max = (julong)((phys_mem * MinRAMPercentage) / 100);
     } else {
       // Not-small physical memory, so require a heap at least
       // as large as MaxHeapSize
       reasonable_max = MAX2(reasonable_max, (julong)MaxHeapSize);
     }
+
     if (!FLAG_IS_DEFAULT(ErgoHeapSizeLimit) && ErgoHeapSizeLimit != 0) {
       // Limit the heap size to ErgoHeapSizeLimit
       reasonable_max = MIN2(reasonable_max, (julong)ErgoHeapSizeLimit);
@@ -2135,7 +2148,7 @@
     reasonable_minimum = limit_by_allocatable_memory(reasonable_minimum);
 
     if (InitialHeapSize == 0) {
-      julong reasonable_initial = phys_mem / InitialRAMFraction;
+      julong reasonable_initial = (julong)((phys_mem * InitialRAMPercentage) / 100);
 
       reasonable_initial = MAX3(reasonable_initial, reasonable_minimum, (julong)min_heap_size());
       reasonable_initial = MIN2(reasonable_initial, (julong)MaxHeapSize);
@@ -2691,17 +2704,11 @@
   }
 
   // Do final processing now that all arguments have been parsed
-  result = finalize_vm_init_args();
+  result = finalize_vm_init_args(patch_mod_javabase);
   if (result != JNI_OK) {
     return result;
   }
 
-#if INCLUDE_CDS
-  if (UseSharedSpaces && patch_mod_javabase) {
-    no_shared_spaces("CDS is disabled when " JAVA_BASE_NAME " module is patched.");
-  }
-#endif
-
   return JNI_OK;
 }
 
@@ -3176,16 +3183,12 @@
       if (FLAG_SET_CMDLINE(bool, ReduceSignalUsage, true) != Flag::SUCCESS) {
         return JNI_EINVAL;
       }
-    // -Xprof
+      // -Xprof
     } else if (match_option(option, "-Xprof")) {
-#if INCLUDE_FPROF
-      log_warning(arguments)("Option -Xprof was deprecated in version 9 and will likely be removed in a future release.");
-      _has_profile = true;
-#else // INCLUDE_FPROF
-      jio_fprintf(defaultStream::error_stream(),
-        "Flat profiling is not supported in this VM.\n");
-      return JNI_ERR;
-#endif // INCLUDE_FPROF
+      char version[256];
+      // Obsolete in JDK 10
+      JDK_Version::jdk(10).to_string(version, sizeof(version));
+      warning("Ignoring option %s; support was removed in %s", option->optionString, version);
     // -Xconcurrentio
     } else if (match_option(option, "-Xconcurrentio")) {
       if (FLAG_SET_CMDLINE(bool, UseLWPSynchronization, true) != Flag::SUCCESS) {
@@ -3632,7 +3635,7 @@
   return nonEmptyDirs;
 }
 
-jint Arguments::finalize_vm_init_args() {
+jint Arguments::finalize_vm_init_args(bool patch_mod_javabase) {
   // check if the default lib/endorsed directory exists; if so, error
   char path[JVM_MAXPATHLEN];
   const char* fileSep = os::file_separator();
@@ -3758,6 +3761,17 @@
   }
 #endif
 
+#if INCLUDE_CDS
+  if (DumpSharedSpaces) {
+    // Disable biased locking now as it interferes with the clean up of
+    // the archived Klasses and Java string objects (at dump time only).
+    UseBiasedLocking = false;
+  }
+  if (UseSharedSpaces && patch_mod_javabase) {
+    no_shared_spaces("CDS is disabled when " JAVA_BASE_NAME " module is patched.");
+  }
+#endif
+
   return JNI_OK;
 }
 
--- a/src/share/vm/runtime/arguments.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/runtime/arguments.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -412,7 +412,6 @@
   static bool   _sun_java_launcher_is_altjvm;
 
   // Option flags
-  static bool   _has_profile;
   static const char*  _gc_log_filename;
   // Value of the conservative maximum heap alignment needed
   static size_t  _conservative_max_heap_alignment;
@@ -536,7 +535,7 @@
                                  const JavaVMInitArgs *java_options_args,
                                  const JavaVMInitArgs *cmd_line_args);
   static jint parse_each_vm_init_arg(const JavaVMInitArgs* args, bool* patch_mod_javabase, Flag::Flags origin);
-  static jint finalize_vm_init_args();
+  static jint finalize_vm_init_args(bool patch_mod_javabase);
   static bool is_bad_option(const JavaVMOption* option, jboolean ignore, const char* option_type);
 
   static bool is_bad_option(const JavaVMOption* option, jboolean ignore) {
@@ -696,9 +695,6 @@
   // -Dsun.java.launcher.pid
   static int sun_java_launcher_pid()        { return _sun_java_launcher_pid; }
 
-  // -Xprof
-  static bool has_profile()                 { return _has_profile; }
-
   // -Xms
   static size_t min_heap_size()             { return _min_heap_size; }
   static void  set_min_heap_size(size_t v)  { _min_heap_size = v;  }
--- a/src/share/vm/runtime/arguments_ext.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/runtime/arguments_ext.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,7 @@
   // Otherwise returns false.
   static inline bool process_options(const JavaVMOption *option) { return false; }
   static inline void report_unsupported_options() { }
+  static inline bool using_AppCDS() { return false; }
 };
 
 void ArgumentsExt::set_gc_specific_flags() {
--- a/src/share/vm/runtime/atomic.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/runtime/atomic.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -26,11 +26,14 @@
 #define SHARE_VM_RUNTIME_ATOMIC_HPP
 
 #include "memory/allocation.hpp"
+#include "metaprogramming/conditional.hpp"
 #include "metaprogramming/enableIf.hpp"
 #include "metaprogramming/isIntegral.hpp"
+#include "metaprogramming/isPointer.hpp"
 #include "metaprogramming/isSame.hpp"
 #include "metaprogramming/primitiveConversions.hpp"
 #include "metaprogramming/removeCV.hpp"
+#include "metaprogramming/removePointer.hpp"
 #include "utilities/align.hpp"
 #include "utilities/macros.hpp"
 
@@ -82,11 +85,17 @@
 
   // Atomically add to a location. Returns updated value. add*() provide:
   // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
-  inline static jshort   add    (jshort   add_value, volatile jshort*   dest);
-  inline static jint     add    (jint     add_value, volatile jint*     dest);
-  inline static size_t   add    (size_t   add_value, volatile size_t*   dest);
-  inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest);
-  inline static void*    add_ptr(intptr_t add_value, volatile void*     dest);
+
+  template<typename I, typename D>
+  inline static D add(I add_value, D volatile* dest);
+
+  inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest) {
+    return add(add_value, dest);
+  }
+
+  inline static void* add_ptr(intptr_t add_value, volatile void* dest) {
+    return add(add_value, reinterpret_cast<char* volatile*>(dest));
+  }
 
   // Atomically increment location. inc*() provide:
   // <fence> increment-dest <membar StoreLoad|StoreStore>
@@ -156,6 +165,74 @@
   // that is needed here.
   template<typename From, typename To> struct IsPointerConvertible;
 
+  // Dispatch handler for add.  Provides type-based validity checking
+  // and limited conversions around calls to the platform-specific
+  // implementation layer provided by PlatformAdd.
+  template<typename I, typename D, typename Enable = void>
+  struct AddImpl;
+
+  // Platform-specific implementation of add.  Support for sizes of 4
+  // bytes and (if different) pointer size bytes are required.  The
+  // class is a function object that must be default constructable,
+  // with these requirements:
+  //
+  // - dest is of type D*, an integral or pointer type.
+  // - add_value is of type I, an integral type.
+  // - sizeof(I) == sizeof(D).
+  // - if D is an integral type, I == D.
+  // - platform_add is an object of type PlatformAdd<sizeof(D)>.
+  //
+  // Then
+  //   platform_add(add_value, dest)
+  // must be a valid expression, returning a result convertible to D.
+  //
+  // No definition is provided; all platforms must explicitly define
+  // this class and any needed specializations.
+  template<size_t byte_size> struct PlatformAdd;
+
+  // Helper base classes for defining PlatformAdd.  To use, define
+  // PlatformAdd or a specialization that derives from one of these,
+  // and include in the PlatformAdd definition the support function
+  // (described below) required by the base class.
+  //
+  // These classes implement the required function object protocol for
+  // PlatformAdd, using a support function template provided by the
+  // derived class.  Let add_value (of type I) and dest (of type D) be
+  // the arguments the object is called with.  If D is a pointer type
+  // P*, then let addend (of type I) be add_value * sizeof(P);
+  // otherwise, addend is add_value.
+  //
+  // FetchAndAdd requires the derived class to provide
+  //   fetch_and_add(addend, dest)
+  // atomically adding addend to the value of dest, and returning the
+  // old value.
+  //
+  // AddAndFetch requires the derived class to provide
+  //   add_and_fetch(addend, dest)
+  // atomically adding addend to the value of dest, and returning the
+  // new value.
+  //
+  // When D is a pointer type P*, both fetch_and_add and add_and_fetch
+  // treat it as if it were a uintptr_t; they do not perform any
+  // scaling of the addend, as that has already been done by the
+  // caller.
+public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
+  template<typename Derived> struct FetchAndAdd;
+  template<typename Derived> struct AddAndFetch;
+private:
+
+  // Support for platforms that implement some variants of add using a
+  // (typically out of line) non-template helper function.  The
+  // generic arguments passed to PlatformAdd need to be translated to
+  // the appropriate type for the helper function, the helper function
+  // invoked on the translated arguments, and the result translated
+  // back.  Type is the parameter / return type of the helper
+  // function.  No scaling of add_value is performed when D is a pointer
+  // type, so this function can be used to implement the support function
+  // required by AddAndFetch.
+  template<typename Type, typename Fn, typename I, typename D>
+  static D add_using_helper(Fn fn, I add_value, D volatile* dest);
+
   // Dispatch handler for cmpxchg.  Provides type-based validity
   // checking and limited conversions around calls to the
   // platform-specific implementation layer provided by
@@ -219,6 +296,22 @@
   static const bool value = (sizeof(yes) == sizeof(test(test_value)));
 };
 
+// Define FetchAndAdd and AddAndFetch helper classes before including
+// platform file, which may use these as base classes, requiring they
+// be complete.
+
+template<typename Derived>
+struct Atomic::FetchAndAdd VALUE_OBJ_CLASS_SPEC {
+  template<typename I, typename D>
+  D operator()(I add_value, D volatile* dest) const;
+};
+
+template<typename Derived>
+struct Atomic::AddAndFetch VALUE_OBJ_CLASS_SPEC {
+  template<typename I, typename D>
+  D operator()(I add_value, D volatile* dest) const;
+};
+
 // Define the class before including platform file, which may specialize
 // the operator definition.  No generic definition of specializations
 // of the operator template are provided, nor are there any generic
@@ -255,8 +348,93 @@
 #error size_t is not WORD_SIZE, interesting platform, but missing implementation here
 #endif
 
-inline size_t Atomic::add(size_t add_value, volatile size_t* dest) {
-  return (size_t) add_ptr((intptr_t) add_value, (volatile intptr_t*) dest);
+template<typename I, typename D>
+inline D Atomic::add(I add_value, D volatile* dest) {
+  return AddImpl<I, D>()(add_value, dest);
+}
+
+template<typename I, typename D>
+struct Atomic::AddImpl<
+  I, D,
+  typename EnableIf<IsIntegral<I>::value &&
+                    IsIntegral<D>::value &&
+                    (sizeof(I) <= sizeof(D)) &&
+                    (IsSigned<I>::value == IsSigned<D>::value)>::type>
+  VALUE_OBJ_CLASS_SPEC
+{
+  D operator()(I add_value, D volatile* dest) const {
+    D addend = add_value;
+    return PlatformAdd<sizeof(D)>()(addend, dest);
+  }
+};
+
+template<typename I, typename P>
+struct Atomic::AddImpl<
+  I, P*,
+  typename EnableIf<IsIntegral<I>::value && (sizeof(I) <= sizeof(P*))>::type>
+  VALUE_OBJ_CLASS_SPEC
+{
+  P* operator()(I add_value, P* volatile* dest) const {
+    STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
+    STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
+    typedef typename Conditional<IsSigned<I>::value,
+                                 intptr_t,
+                                 uintptr_t>::type CI;
+    CI addend = add_value;
+    return PlatformAdd<sizeof(P*)>()(addend, dest);
+  }
+};
+
+// Most platforms do not support atomic add on a 2-byte value. However,
+// if the value occupies the most significant 16 bits of an aligned 32-bit
+// word, then we can do this with an atomic add of (add_value << 16)
+// to the 32-bit word.
+//
+// The least significant parts of this 32-bit word will never be affected, even
+// in case of overflow/underflow.
+//
+// Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
+template<>
+struct Atomic::AddImpl<jshort, jshort> VALUE_OBJ_CLASS_SPEC {
+  jshort operator()(jshort add_value, jshort volatile* dest) const {
+#ifdef VM_LITTLE_ENDIAN
+    assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
+    jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest-1));
+#else
+    assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
+    jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest));
+#endif
+    return (jshort)(new_value >> 16); // preserves sign
+  }
+};
+
+template<typename Derived>
+template<typename I, typename D>
+inline D Atomic::FetchAndAdd<Derived>::operator()(I add_value, D volatile* dest) const {
+  I addend = add_value;
+  // If D is a pointer type P*, scale by sizeof(P).
+  if (IsPointer<D>::value) {
+    addend *= sizeof(typename RemovePointer<D>::type);
+  }
+  D old = static_cast<const Derived*>(this)->fetch_and_add(addend, dest);
+  return old + add_value;
+}
+
+template<typename Derived>
+template<typename I, typename D>
+inline D Atomic::AddAndFetch<Derived>::operator()(I add_value, D volatile* dest) const {
+  // If D is a pointer type P*, scale by sizeof(P).
+  if (IsPointer<D>::value) {
+    add_value *= sizeof(typename RemovePointer<D>::type);
+  }
+  return static_cast<const Derived*>(this)->add_and_fetch(add_value, dest);
+}
+
+template<typename Type, typename Fn, typename I, typename D>
+inline D Atomic::add_using_helper(Fn fn, I add_value, D volatile* dest) {
+  return PrimitiveConversions::cast<D>(
+    fn(PrimitiveConversions::cast<Type>(add_value),
+       reinterpret_cast<Type volatile*>(dest)));
 }
 
 inline void Atomic::inc(volatile size_t* dest) {
@@ -413,32 +591,12 @@
   return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
 }
 
-inline jshort Atomic::add(jshort add_value, volatile jshort* dest) {
-  // Most platforms do not support atomic add on a 2-byte value. However,
-  // if the value occupies the most significant 16 bits of an aligned 32-bit
-  // word, then we can do this with an atomic add of (add_value << 16)
-  // to the 32-bit word.
-  //
-  // The least significant parts of this 32-bit word will never be affected, even
-  // in case of overflow/underflow.
-  //
-  // Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
-#ifdef VM_LITTLE_ENDIAN
-  assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
-  jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest-1));
-#else
-  assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
-  jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest));
-#endif
-  return (jshort)(new_value >> 16); // preserves sign
-}
-
 inline void Atomic::inc(volatile jshort* dest) {
-  (void)add(1, dest);
+  (void)add(jshort(1), dest);
 }
 
 inline void Atomic::dec(volatile jshort* dest) {
-  (void)add(-1, dest);
+  (void)add(jshort(-1), dest);
 }
 
 #endif // SHARE_VM_RUNTIME_ATOMIC_HPP
--- a/src/share/vm/runtime/deoptimization.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/runtime/deoptimization.cpp	Tue Sep 26 13:09:56 2017 +0200
@@ -1453,6 +1453,30 @@
 
 }
 
+#if INCLUDE_JVMCI
+address Deoptimization::deoptimize_for_missing_exception_handler(CompiledMethod* cm) {
+  // there is no exception handler for this pc => deoptimize
+  cm->make_not_entrant();
+
+  // Use Deoptimization::deoptimize for all of its side-effects:
+  // revoking biases of monitors, gathering traps statistics, logging...
+  // it also patches the return pc but we do not care about that
+  // since we return a continuation to the deopt_blob below.
+  JavaThread* thread = JavaThread::current();
+  RegisterMap reg_map(thread, UseBiasedLocking);
+  frame runtime_frame = thread->last_frame();
+  frame caller_frame = runtime_frame.sender(&reg_map);
+  assert(caller_frame.cb()->as_nmethod_or_null() == cm, "expect top frame nmethod");
+  Deoptimization::deoptimize(thread, caller_frame, &reg_map, Deoptimization::Reason_not_compiled_exception_handler);
+
+  MethodData* trap_mdo = get_method_data(thread, cm->method(), true);
+  if (trap_mdo != NULL) {
+    trap_mdo->inc_trap_count(Deoptimization::Reason_not_compiled_exception_handler);
+  }
+
+  return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
+}
+#endif
 
 void Deoptimization::deoptimize_frame_internal(JavaThread* thread, intptr_t* id, DeoptReason reason) {
   assert(thread == Thread::current() || SafepointSynchronize::is_at_safepoint(),
--- a/src/share/vm/runtime/deoptimization.hpp	Fri Sep 22 13:51:12 2017 +0200
+++ b/src/share/vm/runtime/deoptimization.hpp	Tue Sep 26 13:09:56 2017 +0200
@@ -136,6 +136,10 @@
   static void deoptimize(JavaThread* thread, frame fr, RegisterMap *reg_map);
   static void deoptimize(JavaThread* thread, frame fr, RegisterMap *reg_map, DeoptReason reason);
 
+#if INCLUDE_JVMCI
+  static address deoptimize_for_missing_exception_handler(CompiledMethod* cm);
+#endif
+
   private:
   // Does the actual work for deoptimizing a single frame
   static void deoptimize_single_frame(JavaThread* thread, frame fr, DeoptReason reason);
--- a/src/share/vm/runtime/fprofiler.cpp	Fri Sep 22 13:51:12 2017 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,1623 +0,0 @@
-/*
- * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/classLoader.hpp"
-#include "code/codeCache.hpp"
-#include "code/vtableStubs.hpp"
-#include "gc/shared/collectedHeap.inline.hpp"
-#include "interpreter/interpreter.hpp"
-#include "memory/allocation.inline.hpp"
-#include "memory/resourceArea.hpp"
-#include "memory/universe.inline.hpp"
-#include "oops/oop.inline.hpp"
-#include "oops/symbol.hpp"
-#include "runtime/deoptimization.hpp"
-#include "runtime/fprofiler.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/stubCodeGenerator.hpp"
-#include "runtime/stubRoutines.hpp"
-#include "runtime/task.hpp"
-#include "runtime/thread.inline.hpp"
-#include "runtime/vframe.hpp"
-#include "utilities/macros.hpp"
-
-// Static fields of FlatProfiler
-int               FlatProfiler::received_gc_ticks   = 0;
-int               FlatProfiler::vm_operation_ticks  = 0;
-int               FlatProfiler::threads_lock_ticks  = 0;
-int               FlatProfiler::class_loader_ticks  = 0;
-int               FlatProfiler::extra_ticks         = 0;
-int               FlatProfiler::blocked_ticks       = 0;
-int               FlatProfiler::deopt_ticks         = 0;
-int               FlatProfiler::unknown_ticks       = 0;
-int               FlatProfiler::interpreter_ticks   = 0;
-int               FlatProfiler::compiler_ticks      = 0;
-int               FlatProfiler::received_ticks      = 0;
-int               FlatProfiler::delivered_ticks     = 0;
-int*              FlatProfiler::bytecode_ticks      = NULL;
-int*              FlatProfiler::bytecode_ticks_stub = NULL;
-int               FlatProfiler::all_int_ticks       = 0;
-int               FlatProfiler::all_comp_ticks      = 0;
-int               FlatProfiler::all_ticks           = 0;
-bool              FlatProfiler::full_profile_flag   = false;
-ThreadProfiler*   FlatProfiler::thread_profiler     = NULL;
-ThreadProfiler*   FlatProfiler::vm_thread_profiler  = NULL;
-FlatProfilerTask* FlatProfiler::task                = NULL;
-elapsedTimer      FlatProfiler::timer;
-int               FlatProfiler::interval_ticks_previous = 0;
-IntervalData*     FlatProfiler::interval_data       = NULL;
-
-ThreadProfiler::ThreadProfiler() {
-  // Space for the ProfilerNodes
-  const int area_size = 1 * ProfilerNodeSize * 1024;
-  area_bottom = AllocateHeap(area_size, mtInternal);
-  area_top    = area_bottom;
-  area_limit  = area_bottom + area_size;
-
-  // ProfilerNode pointer table
-  table = NEW_C_HEAP_ARRAY(ProfilerNode*, table_size, mtInternal);
-  initialize();
-  engaged = false;
-}
-
-ThreadProfiler::~ThreadProfiler() {
-  FreeHeap(area_bottom);
-  area_bottom = NULL;
-  area_top = NULL;
-  area_limit = NULL;
-  FreeHeap(table);
-  table = NULL;
-}
-
-// Statics for ThreadProfiler
-int ThreadProfiler::table_size = 1024;
-
-int ThreadProfiler::entry(int  value) {
-  value = (value > 0) ? value : -value;
-  return value % table_size;
-}
-
-ThreadProfilerMark::ThreadProfilerMark(ThreadProfilerMark::Region r) {
-  _r = r;
-  _pp = NULL;
-  assert(((r > ThreadProfilerMark::noRegion) && (r < ThreadProfilerMark::maxRegion)), "ThreadProfilerMark::Region out of bounds");
-  Thread* tp = Thread::current();
-  if (tp != NULL && tp->is_Java_thread()) {
-    JavaThread* jtp = (JavaThread*) tp;
-    ThreadProfiler* pp = jtp->get_thread_profiler();
-    _pp = pp;
-    if (pp != NULL) {
-      pp->region_flag[r] = true;
-    }
-  }
-}
-
-ThreadProfilerMark::~ThreadProfilerMark() {
-  if (_pp != NULL) {
-    _pp->region_flag[_r] = false;
-  }
-  _pp = NULL;
-}
-
-// Random other statics
-static const int col1 = 2;      // position of output column 1
-static const int col2 = 11;     // position of output column 2
-static const int col3 = 25;     // position of output column 3
-static const int col4 = 55;     // position of output column 4
-
-
-// Used for detailed profiling of nmethods.
-class PCRecorder : AllStatic {
- private:
-  static int*    counters;
-  static address base;
-  enum {
-   bucket_size = 16
-  };
-  static int     index_for(address pc) { return (pc - base)/bucket_size;   }
-  static address pc_for(int index)     { return base + (index * bucket_size); }
-  static int     size() {
-    return ((int)CodeCache::max_capacity())/bucket_size * BytesPerWord;
-  }
- public:
-  static address bucket_start_for(address pc) {
-    if (counters == NULL) return NULL;
-    return pc_for(index_for(pc));
-  }
-  static int bucket_count_for(address pc)  { return counters[index_for(pc)]; }
-  static void init();
-  static void record(address pc);
-  static void print();
-  static void print_blobs(CodeBlob* cb);
-};
-
-int*    PCRecorder::counters = NULL;
-address PCRecorder::base     = NULL;
-
-void PCRecorder::init() {
-  MutexLockerEx lm(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-  int s = size();
-  counters = NEW_C_HEAP_ARRAY(int, s, mtInternal);
-  for (int index = 0; index < s; index++) {
-    counters[index] = 0;
-  }
-  base = CodeCache::low_bound();
-}
-
-void PCRecorder::record(address pc) {
-  if (counters == NULL) return;
-  assert(CodeCache::contains(pc), "must be in CodeCache");
-  counters[index_for(pc)]++;
-}
-
-
-address FlatProfiler::bucket_start_for(address pc) {
-  return PCRecorder::bucket_start_for(pc);
-}
-
-int FlatProfiler::bucket_count_for(address pc) {
-  return PCRecorder::bucket_count_for(pc);
-}
-
-void PCRecorder::print() {
-  if (counters == NULL) return;
-
-  tty->cr();
-  tty->print_cr("Printing compiled methods with PC buckets having more than " INTX_FORMAT " ticks", ProfilerPCTickThreshold);
-  tty->print_cr("===================================================================");
-  tty->cr();
-
-  GrowableArray<CodeBlob*>* candidates = new GrowableArray<CodeBlob*>(20);
-
-
-  int s;
-  {
-    MutexLockerEx lm(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    s = size();
-  }
-
-  for (int index = 0; index < s; index++) {
-    int count = counters[index];
-    if (count > ProfilerPCTickThreshold) {
-      address pc = pc_for(index);
-      CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
-      if (cb != NULL && candidates->find(cb) < 0) {
-        candidates->push(cb);
-      }
-    }
-  }
-  for (int i = 0; i < candidates->length(); i++) {
-    print_blobs(candidates->at(i));
-  }
-}
-
-void PCRecorder::print_blobs(CodeBlob* cb) {
-  if (cb != NULL) {
-    cb->print();
-    if (cb->is_nmethod()) {
-      ((nmethod*)cb)->print_code();
-    }
-    tty->cr();
-  } else {
-    tty->print_cr("stub code");
-  }
-}
-
-class tick_counter {            // holds tick info for one node
- public:
-  int ticks_in_code;
-  int ticks_in_native;
-
-  tick_counter()                     {  ticks_in_code = ticks_in_native = 0; }
-  tick_counter(int code, int native) {  ticks_in_code = code; ticks_in_native = native; }
-
-  int total() const {
-    return (ticks_in_code + ticks_in_native);
-  }
-
-  void add(tick_counter* a) {
-    ticks_in_code += a->ticks_in_code;
-    ticks_in_native += a->ticks_in_native;
-  }
-
-  void update(TickPosition where) {
-    switch(where) {
-      case tp_code:     ticks_in_code++;       break;
-      case tp_native:   ticks_in_native++;      break;
-    }
-  }
-
-  void print_code(outputStream* st, int total_ticks) {
-    st->print("%5.1f%% %5d ", total() * 100.0 / total_ticks, ticks_in_code);
-  }
-
-  void print_native(outputStream* st) {
-    st->print(" + %5d ", ticks_in_native);
-  }
-};
-
-class ProfilerNode {
- private:
-  ProfilerNode* _next;
- public:
-  tick_counter ticks;
-
- public:
-
-  void* operator new(size_t size, ThreadProfiler* tp) throw();
-  void  operator delete(void* p);
-
-  ProfilerNode() {
-    _next = NULL;
-  }
-
-  virtual ~ProfilerNode() {
-    if (_next)
-      delete _next;
-  }
-
-  void set_next(ProfilerNode* n) { _next = n; }
-  ProfilerNode* next()           { return _next; }
-
-  void update(TickPosition where) { ticks.update(where);}
-  int total_ticks() { return ticks.total(); }
-
-  virtual bool is_interpreted() const { return false; }
-  virtual bool is_compiled()    const { return false; }
-  virtual bool is_stub()        const { return false; }
-  virtual bool is_runtime_stub() const{ return false; }
-  virtual void oops_do(OopClosure* f) = 0;
-
-  virtual bool interpreted_match(Method* m) const { return false; }
-  virtual bool compiled_match(Method* m ) const { return false; }
-  virtual bool stub_match(Method* m, const char* name) const { return false; }
-  virtual bool adapter_match() const { return false; }
-  virtual bool runtimeStub_match(const CodeBlob* stub, const char* name) const { return false; }
-  virtual bool unknown_compiled_match(const CodeBlob* cb) const { return false; }
-
-  static void print_title(outputStream* st) {
-    st->print(" + native");
-    st->fill_to(col3);
-    st->print("Method");
-    st->fill_to(col4);
-    st->cr();
-  }
-
-  static void print_total(outputStream* st, tick_counter* t, int total, const char* msg) {
-    t->print_code(st, total);
-    st->fill_to(col2);
-    t->print_native(st);
-    st->fill_to(col3);
-    st->print("%s", msg);
-    st->cr();
-  }
-
-  virtual Method* method()         = 0;
-
-  virtual void print_method_on(outputStream* st) {
-    int limit;
-    int i;
-    Method* m = method();
-    Symbol* k = m->klass_name();
-    // Print the class name with dots instead of slashes
-    limit = k->utf8_length();
-    for (i = 0 ; i < limit ; i += 1) {
-      char c = (char) k->byte_at(i);
-      if (c == '/') {
-        c = '.';
-      }
-      st->print("%c", c);
-    }
-    if (limit > 0) {
-      st->print(".");
-    }
-    Symbol* n = m->name();
-    limit = n->utf8_length();
-    for (i = 0 ; i < limit ; i += 1) {
-      char c = (char) n->byte_at(i);
-      st->print("%c", c);
-    }
-    if (Verbose || WizardMode) {
-      // Disambiguate overloaded methods
-      Symbol* sig = m->signature();
-      sig->print_symbol_on(st);
-    } else if (MethodHandles::is_signature_polymorphic(m->intrinsic_id()))
-      // compare with Method::print_short_name
-      MethodHandles::print_as_basic_type_signature_on(st, m->signature(), true);
-  }
-
-  virtual void print(outputStream* st, int total_ticks) {
-    ticks.print_code(st, total_ticks);
-    st->fill_to(col2);
-    ticks.print_native(st);
-    st->fill_to(col3);
-    print_method_on(st);
-    st->cr();
-  }
-
-  // for hashing into the table
-  static int hash(Method* method) {
-      // The point here is to try to make something fairly unique
-      // out of the fields we can read without grabbing any locks
-      // since the method may be locked when we need the hash.
-      return (
-          method->code_size() ^
-          method->max_stack() ^
-          method->max_locals() ^
-          method->size_of_parameters());
-  }
-
-  // for sorting
-  static int compare(ProfilerNode** a, ProfilerNode** b) {
-    return (*b)->total_ticks() - (*a)->total_ticks();
-  }
-};
-
-void* ProfilerNode::operator new(size_t size, ThreadProfiler* tp) throw() {
-  void* result = (void*) tp->area_top;
-  tp->area_top += size;
-
-  if (tp->area_top > tp->area_limit) {
-    fatal("flat profiler buffer overflow");
-  }
-  return result;
-}
-
-void ProfilerNode::operator delete(void* p){
-}
-
-class interpretedNode : public ProfilerNode {
- private:
-   Method* _method;
-   oop       _class_loader;  // needed to keep metadata for the method alive
- public:
-   interpretedNode(Method* method, TickPosition where) : ProfilerNode() {
-     _method = method;
-     _class_loader = method->method_holder()->class_loader();
-     update(where);
-   }
-
-   bool is_interpreted() const { return true; }
-
-   bool interpreted_match(Method* m) const {
-      return _method == m;
-   }
-
-   void oops_do(OopClosure* f) {
-     f->do_oop(&_class_loader);
-   }
-
-   Method* method() { return _method; }
-
-   static void print_title(outputStream* st) {
-     st->fill_to(col1);
-     st->print("%11s", "Interpreted");
-     ProfilerNode::print_title(st);
-   }
-
-   void print(outputStream* st, int total_ticks) {
-     ProfilerNode::print(st, total_ticks);
-   }
-
-   void print_method_on(outputStream* st) {
-     ProfilerNode::print_method_on(st);
-     MethodCounters* mcs = method()->method_counters();
-     if (Verbose && mcs != NULL) mcs->invocation_counter()->print_short();
-   }
-};
-
-class compiledNode : public ProfilerNode {
- private:
-   Method* _method;
-   oop       _class_loader;  // needed to keep metadata for the method alive
- public:
-   compiledNode(Method* method, TickPosition where) : ProfilerNode() {
-     _method = method;
-     _class_loader = method->method_holder()->class_loader();
-     update(where);
-  }
-  bool is_compiled()    const { return true; }
-
-  bool compiled_match(Method* m) const {
-    return _method == m;
-  }
-
-  Method* method()         { return _method; }
-
-  void oops_do(OopClosure* f) {
-    f->do_oop(&_class_loader);
-  }
-
-  static void print_title(outputStream* st) {
-    st->fill_to(col1);
-    st->print("%11s", "Compiled");
-    ProfilerNode::print_title(st);
-  }
-
-  void print(outputStream* st, int total_ticks) {
-    ProfilerNode::print(st, total_ticks);
-  }
-
-  void print_method_on(outputStream* st) {
-    ProfilerNode::print_method_on(st);
-  }
-};
-
-class stubNode : public ProfilerNode {
- private:
-  Method* _method;
-  oop       _class_loader;  // needed to keep metadata for the method alive
-  const char* _symbol;   // The name of the nearest VM symbol (for +ProfileVM). Points to a unique string
- public:
-   stubNode(Method* method, const char* name, TickPosition where) : ProfilerNode() {
-     _method = method;
-     _class_loader = method->method_holder()->class_loader();
-     _symbol = name;
-     update(where);
-   }
-
-   bool is_stub() const { return true; }
-
-   void oops_do(OopClosure* f) {
-     f->do_oop(&_class_loader);
-   }
-
-   bool stub_match(Method* m, const char* name) const {
-     return (_method == m) && (_symbol == name);
-   }
-
-   Method* method() { return _method; }
-
-   static void print_title(outputStream* st) {
-     st->fill_to(col1);
-     st->print("%11s", "Stub");
-     ProfilerNode::print_title(st);
-   }
-
-   void print(outputStream* st, int total_ticks) {
-     ProfilerNode::print(st, total_ticks);
-   }
-
-   void print_method_on(outputStream* st) {
-     ProfilerNode::print_method_on(st);
-     print_symbol_on(st);
-   }
-
-  void print_symbol_on(outputStream* st) {
-    if(_symbol) {
-      st->print("  (%s)", _symbol);
-    }
-  }
-};
-
-class adapterNode : public ProfilerNode {
- public:
-   adapterNode(TickPosition where) : ProfilerNode() {
-     update(where);
-  }
-  bool is_compiled()    const { return true; }
-
-  bool adapter_match() const { return true; }
-
-  Method* method()         { return NULL; }
-
-  void oops_do(OopClosure* f) {
-    ;
-  }
-
-  void print(outputStream* st, int total_ticks) {
-    ProfilerNode::print(st, total_ticks);
-  }
-
-  void print_method_on(outputStream* st) {
-    st->print("%s", "adapters");
-  }
-};
-
-class runtimeStubNode : public ProfilerNode {
- private:
-  const RuntimeStub* _stub;
-  const char* _symbol;     // The name of the nearest VM symbol when ProfileVM is on. Points to a unique string.
- public:
-   runtimeStubNode(const CodeBlob* stub, const char* name, TickPosition where) : ProfilerNode(), _stub(NULL),  _symbol(name) {
-     assert(stub->is_runtime_stub(), "wrong code blob");
-     _stub = (RuntimeStub*) stub;
-     update(where);
-   }
-
-  bool is_runtime_stub() const { return true; }
-
-  bool runtimeStub_match(const CodeBlob* stub, const char* name) const {
-    assert(stub->is_runtime_stub(), "wrong code blob");
-    return _stub->entry_point() == ((RuntimeStub*)stub)->entry_point() &&
-            (_symbol == name);
-  }
-
-  Method* method() { return NULL; }
-
-  static void print_title(outputStream* st) {
-    st->fill_to(col1);
-    st->print("%11s", "Runtime stub");
-    ProfilerNode::print_title(st);
-  }
-
-  void oops_do(OopClosure* f) {
-    ;
-  }
-
-  void print(outputStream* st, int total_ticks) {
-    ProfilerNode::print(st, total_ticks);
-  }
-
-  void print_method_on(outputStream* st) {
-    st->print("%s", _stub->name());
-    print_symbol_on(st);
-  }
-
-  void print_symbol_on(outputStream* st) {
-    if(_symbol) {
-      st->print("  (%s)", _symbol);
-    }
-  }
-};
-
-
-class unknown_compiledNode : public ProfilerNode {
- const char *_name;
- public:
-   unknown_compiledNode(const CodeBlob* cb, TickPosition where) : ProfilerNode() {
-     if ( cb->is_buffer_blob() )
-       _name = ((const BufferBlob*)cb)->name();
-     else
-       _name = ((const SingletonBlob*)cb)->name();
-     update(where);
-  }
-  bool is_compiled()    const { return true; }
-
-  bool unknown_compiled_match(const CodeBlob* cb) const {
-     if ( cb->is_buffer_blob() )
-       return !strcmp(((const BufferBlob*)cb)->name(), _name);
-     else
-       return !strcmp(((const SingletonBlob*)cb)->name(), _name);
-  }
-
-  Method* method()         { return NULL; }
-
-  void oops_do(OopClosure* f) {
-    ;
-  }
-
-  void print(outputStream* st, int total_ticks) {
-    ProfilerNode::print(st, total_ticks);
-  }
-
-  void print_method_on(outputStream* st) {
-    st->print("%s", _name);
-  }
-};
-
-class vmNode : public ProfilerNode {
- private:
-  const char* _name; // "optional" name obtained by os means such as dll lookup
- public:
-  vmNode(const TickPosition where) : ProfilerNode() {
-    _name = NULL;
-    update(where);
-  }
-
-  vmNode(const char* name, const TickPosition where) : ProfilerNode() {
-    _name = os::strdup(name);
-    update(where);
-  }
-
-  ~vmNode() {
-    if (_name != NULL) {
-      os::free((void*)_name);
-    }
-  }
-
-  const char *name()    const { return _name; }
-  bool is_compiled()    const { return true; }
-
-  bool vm_match(const char* name) const { return strcmp(name, _name) == 0; }
-
-  Method* method()          { return NULL; }
-
-  static int hash(const char* name){
-    // Compute a simple hash
-    const char* cp = name;
-    int h = 0;
-
-    if(name != NULL){
-      while(*cp != '\0'){
-        h = (h << 1) ^ *cp;
-        cp++;
-      }
-    }
-    return h;
-  }
-
-  void oops_do(OopClosure* f) {
-    ;
-  }
-
-  void print(outputStream* st, int total_ticks) {
-    ProfilerNode::print(st, total_ticks);
-  }
-
-  void print_method_on(outputStream* st) {
-    if(_name==NULL){
-      st->print("%s", "unknown code");
-    }
-    else {
-      st->print("%s", _name);
-    }
-  }
-};
-
-void ThreadProfiler::interpreted_update(Method* method, TickPosition where) {
-  int index = entry(ProfilerNode::hash(method));
-  if (!table[index]) {
-    table[index] = new (this) interpretedNode(method, where);
-  } else {
-    ProfilerNode* prev = table[index];
-    for(ProfilerNode* node = prev; node; node = node->next()) {
-      if (node->interpreted_match(method)) {
-        node->update(where);
-        return;
-      }
-      prev = node;
-    }
-    prev->set_next(new (this) interpretedNode(method, where));
-  }
-}
-
-void ThreadProfiler::compiled_update(Method* method, TickPosition where) {
-  int index = entry(ProfilerNode::hash(method));
-  if (!table[index]) {
-    table[index] = new (this) compiledNode(method, where);
-  } else {
-    ProfilerNode* prev = table[index];
-    for(ProfilerNode* node = prev; node; node = node->next()) {
-      if (node->compiled_match(method)) {
-        node->update(where);
-        return;
-      }
-      prev = node;
-    }
-    prev->set_next(new (this) compiledNode(method, where));
-  }
-}
-
-void ThreadProfiler::stub_update(Method* method, const char* name, TickPosition where) {
-  int index = entry(ProfilerNode::hash(method));
-  if (!table[index]) {
-    table[index] = new (this) stubNode(method, name, where);
-  } else {
-    ProfilerNode* prev = table[index];
-    for(ProfilerNode* node = prev; node; node = node->next()) {
-      if (node->stub_match(method, name)) {
-        node->update(where);
-        return;
-      }
-      prev = node;
-    }
-    prev->set_next(new (this) stubNode(method, name, where));
-  }
-}
-
-void ThreadProfiler::adapter_update(TickPosition where) {
-  int index = 0;
-  if (!table[index]) {
-    table[index] = new (this) adapterNode(where);
-  } else {
-    ProfilerNode* prev = table[index];
-    for(ProfilerNode* node = prev; node; node = node->next()) {
-      if (node->adapter_match()) {
-        node->update(where);
-        return;
-      }
-      prev = node;
-    }
-    prev->set_next(new (this) adapterNode(where));
-  }
-}
-
-void ThreadProfiler::runtime_stub_update(const CodeBlob* stub, const char* name, TickPosition where) {
-  int index = 0;
-  if (!table[index]) {
-    table[index] = new (this) runtimeStubNode(stub, name, where);
-  } else {
-    ProfilerNode* prev = table[index];
-    for(ProfilerNode* node = prev; node; node = node->next()) {
-      if (node->runtimeStub_match(stub, name)) {
-        node->update(where);
-        return;
-      }
-      prev = node;
-    }
-    prev->set_next(new (this) runtimeStubNode(stub, name, where));
-  }
-}
-
-
-void ThreadProfiler::unknown_compiled_update(const CodeBlob* cb, TickPosition where) {
-  int index = 0;
-  if (!table[index]) {
-    table[index] = new (this) unknown_compiledNode(cb, where);
-  } else {
-    ProfilerNode* prev = table[index];
-    for(ProfilerNode* node = prev; node; node = node->next()) {
-      if (node->unknown_compiled_match(cb)) {
-        node->update(where);
-        return;
-      }
-      prev = node;
-    }
-    prev->set_next(new (this) unknown_compiledNode(cb, where));
-  }
-}
-
-void ThreadProfiler::vm_update(TickPosition where) {
-  vm_update(NULL, where);
-}
-
-void ThreadProfiler::vm_update(const char* name, TickPosition where) {
-  int index = entry(vmNode::hash(name));
-  assert(index >= 0, "Must be positive");
-  // Note that we call strdup below since the symbol may be resource allocated
-  if (!table[index]) {
-    table[index] = new (this) vmNode(name, where);
-  } else {
-    ProfilerNode* prev = table[index];
-    for(ProfilerNode* node = prev; node; node = node->next()) {
-      if (((vmNode *)node)->vm_match(name)) {
-        node->update(where);
-        return;
-      }
-      prev = node;
-    }
-    prev->set_next(new (this) vmNode(name, where));
-  }
-}
-
-
-class FlatProfilerTask : public PeriodicTask {
-public:
-  FlatProfilerTask(int interval_time) : PeriodicTask(interval_time) {}
-  void task();
-};
-
-void FlatProfiler::record_vm_operation() {
-  if (Universe::heap()->is_gc_active()) {
-    FlatProfiler::received_gc_ticks += 1;
-    return;
-  }
-
-  if (DeoptimizationMarker::is_active()) {
-    FlatProfiler::deopt_ticks += 1;
-    return;
-  }
-
-  FlatProfiler::vm_operation_ticks += 1;
-}
-
-void FlatProfiler::record_vm_tick() {
-  // Profile the VM Thread itself if needed
-  // This is done without getting the Threads_lock and we can go deep
-  // inside Safepoint, etc.
-  if( ProfileVM  ) {
-    ResourceMark rm;
-    ExtendedPC epc;
-    const char *name = NULL;
-    char buf[256];
-    buf[0] = '\0';
-
-    vm_thread_profiler->inc_thread_ticks();
-
-    // Get a snapshot of a current VMThread pc (and leave it running!)
-    // The call may fail in some circumstances
-    epc = os::get_thread_pc(VMThread::vm_thread());
-    if(epc.pc() != NULL) {
-      if (os::dll_address_to_function_name(epc.pc(), buf, sizeof(buf), NULL)) {
-         name = buf;
-      }
-    }
-    if (name != NULL) {
-      vm_thread_profiler->vm_update(name, tp_native);
-    }
-  }
-}
-
-void FlatProfiler::record_thread_ticks() {
-
-  int maxthreads, suspendedthreadcount;
-  JavaThread** threadsList;
-  bool interval_expired = false;
-
-  if (ProfileIntervals &&
-      (FlatProfiler::received_ticks >= interval_ticks_previous + ProfileIntervalsTicks)) {
-    interval_expired = true;
-    interval_ticks_previous = FlatProfiler::received_ticks;
-  }
-
-  // Try not to wait for the Threads_lock
-  if (Threads_lock->try_lock()) {
-    {  // Threads_lock scope
-      maxthreads = Threads::number_of_threads();
-      threadsList = NEW_C_HEAP_ARRAY(JavaThread *, maxthreads, mtInternal);
-      suspendedthreadcount = 0;
-      for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) {
-        if (tp->is_Compiler_thread()) {
-          // Only record ticks for active compiler threads
-          CompilerThread* cthread = (CompilerThread*)tp;
-          if (cthread->task() != NULL) {
-            // The compiler is active.  If we need to access any of the fields
-            // of the compiler task we should suspend the CompilerThread first.
-            FlatProfiler::compiler_ticks += 1;
-            continue;
-          }
-        }
-
-        // First externally suspend all threads by marking each for
-        // external suspension - so it will stop at its next transition
-        // Then do a safepoint
-        ThreadProfiler* pp = tp->get_thread_profiler();
-        if (pp != NULL && pp->engaged) {
-          MutexLockerEx ml(tp->SR_lock(), Mutex::_no_safepoint_check_flag);
-          if (!tp->is_external_suspend() && !tp->is_exiting()) {
-            tp->set_external_suspend();
-            threadsList[suspendedthreadcount++] = tp;
-          }
-        }
-      }
-      Threads_lock->unlock();
-    }
-    // Suspend each thread. This call should just return
-    // for any threads that have already self-suspended
-    // Net result should be one safepoint
-    for (int j = 0; j < suspendedthreadcount; j++) {
-      JavaThread *tp = threadsList[j];
-      if (tp) {
-        tp->java_suspend();
-      }
-    }
-
-    // We are responsible for resuming any thread on this list
-    for (int i = 0; i < suspendedthreadcount; i++) {
-      JavaThread *tp = threadsList[i];
-      if (tp) {
-        ThreadProfiler* pp = tp->get_thread_profiler();
-        if (pp != NULL && pp->engaged) {
-          HandleMark hm;
-          FlatProfiler::delivered_ticks += 1;
-          if (interval_expired) {
-          FlatProfiler::interval_record_thread(pp);
-          }
-          // This is the place where we check to see if a user thread is
-          // blocked waiting for compilation.
-          if (tp->blocked_on_compilation()) {
-            pp->compiler_ticks += 1;
-            pp->interval_data_ref()->inc_compiling();
-          } else {
-            pp->record_tick(tp);
-          }
-        }
-        MutexLocker ml(Threads_lock);
-        tp->java_resume();
-      }
-    }
-    if (interval_expired) {
-      FlatProfiler::interval_print();
-      FlatProfiler::interval_reset();
-    }
-
-    FREE_C_HEAP_ARRAY(JavaThread *, threadsList);
-  } else {
-    // Couldn't get the threads lock, just record that rather than blocking
-    FlatProfiler::threads_lock_ticks += 1;
-  }
-
-}
-
-void FlatProfilerTask::task() {
-  FlatProfiler::received_ticks += 1;
-
-  if (ProfileVM) {
-    FlatProfiler::record_vm_tick();
-  }
-
-  VM_Operation* op = VMThread::vm_operation();
-  if (op != NULL) {
-    FlatProfiler::record_vm_operation();
-    if (SafepointSynchronize::is_at_safepoint()) {
-      return;
-    }
-  }
-  FlatProfiler::record_thread_ticks();
-}
-
-void ThreadProfiler::record_interpreted_tick(JavaThread* thread, frame fr, TickPosition where, int* ticks) {
-  FlatProfiler::all_int_ticks++;
-  if (!FlatProfiler::full_profile()) {
-    return;
-  }
-
-  if (!fr.is_interpreted_frame_valid(thread)) {
-    // tick came at a bad time
-    interpreter_ticks += 1;
-    FlatProfiler::interpreter_ticks += 1;
-    return;
-  }
-
-  // The frame has been fully validated so we can trust the method and bci
-
-  Method* method = *fr.interpreter_frame_method_addr();
-
-  interpreted_update(method, where);
-
-  // update byte code table
-  InterpreterCodelet* desc = Interpreter::codelet_containing(fr.pc());
-  if (desc != NULL && desc->bytecode() >= 0) {
-    ticks[desc->bytecode()]++;
-  }
-}
-
-void ThreadProfiler::record_compiled_tick(JavaThread* thread, frame fr, TickPosition where) {
-  const char *name = NULL;
-  TickPosition localwhere = where;
-
-  FlatProfiler::all_comp_ticks++;
-  if (!FlatProfiler::full_profile()) return;
-
-  CodeBlob* cb = fr.cb();
-
-  // For runtime stubs, record as native rather than as compiled
-  if (cb->is_runtime_stub()) {
-    RegisterMap map(thread, false);
-    fr = fr.sender(&map);
-    cb = fr.cb();
-    localwhere = tp_native;
- }
-
-  Method* method = cb->is_compiled() ? cb->as_compiled_method()->method() : (Method*) NULL;
-  if (method == NULL) {
-    if (cb->is_runtime_stub())
-      runtime_stub_update(cb, name, localwhere);
-    else
-      unknown_compiled_update(cb, localwhere);
-  }
-  else {
-    if (method->is_native()) {
-      stub_update(method, name, localwhere);
-    } else {
-      compiled_update(method, localwhere);
-    }
-  }
-}
-
-extern "C" void find(int x);
-
-
-void ThreadProfiler::record_tick_for_running_frame(JavaThread* thread, frame fr) {
-  // The tick happened in real code -> non VM code
-  if (fr.is_interpreted_frame()) {
-    interval_data_ref()->inc_interpreted();
-    record_interpreted_tick(thread, fr, tp_code, FlatProfiler::bytecode_ticks);
-    return;
-  }
-
-  if (CodeCache::contains(fr.pc())) {
-    interval_data_ref()->inc_compiled();
-    PCRecorder::record(fr.pc());
-    record_compiled_tick(thread, fr, tp_code);
-    return;
-  }
-
-  if (VtableStubs::stub_containing(fr.pc()) != NULL) {
-    unknown_ticks_array[ut_vtable_stubs] += 1;
-    return;
-  }
-
-  frame caller = fr.profile_find_Java_sender_frame(thread);
-
-  if (caller.sp() != NULL && caller.pc() != NULL) {
-    record_tick_for_calling_frame(thread, caller);
-    return;
-  }
-
-  unknown_ticks_array[ut_running_frame] += 1;
-  FlatProfiler::unknown_ticks += 1;
-}
-
-void ThreadProfiler::record_tick_for_calling_frame(JavaThread* thread, frame fr) {
-  // The tick happened in VM code
-  interval_data_ref()->inc_native();
-  if (fr.is_interpreted_frame()) {
-    record_interpreted_tick(thread, fr, tp_native, FlatProfiler::bytecode_ticks_stub);
-    return;
-  }
-  if (CodeCache::contains(fr.pc())) {
-    record_compiled_tick(thread, fr, tp_native);
-    return;
-  }
-
-  frame caller = fr.profile_find_Java_sender_frame(thread);
-
-  if (caller.sp() != NULL && caller.pc() != NULL) {
-    record_tick_for_calling_frame(thread, caller);
-    return;
-  }
-
-  unknown_ticks_array[ut_calling_frame] += 1;
-  FlatProfiler::unknown_ticks += 1;
-}
-
-void ThreadProfiler::record_tick(JavaThread* thread) {
-  FlatProfiler::all_ticks++;
-  thread_ticks += 1;
-
-  // Here's another way to track global state changes.
-  // When the class loader starts it marks the ThreadProfiler to tell it it is in the class loader
-  // and we check that here.
-  // This is more direct, and more than one thread can be in the class loader at a time,
-  // but it does mean the class loader has to know about the profiler.
-  if (region_flag[ThreadProfilerMark::classLoaderRegion]) {
-    class_loader_ticks += 1;
-    FlatProfiler::class_loader_ticks += 1;
-    return;
-  } else if (region_flag[ThreadProfilerMark::extraRegion]) {
-    extra_ticks += 1;
-    FlatProfiler::extra_ticks += 1;
-    return;
-  }
-  // Note that the WatcherThread can now stop for safepoints
-  uint32_t debug_bits = 0;
-  if (!thread->wait_for_ext_suspend_completion(SuspendRetryCount,
-      SuspendRetryDelay, &debug_bits)) {
-    unknown_ticks_array[ut_unknown_thread_state] += 1;
-    FlatProfiler::unknown_ticks += 1;
-    return;
-  }
-
-  frame fr;
-
-  switch (thread->thread_state()) {
-  case _thread_in_native:
-  case _thread_in_native_trans:
-  case _thread_in_vm:
-  case _thread_in_vm_trans:
-    if (thread->profile_last_Java_frame(&fr)) {
-      if (fr.is_runtime_frame()) {
-        RegisterMap map(thread, false);
-        fr = fr.sender(&map);
-      }
-      record_tick_for_calling_frame(thread, fr);
-    } else {
-      unknown_ticks_array[ut_no_last_Java_frame] += 1;
-      FlatProfiler::unknown_ticks += 1;
-    }
-    break;
-  // handle_special_runtime_exit_condition self-suspends threads in Java
-  case _thread_in_Java:
-  case _thread_in_Java_trans:
-    if (thread->profile_last_Java_frame(&fr)) {
-      if (fr.is_safepoint_blob_frame()) {
-        RegisterMap map(thread, false);
-        fr = fr.sender(&map);
-      }
-      record_tick_for_running_frame(thread, fr);
-    } else {
-      unknown_ticks_array[ut_no_last_Java_frame] += 1;
-      FlatProfiler::unknown_ticks += 1;
-    }
-    break;
-  case _thread_blocked:
-  case _thread_blocked_trans:
-    if (thread->osthread() && thread->osthread()->get_state() == RUNNABLE) {
-        if (thread->profile_last_Java_frame(&fr)) {
-          if (fr.is_safepoint_blob_frame()) {
-            RegisterMap map(thread, false);
-            fr = fr.sender(&map);
-            record_tick_for_running_frame(thread, fr);
-          } else {
-            record_tick_for_calling_frame(thread, fr);
-          }
-        } else {
-          unknown_ticks_array[ut_no_last_Java_frame] += 1;
-          FlatProfiler::unknown_ticks += 1;
-        }
-    } else {
-          blocked_ticks += 1;
-          FlatProfiler::blocked_ticks += 1;
-    }
-    break;
-  case _thread_uninitialized:
-  case _thread_new:
-  // not used, included for completeness
-  case _thread_new_trans:
-     unknown_ticks_array[ut_no_last_Java_frame] += 1;
-     FlatProfiler::unknown_ticks += 1;
-     break;
-  default:
-    unknown_ticks_array[ut_unknown_thread_state] += 1;
-    FlatProfiler::unknown_ticks += 1;
-    break;
-  }
-  return;
-}
-
-void ThreadProfiler::engage() {
-  engaged = true;
-  timer.start();
-}
-
-void ThreadProfiler::disengage() {
-  engaged = false;
-  timer.stop();
-}
-
-void ThreadProfiler::initialize() {
-  for (int index = 0; index < table_size; index++) {
-    table[index] = NULL;
-  }
-  thread_ticks = 0;
-  blocked_ticks = 0;
-  compiler_ticks = 0;
-  interpreter_ticks = 0;
-  for (int ut = 0; ut < ut_end; ut += 1) {
-    unknown_ticks_array[ut] = 0;
-  }
-  region_flag[ThreadProfilerMark::classLoaderRegion] = false;
-  class_loader_ticks = 0;
-  region_flag[ThreadProfilerMark::extraRegion] = false;
-  extra_ticks = 0;
-  timer.start();
-  interval_data_ref()->reset();
-}
-
-void ThreadProfiler::reset() {
-  timer.stop();
-  if (table != NULL) {
-    for (int index = 0; index < table_size; index++) {
-      ProfilerNode* n = table[index];
-      if (n != NULL) {
-        delete n;
-      }
-    }
-  }
-  initialize();
-}
-
-void FlatProfiler::allocate_table() {
-  { // Bytecode table
-    bytecode_ticks      = NEW_C_HEAP_ARRAY(int, Bytecodes::number_of_codes, mtInternal);
-    bytecode_ticks_stub = NEW_C_HEAP_ARRAY(int, Bytecodes::number_of_codes, mtInternal);
-    for(int index = 0; index < Bytecodes::number_of_codes; index++) {
-      bytecode_ticks[index]      = 0;
-      bytecode_ticks_stub[index] = 0;
-    }
-  }
-
-  if (ProfilerRecordPC) PCRecorder::init();
-
-  interval_data         = NEW_C_HEAP_ARRAY(IntervalData, interval_print_size, mtInternal);
-  FlatProfiler::interval_reset();
-}
-
-void FlatProfiler::engage(JavaThread* mainThread, bool fullProfile) {
-  full_profile_flag = fullProfile;
-  if (bytecode_ticks == NULL) {
-    allocate_table();
-  }
-  if(ProfileVM && (vm_thread_profiler == NULL)){
-    vm_thread_profiler = new ThreadProfiler();
-  }
-  if (task == NULL) {
-    task = new FlatProfilerTask(WatcherThread::delay_interval);
-    task->enroll();
-  }
-  timer.start();
-  if (mainThread != NULL) {
-    // When mainThread was created, it might not have a ThreadProfiler
-    ThreadProfiler* pp = mainThread->get_thread_profiler();
-    if (pp == NULL) {
-      mainThread->set_thread_profiler(new ThreadProfiler());
-    } else {
-      pp->reset();
-    }
-    mainThread->get_thread_profiler()->engage();
-  }
-  // This is where we would assign thread_profiler
-  // if we wanted only one thread_profiler for all threads.
-  thread_profiler = NULL;
-}
-
-void FlatProfiler::disengage() {
-  if (!task) {
-    return;
-  }
-  timer.stop();
-  task->disenroll();
-  delete task;
-  task = NULL;
-  if (thread_profiler != NULL) {
-    thread_profiler->disengage();
-  } else {
-    MutexLocker tl(Threads_lock);
-    for (JavaThread* tp = Threads::first(); tp != NULL; tp = tp->next()) {
-      ThreadProfiler* pp = tp->get_thread_profiler();
-      if (pp != NULL) {
-        pp->disengage();
-      }
-    }
-  }
-}
-
-void FlatProfiler::reset() {
-  if (task) {
-    disengage();
-  }
-
-  class_loader_ticks = 0;
-  extra_ticks        = 0;
-  received_gc_ticks  = 0;
-  vm_operation_ticks = 0;
-  compiler_ticks     = 0;
-  deopt_ticks        = 0;
-  interpreter_ticks  = 0;
-  blocked_ticks      = 0;
-  unknown_ticks      = 0;
-  received_ticks     = 0;
-  delivered_ticks    = 0;
-  timer.stop();
-}
-
-bool FlatProfiler::is_active() {
-  return task != NULL;
-}
-
-void FlatProfiler::print_byte_code_statistics() {
-  GrowableArray <ProfilerNode*>* array = new GrowableArray<ProfilerNode*>(200);
-
-  tty->print_cr(" Bytecode ticks:");
-  for (int index = 0; index < Bytecodes::number_of_codes; index++) {
-    if (FlatProfiler::bytecode_ticks[index] > 0 || FlatProfiler::bytecode_ticks_stub[index] > 0) {
-      tty->print_cr("  %4d %4d = %s",
-        FlatProfiler::bytecode_ticks[index],
-        FlatProfiler::bytecode_ticks_stub[index],
-        Bytecodes::name( (Bytecodes::Code) index));
-    }
-  }
-  tty->cr();
-}
-
-void print_ticks(const char* title, int ticks, int total) {
-  if (ticks > 0) {
-    tty->print("%5.1f%% %5d", ticks * 100.0 / total, ticks);
-    tty->fill_to(col3);
-    tty->print("%s", title);
-    tty->cr();
-  }
-}
-
-void ThreadProfiler::print(const char* thread_name) {
-  ResourceMark rm;
-  MutexLocker ppl(ProfilePrint_lock);
-  int index = 0; // Declared outside for loops for portability
-
-  if (table == NULL) {
-    return;
-  }
-
-  if (thread_ticks <= 0) {
-    return;
-  }
-
-  const char* title = "too soon to tell";
-  double secs = timer.seconds();
-
-  GrowableArray <ProfilerNode*>* array = new GrowableArray<ProfilerNode*>(200);
-  for(index = 0; index < table_size; index++) {
-    for(ProfilerNode* node = table[index]; node; node = node->next())
-      array->append(node);
-  }
-
-  array->sort(&ProfilerNode::compare);
-
-  // compute total (sanity check)
-  int active =
-    class_loader_ticks +
-    compiler_ticks +
-    interpreter_ticks +
-    unknown_ticks();
-  for (index = 0; index < array->length(); index++) {
-    active += array->at(index)->ticks.total();
-  }
-  int total = active + blocked_ticks;
-
-  tty->cr();
-  tty->print_cr("Flat profile of %3.2f secs (%d total ticks): %s", secs, total, thread_name);
-  if (total != thread_ticks) {
-    print_ticks("Lost ticks", thread_ticks-total, thread_ticks);
-  }
-  tty->cr();
-
-  // print interpreted methods
-  tick_counter interpreted_ticks;
-  bool has_interpreted_ticks = false;
-  int print_count = 0;
-  for (index = 0; index < array->length(); index++) {
-    ProfilerNode* n = array->at(index);
-    if (n->is_interpreted()) {
-      interpreted_ticks.add(&n->ticks);
-      if (!has_interpreted_ticks) {
-        interpretedNode::print_title(tty);
-        has_interpreted_ticks = true;
-      }
-      if (print_count++ < ProfilerNumberOfInterpretedMethods) {
-        n->print(tty, active);
-      }
-    }
-  }
-  if (has_interpreted_ticks) {
-    if (print_count <= ProfilerNumberOfInterpretedMethods) {
-      title = "Total interpreted";
-    } else {
-      title = "Total interpreted (including elided)";
-    }
-    interpretedNode::print_total(tty, &interpreted_ticks, active, title);
-    tty->cr();
-  }
-
-  // print compiled methods
-  tick_counter compiled_ticks;
-  bool has_compiled_ticks = false;
-  print_count = 0;
-  for (index = 0; index < array->length(); index++) {
-    ProfilerNode* n = array->at(index);
-    if (n->is_compiled()) {
-      compiled_ticks.add(&n->ticks);
-      if (!has_compiled_ticks) {
-        compiledNode::print_title(tty);
-        has_compiled_ticks = true;
-      }
-      if (print_count++ < ProfilerNumberOfCompiledMethods) {
-        n->print(tty, active);
-      }
-    }
-  }
-  if (has_compiled_ticks) {
-    if (print_count <= ProfilerNumberOfCompiledMethods) {
-      title = "Total compiled";
-    } else {
-      title = "Total compiled (including elided)";
-    }
-    compiledNode::print_total(tty, &compiled_ticks, active, title);
-    tty->cr();
-  }
-
-  // print stub methods
-  tick_counter stub_ticks;
-  bool has_stub_ticks = false;
-  print_count = 0;
-  for (index = 0; index < array->length(); index++) {
-    ProfilerNode* n = array->at(index);
-    if (n->is_stub()) {
-      stub_ticks.add(&n->ticks);
-      if (!has_stub_ticks) {
-        stubNode::print_title(tty);
-        has_stub_ticks = true;
-      }
-      if (print_count++ < ProfilerNumberOfStubMethods) {
-        n->print(tty, active);
-      }
-    }
-  }
-  if (has_stub_ticks) {
-    if (print_count <= ProfilerNumberOfStubMethods) {
-      title = "Total stub";
-    } else {
-      title = "Total stub (including elided)";
-    }
-    stubNode::print_total(tty, &stub_ticks, active, title);
-    tty->cr();
-  }
-
-  // print runtime stubs
-  tick_counter runtime_stub_ticks;
-  bool has_runtime_stub_ticks = false;
-  print_count = 0;
-  for (index = 0; index < array->length(); index++) {
-    ProfilerNode* n = array->at(index);
-    if (n->is_runtime_stub()) {
-      runtime_stub_ticks.add(&n->ticks);
-      if (!has_runtime_stub_ticks) {
-        runtimeStubNode::print_title(tty);
-        has_runtime_stub_ticks = true;
-      }
-      if (print_count++ < ProfilerNumberOfRuntimeStubNodes) {
-        n->print(tty, active);
-      }
-    }
-  }
-  if (has_runtime_stub_ticks) {
-    if (print_count <= ProfilerNumberOfRuntimeStubNodes) {
-      title = "Total runtime stubs";
-    } else {
-      title = "Total runtime stubs (including elide