changeset 55148:c6b18dd94973

Merge
author psadhukhan
date Tue, 26 Feb 2019 11:17:12 +0530
parents a986e16d8449 616a32d6b463
children 7f715085caac 17da5f618aaf
files src/hotspot/share/gc/z/zStatTLAB.cpp src/hotspot/share/gc/z/zStatTLAB.hpp src/hotspot/share/oops/array.inline.hpp src/jdk.javadoc/share/classes/com/sun/javadoc/AnnotatedType.java src/jdk.javadoc/share/classes/com/sun/javadoc/AnnotationDesc.java src/jdk.javadoc/share/classes/com/sun/javadoc/AnnotationTypeDoc.java src/jdk.javadoc/share/classes/com/sun/javadoc/AnnotationTypeElementDoc.java src/jdk.javadoc/share/classes/com/sun/javadoc/AnnotationValue.java src/jdk.javadoc/share/classes/com/sun/javadoc/ClassDoc.java src/jdk.javadoc/share/classes/com/sun/javadoc/ConstructorDoc.java src/jdk.javadoc/share/classes/com/sun/javadoc/Doc.java src/jdk.javadoc/share/classes/com/sun/javadoc/DocErrorReporter.java src/jdk.javadoc/share/classes/com/sun/javadoc/Doclet.java src/jdk.javadoc/share/classes/com/sun/javadoc/ExecutableMemberDoc.java src/jdk.javadoc/share/classes/com/sun/javadoc/FieldDoc.java src/jdk.javadoc/share/classes/com/sun/javadoc/LanguageVersion.java src/jdk.javadoc/share/classes/com/sun/javadoc/MemberDoc.java src/jdk.javadoc/share/classes/com/sun/javadoc/MethodDoc.java src/jdk.javadoc/share/classes/com/sun/javadoc/PackageDoc.java src/jdk.javadoc/share/classes/com/sun/javadoc/ParamTag.java src/jdk.javadoc/share/classes/com/sun/javadoc/Parameter.java src/jdk.javadoc/share/classes/com/sun/javadoc/ParameterizedType.java src/jdk.javadoc/share/classes/com/sun/javadoc/ProgramElementDoc.java src/jdk.javadoc/share/classes/com/sun/javadoc/RootDoc.java src/jdk.javadoc/share/classes/com/sun/javadoc/SeeTag.java src/jdk.javadoc/share/classes/com/sun/javadoc/SerialFieldTag.java src/jdk.javadoc/share/classes/com/sun/javadoc/SourcePosition.java src/jdk.javadoc/share/classes/com/sun/javadoc/Tag.java src/jdk.javadoc/share/classes/com/sun/javadoc/ThrowsTag.java src/jdk.javadoc/share/classes/com/sun/javadoc/Type.java src/jdk.javadoc/share/classes/com/sun/javadoc/TypeVariable.java src/jdk.javadoc/share/classes/com/sun/javadoc/WildcardType.java src/jdk.javadoc/share/classes/com/sun/javadoc/package-info.java src/jdk.javadoc/share/classes/com/sun/tools/doclets/standard/Standard.java src/jdk.javadoc/share/classes/com/sun/tools/doclets/standard/package-info.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/Main.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/AbstractTypeImpl.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/AnnotatedTypeImpl.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/AnnotationDescImpl.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/AnnotationTypeDocImpl.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/AnnotationTypeElementDocImpl.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/AnnotationValueImpl.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/ClassDocImpl.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/Comment.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/ConstructorDocImpl.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/DocEnv.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/DocImpl.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/DocLocale.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/DocletInvoker.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/ExecutableMemberDocImpl.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/FieldDocImpl.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/JavaScriptScanner.java 
src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/JavadocClassFinder.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/JavadocEnter.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/JavadocMemberEnter.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/JavadocTodo.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/JavadocTool.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/MemberDocImpl.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/Messager.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/MethodDocImpl.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/ModifierFilter.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/PackageDocImpl.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/ParamTagImpl.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/ParameterImpl.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/ParameterizedTypeImpl.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/PrimitiveType.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/ProgramElementDocImpl.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/RootDocImpl.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/SeeTagImpl.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/SerialFieldTagImpl.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/SerializedForm.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/SourcePositionImpl.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/Start.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/TagImpl.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/ThrowsTagImpl.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/ToolOption.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/TypeMaker.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/TypeVariableImpl.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/main/WildcardTypeImpl.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/package-info.java src/jdk.javadoc/share/classes/com/sun/tools/javadoc/resources/javadoc.properties src/jdk.javadoc/share/classes/com/sun/tools/javadoc/resources/javadoc_ja.properties src/jdk.javadoc/share/classes/com/sun/tools/javadoc/resources/javadoc_zh_CN.properties src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/markup/HtmlConstants.java test/langtools/jdk/javadoc/tool/api/basic/GetTask_DocletClassTest.java test/langtools/tools/javadoc/6176978/T6176978.java test/langtools/tools/javadoc/6176978/X.java test/langtools/tools/javadoc/6227454/Test.java test/langtools/tools/javadoc/6942366/T6942366.java test/langtools/tools/javadoc/6942366/Test.java test/langtools/tools/javadoc/6942366/p/Base.java test/langtools/tools/javadoc/6958836/Test.java test/langtools/tools/javadoc/6964914/Error.java test/langtools/tools/javadoc/6964914/JavacWarning.java test/langtools/tools/javadoc/6964914/JavadocWarning.java test/langtools/tools/javadoc/6964914/Test.java test/langtools/tools/javadoc/6964914/TestStdDoclet.java test/langtools/tools/javadoc/6964914/TestUserDoclet.java test/langtools/tools/javadoc/8025693/Test.java test/langtools/tools/javadoc/8147801/T8147801.java test/langtools/tools/javadoc/8147801/jarsrc/lib/Lib1.java test/langtools/tools/javadoc/8147801/jarsrc/lib/Lib2.java test/langtools/tools/javadoc/8147801/p/Test.java test/langtools/tools/javadoc/AddOpensTest.java 
test/langtools/tools/javadoc/BadOptionsTest.java test/langtools/tools/javadoc/BooleanConst.java test/langtools/tools/javadoc/BreakIteratorWarning.java test/langtools/tools/javadoc/CheckResourceKeys.java test/langtools/tools/javadoc/CompletionError.java test/langtools/tools/javadoc/EncodingTest.java test/langtools/tools/javadoc/FlagsTooEarly.java test/langtools/tools/javadoc/InlineTagsWithBraces.java test/langtools/tools/javadoc/LangVers.java test/langtools/tools/javadoc/MaxWarns.java test/langtools/tools/javadoc/MethodLinks.java test/langtools/tools/javadoc/NoStar.java test/langtools/tools/javadoc/ReleaseOption.java test/langtools/tools/javadoc/ReleaseOptionSource.java test/langtools/tools/javadoc/T4994049/FileWithTabs.java test/langtools/tools/javadoc/T4994049/T4994049.java test/langtools/tools/javadoc/T6968833.java test/langtools/tools/javadoc/XWerror.java test/langtools/tools/javadoc/annotations/annotateMethodsFields/Main.java test/langtools/tools/javadoc/annotations/annotateMethodsFields/expected.out test/langtools/tools/javadoc/annotations/annotateMethodsFields/pkg1/A.java test/langtools/tools/javadoc/annotations/annotateMethodsFields/pkg1/B.java test/langtools/tools/javadoc/annotations/annotateMethodsFields/pkg1/E.java test/langtools/tools/javadoc/annotations/annotatePackage/Main.java test/langtools/tools/javadoc/annotations/annotatePackage/expected.out test/langtools/tools/javadoc/annotations/annotatePackage/pkg1/A.java test/langtools/tools/javadoc/annotations/annotatePackage/pkg1/package-info.java test/langtools/tools/javadoc/annotations/annotatePackage/pkg1/package.html test/langtools/tools/javadoc/annotations/annotatePackage/pkg2/B.java test/langtools/tools/javadoc/annotations/annotatePackage/pkg2/package.html test/langtools/tools/javadoc/annotations/annotateParams/Main.java test/langtools/tools/javadoc/annotations/annotateParams/expected.out test/langtools/tools/javadoc/annotations/annotateParams/pkg1/A.java test/langtools/tools/javadoc/annotations/annotateParams/pkg1/C.java test/langtools/tools/javadoc/annotations/badVals/Main.java test/langtools/tools/javadoc/annotations/badVals/pkg1/A.java test/langtools/tools/javadoc/annotations/defaults/Main.java test/langtools/tools/javadoc/annotations/defaults/expected.out test/langtools/tools/javadoc/annotations/defaults/pkg1/A.java test/langtools/tools/javadoc/annotations/defaults/pkg1/B.java test/langtools/tools/javadoc/annotations/elementTypes/Main.java test/langtools/tools/javadoc/annotations/elementTypes/expected.out test/langtools/tools/javadoc/annotations/elementTypes/pkg1/A.java test/langtools/tools/javadoc/annotations/elementTypes/pkg1/B.java test/langtools/tools/javadoc/annotations/missing/Main.java test/langtools/tools/javadoc/annotations/missing/somepackage/MissingAnnotationClass.java test/langtools/tools/javadoc/annotations/shortcuts/Main.java test/langtools/tools/javadoc/annotations/shortcuts/expected.out test/langtools/tools/javadoc/annotations/shortcuts/pkg1/A.java test/langtools/tools/javadoc/annotations/shortcuts/pkg1/Array.java test/langtools/tools/javadoc/annotations/shortcuts/pkg1/Marker.java test/langtools/tools/javadoc/annotations/shortcuts/pkg1/Value.java test/langtools/tools/javadoc/api/basic/APITest.java test/langtools/tools/javadoc/api/basic/DocletPathTest.java test/langtools/tools/javadoc/api/basic/DocumentationToolLocationTest.java test/langtools/tools/javadoc/api/basic/GetSourceVersionsTest.java test/langtools/tools/javadoc/api/basic/GetTask_DiagListenerTest.java 
test/langtools/tools/javadoc/api/basic/GetTask_DocletClassTest.java test/langtools/tools/javadoc/api/basic/GetTask_FileManagerTest.java test/langtools/tools/javadoc/api/basic/GetTask_FileObjectsTest.java test/langtools/tools/javadoc/api/basic/GetTask_OptionsTest.java test/langtools/tools/javadoc/api/basic/GetTask_WriterTest.java test/langtools/tools/javadoc/api/basic/Task_reuseTest.java test/langtools/tools/javadoc/api/basic/pkg/C.java test/langtools/tools/javadoc/api/basic/taglets/UnderlineTaglet.java test/langtools/tools/javadoc/completionFailure/CompletionFailure.java test/langtools/tools/javadoc/completionFailure/pkg/A.java test/langtools/tools/javadoc/completionFailure/pkg/B.java test/langtools/tools/javadoc/dupOk/DupOk.java test/langtools/tools/javadoc/dupOk/sp1/p/A.java test/langtools/tools/javadoc/dupOk/sp2/p/A.java test/langtools/tools/javadoc/dupOk/sp2/p/B.java test/langtools/tools/javadoc/enum/docComments/Main.java test/langtools/tools/javadoc/enum/docComments/pkg1/Operation.java test/langtools/tools/javadoc/enum/enumType/Main.java test/langtools/tools/javadoc/enum/enumType/expected.out test/langtools/tools/javadoc/enum/enumType/pkg1/QuotablePerson.java test/langtools/tools/javadoc/generics/genericClass/Main.java test/langtools/tools/javadoc/generics/genericClass/expected.out test/langtools/tools/javadoc/generics/genericClass/pkg1/A.java test/langtools/tools/javadoc/generics/genericInnerAndOuter/Main.java test/langtools/tools/javadoc/generics/genericInnerAndOuter/expected.out test/langtools/tools/javadoc/generics/genericInnerAndOuter/pkg1/O.java test/langtools/tools/javadoc/generics/genericInnerAndOuter/pkg1/X.java test/langtools/tools/javadoc/generics/genericInterface/Main.java test/langtools/tools/javadoc/generics/genericInterface/expected.out test/langtools/tools/javadoc/generics/genericInterface/pkg1/A.java test/langtools/tools/javadoc/generics/genericMethod/Main.java test/langtools/tools/javadoc/generics/genericMethod/expected.out test/langtools/tools/javadoc/generics/genericMethod/pkg1/A.java test/langtools/tools/javadoc/generics/genericSuper/Main.java test/langtools/tools/javadoc/generics/genericSuper/expected.out test/langtools/tools/javadoc/generics/genericSuper/pkg1/A.java test/langtools/tools/javadoc/generics/supertypes/Main.java test/langtools/tools/javadoc/generics/supertypes/expected.out test/langtools/tools/javadoc/generics/supertypes/pkg1/A.java test/langtools/tools/javadoc/generics/supertypes/pkg1/B.java test/langtools/tools/javadoc/generics/throwsGeneric/Main.java test/langtools/tools/javadoc/generics/throwsGeneric/expected.out test/langtools/tools/javadoc/generics/throwsGeneric/pkg1/A.java test/langtools/tools/javadoc/generics/tparamCycle/Main.java test/langtools/tools/javadoc/generics/tparamCycle/pkg1/LikeEnum.java test/langtools/tools/javadoc/generics/tparamTagOnMethod/Main.java test/langtools/tools/javadoc/generics/tparamTagOnMethod/expected.out test/langtools/tools/javadoc/generics/tparamTagOnMethod/pkg1/A.java test/langtools/tools/javadoc/generics/tparamTagOnType/Main.java test/langtools/tools/javadoc/generics/tparamTagOnType/expected.out test/langtools/tools/javadoc/generics/tparamTagOnType/pkg1/A.java test/langtools/tools/javadoc/generics/wildcards/Main.java test/langtools/tools/javadoc/generics/wildcards/expected.out test/langtools/tools/javadoc/generics/wildcards/pkg1/A.java test/langtools/tools/javadoc/imports/I.java test/langtools/tools/javadoc/imports/MissingImport.java test/langtools/tools/javadoc/lib/OldToolTester.java 
test/langtools/tools/javadoc/lib/ToyDoclet.java test/langtools/tools/javadoc/nestedClass/NestedClass.java test/langtools/tools/javadoc/nestedClass/NestedClassB.java test/langtools/tools/javadoc/nonConstExprs/Test.java test/langtools/tools/javadoc/outputRedirect/Test.java test/langtools/tools/javadoc/outputRedirect/p/OutputRedirect.java test/langtools/tools/javadoc/parser/7091528/T7091528.java test/langtools/tools/javadoc/parser/7091528/p/C1.java test/langtools/tools/javadoc/parser/7091528/p/C3.java test/langtools/tools/javadoc/parser/7091528/p/q/C2.java test/langtools/tools/javadoc/sourceOnly/Test.java test/langtools/tools/javadoc/sourceOnly/p/NonSource.jasm test/langtools/tools/javadoc/sourceOnly/p/SourceOnly.java test/langtools/tools/javadoc/sourceOption/SourceOption.java test/langtools/tools/javadoc/sourceOption/p/LambdaConstructTest.java test/langtools/tools/javadoc/subpackageIgnore/SubpackageIgnore.java test/langtools/tools/javadoc/subpackageIgnore/pkg1/not-subpkg/SomeJavaFile.java test/langtools/tools/javadoc/varArgs/Main.java test/langtools/tools/javadoc/varArgs/expected.out test/langtools/tools/javadoc/varArgs/pkg1/A.java
diffstat 809 files changed, 7389 insertions(+), 29780 deletions(-)
--- a/.hgtags	Tue Feb 26 11:08:07 2019 +0530
+++ b/.hgtags	Tue Feb 26 11:17:12 2019 +0530
@@ -543,3 +543,6 @@
 021917019cda1c0c5853255322274f37693a2431 jdk-13+7
 b5f7bb57de2f797be34f6c75d45c3245ad37ab97 jdk-12+31
 a535ba736cabc6886acdff36de3a096c46e5ddc5 jdk-13+8
+4ce47bc1fb92cf94c6e3d1f49d582f02dcb851ab jdk-12+32
+c081f3ea6b9300265a4a34e38f970b1e3ddaae9f jdk-13+9
+b67884871b5fff79c5ef3eb8ac74dd48d71ea9b1 jdk-12+33
--- a/make/RunTests.gmk	Tue Feb 26 11:08:07 2019 +0530
+++ b/make/RunTests.gmk	Tue Feb 26 11:17:12 2019 +0530
@@ -112,6 +112,9 @@
 GTEST_VARIANTS := $(strip $(patsubst $(TEST_IMAGE_DIR)/hotspot/gtest/%, %, \
     $(GTEST_LAUNCHER_DIRS)))
 
+COV_ENVIRONMENT :=
+JTREG_COV_OPTIONS :=
+
 ifeq ($(TEST_OPTS_JCOV), true)
   JCOV_OUTPUT_DIR := $(TEST_RESULTS_DIR)/jcov-output
   JCOV_GRABBER_LOG := $(JCOV_OUTPUT_DIR)/grabber.log
@@ -122,12 +125,18 @@
   # Replace our normal test JDK with the JCov image.
   JDK_UNDER_TEST := $(JCOV_IMAGE_DIR)
 
-  JCOV_ENVIRONMENT := JAVA_TOOL_OPTIONS="$(JCOV_MEM_OPTIONS)" \
+  COV_ENVIRONMENT += JAVA_TOOL_OPTIONS="$(JCOV_MEM_OPTIONS)" \
       _JAVA_OPTIONS="$(JCOV_MEM_OPTIONS)"
-  JTREG_JCOV_OPTIONS := -e:JAVA_TOOL_OPTIONS='$(JCOV_MEM_OPTIONS)' \
+  JTREG_COV_OPTIONS += -e:JAVA_TOOL_OPTIONS='$(JCOV_MEM_OPTIONS)' \
       -e:_JAVA_OPTIONS='$(JCOV_MEM_OPTIONS)'
 endif
 
+ifeq ($(GCOV_ENABLED), true)
+  GCOV_OUTPUT_DIR := $(TEST_RESULTS_DIR)/gcov-output
+  COV_ENVIRONMENT += GCOV_PREFIX="$(GCOV_OUTPUT_DIR)"
+  JTREG_COV_OPTIONS += -e:GCOV_PREFIX="$(GCOV_OUTPUT_DIR)"
+endif
+
 ################################################################################
 # Optionally create AOT libraries for specified modules before running tests.
 # Note, this could not be done during JDK build time.
@@ -170,6 +179,7 @@
 	$$(call LogWarn, Generating $$(patsubst $$(OUTPUTDIR)/%, %, $$@))
 	$$(call MakeTargetDir)
 	$$(call ExecuteWithLog, $$@, \
+	    $$(COV_ENVIRONMENT) \
 	    $$(FIXPATH) $$(JDK_UNDER_TEST)/bin/jaotc \
 	        $$($1_JAOTC_OPTS) --output $$@ --module $$($1_MODULE) \
 	)
@@ -658,6 +668,8 @@
 
   # Current tests needs to open java.io
   $1_MICRO_JAVA_OPTIONS += --add-opens=java.base/java.io=ALL-UNNAMED
+  # Set library path for native dependencies
+  $1_MICRO_JAVA_OPTIONS += -Djava.library.path=$$(TEST_IMAGE_DIR)/micro/native
 
   # Save output as JSON or CSV file
   ifneq ($$(MICRO_RESULTS_FORMAT), )
@@ -690,7 +702,8 @@
 	$$(call LogWarn, Running test '$$($1_TEST)')
 	$$(call MakeDir, $$($1_TEST_RESULTS_DIR) $$($1_TEST_SUPPORT_DIR))
 	$$(call ExecuteWithLog, $$($1_TEST_SUPPORT_DIR)/micro, \
-	    $$($1_MICRO_TEST_JDK)/bin/java $$($1_MICRO_JAVA_OPTIONS) -jar $$($1_MICRO_BENCHMARKS_JAR) \
+	    $$(FIXPATH) $$($1_MICRO_TEST_JDK)/bin/java $$($1_MICRO_JAVA_OPTIONS) \
+	        -jar $$($1_MICRO_BENCHMARKS_JAR) \
 	        $$($1_MICRO_ITER) $$($1_MICRO_FORK) $$($1_MICRO_TIME) \
 	        $$($1_MICRO_WARMUP_ITER) $$($1_MICRO_WARMUP_TIME) \
 	        $$($1_MICRO_VM_OPTIONS) $$($1_MICRO_BASIC_OPTIONS) $$(MICRO_OPTIONS) \
@@ -904,7 +917,7 @@
 	$$(call LogWarn, Running test '$$($1_TEST)')
 	$$(call MakeDir, $$($1_TEST_RESULTS_DIR) $$($1_TEST_SUPPORT_DIR))
 	$$(call ExecuteWithLog, $$($1_TEST_SUPPORT_DIR)/jtreg, \
-	    $$(JCOV_ENVIRONMENT) \
+	    $$(COV_ENVIRONMENT) \
 	    $$(JAVA) $$($1_JTREG_LAUNCHER_OPTIONS) \
 	        -Dprogram=jtreg -jar $$(JT_HOME)/lib/jtreg.jar \
 	        $$($1_JTREG_BASIC_OPTIONS) \
@@ -914,7 +927,7 @@
 	        -workDir:$$($1_TEST_SUPPORT_DIR) \
 	        $$(JTREG_OPTIONS) \
 	        $$(JTREG_FAILURE_HANDLER_OPTIONS) \
-	        $$(JTREG_JCOV_OPTIONS) \
+	        $$(JTREG_COV_OPTIONS) \
 	        $$($1_TEST_NAME) \
 	    && $$(ECHO) $$$$? > $$($1_EXITCODE) \
 	    || $$(ECHO) $$$$? > $$($1_EXITCODE) \
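
Note: with the COV_ENVIRONMENT/JTREG_COV_OPTIONS split above, gcov data collection works by
exporting GCOV_PREFIX, which gcov-instrumented binaries prepend to the path where they write
their .gcda counter files at run time (jtreg's -e: option forwards the variable into the test
environment). A minimal shell sketch of the effect, with illustrative paths only:

    GCOV_OUTPUT_DIR=/tmp/test-results/gcov-output
    export GCOV_PREFIX="$GCOV_OUTPUT_DIR"
    ./instrumented-tool                      # .gcda files land under $GCOV_PREFIX/<build path>
    find "$GCOV_OUTPUT_DIR" -name '*.gcda'
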
--- a/make/autoconf/basics.m4	Tue Feb 26 11:08:07 2019 +0530
+++ b/make/autoconf/basics.m4	Tue Feb 26 11:17:12 2019 +0530
@@ -618,14 +618,6 @@
   BASIC_PATH_PROGS(CPIO, [cpio bsdcpio])
   BASIC_PATH_PROGS(NICE, nice)
 
-  BASIC_PATH_PROGS(PANDOC, pandoc)
-  if test -n "$PANDOC"; then
-    ENABLE_PANDOC="true"
-  else
-    ENABLE_PANDOC="false"
-  fi
-  AC_SUBST(ENABLE_PANDOC)
-
   BASIC_PATH_PROGS(LSB_RELEASE, lsb_release)
   BASIC_PATH_PROGS(CMD, [cmd.exe /mnt/c/Windows/System32/cmd.exe])
 ])
@@ -1193,6 +1185,7 @@
   BASIC_CHECK_FIND_DELETE
   BASIC_CHECK_TAR
   BASIC_CHECK_GREP
+  BASIC_SETUP_PANDOC
 
   # These tools might not be installed by default,
   # need hint on how to install them.
@@ -1379,6 +1372,34 @@
 
 ################################################################################
 #
+# Setup Pandoc
+#
+AC_DEFUN_ONCE([BASIC_SETUP_PANDOC],
+[
+  BASIC_PATH_PROGS(PANDOC, pandoc)
+
+  PANDOC_MARKDOWN_FLAG="markdown"
+  if test -n "$PANDOC"; then
+    AC_MSG_CHECKING(if the pandoc smart extension needs to be disabled for markdown)
+    if $PANDOC --list-extensions | $GREP -q '\+smart'; then
+      AC_MSG_RESULT([yes])
+      PANDOC_MARKDOWN_FLAG="markdown-smart"
+    else
+      AC_MSG_RESULT([no])
+    fi
+  fi
+
+  if test -n "$PANDOC"; then
+    ENABLE_PANDOC="true"
+  else
+    ENABLE_PANDOC="false"
+  fi
+  AC_SUBST(ENABLE_PANDOC)
+  AC_SUBST(PANDOC_MARKDOWN_FLAG)
+])
+
+################################################################################
+#
 # Default make target
 #
 AC_DEFUN_ONCE([BASIC_SETUP_DEFAULT_MAKE_TARGET],
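
Note: the BASIC_SETUP_PANDOC macro above disables pandoc's "smart" extension only when the
installed pandoc enables it by default for markdown input; whether it does depends on the
pandoc version, hence the probe. A stand-alone sketch of the same check, outside configure:

    if pandoc --list-extensions | grep -q '+smart'; then
      MARKDOWN_FLAG=markdown-smart            # smart quotes/dashes on by default: turn them off
    else
      MARKDOWN_FLAG=markdown
    fi
    pandoc -f "$MARKDOWN_FLAG" -t html --standalone doc.md -o doc.html
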
--- a/make/autoconf/flags-cflags.m4	Tue Feb 26 11:08:07 2019 +0530
+++ b/make/autoconf/flags-cflags.m4	Tue Feb 26 11:17:12 2019 +0530
@@ -121,7 +121,11 @@
     # -g0 enables debug symbols without disabling inlining.
     CFLAGS_DEBUG_SYMBOLS="-g0 -xs"
   elif test "x$TOOLCHAIN_TYPE" = xxlc; then
-    CFLAGS_DEBUG_SYMBOLS="-g"
+    if test "x$XLC_USES_CLANG" = xtrue; then
+      CFLAGS_DEBUG_SYMBOLS="-g1"
+    else
+      CFLAGS_DEBUG_SYMBOLS="-g"
+    fi
   elif test "x$TOOLCHAIN_TYPE" = xmicrosoft; then
     CFLAGS_DEBUG_SYMBOLS="-Z7 -d2Zi+"
   fi
--- a/make/autoconf/jdk-options.m4	Tue Feb 26 11:08:07 2019 +0530
+++ b/make/autoconf/jdk-options.m4	Tue Feb 26 11:17:12 2019 +0530
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -364,7 +364,7 @@
 
 ################################################################################
 #
-# Gcov coverage data for hotspot
+# Native and Java code coverage
 #
 AC_DEFUN_ONCE([JDKOPT_SETUP_CODE_COVERAGE],
 [
@@ -372,23 +372,26 @@
       [enable native compilation with code coverage data@<:@disabled@:>@])])
   GCOV_ENABLED="false"
   if test "x$enable_native_coverage" = "xyes"; then
-    if test "x$TOOLCHAIN_TYPE" = "xgcc"; then
-      AC_MSG_CHECKING([if native coverage is enabled])
-      AC_MSG_RESULT([yes])
-      GCOV_CFLAGS="-fprofile-arcs -ftest-coverage -fno-inline"
-      GCOV_LDFLAGS="-fprofile-arcs"
-      JVM_CFLAGS="$JVM_CFLAGS $GCOV_CFLAGS"
-      JVM_LDFLAGS="$JVM_LDFLAGS $GCOV_LDFLAGS"
-      CFLAGS_JDKLIB="$CFLAGS_JDKLIB $GCOV_CFLAGS"
-      CFLAGS_JDKEXE="$CFLAGS_JDKEXE $GCOV_CFLAGS"
-      CXXFLAGS_JDKLIB="$CXXFLAGS_JDKLIB $GCOV_CFLAGS"
-      CXXFLAGS_JDKEXE="$CXXFLAGS_JDKEXE $GCOV_CFLAGS"
-      LDFLAGS_JDKLIB="$LDFLAGS_JDKLIB $GCOV_LDFLAGS"
-      LDFLAGS_JDKEXE="$LDFLAGS_JDKEXE $GCOV_LDFLAGS"
-      GCOV_ENABLED="true"
-    else
-      AC_MSG_ERROR([--enable-native-coverage only works with toolchain type gcc])
-    fi
+    case $TOOLCHAIN_TYPE in
+      gcc | clang)
+        AC_MSG_CHECKING([if native coverage is enabled])
+        AC_MSG_RESULT([yes])
+        GCOV_CFLAGS="-fprofile-arcs -ftest-coverage -fno-inline"
+        GCOV_LDFLAGS="-fprofile-arcs"
+        JVM_CFLAGS="$JVM_CFLAGS $GCOV_CFLAGS"
+        JVM_LDFLAGS="$JVM_LDFLAGS $GCOV_LDFLAGS"
+        CFLAGS_JDKLIB="$CFLAGS_JDKLIB $GCOV_CFLAGS"
+        CFLAGS_JDKEXE="$CFLAGS_JDKEXE $GCOV_CFLAGS"
+        CXXFLAGS_JDKLIB="$CXXFLAGS_JDKLIB $GCOV_CFLAGS"
+        CXXFLAGS_JDKEXE="$CXXFLAGS_JDKEXE $GCOV_CFLAGS"
+        LDFLAGS_JDKLIB="$LDFLAGS_JDKLIB $GCOV_LDFLAGS"
+        LDFLAGS_JDKEXE="$LDFLAGS_JDKEXE $GCOV_LDFLAGS"
+        GCOV_ENABLED="true"
+        ;;
+      *)
+        AC_MSG_ERROR([--enable-native-coverage only works with toolchain type gcc or clang])
+        ;;
+    esac
   elif test "x$enable_native_coverage" = "xno"; then
     AC_MSG_CHECKING([if native coverage is enabled])
     AC_MSG_RESULT([no])
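
Note: the flags wired up here are plain gcc/clang coverage instrumentation; the change above
only widens the toolchain check to accept clang as well. Reduced to a single translation unit
(illustrative file names; with clang the matching report tool is "llvm-cov gcov"):

    cc -fprofile-arcs -ftest-coverage -fno-inline -c hello.c
    cc -fprofile-arcs hello.o -o hello
    ./hello                  # writes hello.gcda (next to hello.gcno, or under $GCOV_PREFIX)
    gcov hello.c             # emits hello.c.gcov with per-line execution counts
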
--- a/make/autoconf/spec.gmk.in	Tue Feb 26 11:08:07 2019 +0530
+++ b/make/autoconf/spec.gmk.in	Tue Feb 26 11:17:12 2019 +0530
@@ -774,6 +774,7 @@
 UCRT_DLL_DIR:=@UCRT_DLL_DIR@
 STLPORT_LIB:=@STLPORT_LIB@
 ENABLE_PANDOC:=@ENABLE_PANDOC@
+PANDOC_MARKDOWN_FLAG:=@PANDOC_MARKDOWN_FLAG@
 
 ####################################################
 #
--- a/make/autoconf/toolchain.m4	Tue Feb 26 11:08:07 2019 +0530
+++ b/make/autoconf/toolchain.m4	Tue Feb 26 11:17:12 2019 +0530
@@ -276,6 +276,20 @@
   fi
   AC_SUBST(TOOLCHAIN_TYPE)
 
+  # on AIX, check for xlclang++ on the PATH and TOOLCHAIN_PATH and use it if it is available
+  if test "x$OPENJDK_TARGET_OS" = xaix; then
+    if test "x$TOOLCHAIN_PATH" != x; then
+      XLC_TEST_PATH=${TOOLCHAIN_PATH}/
+    fi
+
+    XLCLANG_VERSION_OUTPUT=`${XLC_TEST_PATH}xlclang++ -qversion 2>&1 | $HEAD -n 1`
+    $ECHO "$XLCLANG_VERSION_OUTPUT" | $GREP "IBM XL C/C++ for AIX" > /dev/null
+    if test $? -eq 0; then
+      AC_MSG_NOTICE([xlclang++ output: $XLCLANG_VERSION_OUTPUT])
+      XLC_USES_CLANG=true
+    fi
+  fi
+
   TOOLCHAIN_CC_BINARY_clang="clang"
   TOOLCHAIN_CC_BINARY_gcc="gcc"
   TOOLCHAIN_CC_BINARY_microsoft="cl$EXE_SUFFIX"
@@ -288,6 +302,14 @@
   TOOLCHAIN_CXX_BINARY_solstudio="CC"
   TOOLCHAIN_CXX_BINARY_xlc="xlC_r"
 
+  if test "x$OPENJDK_TARGET_OS" = xaix; then
+    if test "x$XLC_USES_CLANG" = xtrue; then
+      AC_MSG_NOTICE([xlclang++ detected, using it])
+      TOOLCHAIN_CC_BINARY_xlc="xlclang"
+      TOOLCHAIN_CXX_BINARY_xlc="xlclang++"
+    fi
+  fi
+
   # Use indirect variable referencing
   toolchain_var_name=TOOLCHAIN_DESCRIPTION_$TOOLCHAIN_TYPE
   TOOLCHAIN_DESCRIPTION=${!toolchain_var_name}
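
Note: the AIX probe above decides between the classic xlC front end and the newer clang-based
xlclang++ purely from the -qversion banner. The same check as a stand-alone snippet (assumes
xlclang++ is on PATH; the banner text is taken from the configure change above):

    XLCLANG_VERSION_OUTPUT=`xlclang++ -qversion 2>&1 | head -n 1`
    if echo "$XLCLANG_VERSION_OUTPUT" | grep "IBM XL C/C++ for AIX" > /dev/null; then
      XLC_USES_CLANG=true                    # later selects CC=xlclang, CXX=xlclang++
    fi
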
--- a/make/common/MakeBase.gmk	Tue Feb 26 11:08:07 2019 +0530
+++ b/make/common/MakeBase.gmk	Tue Feb 26 11:17:12 2019 +0530
@@ -392,9 +392,10 @@
   #
   # Param 1 - Dirs to find in
   # Param 2 - (optional) specialization. Normally "-a \( ... \)" expression.
+  # Param 3 - (optional) options to find.
   define CacheFind
     $(if $(filter-out $(addsuffix /%,- $(FIND_CACHE_DIRS)) $(FIND_CACHE_DIRS),$1), \
-      $(if $(wildcard $1), $(shell $(FIND) $(wildcard $1) \( -type f -o -type l \) $2 \
+      $(if $(wildcard $1), $(shell $(FIND) $3 $(wildcard $1) \( -type f -o -type l \) $2 \
           | $(TR) ' ' '?')), \
       $(filter $(addsuffix /%,$(patsubst %/,%,$1)) $1,$(FIND_CACHE)))
   endef
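
Note: the new third CacheFind parameter is passed straight through to find. ZipArchive.gmk
(further down) uses it to add -L so that symlinked files are treated like the regular files
zip would archive. The difference, in plain shell (directory name illustrative):

    find    src \( -type f -o -type l \)     # default: symlinks are reported as links
    find -L src \( -type f -o -type l \)     # -L: follow symlinks; targets show up as files
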
--- a/make/common/ProcessMarkdown.gmk	Tue Feb 26 11:08:07 2019 +0530
+++ b/make/common/ProcessMarkdown.gmk	Tue Feb 26 11:17:12 2019 +0530
@@ -80,7 +80,8 @@
 	$$(call LogInfo, Converting $2 to $$($1_FORMAT))
 	$$(call MakeDir, $$(SUPPORT_OUTPUTDIR)/markdown $$(dir $$($1_$2_PANDOC_OUTPUT)))
 	$$(call ExecuteWithLog, $$(SUPPORT_OUTPUTDIR)/markdown/$$($1_$2_MARKER), \
-	    $$(PANDOC) $$($1_OPTIONS) -f markdown-smart -t $$($1_FORMAT) --standalone \
+	    $$(PANDOC) $$($1_OPTIONS) -f $$(PANDOC_MARKDOWN_FLAG) \
+	    -t $$($1_FORMAT) --standalone \
 	    $$($1_$2_CSS_OPTION) $$($1_$2_OPTIONS) '$$($1_$2_PANDOC_INPUT)' \
 	    -o '$$($1_$2_PANDOC_OUTPUT)')
         ifneq ($$(findstring $$(LOG_LEVEL), debug trace),)
--- a/make/common/ZipArchive.gmk	Tue Feb 26 11:08:07 2019 +0530
+++ b/make/common/ZipArchive.gmk	Tue Feb 26 11:17:12 2019 +0530
@@ -62,8 +62,9 @@
     $1_FIND_LIST := $$($1_SRC)
   endif
 
-  # Find all files in the source tree.
-  $1_ALL_SRCS := $$(call not-containing,_the.,$$(call CacheFind,$$($1_FIND_LIST)))
+  # Find all files in the source tree. Follow symlinks in this find since that is
+  # what zip does.
+  $1_ALL_SRCS := $$(call not-containing,_the.,$$(call CacheFind,$$($1_FIND_LIST), , -L))
 
   # Filter on suffixes if set
   ifneq ($$($1_SUFFIXES),)
@@ -126,10 +127,14 @@
   $$($1_ZIP) : $$($1_ALL_SRCS) $$($1_EXTRA_DEPS)
 	$$(call LogWarn, Updating $$($1_NAME))
 	$$(call MakeTargetDir)
-	$$(foreach s,$$($1_SRC),(cd $$s && $(ZIPEXE) -qru $$($1_ZIP_OPTIONS) $$@ . \
-	    $$($1_ZIP_INCLUDES) $$($1_ZIP_EXCLUDES) -x \*_the.\* \
-	    $$($1_ZIP_EXCLUDES_$$s) \
-	    || test "$$$$?" = "12" )$$(NEWLINE)) true
+	$$(foreach s,$$($1_SRC), $$(call ExecuteWithLog, \
+	    $$(SUPPORT_OUTPUTDIR)/zip/$$(patsubst $$(OUTPUTDIR)/%,%, $$@), \
+	    (cd $$s && $(ZIPEXE) -qru $$($1_ZIP_OPTIONS) $$@ . \
+	        $$($1_ZIP_INCLUDES) $$($1_ZIP_EXCLUDES) -x \*_the.\* \
+	        $$($1_ZIP_EXCLUDES_$$s) \
+	        || test "$$$$?" = "12" \
+	    ))$$(NEWLINE) \
+	) true \
 	$(TOUCH) $$@
 
   # Add zip to target list
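
Note: the "|| test $? = 12" guard kept in the rewritten recipe exists because zip exits with
code 12 when an incremental update (-u) finds nothing to do, which is not an error for this
rule. Stand-alone (archive name illustrative):

    zip -qru archive.zip . || test "$?" = "12"
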
--- a/make/conf/jib-profiles.js	Tue Feb 26 11:08:07 2019 +0530
+++ b/make/conf/jib-profiles.js	Tue Feb 26 11:17:12 2019 +0530
@@ -241,7 +241,7 @@
     common.main_profile_names = [
         "linux-x64", "linux-x86", "macosx-x64", "solaris-x64",
         "solaris-sparcv9", "windows-x64", "windows-x86",
-        "linux-aarch64", "linux-arm32"
+        "linux-aarch64", "linux-arm32", "linux-ppc64le", "linux-s390x"
     ];
 
     // These are the base setttings for all the main build profiles.
@@ -464,6 +464,28 @@
                 "--with-abi-profile=arm-vfp-hflt", "--disable-warnings-as-errors"
             ],
         },
+
+        "linux-ppc64le": {
+            target_os: "linux",
+            target_cpu: "ppc64le",
+            build_cpu: "x64",
+            dependencies: ["devkit", "build_devkit", "cups"],
+            configure_args: [
+                "--openjdk-target=ppc64le-linux-gnu", "--with-freetype=bundled",
+                "--disable-warnings-as-errors"
+            ],
+        },
+
+        "linux-s390x": {
+            target_os: "linux",
+            target_cpu: "s390x",
+            build_cpu: "x64",
+            dependencies: ["devkit", "build_devkit", "cups"],
+            configure_args: [
+                "--openjdk-target=s390x-linux-gnu", "--with-freetype=bundled",
+                "--disable-warnings-as-errors"
+            ],
+        },
     };
 
     // Add the base settings to all the main profiles
@@ -499,6 +521,15 @@
             profiles[maketestName].default_make_targets = [ "test-make" ];
         });
 
+    // Generate -gcov profiles
+    [ "linux-x64", "macosx-x64" ].forEach(function (name) {
+        var gcovName = name + "-gcov";
+        profiles[gcovName] = clone(profiles[name]);
+        profiles[gcovName].default_make_targets = ["product-bundles", "test-bundles"];
+        profiles[gcovName].configure_args = concat(profiles[gcovName].configure_args,
+            ["--enable-native-coverage", "--disable-warnings-as-errors"]);
+    });
+
     // Profiles for building the zero jvm variant. These are used for verification.
     var zeroProfiles = {
         "linux-x64-zero": {
@@ -626,6 +657,12 @@
         },
        "linux-arm32": {
             platform: "linux-arm32",
+        },
+       "linux-ppc64le": {
+            platform: "linux-ppc64le",
+        },
+       "linux-s390x": {
+            platform: "linux-s390x",
         }
     }
     // Generate common artifacts for all main profiles
@@ -744,6 +781,40 @@
             };
         });
 
+    // Artifacts of gcov (native-code-coverage) profiles
+    [ "linux-x64", "macosx-x64" ].forEach(function (name) {
+        var o = artifactData[name]
+        var pf = o.platform
+        var jdk_subdir = (o.jdk_subdir != null ? o.jdk_subdir : "jdk-" + data.version);
+        var jdk_suffix = (o.jdk_suffix != null ? o.jdk_suffix : "tar.gz");
+        var gcovName = name + "-gcov";
+        profiles[gcovName].artifacts = {
+            jdk: {
+                local: "bundles/\\(jdk.*bin." + jdk_suffix + "\\)",
+                remote: [
+                    "bundles/" + pf + "/jdk-" + data.version + "_" + pf + "_bin-gcov." + jdk_suffix,
+                ],
+                subdir: jdk_subdir,
+                exploded: "images/jdk",
+            },
+            test: {
+                    local: "bundles/\\(jdk.*bin-tests.tar.gz\\)",
+                    remote: [
+                        "bundles/" + pf + "/jdk-" + data.version + "_" + pf + "_bin-gcov-tests.tar.gz",
+                    ],
+                    exploded: "images/test"
+            },
+            jdk_symbols: {
+                    local: "bundles/\\(jdk.*bin-symbols.tar.gz\\)",
+                    remote: [
+                        "bundles/" + pf + "/jdk-" + data.version + "_" + pf + "_bin-gcov-symbols.tar.gz",
+                    ],
+                    subdir: jdk_subdir,
+                    exploded: "images/jdk"
+                },
+            };
+    });
+
     // Profiles used to run tests.
     var testOnlyProfiles = {
         "run-test": {
@@ -770,6 +841,10 @@
     } else {
         testedProfileTest = testedProfile + ".test";
     }
+    var testOnlyMake = [ "run-test-prebuilt", "LOG_CMDLINES=true", "JTREG_VERBOSE=fail,error,time" ];
+    if (testedProfile.endsWith("-gcov")) {
+        testOnlyMake = concat(testOnlyMake, "GCOV_ENABLED=true")
+    }
     var testOnlyProfilesPrebuilt = {
         "run-test-prebuilt": {
             target_os: input.build_os,
@@ -779,7 +854,7 @@
                 testedProfileTest
             ],
             src: "src.conf",
-            make_args: [ "run-test-prebuilt", "LOG_CMDLINES=true", "JTREG_VERBOSE=fail,error,time" ],
+            make_args: testOnlyMake,
             environment: {
                 "BOOT_JDK": common.boot_jdk_home,
                 "JDK_IMAGE_DIR": input.get(testedProfileJDK, "home_path"),
@@ -870,7 +945,9 @@
         solaris_sparcv9: "SS12u6-Solaris11u3+1.0",
         windows_x64: "VS2017-15.5.5+1.0",
         linux_aarch64: "gcc7.3.0-Fedora27+1.2",
-        linux_arm: "gcc7.3.0-Fedora27+1.2"
+        linux_arm: "gcc7.3.0-Fedora27+1.2",
+        linux_ppc64le: "gcc7.3.0-Fedora27+1.0",
+        linux_s390x: "gcc7.3.0-Fedora27+1.0"
     };
 
     var devkit_platform = (input.target_cpu == "x86"
@@ -930,7 +1007,7 @@
         jtreg: {
             server: "javare",
             revision: "4.2",
-            build_number: "b13",
+            build_number: "b14",
             checksum_file: "MD5_VALUES",
             file: "jtreg_bin-4.2.zip",
             environment_name: "JT_HOME",
--- a/make/copy/CopyCommon.gmk	Tue Feb 26 11:08:07 2019 +0530
+++ b/make/copy/CopyCommon.gmk	Tue Feb 26 11:17:12 2019 +0530
@@ -66,6 +66,8 @@
 # Setup make rules for copying legal files. This is only needed if the files
 # need to be filtered due to optional components being enabled/disabled.
 # Otherwise CreateJmods.gmk will find the legal files in the original src dirs.
+# If multiple license files with the same name are found, only the first one
+# found will get copied.
 #
 # Parameter 1 is the name of the rule.
 #
@@ -75,10 +77,12 @@
 define SetupCopyLegalFilesBody
   $$(foreach f, $$(filter-out $$(addprefix %/, $$($1_EXCLUDES)), \
       $$(wildcard $$(addsuffix /*, $$(call FindModuleLegalSrcDirs, $$(MODULE))))), \
-    $$(eval $$(call SetupCopyFiles, $1_$$(notdir $$f), \
-        DEST := $$(LEGAL_DST_DIR), \
-        FILES := $$f, \
-    )) \
-    $$(eval $1 += $$($1_$$(notdir $$f))) \
+    $$(if $$(filter $$($1_$$(notdir $$f)), $$($1)), , \
+      $$(eval $$(call SetupCopyFiles, $1_$$(notdir $$f), \
+          DEST := $$(LEGAL_DST_DIR), \
+          FILES := $$f, \
+      )) \
+      $$(eval $1 += $$($1_$$(notdir $$f))) \
+    ) \
   )
 endef
--- a/make/lib/Awt2dLibraries.gmk	Tue Feb 26 11:08:07 2019 +0530
+++ b/make/lib/Awt2dLibraries.gmk	Tue Feb 26 11:17:12 2019 +0530
@@ -613,7 +613,8 @@
         type-limits missing-field-initializers implicit-fallthrough \
         strict-aliasing undef unused-function, \
     DISABLED_WARNINGS_CXX_gcc := reorder delete-non-virtual-dtor strict-overflow \
-        maybe-uninitialized, \
+        maybe-uninitialized \
+        missing-attributes class-memaccess, \
     DISABLED_WARNINGS_clang := unused-value incompatible-pointer-types \
         tautological-constant-out-of-range-compare int-to-pointer-cast \
         sign-compare undef missing-field-initializers, \
--- a/make/test/BuildMicrobenchmark.gmk	Tue Feb 26 11:08:07 2019 +0530
+++ b/make/test/BuildMicrobenchmark.gmk	Tue Feb 26 11:17:12 2019 +0530
@@ -30,6 +30,7 @@
 include MakeBase.gmk
 include JavaCompilation.gmk
 include SetupJavaCompilers.gmk
+include TestFilesCompilation.gmk
 
 ifeq ($(JMH_CORE_JAR), )
   $(info Error: JMH is missing. Please use configure --with-jmh.)
@@ -39,7 +40,8 @@
 #### Variables
 
 MICROBENCHMARK_SRC := $(TOPDIR)/test/micro
-MICROBENCHMARK_JAR := $(IMAGES_OUTPUTDIR)/test/micro/benchmarks.jar
+MICROBENCHMARK_IMAGE_DIR := $(TEST_IMAGE_DIR)/micro
+MICROBENCHMARK_JAR := $(MICROBENCHMARK_IMAGE_DIR)/benchmarks.jar
 
 MICROBENCHMARK_OUTPUT := $(SUPPORT_OUTPUTDIR)/test/micro
 MICROBENCHMARK_CLASSES := $(MICROBENCHMARK_OUTPUT)/classes
@@ -54,6 +56,11 @@
 
 MICROBENCHMARK_CLASSPATH := $(call PathList, $(JMH_COMPILE_JARS))
 
+# Native dependencies
+MICROBENCHMARK_NATIVE_SRC_DIRS := $(MICROBENCHMARK_SRC)
+MICROBENCHMARK_NATIVE_OUTPUT := $(MICROBENCHMARK_OUTPUT)/native
+MICROBENCHMARK_NATIVE_EXCLUDE :=
+
 ###
 
 # Need double \n to get new lines and no trailing spaces
@@ -108,6 +115,22 @@
     JAR := $(MICROBENCHMARK_JAR), \
 ))
 
-all: $(MICROBENCHMARK_JAR)
+# Setup compilation of native library dependencies
+$(eval $(call SetupTestFilesCompilation, BUILD_MICROBENCHMARK_LIBRARIES, \
+    TYPE := LIBRARY, \
+    SOURCE_DIRS := $(MICROBENCHMARK_NATIVE_SRC_DIRS), \
+    OUTPUT_DIR := $(MICROBENCHMARK_NATIVE_OUTPUT), \
+    EXCLUDE := $(MICROBENCHMARK_NATIVE_EXCLUDE), \
+))
+
+# Setup copy of native dependencies to image output dir
+$(eval $(call SetupCopyFiles, COPY_MICROBENCHMARK_NATIVE, \
+    SRC := $(MICROBENCHMARK_NATIVE_OUTPUT), \
+    DEST := $(MICROBENCHMARK_IMAGE_DIR)/native, \
+    FILES := $(BUILD_MICROBENCHMARK_LIBRARIES), \
+    FLATTEN := true, \
+))
+
+all: $(MICROBENCHMARK_JAR) $(BUILD_MICROBENCHMARK_LIBRARIES) $(COPY_MICROBENCHMARK_NATIVE)
 
 .PHONY: all
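
Note: the benchmarks.jar now lands in the test image next to a micro/native directory, and
RunTests.gmk (above) points java.library.path at it so JMH benchmarks can load their native
test libraries. Launching the bundle by hand would look roughly like this (paths and the
benchmark filter are illustrative):

    java -Djava.library.path=$TEST_IMAGE_DIR/micro/native \
         -jar $TEST_IMAGE_DIR/micro/benchmarks.jar ".*MyBenchmark.*"
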
--- a/src/hotspot/cpu/ppc/nativeInst_ppc.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/cpu/ppc/nativeInst_ppc.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2019, SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -362,8 +362,8 @@
 
 void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
   CodeBuffer cb(code_pos, BytesPerInstWord + 1);
-  MacroAssembler* a = new MacroAssembler(&cb);
-  a->b(entry);
+  MacroAssembler a(&cb);
+  a.b(entry);
   ICache::ppc64_flush_icache_bytes(code_pos, NativeGeneralJump::instruction_size);
 }
 
--- a/src/hotspot/cpu/s390/frame_s390.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/cpu/s390/frame_s390.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2019, SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -479,6 +479,7 @@
           // name
           Method* method = *(Method**)((address)current_fp + _z_ijava_state_neg(method));
           if (method) {
+            ResourceMark rm;
             if (method->is_synchronized()) st->print("synchronized ");
             if (method->is_static()) st->print("static ");
             if (method->is_native()) st->print("native ");
@@ -543,6 +544,7 @@
           // name
           Method* method = ((nmethod *)blob)->method();
           if (method) {
+            ResourceMark rm;
             method->name_and_sig_as_C_string(buf, sizeof(buf));
             st->print("%s ", buf);
           }
--- a/src/hotspot/cpu/x86/c2_init_x86.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/cpu/x86/c2_init_x86.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -29,6 +29,8 @@
 
 // processor dependent initialization for i486
 
+LP64_ONLY(extern void reg_mask_init();)
+
 void Compile::pd_compiler2_init() {
   guarantee(CodeEntryAlignment >= InteriorEntryAlignment, "" );
   // QQQ presumably all 64bit cpu's support this. Seems like the ifdef could
@@ -58,4 +60,5 @@
       OptoReg::invalidate(i);
     }
   }
+  LP64_ONLY(reg_mask_init();)
 }
--- a/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/cpu/x86/gc/z/zBarrierSetAssembler_x86.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -359,7 +359,7 @@
 // ZBarrierSetRuntime::load_barrier_on_weak_oop_field_preloaded().
 static address generate_load_barrier_stub(StubCodeGenerator* cgen, Register raddr, DecoratorSet decorators) {
   // Don't generate stub for invalid registers
-  if (raddr == rsp || raddr == r12 || raddr == r15) {
+  if (raddr == rsp || raddr == r15) {
     return NULL;
   }
 
--- a/src/hotspot/cpu/x86/gc/z/z_x86_64.ad	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/cpu/x86/gc/z/z_x86_64.ad	Tue Feb 26 11:17:12 2019 +0530
@@ -26,9 +26,8 @@
 #include "gc/z/zBarrierSetAssembler.hpp"
 
 static void z_load_barrier_slow_reg(MacroAssembler& _masm, Register dst, Address src, bool weak) {
-  assert(dst != r12, "Invalid register");
+  assert(dst != rsp, "Invalid register");
   assert(dst != r15, "Invalid register");
-  assert(dst != rsp, "Invalid register");
 
   const address stub = weak ? ZBarrierSet::assembler()->load_barrier_weak_slow_stub(dst)
                             : ZBarrierSet::assembler()->load_barrier_slow_stub(dst);
--- a/src/hotspot/cpu/x86/vm_version_x86.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/cpu/x86/vm_version_x86.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -336,7 +336,7 @@
 #define CPU_AVX512_VPOPCNTDQ ((uint64_t)UCONST64(0x2000000000)) // Vector popcount
 #define CPU_VPCLMULQDQ ((uint64_t)UCONST64(0x4000000000)) //Vector carryless multiplication
 #define CPU_VAES ((uint64_t)UCONST64(0x8000000000))    // Vector AES instructions
-#define CPU_VNNI ((uint64_t)UCONST64(0x16000000000))   // Vector Neural Network Instructions
+#define CPU_VNNI ((uint64_t)UCONST64(0x10000000000))   // Vector Neural Network Instructions
 
   enum Extended_Family {
     // AMD
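
Note on the CPU_VNNI fix: each CPU_* feature constant must be a distinct single bit, and the
next free bit after CPU_VAES (0x8000000000) is 0x10000000000. The old value 0x16000000000 set
three bits and therefore also overlapped CPU_VPCLMULQDQ and CPU_AVX512_VPOPCNTDQ, as a quick
shell check shows:

    printf '%#x\n' $(( 0x8000000000 * 2 ))              # 0x10000000000, the intended bit
    printf '%#x\n' $(( 0x16000000000 & 0x4000000000 ))  # 0x4000000000, collides with CPU_VPCLMULQDQ
    printf '%#x\n' $(( 0x16000000000 & 0x2000000000 ))  # 0x2000000000, collides with CPU_AVX512_VPOPCNTDQ
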
--- a/src/hotspot/cpu/x86/x86_64.ad	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/cpu/x86/x86_64.ad	Tue Feb 26 11:17:12 2019 +0530
@@ -169,135 +169,94 @@
 // Empty register class.
 reg_class no_reg();
 
-// Class for all pointer registers (including RSP and RBP)
-reg_class any_reg_with_rbp(RAX, RAX_H,
-                           RDX, RDX_H,
-                           RBP, RBP_H,
-                           RDI, RDI_H,
-                           RSI, RSI_H,
-                           RCX, RCX_H,
-                           RBX, RBX_H,
-                           RSP, RSP_H,
-                           R8,  R8_H,
-                           R9,  R9_H,
-                           R10, R10_H,
-                           R11, R11_H,
-                           R12, R12_H,
-                           R13, R13_H,
-                           R14, R14_H,
-                           R15, R15_H);
-
-// Class for all pointer registers (including RSP, but excluding RBP)
-reg_class any_reg_no_rbp(RAX, RAX_H,
-                         RDX, RDX_H,
-                         RDI, RDI_H,
-                         RSI, RSI_H,
-                         RCX, RCX_H,
-                         RBX, RBX_H,
-                         RSP, RSP_H,
-                         R8,  R8_H,
-                         R9,  R9_H,
-                         R10, R10_H,
-                         R11, R11_H,
-                         R12, R12_H,
-                         R13, R13_H,
-                         R14, R14_H,
-                         R15, R15_H);
-
-// Dynamic register class that selects at runtime between register classes
-// any_reg_no_rbp and any_reg_with_rbp (depending on the value of the flag PreserveFramePointer).
-// Equivalent to: return PreserveFramePointer ? any_reg_no_rbp : any_reg_with_rbp;
-reg_class_dynamic any_reg(any_reg_no_rbp, any_reg_with_rbp, %{ PreserveFramePointer %});
+// Class for all pointer/long registers
+reg_class all_reg(RAX, RAX_H,
+                  RDX, RDX_H,
+                  RBP, RBP_H,
+                  RDI, RDI_H,
+                  RSI, RSI_H,
+                  RCX, RCX_H,
+                  RBX, RBX_H,
+                  RSP, RSP_H,
+                  R8,  R8_H,
+                  R9,  R9_H,
+                  R10, R10_H,
+                  R11, R11_H,
+                  R12, R12_H,
+                  R13, R13_H,
+                  R14, R14_H,
+                  R15, R15_H);
+
+// Class for all int registers
+reg_class all_int_reg(RAX
+                      RDX,
+                      RBP,
+                      RDI,
+                      RSI,
+                      RCX,
+                      RBX,
+                      R8,
+                      R9,
+                      R10,
+                      R11,
+                      R12,
+                      R13,
+                      R14);
+
+// Class for all pointer registers
+reg_class any_reg %{
+  return _ANY_REG_mask;
+%}
 
 // Class for all pointer registers (excluding RSP)
-reg_class ptr_reg_with_rbp(RAX, RAX_H,
-                           RDX, RDX_H,
-                           RBP, RBP_H,
-                           RDI, RDI_H,
-                           RSI, RSI_H,
-                           RCX, RCX_H,
-                           RBX, RBX_H,
-                           R8,  R8_H,
-                           R9,  R9_H,
-                           R10, R10_H,
-                           R11, R11_H,
-                           R13, R13_H,
-                           R14, R14_H);
+reg_class ptr_reg %{
+  return _PTR_REG_mask;
+%}
 
 // Class for all pointer registers (excluding RSP and RBP)
-reg_class ptr_reg_no_rbp(RAX, RAX_H,
-                         RDX, RDX_H,
-                         RDI, RDI_H,
-                         RSI, RSI_H,
-                         RCX, RCX_H,
-                         RBX, RBX_H,
-                         R8,  R8_H,
-                         R9,  R9_H,
-                         R10, R10_H,
-                         R11, R11_H,
-                         R13, R13_H,
-                         R14, R14_H);
-
-// Dynamic register class that selects between ptr_reg_no_rbp and ptr_reg_with_rbp.
-reg_class_dynamic ptr_reg(ptr_reg_no_rbp, ptr_reg_with_rbp, %{ PreserveFramePointer %});
+reg_class ptr_reg_no_rbp %{
+  return _PTR_REG_NO_RBP_mask;
+%}
 
 // Class for all pointer registers (excluding RAX and RSP)
-reg_class ptr_no_rax_reg_with_rbp(RDX, RDX_H,
-                                  RBP, RBP_H,
-                                  RDI, RDI_H,
-                                  RSI, RSI_H,
-                                  RCX, RCX_H,
-                                  RBX, RBX_H,
-                                  R8,  R8_H,
-                                  R9,  R9_H,
-                                  R10, R10_H,
-                                  R11, R11_H,
-                                  R13, R13_H,
-                                  R14, R14_H);
-
-// Class for all pointer registers (excluding RAX, RSP, and RBP)
-reg_class ptr_no_rax_reg_no_rbp(RDX, RDX_H,
-                                RDI, RDI_H,
-                                RSI, RSI_H,
-                                RCX, RCX_H,
-                                RBX, RBX_H,
-                                R8,  R8_H,
-                                R9,  R9_H,
-                                R10, R10_H,
-                                R11, R11_H,
-                                R13, R13_H,
-                                R14, R14_H);
-
-// Dynamic register class that selects between ptr_no_rax_reg_no_rbp and ptr_no_rax_reg_with_rbp.
-reg_class_dynamic ptr_no_rax_reg(ptr_no_rax_reg_no_rbp, ptr_no_rax_reg_with_rbp, %{ PreserveFramePointer %});
+reg_class ptr_no_rax_reg %{
+  return _PTR_NO_RAX_REG_mask;
+%}
 
 // Class for all pointer registers (excluding RAX, RBX, and RSP)
-reg_class ptr_no_rax_rbx_reg_with_rbp(RDX, RDX_H,
-                                      RBP, RBP_H,
-                                      RDI, RDI_H,
-                                      RSI, RSI_H,
-                                      RCX, RCX_H,
-                                      R8,  R8_H,
-                                      R9,  R9_H,
-                                      R10, R10_H,
-                                      R11, R11_H,
-                                      R13, R13_H,
-                                      R14, R14_H);
-
-// Class for all pointer registers (excluding RAX, RBX, RSP, and RBP)
-reg_class ptr_no_rax_rbx_reg_no_rbp(RDX, RDX_H,
-                                    RDI, RDI_H,
-                                    RSI, RSI_H,
-                                    RCX, RCX_H,
-                                    R8,  R8_H,
-                                    R9,  R9_H,
-                                    R10, R10_H,
-                                    R11, R11_H,
-                                    R13, R13_H,
-                                    R14, R14_H);
-
-// Dynamic register class that selects between ptr_no_rax_rbx_reg_no_rbp and ptr_no_rax_rbx_reg_with_rbp.
-reg_class_dynamic ptr_no_rax_rbx_reg(ptr_no_rax_rbx_reg_no_rbp, ptr_no_rax_rbx_reg_with_rbp, %{ PreserveFramePointer %});
+reg_class ptr_no_rax_rbx_reg %{
+  return _PTR_NO_RAX_RBX_REG_mask;
+%}
+
+// Class for all long registers (excluding RSP)
+reg_class long_reg %{
+  return _LONG_REG_mask;
+%}
+
+// Class for all long registers (excluding RAX, RDX and RSP)
+reg_class long_no_rax_rdx_reg %{
+  return _LONG_NO_RAX_RDX_REG_mask;
+%}
+
+// Class for all long registers (excluding RCX and RSP)
+reg_class long_no_rcx_reg %{
+  return _LONG_NO_RCX_REG_mask;
+%}
+
+// Class for all int registers (excluding RSP)
+reg_class int_reg %{
+  return _INT_REG_mask;
+%}
+
+// Class for all int registers (excluding RAX, RDX, and RSP)
+reg_class int_no_rax_rdx_reg %{
+  return _INT_NO_RAX_RDX_REG_mask;
+%}
+
+// Class for all int registers (excluding RCX and RSP)
+reg_class int_no_rcx_reg %{
+  return _INT_NO_RCX_REG_mask;
+%}
 
 // Singleton class for RAX pointer register
 reg_class ptr_rax_reg(RAX, RAX_H);
@@ -317,96 +276,6 @@
 // Singleton class for TLS pointer
 reg_class ptr_r15_reg(R15, R15_H);
 
-// Class for all long registers (excluding RSP)
-reg_class long_reg_with_rbp(RAX, RAX_H,
-                            RDX, RDX_H,
-                            RBP, RBP_H,
-                            RDI, RDI_H,
-                            RSI, RSI_H,
-                            RCX, RCX_H,
-                            RBX, RBX_H,
-                            R8,  R8_H,
-                            R9,  R9_H,
-                            R10, R10_H,
-                            R11, R11_H,
-                            R13, R13_H,
-                            R14, R14_H);
-
-// Class for all long registers (excluding RSP and RBP)
-reg_class long_reg_no_rbp(RAX, RAX_H,
-                          RDX, RDX_H,
-                          RDI, RDI_H,
-                          RSI, RSI_H,
-                          RCX, RCX_H,
-                          RBX, RBX_H,
-                          R8,  R8_H,
-                          R9,  R9_H,
-                          R10, R10_H,
-                          R11, R11_H,
-                          R13, R13_H,
-                          R14, R14_H);
-
-// Dynamic register class that selects between long_reg_no_rbp and long_reg_with_rbp.
-reg_class_dynamic long_reg(long_reg_no_rbp, long_reg_with_rbp, %{ PreserveFramePointer %});
-
-// Class for all long registers (excluding RAX, RDX and RSP)
-reg_class long_no_rax_rdx_reg_with_rbp(RBP, RBP_H,
-                                       RDI, RDI_H,
-                                       RSI, RSI_H,
-                                       RCX, RCX_H,
-                                       RBX, RBX_H,
-                                       R8,  R8_H,
-                                       R9,  R9_H,
-                                       R10, R10_H,
-                                       R11, R11_H,
-                                       R13, R13_H,
-                                       R14, R14_H);
-
-// Class for all long registers (excluding RAX, RDX, RSP, and RBP)
-reg_class long_no_rax_rdx_reg_no_rbp(RDI, RDI_H,
-                                     RSI, RSI_H,
-                                     RCX, RCX_H,
-                                     RBX, RBX_H,
-                                     R8,  R8_H,
-                                     R9,  R9_H,
-                                     R10, R10_H,
-                                     R11, R11_H,
-                                     R13, R13_H,
-                                     R14, R14_H);
-
-// Dynamic register class that selects between long_no_rax_rdx_reg_no_rbp and long_no_rax_rdx_reg_with_rbp.
-reg_class_dynamic long_no_rax_rdx_reg(long_no_rax_rdx_reg_no_rbp, long_no_rax_rdx_reg_with_rbp, %{ PreserveFramePointer %});
-
-// Class for all long registers (excluding RCX and RSP)
-reg_class long_no_rcx_reg_with_rbp(RBP, RBP_H,
-                                   RDI, RDI_H,
-                                   RSI, RSI_H,
-                                   RAX, RAX_H,
-                                   RDX, RDX_H,
-                                   RBX, RBX_H,
-                                   R8,  R8_H,
-                                   R9,  R9_H,
-                                   R10, R10_H,
-                                   R11, R11_H,
-                                   R13, R13_H,
-                                   R14, R14_H);
-
-// Class for all long registers (excluding RCX, RSP, and RBP)
-reg_class long_no_rcx_reg_no_rbp(RDI, RDI_H,
-                                 RSI, RSI_H,
-                                 RAX, RAX_H,
-                                 RDX, RDX_H,
-                                 RBX, RBX_H,
-                                 R8,  R8_H,
-                                 R9,  R9_H,
-                                 R10, R10_H,
-                                 R11, R11_H,
-                                 R13, R13_H,
-                                 R14, R14_H);
-
-// Dynamic register class that selects between long_no_rcx_reg_no_rbp and long_no_rcx_reg_with_rbp.
-reg_class_dynamic long_no_rcx_reg(long_no_rcx_reg_no_rbp, long_no_rcx_reg_with_rbp, %{ PreserveFramePointer %});
-
 // Singleton class for RAX long register
 reg_class long_rax_reg(RAX, RAX_H);
 
@@ -416,96 +285,6 @@
 // Singleton class for RDX long register
 reg_class long_rdx_reg(RDX, RDX_H);
 
-// Class for all int registers (excluding RSP)
-reg_class int_reg_with_rbp(RAX,
-                           RDX,
-                           RBP,
-                           RDI,
-                           RSI,
-                           RCX,
-                           RBX,
-                           R8,
-                           R9,
-                           R10,
-                           R11,
-                           R13,
-                           R14);
-
-// Class for all int registers (excluding RSP and RBP)
-reg_class int_reg_no_rbp(RAX,
-                         RDX,
-                         RDI,
-                         RSI,
-                         RCX,
-                         RBX,
-                         R8,
-                         R9,
-                         R10,
-                         R11,
-                         R13,
-                         R14);
-
-// Dynamic register class that selects between int_reg_no_rbp and int_reg_with_rbp.
-reg_class_dynamic int_reg(int_reg_no_rbp, int_reg_with_rbp, %{ PreserveFramePointer %});
-
-// Class for all int registers (excluding RCX and RSP)
-reg_class int_no_rcx_reg_with_rbp(RAX,
-                                  RDX,
-                                  RBP,
-                                  RDI,
-                                  RSI,
-                                  RBX,
-                                  R8,
-                                  R9,
-                                  R10,
-                                  R11,
-                                  R13,
-                                  R14);
-
-// Class for all int registers (excluding RCX, RSP, and RBP)
-reg_class int_no_rcx_reg_no_rbp(RAX,
-                                RDX,
-                                RDI,
-                                RSI,
-                                RBX,
-                                R8,
-                                R9,
-                                R10,
-                                R11,
-                                R13,
-                                R14);
-
-// Dynamic register class that selects between int_no_rcx_reg_no_rbp and int_no_rcx_reg_with_rbp.
-reg_class_dynamic int_no_rcx_reg(int_no_rcx_reg_no_rbp, int_no_rcx_reg_with_rbp, %{ PreserveFramePointer %});
-
-// Class for all int registers (excluding RAX, RDX, and RSP)
-reg_class int_no_rax_rdx_reg_with_rbp(RBP,
-                                      RDI,
-                                      RSI,
-                                      RCX,
-                                      RBX,
-                                      R8,
-                                      R9,
-                                      R10,
-                                      R11,
-                                      R13,
-                                      R14);
-
-// Class for all int registers (excluding RAX, RDX, RSP, and RBP)
-reg_class int_no_rax_rdx_reg_no_rbp(RDI,
-                                    RSI,
-                                    RCX,
-                                    RBX,
-                                    R8,
-                                    R9,
-                                    R10,
-                                    R11,
-                                    R13,
-                                    R14);
-
-// Dynamic register class that selects between int_no_rax_rdx_reg_no_rbp and int_no_rax_rdx_reg_with_rbp.
-reg_class_dynamic int_no_rax_rdx_reg(int_no_rax_rdx_reg_no_rbp, int_no_rax_rdx_reg_with_rbp, %{ PreserveFramePointer %});
-
 // Singleton class for RAX int register
 reg_class int_rax_reg(RAX);
 
@@ -529,12 +308,123 @@
 //----------SOURCE BLOCK-------------------------------------------------------
 // This is a block of C++ code which provides values, functions, and
 // definitions necessary in the rest of the architecture description
+source_hpp %{
+
+extern RegMask _ANY_REG_mask;
+extern RegMask _PTR_REG_mask;
+extern RegMask _PTR_REG_NO_RBP_mask;
+extern RegMask _PTR_NO_RAX_REG_mask;
+extern RegMask _PTR_NO_RAX_RBX_REG_mask;
+extern RegMask _LONG_REG_mask;
+extern RegMask _LONG_NO_RAX_RDX_REG_mask;
+extern RegMask _LONG_NO_RCX_REG_mask;
+extern RegMask _INT_REG_mask;
+extern RegMask _INT_NO_RAX_RDX_REG_mask;
+extern RegMask _INT_NO_RCX_REG_mask;
+
+extern RegMask _STACK_OR_PTR_REG_mask;
+extern RegMask _STACK_OR_LONG_REG_mask;
+extern RegMask _STACK_OR_INT_REG_mask;
+
+inline const RegMask& STACK_OR_PTR_REG_mask()  { return _STACK_OR_PTR_REG_mask;  }
+inline const RegMask& STACK_OR_LONG_REG_mask() { return _STACK_OR_LONG_REG_mask; }
+inline const RegMask& STACK_OR_INT_REG_mask()  { return _STACK_OR_INT_REG_mask;  }
+
+%}
+
 source %{
 #define   RELOC_IMM64    Assembler::imm_operand
 #define   RELOC_DISP32   Assembler::disp32_operand
 
 #define __ _masm.
 
+RegMask _ANY_REG_mask;
+RegMask _PTR_REG_mask;
+RegMask _PTR_REG_NO_RBP_mask;
+RegMask _PTR_NO_RAX_REG_mask;
+RegMask _PTR_NO_RAX_RBX_REG_mask;
+RegMask _LONG_REG_mask;
+RegMask _LONG_NO_RAX_RDX_REG_mask;
+RegMask _LONG_NO_RCX_REG_mask;
+RegMask _INT_REG_mask;
+RegMask _INT_NO_RAX_RDX_REG_mask;
+RegMask _INT_NO_RCX_REG_mask;
+RegMask _STACK_OR_PTR_REG_mask;
+RegMask _STACK_OR_LONG_REG_mask;
+RegMask _STACK_OR_INT_REG_mask;
+
+static bool need_r12_heapbase() {
+  return UseCompressedOops || UseCompressedClassPointers;
+}
+
+void reg_mask_init() {
+  // _ALL_REG_mask is generated by adlc from the all_reg register class below.
+  // We derive a number of subsets from it.
+  _ANY_REG_mask = _ALL_REG_mask;
+
+  if (PreserveFramePointer) {
+    _ANY_REG_mask.Remove(OptoReg::as_OptoReg(rbp->as_VMReg()));
+    _ANY_REG_mask.Remove(OptoReg::as_OptoReg(rbp->as_VMReg()->next()));
+  }
+  if (need_r12_heapbase()) {
+    _ANY_REG_mask.Remove(OptoReg::as_OptoReg(r12->as_VMReg()));
+    _ANY_REG_mask.Remove(OptoReg::as_OptoReg(r12->as_VMReg()->next()));
+  }
+
+  _PTR_REG_mask = _ANY_REG_mask;
+  _PTR_REG_mask.Remove(OptoReg::as_OptoReg(rsp->as_VMReg()));
+  _PTR_REG_mask.Remove(OptoReg::as_OptoReg(rsp->as_VMReg()->next()));
+  _PTR_REG_mask.Remove(OptoReg::as_OptoReg(r15->as_VMReg()));
+  _PTR_REG_mask.Remove(OptoReg::as_OptoReg(r15->as_VMReg()->next()));
+
+  _STACK_OR_PTR_REG_mask = _PTR_REG_mask;
+  _STACK_OR_PTR_REG_mask.OR(STACK_OR_STACK_SLOTS_mask());
+
+  _PTR_REG_NO_RBP_mask = _PTR_REG_mask;
+  _PTR_REG_NO_RBP_mask.Remove(OptoReg::as_OptoReg(rbp->as_VMReg()));
+  _PTR_REG_NO_RBP_mask.Remove(OptoReg::as_OptoReg(rbp->as_VMReg()->next()));
+
+  _PTR_NO_RAX_REG_mask = _PTR_REG_mask;
+  _PTR_NO_RAX_REG_mask.Remove(OptoReg::as_OptoReg(rax->as_VMReg()));
+  _PTR_NO_RAX_REG_mask.Remove(OptoReg::as_OptoReg(rax->as_VMReg()->next()));
+
+  _PTR_NO_RAX_RBX_REG_mask = _PTR_NO_RAX_REG_mask;
+  _PTR_NO_RAX_RBX_REG_mask.Remove(OptoReg::as_OptoReg(rbx->as_VMReg()));
+  _PTR_NO_RAX_RBX_REG_mask.Remove(OptoReg::as_OptoReg(rbx->as_VMReg()->next()));
+
+  _LONG_REG_mask = _PTR_REG_mask;
+  _STACK_OR_LONG_REG_mask = _LONG_REG_mask;
+  _STACK_OR_LONG_REG_mask.OR(STACK_OR_STACK_SLOTS_mask());
+
+  _LONG_NO_RAX_RDX_REG_mask = _LONG_REG_mask;
+  _LONG_NO_RAX_RDX_REG_mask.Remove(OptoReg::as_OptoReg(rax->as_VMReg()));
+  _LONG_NO_RAX_RDX_REG_mask.Remove(OptoReg::as_OptoReg(rax->as_VMReg()->next()));
+  _LONG_NO_RAX_RDX_REG_mask.Remove(OptoReg::as_OptoReg(rdx->as_VMReg()));
+  _LONG_NO_RAX_RDX_REG_mask.Remove(OptoReg::as_OptoReg(rdx->as_VMReg()->next()));
+
+  _LONG_NO_RCX_REG_mask = _LONG_REG_mask;
+  _LONG_NO_RCX_REG_mask.Remove(OptoReg::as_OptoReg(rcx->as_VMReg()));
+  _LONG_NO_RCX_REG_mask.Remove(OptoReg::as_OptoReg(rcx->as_VMReg()->next()));
+
+  _INT_REG_mask = _ALL_INT_REG_mask;
+  if (PreserveFramePointer) {
+    _INT_REG_mask.Remove(OptoReg::as_OptoReg(rbp->as_VMReg()));
+  }
+  if (need_r12_heapbase()) {
+    _INT_REG_mask.Remove(OptoReg::as_OptoReg(r12->as_VMReg()));
+  }
+
+  _STACK_OR_INT_REG_mask = _INT_REG_mask;
+  _STACK_OR_INT_REG_mask.OR(STACK_OR_STACK_SLOTS_mask());
+
+  _INT_NO_RAX_RDX_REG_mask = _INT_REG_mask;
+  _INT_NO_RAX_RDX_REG_mask.Remove(OptoReg::as_OptoReg(rax->as_VMReg()));
+  _INT_NO_RAX_RDX_REG_mask.Remove(OptoReg::as_OptoReg(rdx->as_VMReg()));
+
+  _INT_NO_RCX_REG_mask = _INT_REG_mask;
+  _INT_NO_RCX_REG_mask.Remove(OptoReg::as_OptoReg(rcx->as_VMReg()));
+}
+
 static bool generate_vzeroupper(Compile* C) {
   return (VM_Version::supports_vzeroupper() && (C->max_vector_size() > 16 || C->clear_upper_avx() == true)) ? true: false;  // Generate vzeroupper
 }
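
The reg_mask_init() code above clears both halves of each 64-bit register from a mask with paired Remove() calls. A minimal sketch of that pattern, wrapped in a hypothetical remove_reg() helper (not part of this change; it assumes the HotSpot RegMask/OptoReg/Register types):

static void remove_reg(RegMask& mask, Register reg) {
  // A 64-bit register occupies two adjacent OptoReg slots (low and high half),
  // so both must be removed from the mask.
  mask.Remove(OptoReg::as_OptoReg(reg->as_VMReg()));          // low half
  mask.Remove(OptoReg::as_OptoReg(reg->as_VMReg()->next()));  // high half
}
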
--- a/src/hotspot/os/aix/os_aix.inline.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/os/aix/os_aix.inline.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -27,6 +27,7 @@
 #define OS_AIX_OS_AIX_INLINE_HPP
 
 #include "runtime/os.hpp"
+#include "os_posix.inline.hpp"
 
 // System includes
 
--- a/src/hotspot/os/aix/os_perf_aix.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/os/aix/os_perf_aix.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -859,11 +859,7 @@
 
 char* SystemProcessInterface::SystemProcesses::ProcessIterator::allocate_string(const char* str) const {
   if (str != NULL) {
-    size_t len = strlen(str);
-    char* tmp = NEW_C_HEAP_ARRAY(char, len+1, mtInternal);
-    strncpy(tmp, str, len);
-    tmp[len] = '\0';
-    return tmp;
+    return os::strdup_check_oom(str, mtInternal);
   }
   return NULL;
 }
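
This allocate_string() simplification is repeated for the Linux, Solaris, and Windows os_perf files below. For comparison, a plain stand-in for the removed NEW_C_HEAP_ARRAY/strncpy sequence (copy_c_string and the use of malloc are illustrative, not patch code); os::strdup_check_oom() returns the same kind of NUL-terminated C-heap copy, but aborts the VM on allocation failure instead of returning NULL:

#include <stdlib.h>
#include <string.h>

static char* copy_c_string(const char* str) {
  size_t len = strlen(str);
  char* tmp = (char*)malloc(len + 1);   // stands in for NEW_C_HEAP_ARRAY
  if (tmp == NULL) return NULL;
  memcpy(tmp, str, len);
  tmp[len] = '\0';                      // explicit termination, as in the removed code
  return tmp;
}
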
--- a/src/hotspot/os/bsd/os_bsd.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/os/bsd/os_bsd.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -37,6 +37,7 @@
 #include "memory/filemap.hpp"
 #include "oops/oop.inline.hpp"
 #include "os_bsd.inline.hpp"
+#include "os_posix.inline.hpp"
 #include "os_share_bsd.hpp"
 #include "prims/jniFastGetField.hpp"
 #include "prims/jvm_misc.hpp"
--- a/src/hotspot/os/bsd/os_bsd.inline.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/os/bsd/os_bsd.inline.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -26,6 +26,7 @@
 #define OS_BSD_OS_BSD_INLINE_HPP
 
 #include "runtime/os.hpp"
+#include "os_posix.inline.hpp"
 
 // System includes
 
--- a/src/hotspot/os/linux/globals_linux.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/os/linux/globals_linux.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -70,7 +70,12 @@
   diagnostic(bool, DumpPrivateMappingsInCore, true,                     \
           "If true, sets bit 2 of /proc/PID/coredump_filter, thus "     \
           "resulting in file-backed private mappings of the process to "\
-          "be dumped into the corefile, if UseSharedSpaces is true.")   \
+          "be dumped into the corefile.")                               \
+                                                                        \
+  diagnostic(bool, DumpSharedMappingsInCore, true,                      \
+          "If true, sets bit 3 of /proc/PID/coredump_filter, thus "     \
+          "resulting in file-backed shared mappings of the process to " \
+          "be dumped into the corefile.")                               \
                                                                         \
   diagnostic(bool, UseCpuAllocPath, false,                              \
              "Use CPU_ALLOC code path in os::active_processor_count ")
--- a/src/hotspot/os/linux/os_linux.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/os/linux/os_linux.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -38,6 +38,7 @@
 #include "memory/filemap.hpp"
 #include "oops/oop.inline.hpp"
 #include "os_linux.inline.hpp"
+#include "os_posix.inline.hpp"
 #include "os_share_linux.hpp"
 #include "osContainer_linux.hpp"
 #include "prims/jniFastGetField.hpp"
@@ -131,6 +132,7 @@
 
 enum CoredumpFilterBit {
   FILE_BACKED_PVT_BIT = 1 << 2,
+  FILE_BACKED_SHARED_BIT = 1 << 3,
   LARGEPAGES_BIT = 1 << 6,
   DAX_SHARED_BIT = 1 << 8
 };
@@ -1357,11 +1359,9 @@
 void os::abort(bool dump_core, void* siginfo, const void* context) {
   os::shutdown();
   if (dump_core) {
-#if INCLUDE_CDS
-    if (UseSharedSpaces && DumpPrivateMappingsInCore) {
+    if (DumpPrivateMappingsInCore) {
       ClassLoader::close_jrt_image();
     }
-#endif
 #ifndef PRODUCT
     fdStream out(defaultStream::output_fd());
     out.print_raw("Current thread is ");
@@ -3432,8 +3432,6 @@
   return result;
 }
 
-// Set the coredump_filter bits to include largepages in core dump (bit 6)
-//
 // From the coredump_filter documentation:
 //
 // - (bit 0) anonymous private memory
@@ -5131,11 +5129,13 @@
     set_coredump_filter(DAX_SHARED_BIT);
   }
 
-#if INCLUDE_CDS
-  if (UseSharedSpaces && DumpPrivateMappingsInCore) {
+  if (DumpPrivateMappingsInCore) {
     set_coredump_filter(FILE_BACKED_PVT_BIT);
   }
-#endif
+
+  if (DumpSharedMappingsInCore) {
+    set_coredump_filter(FILE_BACKED_SHARED_BIT);
+  }
 
   return JNI_OK;
 }
--- a/src/hotspot/os/linux/os_perf_linux.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/os/linux/os_perf_linux.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -861,11 +861,7 @@
 
 char* SystemProcessInterface::SystemProcesses::ProcessIterator::allocate_string(const char* str) const {
   if (str != NULL) {
-    size_t len = strlen(str);
-    char* tmp = NEW_C_HEAP_ARRAY(char, len+1, mtInternal);
-    strncpy(tmp, str, len);
-    tmp[len] = '\0';
-    return tmp;
+    return os::strdup_check_oom(str, mtInternal);
   }
   return NULL;
 }
@@ -1066,7 +1062,7 @@
 
   snprintf(buf, sizeof(buf), "/sys/class/net/%s/statistics/%s", iface, counter);
 
-  int fd = open(buf, O_RDONLY);
+  int fd = os::open(buf, O_RDONLY, 0);
   if (fd == -1) {
     return -1;
   }
--- a/src/hotspot/os/linux/perfMemory_linux.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/os/linux/perfMemory_linux.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -97,8 +97,8 @@
 
   int result;
 
-  RESTARTABLE(::open(destfile, O_CREAT|O_WRONLY|O_TRUNC, S_IREAD|S_IWRITE),
-              result);;
+  RESTARTABLE(os::open(destfile, O_CREAT|O_WRONLY|O_TRUNC, S_IRUSR|S_IWUSR),
+              result);
   if (result == OS_ERR) {
     if (PrintMiscellaneous && Verbose) {
       warning("Could not create Perfdata save file: %s: %s\n",
@@ -871,7 +871,7 @@
   // Cannot use O_TRUNC here; truncation of an existing file has to happen
   // after the is_file_secure() check below.
   int result;
-  RESTARTABLE(::open(filename, O_RDWR|O_CREAT|O_NOFOLLOW, S_IREAD|S_IWRITE), result);
+  RESTARTABLE(os::open(filename, O_RDWR|O_CREAT|O_NOFOLLOW, S_IRUSR|S_IWUSR), result);
   if (result == OS_ERR) {
     if (PrintMiscellaneous && Verbose) {
       if (errno == ELOOP) {
@@ -949,7 +949,7 @@
 
   // open the file
   int result;
-  RESTARTABLE(::open(filename, oflags), result);
+  RESTARTABLE(os::open(filename, oflags, 0), result);
   if (result == OS_ERR) {
     if (errno == ENOENT) {
       THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
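
The RESTARTABLE macro used in the hunks above retries a system call that fails with EINTR (interrupted by a signal) rather than treating the interruption as an error. The idiom it expresses, as a standalone sketch with an illustrative wrapper name:

#include <errno.h>
#include <fcntl.h>

static int open_restartable(const char* path, int oflags, mode_t mode) {
  int result;
  do {
    result = open(path, oflags, mode);        // or os::open() inside HotSpot
  } while (result == -1 && errno == EINTR);   // retry only on interruption
  return result;
}
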
--- a/src/hotspot/os/posix/os_posix.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/os/posix/os_posix.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -180,20 +180,17 @@
 
   const char name_template[] = "/jvmheap.XXXXXX";
 
-  char *fullname = (char*)os::malloc((strlen(dir) + strlen(name_template) + 1), mtInternal);
+  size_t fullname_len = strlen(dir) + strlen(name_template);
+  char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal);
   if (fullname == NULL) {
     vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
     return -1;
   }
-  (void)strncpy(fullname, dir, strlen(dir)+1);
-  (void)strncat(fullname, name_template, strlen(name_template));
+  int n = snprintf(fullname, fullname_len + 1, "%s%s", dir, name_template);
+  assert((size_t)n == fullname_len, "Unexpected number of characters in string");
 
   os::native_path(fullname);
 
-  sigset_t set, oldset;
-  int ret = sigfillset(&set);
-  assert_with_errno(ret == 0, "sigfillset returned error");
-
   // set the file creation mask.
   mode_t file_mode = S_IRUSR | S_IWUSR;
 
@@ -207,7 +204,7 @@
   }
 
   // delete the name from the filesystem. When 'fd' is closed, the file (and space) will be deleted.
-  ret = unlink(fullname);
+  int ret = unlink(fullname);
   assert_with_errno(ret == 0, "unlink returned error");
 
   os::free(fullname);
@@ -2218,22 +2215,6 @@
   assert_status(status == 0, status, "mutex_destroy");
 }
 
-void os::PlatformMonitor::lock() {
-  int status = pthread_mutex_lock(&_mutex);
-  assert_status(status == 0, status, "mutex_lock");
-}
-
-void os::PlatformMonitor::unlock() {
-  int status = pthread_mutex_unlock(&_mutex);
-  assert_status(status == 0, status, "mutex_unlock");
-}
-
-bool os::PlatformMonitor::try_lock() {
-  int status = pthread_mutex_trylock(&_mutex);
-  assert_status(status == 0 || status == EBUSY, status, "mutex_trylock");
-  return status == 0;
-}
-
 // Must already be locked
 int os::PlatformMonitor::wait(jlong millis) {
   assert(millis >= 0, "negative timeout");
@@ -2262,14 +2243,4 @@
   }
 }
 
-void os::PlatformMonitor::notify() {
-  int status = pthread_cond_signal(&_cond);
-  assert_status(status == 0, status, "cond_signal");
-}
-
-void os::PlatformMonitor::notify_all() {
-  int status = pthread_cond_broadcast(&_cond);
-  assert_status(status == 0, status, "cond_broadcast");
-}
-
 #endif // !SOLARIS
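
The strncpy/strncat-to-snprintf rewrite in create_file_for_heap() above recurs in os_windows.cpp, classFileParser.cpp, classLoaderExt.cpp, and verifier.cpp below. The shared idiom, as a standalone sketch (concat_path and the use of plain malloc are illustrative): compute the exact result length, allocate length + 1 bytes, format with an explicit bound, and assert that snprintf reports exactly that many characters written:

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char* concat_path(const char* dir, const char* name_template) {
  size_t full_len = strlen(dir) + strlen(name_template);
  char* fullname = (char*)malloc(full_len + 1);
  if (fullname == NULL) return NULL;
  int n = snprintf(fullname, full_len + 1, "%s%s", dir, name_template);
  assert((size_t)n == full_len);   // snprintf returns the length it produced
  return fullname;
}
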
--- a/src/hotspot/os/posix/os_posix.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/os/posix/os_posix.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -177,7 +177,7 @@
  * These event objects are type-stable and immortal - we never delete them.
  * Events are associated with a thread for the lifetime of the thread.
  */
-class PlatformEvent : public CHeapObj<mtInternal> {
+class PlatformEvent : public CHeapObj<mtSynchronizer> {
  private:
   double cachePad[4];        // Increase odds that _mutex is sole occupant of cache line
   volatile int _event;       // Event count/permit: -1, 0 or 1
@@ -212,7 +212,7 @@
 // API updates of course). But Parker methods use fastpaths that break that
 // level of encapsulation - so combining the two remains a future project.
 
-class PlatformParker : public CHeapObj<mtInternal> {
+class PlatformParker : public CHeapObj<mtSynchronizer> {
  protected:
   enum {
     REL_INDEX = 0,
@@ -230,7 +230,7 @@
 };
 
 // Platform specific implementation that underpins VM Monitor/Mutex class
-class PlatformMonitor : public CHeapObj<mtInternal> {
+class PlatformMonitor : public CHeapObj<mtSynchronizer> {
  private:
   pthread_mutex_t _mutex; // Native mutex for locking
   pthread_cond_t  _cond;  // Native condition variable for blocking
--- a/src/hotspot/os/posix/os_posix.inline.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/os/posix/os_posix.inline.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -42,6 +42,39 @@
 inline int os::Posix::clock_getres(clockid_t clock_id, struct timespec *tp) {
   return _clock_getres != NULL ? _clock_getres(clock_id, tp) : -1;
 }
+
 #endif // SUPPORTS_CLOCK_MONOTONIC
 
+#ifndef SOLARIS
+
+// Platform Monitor implementation
+
+inline void os::PlatformMonitor::lock() {
+  int status = pthread_mutex_lock(&_mutex);
+  assert_status(status == 0, status, "mutex_lock");
+}
+
+inline void os::PlatformMonitor::unlock() {
+  int status = pthread_mutex_unlock(&_mutex);
+  assert_status(status == 0, status, "mutex_unlock");
+}
+
+inline bool os::PlatformMonitor::try_lock() {
+  int status = pthread_mutex_trylock(&_mutex);
+  assert_status(status == 0 || status == EBUSY, status, "mutex_trylock");
+  return status == 0;
+}
+
+inline void os::PlatformMonitor::notify() {
+  int status = pthread_cond_signal(&_cond);
+  assert_status(status == 0, status, "cond_signal");
+}
+
+inline void os::PlatformMonitor::notify_all() {
+  int status = pthread_cond_broadcast(&_cond);
+  assert_status(status == 0, status, "cond_broadcast");
+}
+
+#endif // !SOLARIS
+
 #endif // OS_POSIX_OS_POSIX_INLINE_HPP
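
The PlatformMonitor operations gathered in this inline header (and the Windows equivalents below) implement the usual mutex/condition-variable monitor protocol. A self-contained usage sketch of that protocol in portable C++, where std::mutex and std::condition_variable stand in for the platform primitives and work_ready is an illustrative predicate:

#include <condition_variable>
#include <mutex>

std::mutex mtx;                  // plays the role of the monitor's _mutex
std::condition_variable cond;    // plays the role of _cond
bool work_ready = false;

void waiter() {
  std::unique_lock<std::mutex> lock(mtx);  // lock()
  while (!work_ready) {                    // re-check the predicate after every wakeup
    cond.wait(lock);                       // wait(); returns with the lock held
  }
}                                          // unlock() on scope exit

void signaller() {
  std::lock_guard<std::mutex> lock(mtx);   // lock()
  work_ready = true;
  cond.notify_one();                       // notify(); notify_all() wakes every waiter
}                                          // unlock() on scope exit
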
--- a/src/hotspot/os/solaris/os_perf_solaris.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/os/solaris/os_perf_solaris.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -74,7 +74,7 @@
 
   int fd = -1;
 
-  if ((fd = open(path, O_RDONLY)) < 0) {
+  if ((fd = os::open(path, O_RDONLY, 0)) < 0) {
     return OS_ERR;
   }
   if (pread(fd, info, s, o) != s) {
@@ -544,11 +544,7 @@
 
 char* SystemProcessInterface::SystemProcesses::ProcessIterator::allocate_string(const char* str) const {
   if (str != NULL) {
-    size_t len = strlen(str);
-    char* tmp = NEW_C_HEAP_ARRAY(char, len+1, mtInternal);
-    strncpy(tmp, str, len);
-    tmp[len] = '\0';
-    return tmp;
+    return os::strdup_check_oom(str, mtInternal);
   }
   return NULL;
 }
--- a/src/hotspot/os/solaris/os_solaris.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/os/solaris/os_solaris.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -281,7 +281,7 @@
 
 };
 
-class PlatformEvent : public CHeapObj<mtInternal> {
+class PlatformEvent : public CHeapObj<mtSynchronizer> {
  private:
   double CachePad[4];   // increase odds that _mutex is sole occupant of cache line
   volatile int _Event;
@@ -317,7 +317,7 @@
   void unpark();
 };
 
-class PlatformParker : public CHeapObj<mtInternal> {
+class PlatformParker : public CHeapObj<mtSynchronizer> {
  protected:
   mutex_t _mutex[1];
   cond_t  _cond[1];
@@ -336,7 +336,7 @@
 };
 
 // Platform specific implementation that underpins VM Monitor/Mutex class
-class PlatformMonitor : public CHeapObj<mtInternal> {
+class PlatformMonitor : public CHeapObj<mtSynchronizer> {
  private:
   mutex_t _mutex; // Native mutex for locking
   cond_t  _cond;  // Native condition variable for blocking
--- a/src/hotspot/os/windows/os_perf_windows.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/os/windows/os_perf_windows.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1253,14 +1253,7 @@
 
 char* SystemProcessInterface::SystemProcesses::ProcessIterator::allocate_string(const char* str) const {
   if (str != NULL) {
-    size_t len = strlen(str);
-    char* tmp = NEW_C_HEAP_ARRAY(char, len+1, mtInternal);
-    if (NULL == tmp) {
-      return NULL;
-    }
-    strncpy(tmp, str, len);
-    tmp[len] = '\0';
-    return tmp;
+    return os::strdup_check_oom(str, mtInternal);
   }
   return NULL;
 }
--- a/src/hotspot/os/windows/os_windows.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/os/windows/os_windows.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -2970,14 +2970,15 @@
 int os::create_file_for_heap(const char* dir) {
 
   const char name_template[] = "/jvmheap.XXXXXX";
-  char *fullname = (char*)os::malloc((strlen(dir) + strlen(name_template) + 1), mtInternal);
+
+  size_t fullname_len = strlen(dir) + strlen(name_template);
+  char *fullname = (char*)os::malloc(fullname_len + 1, mtInternal);
   if (fullname == NULL) {
     vm_exit_during_initialization(err_msg("Malloc failed during creation of backing file for heap (%s)", os::strerror(errno)));
     return -1;
   }
-
-  (void)strncpy(fullname, dir, strlen(dir)+1);
-  (void)strncat(fullname, name_template, strlen(name_template));
+  int n = snprintf(fullname, fullname_len + 1, "%s%s", dir, name_template);
+  assert((size_t)n == fullname_len, "Unexpected number of characters in string");
 
   os::native_path(fullname);
 
@@ -5319,27 +5320,6 @@
 
 // Platform Monitor implementation
 
-os::PlatformMonitor::PlatformMonitor() {
-  InitializeConditionVariable(&_cond);
-  InitializeCriticalSection(&_mutex);
-}
-
-os::PlatformMonitor::~PlatformMonitor() {
-  DeleteCriticalSection(&_mutex);
-}
-
-void os::PlatformMonitor::lock() {
-  EnterCriticalSection(&_mutex);
-}
-
-void os::PlatformMonitor::unlock() {
-  LeaveCriticalSection(&_mutex);
-}
-
-bool os::PlatformMonitor::try_lock() {
-  return TryEnterCriticalSection(&_mutex);
-}
-
 // Must already be locked
 int os::PlatformMonitor::wait(jlong millis) {
   assert(millis >= 0, "negative timeout");
@@ -5358,14 +5338,6 @@
   return ret;
 }
 
-void os::PlatformMonitor::notify() {
-  WakeConditionVariable(&_cond);
-}
-
-void os::PlatformMonitor::notify_all() {
-  WakeAllConditionVariable(&_cond);
-}
-
 // Run the specified command in a separate process. Return its exit value,
 // or -1 on failure (e.g. can't create a new process).
 int os::fork_and_exec(char* cmd, bool use_vfork_if_available) {
--- a/src/hotspot/os/windows/os_windows.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/os/windows/os_windows.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -148,7 +148,7 @@
   static volatile intptr_t _crash_mux;
 };
 
-class PlatformEvent : public CHeapObj<mtInternal> {
+class PlatformEvent : public CHeapObj<mtSynchronizer> {
   private:
     double CachePad [4] ;   // increase odds that _Event is sole occupant of cache line
     volatile int _Event ;
@@ -174,7 +174,7 @@
 
 
 
-class PlatformParker : public CHeapObj<mtInternal> {
+class PlatformParker : public CHeapObj<mtSynchronizer> {
   protected:
     HANDLE _ParkEvent ;
 
@@ -188,7 +188,7 @@
 } ;
 
 // Platform specific implementation that underpins VM Monitor/Mutex class
-class PlatformMonitor : public CHeapObj<mtInternal> {
+class PlatformMonitor : public CHeapObj<mtSynchronizer> {
  private:
   CRITICAL_SECTION   _mutex; // Native mutex for locking
   CONDITION_VARIABLE _cond;  // Native condition variable for blocking
--- a/src/hotspot/os/windows/os_windows.inline.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/os/windows/os_windows.inline.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -86,4 +86,35 @@
   win32::exit_process_or_thread(win32::EPT_PROCESS, num);
 }
 
+// Platform Monitor implementation
+
+inline os::PlatformMonitor::PlatformMonitor() {
+  InitializeConditionVariable(&_cond);
+  InitializeCriticalSection(&_mutex);
+}
+
+inline os::PlatformMonitor::~PlatformMonitor() {
+  DeleteCriticalSection(&_mutex);
+}
+
+inline void os::PlatformMonitor::lock() {
+  EnterCriticalSection(&_mutex);
+}
+
+inline void os::PlatformMonitor::unlock() {
+  LeaveCriticalSection(&_mutex);
+}
+
+inline bool os::PlatformMonitor::try_lock() {
+  return TryEnterCriticalSection(&_mutex);
+}
+
+inline void os::PlatformMonitor::notify() {
+  WakeConditionVariable(&_cond);
+}
+
+inline void os::PlatformMonitor::notify_all() {
+  WakeAllConditionVariable(&_cond);
+}
+
 #endif // OS_WINDOWS_OS_WINDOWS_INLINE_HPP
--- a/src/hotspot/os_cpu/linux_x86/gc/z/zBackingFile_linux_x86.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/os_cpu/linux_x86/gc/z/zBackingFile_linux_x86.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -191,7 +191,7 @@
 
   // Try to create an anonymous file using the O_TMPFILE flag. Note that this
   // flag requires kernel >= 3.11. If this fails we fall back to open/unlink.
-  const int fd_anon = open(path.get(), O_TMPFILE|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
+  const int fd_anon = os::open(path.get(), O_TMPFILE|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
   if (fd_anon == -1) {
     ZErrno err;
     log_debug(gc, init)("Failed to create anonymous file in %s (%s)", path.get(),
@@ -217,7 +217,7 @@
   snprintf(filename, sizeof(filename), "%s/%s.%d", path.get(), name, os::current_process_id());
 
   // Create file
-  const int fd = open(filename, O_CREAT|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
+  const int fd = os::open(filename, O_CREAT|O_EXCL|O_RDWR|O_CLOEXEC, S_IRUSR|S_IWUSR);
   if (fd == -1) {
     ZErrno err;
     log_error(gc, init)("Failed to create file %s (%s)", filename, err.to_string());
--- a/src/hotspot/share/adlc/formsopt.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/adlc/formsopt.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -242,9 +242,6 @@
   char* code_snippet() {
     return _code_snippet;
   }
-  void set_stack_version(bool flag) {
-    assert(false, "User defined register classes are not allowed to spill to the stack.");
-  }
   void declare_register_masks(FILE* fp);
   void build_register_masks(FILE* fp) {
     // We do not need to generate register masks because we select at runtime
--- a/src/hotspot/share/c1/c1_Optimizer.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/c1/c1_Optimizer.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -174,6 +174,12 @@
   for_each_phi_fun(t_block, phi, return; );
   for_each_phi_fun(f_block, phi, return; );
 
+  // Only replace safepoint gotos if state_before information is available (i.e., if_ is a safepoint)
+  bool is_safepoint = if_->is_safepoint();
+  if (!is_safepoint && (t_goto->is_safepoint() || f_goto->is_safepoint())) {
+    return;
+  }
+
   // 2) substitute conditional expression
   //    with an IfOp followed by a Goto
   // cut if_ away and get node before
@@ -202,7 +208,7 @@
 
   // append Goto to successor
   ValueStack* state_before = if_->state_before();
-  Goto* goto_ = new Goto(sux, state_before, if_->is_safepoint() || t_goto->is_safepoint() || f_goto->is_safepoint());
+  Goto* goto_ = new Goto(sux, state_before, is_safepoint);
 
   // prepare state for Goto
   ValueStack* goto_state = if_state;
--- a/src/hotspot/share/ci/ciEnv.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/ci/ciEnv.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -1253,7 +1253,7 @@
   static char buffer[O_BUFLEN];
   int ret = jio_snprintf(buffer, O_BUFLEN, "replay_pid%p_compid%d.log", os::current_process_id(), compile_id);
   if (ret > 0) {
-    int fd = open(buffer, O_RDWR | O_CREAT | O_TRUNC, 0666);
+    int fd = os::open(buffer, O_RDWR | O_CREAT | O_TRUNC, 0666);
     if (fd != -1) {
       FILE* replay_data_file = os::open(fd, "w");
       if (replay_data_file != NULL) {
@@ -1271,7 +1271,7 @@
   static char buffer[O_BUFLEN];
   int ret = jio_snprintf(buffer, O_BUFLEN, "inline_pid%p_compid%d.log", os::current_process_id(), compile_id);
   if (ret > 0) {
-    int fd = open(buffer, O_RDWR | O_CREAT | O_TRUNC, 0666);
+    int fd = os::open(buffer, O_RDWR | O_CREAT | O_TRUNC, 0666);
     if (fd != -1) {
       FILE* inline_data_file = os::open(fd, "w");
       if (inline_data_file != NULL) {
--- a/src/hotspot/share/ci/ciReplay.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/ci/ciReplay.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -33,6 +33,7 @@
 #include "memory/allocation.inline.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/constantPool.hpp"
 #include "oops/method.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/fieldDescriptor.inline.hpp"
--- a/src/hotspot/share/classfile/classFileParser.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/classfile/classFileParser.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -60,6 +60,7 @@
 #include "runtime/arguments.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/javaCalls.hpp"
+#include "runtime/os.hpp"
 #include "runtime/perfData.hpp"
 #include "runtime/reflection.hpp"
 #include "runtime/safepointVerifiers.hpp"
@@ -5743,16 +5744,13 @@
     ClassLoader::package_from_name(unsafe_anonymous_host->name()->as_C_string(), NULL);
 
   if (host_pkg_name != NULL) {
-    size_t host_pkg_len = strlen(host_pkg_name);
+    int host_pkg_len = (int)strlen(host_pkg_name);
     int class_name_len = _class_name->utf8_length();
-    char* new_anon_name =
-      NEW_RESOURCE_ARRAY(char, host_pkg_len + 1 + class_name_len);
-    // Copy host package name and trailing /.
-    strncpy(new_anon_name, host_pkg_name, host_pkg_len);
-    new_anon_name[host_pkg_len] = '/';
-    // Append unsafe anonymous class name. The unsafe anonymous class name can contain odd
-    // characters.  So, do a strncpy instead of using sprintf("%s...").
-    strncpy(new_anon_name + host_pkg_len + 1, (char *)_class_name->base(), class_name_len);
+    int symbol_len = host_pkg_len + 1 + class_name_len;
+    char* new_anon_name = NEW_RESOURCE_ARRAY(char, symbol_len + 1);
+    int n = os::snprintf(new_anon_name, symbol_len + 1, "%s/%.*s",
+                         host_pkg_name, class_name_len, _class_name->base());
+    assert(n == symbol_len, "Unexpected number of characters in string");
 
     // Decrement old _class_name to avoid leaking.
     _class_name->decrement_refcount();
@@ -5761,9 +5759,7 @@
     // The new class name is created with a refcount of one. When installed into the InstanceKlass,
     // it'll be two and when the ClassFileParser destructor runs, it'll go back to one and get deleted
     // when the class is unloaded.
-    _class_name = SymbolTable::new_symbol(new_anon_name,
-                                          (int)host_pkg_len + 1 + class_name_len,
-                                          CHECK);
+    _class_name = SymbolTable::new_symbol(new_anon_name, symbol_len, CHECK);
   }
 }
 
--- a/src/hotspot/share/classfile/classLoader.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/classfile/classLoader.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -101,7 +101,6 @@
 static JImageFindResource_t            JImageFindResource     = NULL;
 static JImageGetResource_t             JImageGetResource      = NULL;
 static JImageResourceIterator_t        JImageResourceIterator = NULL;
-static JImage_ResourcePath_t           JImageResourcePath     = NULL;
 
 // Globals
 
@@ -621,13 +620,14 @@
   update_module_path_entry_list(path, THREAD);
 }
 
+#endif // INCLUDE_CDS
+
 void ClassLoader::close_jrt_image() {
-  assert(ClassLoader::has_jrt_entry(), "Not applicable for exploded builds");
+  // Not applicable for exploded builds
+  if (!ClassLoader::has_jrt_entry()) return;
   _jrt_entry->close_jimage();
 }
 
-#endif // INCLUDE_CDS
-
 // Construct the array of module/path pairs as specified to --patch-module
 // for the boot loader to search ahead of the jimage, if the class being
 // loaded is defined to a module that has been specified to --patch-module.
@@ -1094,8 +1094,6 @@
   guarantee(JImageGetResource != NULL, "function JIMAGE_GetResource not found");
   JImageResourceIterator = CAST_TO_FN_PTR(JImageResourceIterator_t, os::dll_lookup(handle, "JIMAGE_ResourceIterator"));
   guarantee(JImageResourceIterator != NULL, "function JIMAGE_ResourceIterator not found");
-  JImageResourcePath = CAST_TO_FN_PTR(JImage_ResourcePath_t, os::dll_lookup(handle, "JIMAGE_ResourcePath"));
-  guarantee(JImageResourcePath != NULL, "function JIMAGE_ResourcePath not found");
 }
 
 jboolean ClassLoader::decompress(void *in, u8 inSize, void *out, u8 outSize, char **pmsg) {
--- a/src/hotspot/share/classfile/classLoader.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/classfile/classLoader.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -247,12 +247,12 @@
 
   static void load_zip_library();
   static void load_jimage_library();
+
+ public:
   static ClassPathEntry* create_class_path_entry(const char *path, const struct stat* st,
                                                  bool throw_exception,
                                                  bool is_boot_append, TRAPS);
 
- public:
-
   // If the package for the fully qualified class name is in the boot
   // loader's package entry table then add_package() sets the classpath_index
   // field so that get_system_package() will know to return a non-null value
--- a/src/hotspot/share/classfile/classLoaderExt.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/classfile/classLoaderExt.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -203,13 +203,13 @@
         file_end = end;
       }
 
-      int name_len = (int)strlen(file_start);
+      size_t name_len = strlen(file_start);
       if (name_len > 0) {
         ResourceMark rm(THREAD);
-        char* libname = NEW_RESOURCE_ARRAY(char, dir_len + name_len + 1);
-        *libname = 0;
-        strncat(libname, dir_name, dir_len);
-        strncat(libname, file_start, name_len);
+        size_t libname_len = dir_len + name_len;
+        char* libname = NEW_RESOURCE_ARRAY(char, libname_len + 1);
+        int n = os::snprintf(libname, libname_len + 1, "%.*s%s", dir_len, dir_name, file_start);
+        assert((size_t)n == libname_len, "Unexpected number of characters in string");
         trace_class_path("library = ", libname);
         ClassLoader::update_class_path_entry_list(libname, true, false);
       }
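
Both this hunk and the classFileParser.cpp change earlier rely on the %.*s conversion, which copies exactly the given number of bytes from a buffer that need not be NUL-terminated (a Symbol's byte array in the class-file case). A small standalone illustration with made-up data:

#include <stdio.h>

int main() {
  const char bytes[] = {'F', 'o', 'o', 'X', 'X'};   // deliberately not NUL-terminated
  char out[32];
  int n = snprintf(out, sizeof(out), "com/example/%.*s", 3, bytes);
  printf("%s (%d)\n", out, n);                      // prints: com/example/Foo (15)
  return 0;
}
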
--- a/src/hotspot/share/classfile/compactHashtable.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/classfile/compactHashtable.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -198,7 +198,7 @@
     quit("Unable to get hashtable dump file size", filename);
   }
   _size = st.st_size;
-  _fd = open(filename, O_RDONLY | O_BINARY, 0);
+  _fd = os::open(filename, O_RDONLY | O_BINARY, 0);
   if (_fd < 0) {
     quit("Unable to open hashtable dump file", filename);
   }
--- a/src/hotspot/share/classfile/klassFactory.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/classfile/klassFactory.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -46,22 +46,22 @@
                                           InstanceKlass* ik,
                                           Symbol* class_name,
                                           Handle class_loader,
-                                          Handle protection_domain, TRAPS) {
+                                          Handle protection_domain,
+                                          const ClassFileStream *cfs,
+                                          TRAPS) {
 #if INCLUDE_CDS && INCLUDE_JVMTI
   assert(ik != NULL, "sanity");
   assert(ik->is_shared(), "expecting a shared class");
-
   if (JvmtiExport::should_post_class_file_load_hook()) {
     assert(THREAD->is_Java_thread(), "must be JavaThread");
 
     // Post the CFLH
     JvmtiCachedClassFileData* cached_class_file = NULL;
-    JvmtiCachedClassFileData* archived_class_data = ik->get_archived_class_data();
-    assert(archived_class_data != NULL, "shared class has no archived class data");
-    unsigned char* ptr =
-        VM_RedefineClasses::get_cached_class_file_bytes(archived_class_data);
-    unsigned char* end_ptr =
-        ptr + VM_RedefineClasses::get_cached_class_file_len(archived_class_data);
+    if (cfs == NULL) {
+      cfs = FileMapInfo::open_stream_for_jvmti(ik, CHECK_NULL);
+    }
+    unsigned char* ptr = (unsigned char*)cfs->buffer();
+    unsigned char* end_ptr = ptr + cfs->length();
     unsigned char* old_ptr = ptr;
     JvmtiExport::post_class_file_load_hook(class_name,
                                            class_loader,
@@ -75,25 +75,9 @@
       ClassLoaderData* loader_data =
         ClassLoaderData::class_loader_data(class_loader());
       int path_index = ik->shared_classpath_index();
-      const char* pathname;
-      if (path_index < 0) {
-        // shared classes loaded by user defined class loader
-        // do not have shared_classpath_index
-        ModuleEntry* mod_entry = ik->module();
-        if (mod_entry != NULL && (mod_entry->location() != NULL)) {
-          ResourceMark rm;
-          pathname = (const char*)(mod_entry->location()->as_C_string());
-        } else {
-          pathname = "";
-        }
-      } else {
-        SharedClassPathEntry* ent =
-          (SharedClassPathEntry*)FileMapInfo::shared_path(path_index);
-        pathname = ent == NULL ? NULL : ent->name();
-      }
       ClassFileStream* stream = new ClassFileStream(ptr,
                                                     end_ptr - ptr,
-                                                    pathname,
+                                                    cfs->source(),
                                                     ClassFileStream::verify);
       ClassFileParser parser(stream,
                              class_name,
@@ -236,24 +220,6 @@
 #if INCLUDE_CDS
   if (DumpSharedSpaces) {
     ClassLoader::record_result(result, stream, THREAD);
-#if INCLUDE_JVMTI
-    assert(cached_class_file == NULL, "Sanity");
-    // Archive the class stream data into the optional data section
-    JvmtiCachedClassFileData *p;
-    int len;
-    const unsigned char *bytes;
-    // event based tracing might set cached_class_file
-    if ((bytes = result->get_cached_class_file_bytes()) != NULL) {
-      len = result->get_cached_class_file_len();
-    } else {
-      len = stream->length();
-      bytes = stream->buffer();
-    }
-    p = (JvmtiCachedClassFileData*)os::malloc(offset_of(JvmtiCachedClassFileData, data) + len, mtInternal);
-    p->length = len;
-    memcpy(p->data, bytes, len);
-    result->set_archived_class_data(p);
-#endif // INCLUDE_JVMTI
   }
 #endif // INCLUDE_CDS
 
--- a/src/hotspot/share/classfile/klassFactory.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/classfile/klassFactory.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -80,7 +80,9 @@
                                           InstanceKlass* ik,
                                           Symbol* class_name,
                                           Handle class_loader,
-                                          Handle protection_domain, TRAPS);
+                                          Handle protection_domain,
+                                          const ClassFileStream *cfs,
+                                          TRAPS);
 };
 
 #endif // SHARE_CLASSFILE_KLASSFACTORY_HPP
--- a/src/hotspot/share/classfile/stackMapTable.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/classfile/stackMapTable.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -26,6 +26,7 @@
 #include "classfile/stackMapTable.hpp"
 #include "classfile/verifier.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/constantPool.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/fieldType.hpp"
 #include "runtime/handles.inline.hpp"
--- a/src/hotspot/share/classfile/symbolTable.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/classfile/symbolTable.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -436,18 +436,16 @@
   }
 }
 
-void SymbolTable::add(ClassLoaderData* loader_data, const constantPoolHandle& cp,
-                      int names_count, const char** names, int* lengths,
-                      int* cp_indices, unsigned int* hashValues, TRAPS) {
+void SymbolTable::new_symbols(ClassLoaderData* loader_data, const constantPoolHandle& cp,
+                              int names_count, const char** names, int* lengths,
+                              int* cp_indices, unsigned int* hashValues, TRAPS) {
   bool c_heap = !loader_data->is_the_null_class_loader_data();
   for (int i = 0; i < names_count; i++) {
     const char *name = names[i];
     int len = lengths[i];
     unsigned int hash = hashValues[i];
-    Symbol* sym = SymbolTable::the_table()->lookup_common(name, len, hash);
-    if (sym == NULL) {
-      sym = SymbolTable::the_table()->do_add_if_needed(name, len, hash, c_heap, CHECK);
-    }
+    assert(SymbolTable::the_table()->lookup_shared(name, len, hash) == NULL, "must have checked already");
+    Symbol* sym = SymbolTable::the_table()->do_add_if_needed(name, len, hash, c_heap, CHECK);
     assert(sym->refcount() != 0, "lookup should have incremented the count");
     cp->symbol_at_put(cp_indices[i], sym);
   }
--- a/src/hotspot/share/classfile/symbolTable.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/classfile/symbolTable.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -144,18 +144,11 @@
   Symbol* do_add_if_needed(const char* name, int len, uintx hash, bool heap, TRAPS);
 
   // Adding elements
-  static void add(ClassLoaderData* loader_data,
-                  const constantPoolHandle& cp, int names_count,
-                  const char** names, int* lengths, int* cp_indices,
-                  unsigned int* hashValues, TRAPS);
-
   static void new_symbols(ClassLoaderData* loader_data,
                           const constantPoolHandle& cp, int names_count,
                           const char** name, int* lengths,
                           int* cp_indices, unsigned int* hashValues,
-                          TRAPS) {
-    add(loader_data, cp, names_count, name, lengths, cp_indices, hashValues, THREAD);
-  }
+                          TRAPS);
 
   static Symbol* lookup_shared(const char* name, int len, unsigned int hash);
   Symbol* lookup_dynamic(const char* name, int len, unsigned int hash);
--- a/src/hotspot/share/classfile/systemDictionary.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/classfile/systemDictionary.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -1174,7 +1174,7 @@
                                                         TRAPS) {
   InstanceKlass* ik = SystemDictionaryShared::find_builtin_class(class_name);
   if (ik != NULL && ik->is_shared_boot_class()) {
-    return load_shared_class(ik, Handle(), Handle(), THREAD);
+    return load_shared_class(ik, Handle(), Handle(), NULL, THREAD);
   }
   return NULL;
 }
@@ -1274,7 +1274,9 @@
 
 InstanceKlass* SystemDictionary::load_shared_class(InstanceKlass* ik,
                                                    Handle class_loader,
-                                                   Handle protection_domain, TRAPS) {
+                                                   Handle protection_domain,
+                                                   const ClassFileStream *cfs,
+                                                   TRAPS) {
 
   if (ik != NULL) {
     Symbol* class_name = ik->name();
@@ -1321,7 +1323,7 @@
     }
 
     InstanceKlass* new_ik = KlassFactory::check_shared_class_file_load_hook(
-        ik, class_name, class_loader, protection_domain, CHECK_NULL);
+        ik, class_name, class_loader, protection_domain, cfs, CHECK_NULL);
     if (new_ik != NULL) {
       // The class is changed by CFLH. Return the new class. The shared class is
       // not used.
--- a/src/hotspot/share/classfile/systemDictionary.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/classfile/systemDictionary.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -628,6 +628,7 @@
   static InstanceKlass* load_shared_class(InstanceKlass* ik,
                                           Handle class_loader,
                                           Handle protection_domain,
+                                          const ClassFileStream *cfs,
                                           TRAPS);
   static InstanceKlass* load_shared_boot_class(Symbol* class_name,
                                                TRAPS);
--- a/src/hotspot/share/classfile/systemDictionaryShared.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -803,7 +803,7 @@
          SystemDictionary::is_platform_class_loader(class_loader()))) {
       Handle protection_domain =
         SystemDictionaryShared::init_security_info(class_loader, ik, CHECK_NULL);
-      return load_shared_class(ik, class_loader, protection_domain, THREAD);
+      return load_shared_class(ik, class_loader, protection_domain, NULL, THREAD);
     }
   }
   return NULL;
@@ -873,13 +873,15 @@
   }
 
   return acquire_class_for_current_thread(record->_klass, class_loader,
-                                          protection_domain, THREAD);
+                                          protection_domain, cfs,
+                                          THREAD);
 }
 
 InstanceKlass* SystemDictionaryShared::acquire_class_for_current_thread(
                    InstanceKlass *ik,
                    Handle class_loader,
                    Handle protection_domain,
+                   const ClassFileStream *cfs,
                    TRAPS) {
   ClassLoaderData* loader_data = ClassLoaderData::class_loader_data(class_loader());
 
@@ -900,7 +902,8 @@
   loader_data->add_class(ik);
 
   // Load and check super/interfaces, restore unsharable info
-  InstanceKlass* shared_klass = load_shared_class(ik, class_loader, protection_domain, THREAD);
+  InstanceKlass* shared_klass = load_shared_class(ik, class_loader, protection_domain,
+                                                  cfs, THREAD);
   if (shared_klass == NULL || HAS_PENDING_EXCEPTION) {
     // TODO: clean up <ik> so it can be used again
     return NULL;
--- a/src/hotspot/share/classfile/systemDictionaryShared.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/classfile/systemDictionaryShared.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -207,6 +207,7 @@
                                  InstanceKlass *ik,
                                  Handle class_loader,
                                  Handle protection_domain,
+                                 const ClassFileStream* cfs,
                                  TRAPS);
   static DumpTimeSharedClassInfo* find_or_allocate_info_for(InstanceKlass* k);
   static void write_dictionary(RunTimeSharedDictionary* dictionary, bool is_builtin);
--- a/src/hotspot/share/classfile/verifier.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/classfile/verifier.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -2981,18 +2981,16 @@
     }
     // add one dimension to component
     length++;
-    arr_sig_str = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, length);
-    arr_sig_str[0] = '[';
-    strncpy(&arr_sig_str[1], component_name, length - 1);
+    arr_sig_str = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, length + 1);
+    int n = os::snprintf(arr_sig_str, length + 1, "[%s", component_name);
+    assert(n == length, "Unexpected number of characters in string");
   } else {         // it's an object or interface
     const char* component_name = component_type.name()->as_utf8();
     // add one dimension to component with 'L' prepended and ';' postpended.
     length = (int)strlen(component_name) + 3;
-    arr_sig_str = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, length);
-    arr_sig_str[0] = '[';
-    arr_sig_str[1] = 'L';
-    strncpy(&arr_sig_str[2], component_name, length - 2);
-    arr_sig_str[length - 1] = ';';
+    arr_sig_str = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, length + 1);
+    int n = os::snprintf(arr_sig_str, length + 1, "[L%s;", component_name);
+    assert(n == length, "Unexpected number of characters in string");
   }
   Symbol* arr_sig = create_temporary_symbol(
     arr_sig_str, length, CHECK_VERIFY(this));
--- a/src/hotspot/share/classfile/verifier.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/classfile/verifier.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -49,7 +49,7 @@
 
   // Return false if the class is loaded by the bootstrap loader,
   // or if defineClass was called requesting skipping verification
-  // -Xverify:all/none override this value
+  // -Xverify:all overrides this value
   static bool should_verify_for(oop class_loader, bool should_verify_class);
 
   // Relax certain access checks to enable some broken 1.1 apps to run on 1.2.
--- a/src/hotspot/share/gc/g1/g1Allocator.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/g1/g1Allocator.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -321,16 +321,26 @@
   }
 }
 
-void G1PLABAllocator::waste(size_t& wasted, size_t& undo_wasted) {
-  wasted = 0;
-  undo_wasted = 0;
+size_t G1PLABAllocator::waste() const {
+  size_t result = 0;
   for (uint state = 0; state < InCSetState::Num; state++) {
     PLAB * const buf = _alloc_buffers[state];
     if (buf != NULL) {
-      wasted += buf->waste();
-      undo_wasted += buf->undo_waste();
+      result += buf->waste();
     }
   }
+  return result;
+}
+
+size_t G1PLABAllocator::undo_waste() const {
+  size_t result = 0;
+  for (uint state = 0; state < InCSetState::Num; state++) {
+    PLAB * const buf = _alloc_buffers[state];
+    if (buf != NULL) {
+      result += buf->undo_waste();
+    }
+  }
+  return result;
 }
 
 bool G1ArchiveAllocator::_archive_check_enabled = false;
--- a/src/hotspot/share/gc/g1/g1Allocator.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/g1/g1Allocator.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -155,7 +155,8 @@
 public:
   G1PLABAllocator(G1Allocator* allocator);
 
-  void waste(size_t& wasted, size_t& undo_wasted);
+  size_t waste() const;
+  size_t undo_waste() const;
 
   // Allocate word_sz words in dest, either directly into the regions or by
   // allocating a new PLAB. Returns the address of the allocated memory, NULL if
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -3282,10 +3282,6 @@
 
       _root_processor->evacuate_roots(pss, worker_id);
 
-      // We pass a weak code blobs closure to the remembered set scanning because we want to avoid
-      // treating the nmethods visited to act as roots for concurrent marking.
-      // We only want to make sure that the oops in the nmethods are adjusted with regard to the
-      // objects copied by the current evacuation.
       _g1h->g1_rem_set()->oops_into_collection_set_do(pss, worker_id);
 
       double strong_roots_sec = os::elapsedTime() - start_strong_roots_sec;
@@ -3303,27 +3299,22 @@
 
         G1GCPhaseTimes* p = _g1h->g1_policy()->phase_times();
         p->add_time_secs(G1GCPhaseTimes::ObjCopy, worker_id, elapsed_sec - term_sec);
+
+        p->record_or_add_thread_work_item(G1GCPhaseTimes::ObjCopy,
+                                          worker_id,
+                                          pss->lab_waste_words() * HeapWordSize,
+                                          G1GCPhaseTimes::ObjCopyLABWaste);
+        p->record_or_add_thread_work_item(G1GCPhaseTimes::ObjCopy,
+                                          worker_id,
+                                          pss->lab_undo_waste_words() * HeapWordSize,
+                                          G1GCPhaseTimes::ObjCopyLABUndoWaste);
+
         p->record_time_secs(G1GCPhaseTimes::Termination, worker_id, term_sec);
         p->record_thread_work_item(G1GCPhaseTimes::Termination, worker_id, evac_term_attempts);
       }
 
       assert(pss->queue_is_empty(), "should be empty");
 
-      if (log_is_enabled(Debug, gc, task, stats)) {
-        MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
-        size_t lab_waste;
-        size_t lab_undo_waste;
-        pss->waste(lab_waste, lab_undo_waste);
-        _g1h->print_termination_stats(worker_id,
-                                      (os::elapsedTime() - start_sec) * 1000.0,   /* elapsed time */
-                                      strong_roots_sec * 1000.0,                  /* strong roots time */
-                                      term_sec * 1000.0,                          /* evac term time */
-                                      evac_term_attempts,                         /* evac term attempts */
-                                      lab_waste,                                  /* alloc buffer waste */
-                                      lab_undo_waste                              /* undo waste */
-                                      );
-      }
-
       // Close the inner scope so that the ResourceMark and HandleMark
       // destructors are executed here and are included as part of the
       // "GC Worker Time".
@@ -3332,31 +3323,6 @@
   }
 };
 
-void G1CollectedHeap::print_termination_stats_hdr() {
-  log_debug(gc, task, stats)("GC Termination Stats");
-  log_debug(gc, task, stats)("     elapsed  --strong roots-- -------termination------- ------waste (KiB)------");
-  log_debug(gc, task, stats)("thr     ms        ms      %%        ms      %%    attempts  total   alloc    undo");
-  log_debug(gc, task, stats)("--- --------- --------- ------ --------- ------ -------- ------- ------- -------");
-}
-
-void G1CollectedHeap::print_termination_stats(uint worker_id,
-                                              double elapsed_ms,
-                                              double strong_roots_ms,
-                                              double term_ms,
-                                              size_t term_attempts,
-                                              size_t alloc_buffer_waste,
-                                              size_t undo_waste) const {
-  log_debug(gc, task, stats)
-              ("%3d %9.2f %9.2f %6.2f "
-               "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
-               SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
-               worker_id, elapsed_ms, strong_roots_ms, strong_roots_ms * 100 / elapsed_ms,
-               term_ms, term_ms * 100 / elapsed_ms, term_attempts,
-               (alloc_buffer_waste + undo_waste) * HeapWordSize / K,
-               alloc_buffer_waste * HeapWordSize / K,
-               undo_waste * HeapWordSize / K);
-}
-
 void G1CollectedHeap::complete_cleaning(BoolObjectClosure* is_alive,
                                         bool class_unloading_occurred) {
   uint num_workers = workers()->active_workers();
@@ -3767,8 +3733,6 @@
     G1RootProcessor root_processor(this, n_workers);
     G1ParTask g1_par_task(this, per_thread_states, _task_queues, &root_processor, n_workers);
 
-    print_termination_stats_hdr();
-
     workers()->run_task(&g1_par_task);
     end_par_time_sec = os::elapsedTime();
 
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -738,16 +738,6 @@
   void pre_evacuate_collection_set();
   void post_evacuate_collection_set(G1EvacuationInfo& evacuation_info, G1ParScanThreadStateSet* pss);
 
-  // Print the header for the per-thread termination statistics.
-  static void print_termination_stats_hdr();
-  // Print actual per-thread termination statistics.
-  void print_termination_stats(uint worker_id,
-                               double elapsed_ms,
-                               double strong_roots_ms,
-                               double term_ms,
-                               size_t term_attempts,
-                               size_t alloc_buffer_waste,
-                               size_t undo_waste) const;
   // Update object copying statistics.
   void record_obj_copy_mem_stats();
 
--- a/src/hotspot/share/gc/g1/g1FullGCOopClosures.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/g1/g1FullGCOopClosures.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -71,26 +71,19 @@
       }
       if (!_g1h->is_in_closed_subset(obj)) {
         HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
-        yy.print_cr("Field " PTR_FORMAT
-            " of live obj " PTR_FORMAT " in region "
-            "[" PTR_FORMAT ", " PTR_FORMAT ")",
-            p2i(p), p2i(_containing_obj),
-            p2i(from->bottom()), p2i(from->end()));
+        yy.print_cr("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region " HR_FORMAT,
+                    p2i(p), p2i(_containing_obj), HR_FORMAT_PARAMS(from));
         print_object(&yy, _containing_obj);
         yy.print_cr("points to obj " PTR_FORMAT " not in the heap",
-            p2i(obj));
+                    p2i(obj));
       } else {
         HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
         HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
-        yy.print_cr("Field " PTR_FORMAT
-            " of live obj " PTR_FORMAT " in region "
-            "[" PTR_FORMAT ", " PTR_FORMAT ")",
-            p2i(p), p2i(_containing_obj),
-            p2i(from->bottom()), p2i(from->end()));
+        yy.print_cr("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region " HR_FORMAT,
+                    p2i(p), p2i(_containing_obj), HR_FORMAT_PARAMS(from));
         print_object(&yy, _containing_obj);
-        yy.print_cr("points to dead obj " PTR_FORMAT " in region "
-            "[" PTR_FORMAT ", " PTR_FORMAT ")",
-            p2i(obj), p2i(to->bottom()), p2i(to->end()));
+        yy.print_cr("points to dead obj " PTR_FORMAT " in region " HR_FORMAT,
+                    p2i(obj), HR_FORMAT_PARAMS(to));
         print_object(&yy, obj);
       }
       yy.print_cr("----------");
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -60,6 +60,9 @@
   _gc_par_phases[SystemDictionaryRoots] = new WorkerDataArray<double>(max_gc_threads, "SystemDictionary Roots (ms):");
   _gc_par_phases[CLDGRoots] = new WorkerDataArray<double>(max_gc_threads, "CLDG Roots (ms):");
   _gc_par_phases[JVMTIRoots] = new WorkerDataArray<double>(max_gc_threads, "JVMTI Roots (ms):");
+#if INCLUDE_AOT
+  _gc_par_phases[AOTCodeRoots] = new WorkerDataArray<double>(max_gc_threads, "AOT Root Scan (ms):");
+#endif
   _gc_par_phases[CMRefRoots] = new WorkerDataArray<double>(max_gc_threads, "CM RefProcessor Roots (ms):");
   _gc_par_phases[WaitForStrongCLD] = new WorkerDataArray<double>(max_gc_threads, "Wait For Strong CLD (ms):");
   _gc_par_phases[WeakCLDRoots] = new WorkerDataArray<double>(max_gc_threads, "Weak CLD Roots (ms):");
@@ -74,9 +77,6 @@
   _gc_par_phases[ScanRS] = new WorkerDataArray<double>(max_gc_threads, "Scan RS (ms):");
   _gc_par_phases[OptScanRS] = new WorkerDataArray<double>(max_gc_threads, "Optional Scan RS (ms):");
   _gc_par_phases[CodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scanning (ms):");
-#if INCLUDE_AOT
-  _gc_par_phases[AOTCodeRoots] = new WorkerDataArray<double>(max_gc_threads, "AOT Root Scanning (ms):");
-#endif
   _gc_par_phases[ObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Object Copy (ms):");
   _gc_par_phases[OptObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Optional Object Copy (ms):");
   _gc_par_phases[Termination] = new WorkerDataArray<double>(max_gc_threads, "Termination (ms):");
@@ -107,6 +107,11 @@
   _update_rs_skipped_cards = new WorkerDataArray<size_t>(max_gc_threads, "Skipped Cards:");
   _gc_par_phases[UpdateRS]->link_thread_work_items(_update_rs_skipped_cards, UpdateRSSkippedCards);
 
+  _obj_copy_lab_waste = new WorkerDataArray<size_t>(max_gc_threads, "LAB Waste");
+  _gc_par_phases[ObjCopy]->link_thread_work_items(_obj_copy_lab_waste, ObjCopyLABWaste);
+  _obj_copy_lab_undo_waste = new WorkerDataArray<size_t>(max_gc_threads, "LAB Undo Waste");
+  _gc_par_phases[ObjCopy]->link_thread_work_items(_obj_copy_lab_undo_waste, ObjCopyLABUndoWaste);
+
   _termination_attempts = new WorkerDataArray<size_t>(max_gc_threads, "Termination Attempts:");
   _gc_par_phases[Termination]->link_thread_work_items(_termination_attempts);
 
@@ -383,15 +388,12 @@
   for (int i = ThreadRoots; i <= SATBFiltering; i++) {
     trace_phase(_gc_par_phases[i]);
   }
+  if (G1HotCardCache::default_use_cache()) {
+    debug_phase(_gc_par_phases[ScanHCC]);
+  }
   debug_phase(_gc_par_phases[UpdateRS]);
-  if (G1HotCardCache::default_use_cache()) {
-    trace_phase(_gc_par_phases[ScanHCC]);
-  }
   debug_phase(_gc_par_phases[ScanRS]);
   debug_phase(_gc_par_phases[CodeRoots]);
-#if INCLUDE_AOT
-  debug_phase(_gc_par_phases[AOTCodeRoots]);
-#endif
   debug_phase(_gc_par_phases[ObjCopy]);
   debug_phase(_gc_par_phases[Termination]);
   debug_phase(_gc_par_phases[Other]);
@@ -503,6 +505,9 @@
       "SystemDictionaryRoots",
       "CLDGRoots",
       "JVMTIRoots",
+#if INCLUDE_AOT
+      "AOTCodeRoots",
+#endif
       "CMRefRoots",
       "WaitForStrongCLD",
       "WeakCLDRoots",
@@ -512,9 +517,6 @@
       "ScanRS",
       "OptScanRS",
       "CodeRoots",
-#if INCLUDE_AOT
-      "AOTCodeRoots",
-#endif
       "ObjCopy",
       "OptObjCopy",
       "Termination",
--- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -55,6 +55,9 @@
     SystemDictionaryRoots,
     CLDGRoots,
     JVMTIRoots,
+#if INCLUDE_AOT
+    AOTCodeRoots,
+#endif
     CMRefRoots,
     WaitForStrongCLD,
     WeakCLDRoots,
@@ -64,9 +67,6 @@
     ScanRS,
     OptScanRS,
     CodeRoots,
-#if INCLUDE_AOT
-    AOTCodeRoots,
-#endif
     ObjCopy,
     OptObjCopy,
     Termination,
@@ -93,6 +93,11 @@
     UpdateRSSkippedCards
   };
 
+  enum GCObjCopyWorkItems {
+    ObjCopyLABWaste,
+    ObjCopyLABUndoWaste
+  };
+
   enum GCOptCSetWorkItems {
       OptCSetScannedCards,
       OptCSetClaimedCards,
@@ -114,6 +119,9 @@
   WorkerDataArray<size_t>* _scan_rs_claimed_cards;
   WorkerDataArray<size_t>* _scan_rs_skipped_cards;
 
+  WorkerDataArray<size_t>* _obj_copy_lab_waste;
+  WorkerDataArray<size_t>* _obj_copy_lab_undo_waste;
+
   WorkerDataArray<size_t>* _opt_cset_scanned_cards;
   WorkerDataArray<size_t>* _opt_cset_claimed_cards;
   WorkerDataArray<size_t>* _opt_cset_skipped_cards;
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -106,8 +106,12 @@
   delete[] _oops_into_optional_regions;
 }
 
-void G1ParScanThreadState::waste(size_t& wasted, size_t& undo_wasted) {
-  _plab_allocator->waste(wasted, undo_wasted);
+size_t G1ParScanThreadState::lab_waste_words() const {
+  return _plab_allocator->waste();
+}
+
+size_t G1ParScanThreadState::lab_undo_waste_words() const {
+  return _plab_allocator->undo_waste();
 }
 
 #ifdef ASSERT
--- a/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/g1/g1ParScanThreadState.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -127,9 +127,8 @@
   G1EvacuationRootClosures* closures() { return _closures; }
   uint worker_id() { return _worker_id; }
 
-  // Returns the current amount of waste due to alignment or not being able to fit
-  // objects within LABs and the undo waste.
-  virtual void waste(size_t& wasted, size_t& undo_wasted);
+  size_t lab_waste_words() const;
+  size_t lab_undo_waste_words() const;
 
   size_t* surviving_young_words() {
     // We add one to hide entry 0 which accumulates surviving words for
--- a/src/hotspot/share/gc/g1/g1RemSet.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/g1/g1RemSet.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -409,6 +409,10 @@
 
 void G1ScanRSForRegionClosure::scan_strong_code_roots(HeapRegion* r) {
   EventGCPhaseParallel event;
+  // We pass a weak code blobs closure to the remembered set scanning because we want to avoid
+  // having the visited nmethods act as roots for concurrent marking.
+  // We only want to make sure that the oops in the nmethods are adjusted with regard to the
+  // objects copied by the current evacuation.
   r->strong_code_roots_do(_pss->closures()->weak_codeblobs());
   event.commit(GCId::current(), _worker_i, G1GCPhaseTimes::phase_name(G1GCPhaseTimes::CodeRoots));
 }
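The new comment captures the weak/strong distinction: the scan must fix up the oops embedded in the nmethods it visits without also keeping those nmethods (and the objects they reference) alive as marking roots. A schematic standalone sketch of what "weak" processing amounts to (hypothetical types, not the G1 closures):

  #include <vector>

  struct Obj     { Obj* forwardee = nullptr; };           // evacuated copy, if any
  struct NMethod { std::vector<Obj**> embedded_oops; };   // slots holding object pointers

  // "Weak" processing: only adjust pointers to objects that were copied.
  void weak_process(NMethod* nm) {
    for (Obj** slot : nm->embedded_oops) {
      if (*slot != nullptr && (*slot)->forwardee != nullptr) {
        *slot = (*slot)->forwardee;                       // adjust, but do not mark
      }
    }
  }

  // "Strong" processing would additionally treat each referenced object as live,
  // e.g. push it onto a marking stack -- which is exactly what this scan avoids.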
--- a/src/hotspot/share/gc/g1/heapRegion.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/g1/heapRegion.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -336,8 +336,8 @@
         // Object is in the region. Check that it's less than top
         if (_hr->top() <= (HeapWord*)obj) {
           // Object is above top
-          log_error(gc, verify)("Object " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ") is above top " PTR_FORMAT,
-                               p2i(obj), p2i(_hr->bottom()), p2i(_hr->end()), p2i(_hr->top()));
+          log_error(gc, verify)("Object " PTR_FORMAT " in region " HR_FORMAT " is above top ",
+                                p2i(obj), HR_FORMAT_PARAMS(_hr));
           _failures = true;
           return;
         }
@@ -415,8 +415,8 @@
   // on its strong code root list
   if (is_empty()) {
     if (strong_code_roots_length > 0) {
-      log_error(gc, verify)("region [" PTR_FORMAT "," PTR_FORMAT "] is empty but has " SIZE_FORMAT " code root entries",
-                            p2i(bottom()), p2i(end()), strong_code_roots_length);
+      log_error(gc, verify)("region " HR_FORMAT " is empty but has " SIZE_FORMAT " code root entries",
+                            HR_FORMAT_PARAMS(this), strong_code_roots_length);
       *failures = true;
     }
     return;
@@ -524,21 +524,22 @@
         ResourceMark rm;
         if (!_g1h->is_in_closed_subset(obj)) {
           HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
-          log.error("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")",
-            p2i(p), p2i(_containing_obj), p2i(from->bottom()), p2i(from->end()));
+          log.error("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region " HR_FORMAT,
+                    p2i(p), p2i(_containing_obj), HR_FORMAT_PARAMS(from));
           LogStream ls(log.error());
           print_object(&ls, _containing_obj);
           HeapRegion* const to = _g1h->heap_region_containing(obj);
-          log.error("points to obj " PTR_FORMAT " in region " HR_FORMAT " remset %s", p2i(obj), HR_FORMAT_PARAMS(to), to->rem_set()->get_state_str());
+          log.error("points to obj " PTR_FORMAT " in region " HR_FORMAT " remset %s",
+                    p2i(obj), HR_FORMAT_PARAMS(to), to->rem_set()->get_state_str());
         } else {
           HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
           HeapRegion* to = _g1h->heap_region_containing((HeapWord*)obj);
-          log.error("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")",
-            p2i(p), p2i(_containing_obj), p2i(from->bottom()), p2i(from->end()));
+          log.error("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region " HR_FORMAT,
+                    p2i(p), p2i(_containing_obj), HR_FORMAT_PARAMS(from));
           LogStream ls(log.error());
           print_object(&ls, _containing_obj);
-          log.error("points to dead obj " PTR_FORMAT " in region [" PTR_FORMAT ", " PTR_FORMAT ")",
-            p2i(obj), p2i(to->bottom()), p2i(to->end()));
+          log.error("points to dead obj " PTR_FORMAT " in region " HR_FORMAT,
+                    p2i(obj), HR_FORMAT_PARAMS(to));
           print_object(&ls, obj);
         }
         log.error("----------");
@@ -593,12 +594,13 @@
             log.error("----------");
           }
           log.error("Missing rem set entry:");
-          log.error("Field " PTR_FORMAT " of obj " PTR_FORMAT ", in region " HR_FORMAT,
-            p2i(p), p2i(_containing_obj), HR_FORMAT_PARAMS(from));
+          log.error("Field " PTR_FORMAT " of obj " PTR_FORMAT " in region " HR_FORMAT,
+                    p2i(p), p2i(_containing_obj), HR_FORMAT_PARAMS(from));
           ResourceMark rm;
           LogStream ls(log.error());
           _containing_obj->print_on(&ls);
-          log.error("points to obj " PTR_FORMAT " in region " HR_FORMAT " remset %s", p2i(obj), HR_FORMAT_PARAMS(to), to->rem_set()->get_state_str());
+          log.error("points to obj " PTR_FORMAT " in region " HR_FORMAT " remset %s",
+                    p2i(obj), HR_FORMAT_PARAMS(to), to->rem_set()->get_state_str());
           if (oopDesc::is_oop(obj)) {
             obj->print_on(&ls);
           }
--- a/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/shenandoah/shenandoahBarrierSet.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -335,7 +335,7 @@
   // Filter marked objects before hitting the SATB queues. The same predicate would
   // be used by SATBMQ::filter to eliminate already marked objects downstream, but
   // filtering here helps to avoid wasteful SATB queueing work to begin with.
-  if (!_heap->requires_marking(obj)) return;
+  if (!_heap->requires_marking<false>(obj)) return;
 
   Thread* thr = Thread::current();
   if (thr->is_Java_thread()) {
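As the comment says, the check deliberately duplicates the downstream queue filter: rejecting already-marked objects before the enqueue is cheaper than queueing them and discarding them later. A minimal sketch of that shape (standalone, hypothetical names):

  #include <unordered_set>
  #include <vector>

  struct Heap {
    std::unordered_set<const void*> marked;                   // stand-in for a mark bitmap
    bool requires_marking(const void* obj) const { return marked.count(obj) == 0; }
  };

  void satb_enqueue(Heap& heap, std::vector<const void*>& queue, const void* obj) {
    if (!heap.requires_marking(obj)) return;                  // filter before queueing
    queue.push_back(obj);                                     // only unmarked objects reach the queue
  }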
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -2639,6 +2639,10 @@
   return memory_pools;
 }
 
+MemoryUsage ShenandoahHeap::memory_usage() {
+  return _memory_pool->get_memory_usage();
+}
+
 void ShenandoahHeap::enter_evacuation() {
   _oom_evac_handler.enter_evacuation();
 }
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -511,6 +511,7 @@
 
   GrowableArray<GCMemoryManager*> memory_managers();
   GrowableArray<MemoryPool*> memory_pools();
+  MemoryUsage memory_usage();
   GCTracer* tracer();
   GCTimer* gc_timer() const;
   CollectorPolicy* collector_policy() const;
@@ -676,6 +677,7 @@
   void reset_mark_bitmap();
 
   // SATB barriers hooks
+  template<bool RESOLVE>
   inline bool requires_marking(const void* entry) const;
   void force_satb_flush_all_threads();
 
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.inline.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -316,8 +316,13 @@
   }
 }
 
+template<bool RESOLVE>
 inline bool ShenandoahHeap::requires_marking(const void* entry) const {
-  return !_marking_context->is_marked(oop(entry));
+  oop obj = oop(entry);
+  if (RESOLVE) {
+    obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
+  }
+  return !_marking_context->is_marked(obj);
 }
 
 template <class T>
--- a/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/shenandoah/shenandoahMemoryPool.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -26,7 +26,7 @@
 
 ShenandoahMemoryPool::ShenandoahMemoryPool(ShenandoahHeap* heap) :
         CollectedMemoryPool("Shenandoah",
-                            heap->capacity(),
+                            heap->initial_capacity(),
                             heap->max_capacity(),
                             true /* support_usage_threshold */),
                             _heap(heap) {}
@@ -37,9 +37,15 @@
   size_t used      = used_in_bytes();
   size_t committed = _heap->committed();
 
+  // These asserts can never fail: max is stable, and all updates to other values never overflow max.
   assert(initial <= max,    "initial: "   SIZE_FORMAT ", max: "       SIZE_FORMAT, initial,   max);
   assert(used <= max,       "used: "      SIZE_FORMAT ", max: "       SIZE_FORMAT, used,      max);
   assert(committed <= max,  "committed: " SIZE_FORMAT ", max: "       SIZE_FORMAT, committed, max);
+
+  // Committed and used are updated concurrently and independently. They can momentarily break
+  // the assert below, which would also fail in downstream code. To avoid that, adjust values
+  // to make sense under the race. See JDK-8207200.
+  committed = MAX2(used, committed);
   assert(used <= committed, "used: "      SIZE_FORMAT ", committed: " SIZE_FORMAT, used,      committed);
 
   return MemoryUsage(initial, used, committed, max);
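Because used and committed are sampled at slightly different times while the GC updates both concurrently, a snapshot can briefly observe used > committed even though each value is individually correct; clamping committed up to used restores the invariant the MemoryUsage consumers assert on (JDK-8207200). The clamp in isolation:

  #include <algorithm>
  #include <cassert>
  #include <cstddef>

  struct Usage { size_t initial, used, committed, max; };

  Usage snapshot(size_t initial, size_t used, size_t committed, size_t max) {
    // used and committed were read at slightly different moments; make the
    // pair self-consistent before handing it to code that asserts used <= committed.
    committed = std::max(used, committed);
    assert(used <= committed && committed <= max);
    return Usage{initial, used, committed, max};
  }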
--- a/src/hotspot/share/gc/shenandoah/shenandoahSATBMarkQueueSet.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/shenandoah/shenandoahSATBMarkQueueSet.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -49,12 +49,9 @@
   return ShenandoahThreadLocalData::satb_mark_queue(t);
 }
 
-static inline bool discard_entry(const void* entry, ShenandoahHeap* heap) {
-  return !heap->requires_marking(entry);
-}
-
+template <bool RESOLVE>
 class ShenandoahSATBMarkQueueFilterFn {
-  ShenandoahHeap* _heap;
+  ShenandoahHeap* const _heap;
 
 public:
   ShenandoahSATBMarkQueueFilterFn(ShenandoahHeap* heap) : _heap(heap) {}
@@ -62,13 +59,17 @@
   // Return true if entry should be filtered out (removed), false if
   // it should be retained.
   bool operator()(const void* entry) const {
-    return discard_entry(entry, _heap);
+    return !_heap->requires_marking<RESOLVE>(entry);
   }
 };
 
 void ShenandoahSATBMarkQueueSet::filter(SATBMarkQueue* queue) {
   assert(_heap != NULL, "SATB queue set not initialized");
-  apply_filter(ShenandoahSATBMarkQueueFilterFn(_heap), queue);
+  if (_heap->has_forwarded_objects()) {
+    apply_filter(ShenandoahSATBMarkQueueFilterFn<true>(_heap), queue);
+  } else {
+    apply_filter(ShenandoahSATBMarkQueueFilterFn<false>(_heap), queue);
+  }
 }
 
 bool ShenandoahSATBMarkQueue::should_enqueue_buffer() {
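The RESOLVE template parameter lets the forwarding-pointer resolution be compiled out of the common (no forwarded objects) path; filter() branches once per queue and then runs the branch-free instantiation for every entry. A standalone sketch of that runtime-to-compile-time dispatch (hypothetical types, not the Shenandoah API):

  #include <cstdio>

  struct Obj { Obj* forwardee; bool marked; };

  template <bool RESOLVE>
  bool requires_marking(const Obj* obj) {
    if (RESOLVE && obj->forwardee != nullptr) {
      obj = obj->forwardee;              // follow the forwarding pointer only when asked to
    }
    return !obj->marked;
  }

  void filter(const Obj* entry, bool has_forwarded_objects) {
    // Branch once here, then run the branch-free instantiation per entry.
    const bool keep = has_forwarded_objects ? requires_marking<true>(entry)
                                            : requires_marking<false>(entry);
    std::printf("keep entry: %s\n", keep ? "yes" : "no");
  }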
--- a/src/hotspot/share/gc/z/vmStructs_z.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/z/vmStructs_z.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +26,7 @@
 
 ZGlobalsForVMStructs::ZGlobalsForVMStructs() :
     _ZGlobalPhase(&ZGlobalPhase),
+    _ZGlobalSeqNum(&ZGlobalSeqNum),
     _ZAddressGoodMask(&ZAddressGoodMask),
     _ZAddressBadMask(&ZAddressBadMask),
     _ZAddressWeakBadMask(&ZAddressWeakBadMask),
--- a/src/hotspot/share/gc/z/vmStructs_z.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/z/vmStructs_z.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -42,6 +42,8 @@
 
   uint32_t* _ZGlobalPhase;
 
+  uint32_t* _ZGlobalSeqNum;
+
   uintptr_t* _ZAddressGoodMask;
   uintptr_t* _ZAddressBadMask;
   uintptr_t* _ZAddressWeakBadMask;
@@ -55,6 +57,7 @@
 #define VM_STRUCTS_ZGC(nonstatic_field, volatile_nonstatic_field, static_field)                      \
   static_field(ZGlobalsForVMStructs,            _instance_p,          ZGlobalsForVMStructs*)         \
   nonstatic_field(ZGlobalsForVMStructs,         _ZGlobalPhase,        uint32_t*)                     \
+  nonstatic_field(ZGlobalsForVMStructs,         _ZGlobalSeqNum,       uint32_t*)                     \
   nonstatic_field(ZGlobalsForVMStructs,         _ZAddressGoodMask,    uintptr_t*)                    \
   nonstatic_field(ZGlobalsForVMStructs,         _ZAddressBadMask,     uintptr_t*)                    \
   nonstatic_field(ZGlobalsForVMStructs,         _ZAddressWeakBadMask, uintptr_t*)                    \
@@ -67,7 +70,10 @@
   nonstatic_field(ZHeap,                        _pagetable,           ZPageTable)                    \
                                                                                                      \
   nonstatic_field(ZPage,                        _type,                const uint8_t)                 \
+  nonstatic_field(ZPage,                        _seqnum,              uint32_t)                      \
   nonstatic_field(ZPage,                        _virtual,             const ZVirtualMemory)          \
+  volatile_nonstatic_field(ZPage,               _top,                 uintptr_t)                     \
+  volatile_nonstatic_field(ZPage,               _refcount,            uint32_t)                      \
   nonstatic_field(ZPage,                        _forwarding,          ZForwardingTable)              \
                                                                                                      \
   nonstatic_field(ZPageAllocator,               _physical,            ZPhysicalMemoryManager)        \
@@ -101,6 +107,7 @@
   declare_constant(ZAddressOffsetShift)                                                              \
   declare_constant(ZAddressOffsetBits)                                                               \
   declare_constant(ZAddressOffsetMask)                                                               \
+  declare_constant(ZAddressOffsetMax)                                                                \
   declare_constant(ZAddressSpaceStart)
 
 #define VM_TYPES_ZGC(declare_type, declare_toplevel_type, declare_integer_type)                      \
--- a/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/z/zBarrierSetNMethod.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -27,12 +27,12 @@
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zLock.inline.hpp"
 #include "gc/z/zOopClosures.hpp"
-#include "gc/z/zNMethodTable.hpp"
+#include "gc/z/zNMethod.hpp"
 #include "gc/z/zThreadLocalData.hpp"
 #include "logging/log.hpp"
 
 bool ZBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
-  ZLocker<ZReentrantLock> locker(ZNMethodTable::lock_for_nmethod(nm));
+  ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
   log_trace(nmethod, barrier)("Entered critical zone for %p", nm);
 
   if (!is_armed(nm)) {
--- a/src/hotspot/share/gc/z/zCollectedHeap.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/z/zCollectedHeap.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,7 +27,7 @@
 #include "gc/z/zCollectedHeap.hpp"
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zHeap.inline.hpp"
-#include "gc/z/zNMethodTable.hpp"
+#include "gc/z/zNMethod.hpp"
 #include "gc/z/zServiceability.hpp"
 #include "gc/z/zStat.hpp"
 #include "gc/z/zUtils.inline.hpp"
@@ -118,10 +118,6 @@
   return is_in(p);
 }
 
-void ZCollectedHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) {
-  // Does nothing, not a parsable heap
-}
-
 HeapWord* ZCollectedHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
   const size_t size_in_bytes = ZUtils::words_to_bytes(align_object_size(requested_size));
   const uintptr_t addr = _heap.alloc_tlab(size_in_bytes);
@@ -259,11 +255,11 @@
 }
 
 void ZCollectedHeap::register_nmethod(nmethod* nm) {
-  ZNMethodTable::register_nmethod(nm);
+  ZNMethod::register_nmethod(nm);
 }
 
 void ZCollectedHeap::unregister_nmethod(nmethod* nm) {
-  ZNMethodTable::unregister_nmethod(nm);
+  ZNMethod::unregister_nmethod(nm);
 }
 
 void ZCollectedHeap::verify_nmethod(nmethod* nm) {
--- a/src/hotspot/share/gc/z/zCollectedHeap.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/z/zCollectedHeap.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,8 +75,6 @@
   virtual bool is_in(const void* p) const;
   virtual bool is_in_closed_subset(const void* p) const;
 
-  virtual void fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap);
-
   virtual HeapWord* mem_allocate(size_t size, bool* gc_overhead_limit_was_exceeded);
   virtual MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
                                                        size_t size,
--- a/src/hotspot/share/gc/z/zInitialize.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/z/zInitialize.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@
 #include "gc/z/zLargePages.hpp"
 #include "gc/z/zNUMA.hpp"
 #include "gc/z/zStat.hpp"
-#include "gc/z/zStatTLAB.hpp"
+#include "gc/z/zThreadLocalAllocBuffer.hpp"
 #include "gc/z/zTracer.hpp"
 #include "logging/log.hpp"
 #include "runtime/vm_version.hpp"
@@ -46,7 +46,7 @@
   ZNUMA::initialize();
   ZCPU::initialize();
   ZStatValue::initialize();
-  ZStatTLAB::initialize();
+  ZThreadLocalAllocBuffer::initialize();
   ZTracer::initialize();
   ZLargePages::initialize();
   ZBarrierSet::set_barrier_set(barrier_set);
--- a/src/hotspot/share/gc/z/zMark.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/z/zMark.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,9 +32,9 @@
 #include "gc/z/zPageTable.inline.hpp"
 #include "gc/z/zRootsIterator.hpp"
 #include "gc/z/zStat.hpp"
-#include "gc/z/zStatTLAB.hpp"
 #include "gc/z/zTask.hpp"
 #include "gc/z/zThread.hpp"
+#include "gc/z/zThreadLocalAllocBuffer.hpp"
 #include "gc/z/zUtils.inline.hpp"
 #include "gc/z/zWorkers.inline.hpp"
 #include "logging/log.hpp"
@@ -121,24 +121,19 @@
 class ZMarkRootsIteratorClosure : public ZRootsIteratorClosure {
 public:
   ZMarkRootsIteratorClosure() {
-    ZStatTLAB::reset();
+    ZThreadLocalAllocBuffer::reset_statistics();
   }
 
   ~ZMarkRootsIteratorClosure() {
-    ZStatTLAB::publish();
+    ZThreadLocalAllocBuffer::publish_statistics();
   }
 
   virtual void do_thread(Thread* thread) {
-    ZRootsIteratorClosure::do_thread(thread);
-
     // Update thread local address bad mask
     ZThreadLocalData::set_address_bad_mask(thread, ZAddressBadMask);
 
     // Retire TLAB
-    if (UseTLAB && thread->is_Java_thread()) {
-      thread->tlab().retire(ZStatTLAB::get());
-      thread->tlab().resize();
-    }
+    ZThreadLocalAllocBuffer::retire(thread);
   }
 
   virtual void do_oop(oop* p) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zNMethod.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -0,0 +1,394 @@
+/*
+ * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "code/relocInfo.hpp"
+#include "code/nmethod.hpp"
+#include "code/icBuffer.hpp"
+#include "gc/shared/barrierSet.hpp"
+#include "gc/shared/barrierSetNMethod.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zLock.inline.hpp"
+#include "gc/z/zNMethod.hpp"
+#include "gc/z/zNMethodData.hpp"
+#include "gc/z/zNMethodTable.hpp"
+#include "gc/z/zOopClosures.inline.hpp"
+#include "gc/z/zTask.hpp"
+#include "gc/z/zWorkers.hpp"
+#include "logging/log.hpp"
+#include "memory/allocation.inline.hpp"
+#include "memory/iterator.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/orderAccess.hpp"
+#include "utilities/debug.hpp"
+
+static ZNMethodData* gc_data(const nmethod* nm) {
+  return nm->gc_data<ZNMethodData>();
+}
+
+static void set_gc_data(nmethod* nm, ZNMethodData* data) {
+  return nm->set_gc_data<ZNMethodData>(data);
+}
+
+void ZNMethod::attach_gc_data(nmethod* nm) {
+  GrowableArray<oop*> immediate_oops;
+  bool non_immediate_oops = false;
+
+  // Find all oops relocations
+  RelocIterator iter(nm);
+  while (iter.next()) {
+    if (iter.type() != relocInfo::oop_type) {
+      // Not an oop
+      continue;
+    }
+
+    oop_Relocation* r = iter.oop_reloc();
+
+    if (!r->oop_is_immediate()) {
+      // Non-immediate oop found
+      non_immediate_oops = true;
+      continue;
+    }
+
+    if (r->oop_value() != NULL) {
+      // Non-NULL immediate oop found. NULL oops can safely be
+      // ignored since the method will be re-registered if they
+      // are later patched to be non-NULL.
+      immediate_oops.push(r->oop_addr());
+    }
+  }
+
+  // Attach GC data to nmethod
+  ZNMethodData* data = gc_data(nm);
+  if (data == NULL) {
+    data = ZNMethodData::create(nm);
+    set_gc_data(nm, data);
+  }
+
+  // Attach oops in GC data
+  ZNMethodDataOops* const new_oops = ZNMethodDataOops::create(immediate_oops, non_immediate_oops);
+  ZNMethodDataOops* const old_oops = data->swap_oops(new_oops);
+  ZNMethodDataOops::destroy(old_oops);
+}
+
+void ZNMethod::detach_gc_data(nmethod* nm) {
+  // Destroy GC data
+  ZNMethodData::destroy(gc_data(nm));
+  set_gc_data(nm, NULL);
+}
+
+ZReentrantLock* ZNMethod::lock_for_nmethod(nmethod* nm) {
+  ZNMethodData* const data = gc_data(nm);
+  if (data == NULL) {
+    return NULL;
+  }
+  return data->lock();
+}
+
+void ZNMethod::log_register(const nmethod* nm) {
+  LogTarget(Trace, gc, nmethod) log;
+  if (!log.is_enabled()) {
+    return;
+  }
+
+  const ZNMethodDataOops* const oops = gc_data(nm)->oops();
+
+  log.print("Register NMethod: %s.%s (" PTR_FORMAT "), "
+            "Compiler: %s, Oops: %d, ImmediateOops: " SIZE_FORMAT ", NonImmediateOops: %s",
+            nm->method()->method_holder()->external_name(),
+            nm->method()->name()->as_C_string(),
+            p2i(nm),
+            nm->compiler_name(),
+            nm->oops_count() - 1,
+            oops->immediates_count(),
+            oops->has_non_immediates() ? "Yes" : "No");
+
+  LogTarget(Trace, gc, nmethod, oops) log_oops;
+  if (!log_oops.is_enabled()) {
+    return;
+  }
+
+  // Print nmethod oops table
+  {
+    oop* const begin = nm->oops_begin();
+    oop* const end = nm->oops_end();
+    for (oop* p = begin; p < end; p++) {
+      log_oops.print("           Oop[" SIZE_FORMAT "] " PTR_FORMAT " (%s)",
+                     (p - begin), p2i(*p), (*p)->klass()->external_name());
+    }
+  }
+
+  // Print nmethod immediate oops
+  {
+    oop** const begin = oops->immediates_begin();
+    oop** const end = oops->immediates_end();
+    for (oop** p = begin; p < end; p++) {
+      log_oops.print("  ImmediateOop[" SIZE_FORMAT "] " PTR_FORMAT " @ " PTR_FORMAT " (%s)",
+                     (p - begin), p2i(**p), p2i(*p), (**p)->klass()->external_name());
+    }
+  }
+}
+
+void ZNMethod::log_unregister(const nmethod* nm) {
+  LogTarget(Debug, gc, nmethod) log;
+  if (!log.is_enabled()) {
+    return;
+  }
+
+  log.print("Unregister NMethod: %s.%s (" PTR_FORMAT ")",
+            nm->method()->method_holder()->external_name(),
+            nm->method()->name()->as_C_string(),
+            p2i(nm));
+}
+
+void ZNMethod::register_nmethod(nmethod* nm) {
+  ResourceMark rm;
+
+  // Create and attach gc data
+  attach_gc_data(nm);
+
+  log_register(nm);
+
+  ZNMethodTable::register_nmethod(nm);
+
+  // Disarm nmethod entry barrier
+  disarm_nmethod(nm);
+}
+
+void ZNMethod::unregister_nmethod(nmethod* nm) {
+  assert(CodeCache_lock->owned_by_self(), "Lock must be held");
+
+  if (Thread::current()->is_Code_cache_sweeper_thread()) {
+    // The sweeper must wait for any ongoing iteration to complete
+    // before it can unregister an nmethod.
+    ZNMethodTable::wait_until_iteration_done();
+  }
+
+  ResourceMark rm;
+
+  log_unregister(nm);
+
+  ZNMethodTable::unregister_nmethod(nm);
+
+  // Destroy and detach gc data
+  detach_gc_data(nm);
+}
+
+void ZNMethod::disarm_nmethod(nmethod* nm) {
+  BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod();
+  if (bs != NULL) {
+    bs->disarm(nm);
+  }
+}
+
+void ZNMethod::nmethod_oops_do(nmethod* nm, OopClosure* cl) {
+  // Process oops table
+  {
+    oop* const begin = nm->oops_begin();
+    oop* const end = nm->oops_end();
+    for (oop* p = begin; p < end; p++) {
+      if (*p != Universe::non_oop_word()) {
+        cl->do_oop(p);
+      }
+    }
+  }
+
+  ZNMethodDataOops* const oops = gc_data(nm)->oops();
+
+  // Process immediate oops
+  {
+    oop** const begin = oops->immediates_begin();
+    oop** const end = oops->immediates_end();
+    for (oop** p = begin; p < end; p++) {
+      if (**p != Universe::non_oop_word()) {
+        cl->do_oop(*p);
+      }
+    }
+  }
+
+  // Process non-immediate oops
+  if (oops->has_non_immediates()) {
+    nm->fix_oop_relocations();
+  }
+}
+
+class ZNMethodToOopsDoClosure : public NMethodClosure {
+private:
+  OopClosure* _cl;
+
+public:
+  ZNMethodToOopsDoClosure(OopClosure* cl) :
+      _cl(cl) {}
+
+  virtual void do_nmethod(nmethod* nm) {
+    ZNMethod::nmethod_oops_do(nm, _cl);
+  }
+};
+
+void ZNMethod::oops_do_begin() {
+  ZNMethodTable::nmethods_do_begin();
+}
+
+void ZNMethod::oops_do_end() {
+  ZNMethodTable::nmethods_do_end();
+}
+
+void ZNMethod::oops_do(OopClosure* cl) {
+  ZNMethodToOopsDoClosure nmethod_cl(cl);
+  ZNMethodTable::nmethods_do(&nmethod_cl);
+}
+
+class ZNMethodUnlinkClosure : public NMethodClosure {
+private:
+  bool          _unloading_occurred;
+  volatile bool _failed;
+
+  void set_failed() {
+    Atomic::store(true, &_failed);
+  }
+
+public:
+  ZNMethodUnlinkClosure(bool unloading_occurred) :
+      _unloading_occurred(unloading_occurred),
+      _failed(false) {}
+
+  virtual void do_nmethod(nmethod* nm) {
+    if (failed()) {
+      return;
+    }
+
+    if (!nm->is_alive()) {
+      return;
+    }
+
+    ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
+
+    if (nm->is_unloading()) {
+      // Unlinking of the dependencies must happen before the
+      // handshake separating unlink and purge.
+      nm->flush_dependencies(false /* delete_immediately */);
+
+      // We don't need to take the lock when unlinking nmethods from
+      // the Method, because it is only concurrently unlinked by
+      // the entry barrier, which acquires the per nmethod lock.
+      nm->unlink_from_method(false /* acquire_lock */);
+      return;
+    }
+
+    // Heal oops and disarm
+    ZNMethodOopClosure cl;
+    ZNMethod::nmethod_oops_do(nm, &cl);
+    ZNMethod::disarm_nmethod(nm);
+
+    // Clear compiled ICs and exception caches
+    if (!nm->unload_nmethod_caches(_unloading_occurred)) {
+      set_failed();
+    }
+  }
+
+  bool failed() const {
+    return Atomic::load(&_failed);
+  }
+};
+
+class ZNMethodUnlinkTask : public ZTask {
+private:
+  ZNMethodUnlinkClosure _cl;
+  ICRefillVerifier*     _verifier;
+
+public:
+  ZNMethodUnlinkTask(bool unloading_occurred, ICRefillVerifier* verifier) :
+      ZTask("ZNMethodUnlinkTask"),
+      _cl(unloading_occurred),
+      _verifier(verifier) {
+    ZNMethodTable::nmethods_do_begin();
+  }
+
+  ~ZNMethodUnlinkTask() {
+    ZNMethodTable::nmethods_do_end();
+  }
+
+  virtual void work() {
+    ICRefillVerifierMark mark(_verifier);
+    ZNMethodTable::nmethods_do(&_cl);
+  }
+
+  bool success() const {
+    return !_cl.failed();
+  }
+};
+
+void ZNMethod::unlink(ZWorkers* workers, bool unloading_occurred) {
+  for (;;) {
+    ICRefillVerifier verifier;
+
+    {
+      ZNMethodUnlinkTask task(unloading_occurred, &verifier);
+      workers->run_concurrent(&task);
+      if (task.success()) {
+        return;
+      }
+    }
+
+    // Cleaning failed because we ran out of transitional IC stubs,
+    // so we have to refill and try again. Refilling requires taking
+    // a safepoint, so we temporarily leave the suspendible thread set.
+    SuspendibleThreadSetLeaver sts;
+    InlineCacheBuffer::refill_ic_stubs();
+  }
+}
+
+class ZNMethodPurgeClosure : public NMethodClosure {
+public:
+  virtual void do_nmethod(nmethod* nm) {
+    if (nm->is_alive() && nm->is_unloading()) {
+      nm->make_unloaded();
+    }
+  }
+};
+
+class ZNMethodPurgeTask : public ZTask {
+private:
+  ZNMethodPurgeClosure _cl;
+
+public:
+  ZNMethodPurgeTask() :
+      ZTask("ZNMethodPurgeTask"),
+      _cl() {
+    ZNMethodTable::nmethods_do_begin();
+  }
+
+  ~ZNMethodPurgeTask() {
+    ZNMethodTable::nmethods_do_end();
+  }
+
+  virtual void work() {
+    ZNMethodTable::nmethods_do(&_cl);
+  }
+};
+
+void ZNMethod::purge(ZWorkers* workers) {
+  ZNMethodPurgeTask task;
+  workers->run_concurrent(&task);
+}
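ZNMethod::unlink retries the whole concurrent pass whenever the closure reports failure (the transitional IC stub buffer ran dry) and refills the stubs in between. The retry-until-success skeleton, stripped of the VM specifics (hypothetical names, standalone):

  #include <cstdio>

  static int g_stubs = 0;                    // shared resource a pass can exhaust

  static bool run_unlink_pass() {
    if (g_stubs == 0) {
      return false;                          // ran out of stubs mid-pass: report failure
    }
    g_stubs--;                               // consume a stub while doing the work
    return true;
  }

  static void refill_stubs() {
    g_stubs = 4;                             // replenish (in the VM this needs a safepoint)
  }

  void unlink_all() {
    for (;;) {
      if (run_unlink_pass()) {
        return;                              // the whole pass succeeded
      }
      refill_stubs();                        // make room and redo the pass from the start
    }
  }

  int main() {
    unlink_all();
    std::printf("unlink completed\n");
    return 0;
  }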
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zNMethod.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZNMETHOD_HPP
+#define SHARE_GC_Z_ZNMETHOD_HPP
+
+#include "memory/allocation.hpp"
+
+class nmethod;
+class OopClosure;
+class ZReentrantLock;
+class ZWorkers;
+
+class ZNMethod : public AllStatic {
+private:
+  static void attach_gc_data(nmethod* nm);
+  static void detach_gc_data(nmethod* nm);
+
+  static void log_register(const nmethod* nm);
+  static void log_unregister(const nmethod* nm);
+
+public:
+  static void register_nmethod(nmethod* nm);
+  static void unregister_nmethod(nmethod* nm);
+
+  static void disarm_nmethod(nmethod* nm);
+
+  static void nmethod_oops_do(nmethod* nm, OopClosure* cl);
+
+  static void oops_do_begin();
+  static void oops_do_end();
+  static void oops_do(OopClosure* cl);
+
+  static ZReentrantLock* lock_for_nmethod(nmethod* nm);
+
+  static void unlink(ZWorkers* workers, bool unloading_occurred);
+  static void purge(ZWorkers* workers);
+};
+
+#endif // SHARE_GC_Z_ZNMETHOD_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zNMethodAllocator.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zArray.inline.hpp"
+#include "gc/z/zNMethodAllocator.hpp"
+#include "memory/allocation.hpp"
+
+ZArray<void*> ZNMethodAllocator::_deferred_frees;
+bool          ZNMethodAllocator::_defer_frees(false);
+
+void ZNMethodAllocator::immediate_free(void* data) {
+  FREE_C_HEAP_ARRAY(uint8_t, data);
+}
+
+void ZNMethodAllocator::deferred_free(void* data) {
+  _deferred_frees.add(data);
+}
+
+void* ZNMethodAllocator::allocate(size_t size) {
+  return NEW_C_HEAP_ARRAY(uint8_t, size, mtGC);
+}
+
+void ZNMethodAllocator::free(void* data) {
+  if (data == NULL) {
+    return;
+  }
+
+  if (_defer_frees) {
+    deferred_free(data);
+  } else {
+    immediate_free(data);
+  }
+}
+
+void ZNMethodAllocator::activate_deferred_frees() {
+  assert(_deferred_frees.is_empty(), "precondition");
+  _defer_frees = true;
+}
+
+void ZNMethodAllocator::deactivate_and_process_deferred_frees() {
+  _defer_frees = false;
+
+  ZArrayIterator<void*> iter(&_deferred_frees);
+  for (void* data; iter.next(&data);) {
+    immediate_free(data);
+  }
+  _deferred_frees.clear();
+}
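This allocator centralises the policy the old table implemented with safe_delete: while an iteration may still be reading nmethod data, frees are parked in a list and released only once the iteration is over. A standalone sketch of the same shape (hypothetical names; the real class is AllStatic and relies on external synchronization for the flag):

  #include <cstddef>
  #include <cstdlib>
  #include <vector>

  class DeferringAllocator {
    std::vector<void*> _deferred;
    bool _defer = false;
  public:
    void* allocate(size_t size) { return std::malloc(size); }

    void free(void* p) {
      if (p == nullptr) return;
      if (_defer) {
        _deferred.push_back(p);     // a concurrent reader may still dereference p
      } else {
        std::free(p);
      }
    }

    void begin_iteration() { _defer = true; }

    void end_iteration() {          // no readers left: release everything parked
      _defer = false;
      for (void* p : _deferred) std::free(p);
      _deferred.clear();
    }
  };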
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zNMethodAllocator.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZNMETHODALLOCATOR_HPP
+#define SHARE_GC_Z_ZNMETHODALLOCATOR_HPP
+
+#include "memory/allocation.hpp"
+#include "gc/z/zArray.hpp"
+
+class ZNMethodAllocator : public AllStatic {
+private:
+  static ZArray<void*> _deferred_frees;
+  static bool          _defer_frees;
+
+  static void immediate_free(void* data);
+  static void deferred_free(void* data);
+
+public:
+  static void* allocate(size_t size);
+  static void free(void* data);
+
+  static void activate_deferred_frees();
+  static void deactivate_and_process_deferred_frees();
+};
+
+#endif // SHARE_GC_Z_ZNMETHODALLOCATOR_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zNMethodData.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zLock.inline.hpp"
+#include "gc/z/zNMethodAllocator.hpp"
+#include "gc/z/zNMethodData.hpp"
+#include "memory/allocation.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/orderAccess.hpp"
+#include "utilities/align.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/growableArray.hpp"
+
+size_t ZNMethodDataOops::header_size() {
+  const size_t size = sizeof(ZNMethodDataOops);
+  assert(is_aligned(size, sizeof(oop*)), "Header misaligned");
+  return size;
+}
+
+ZNMethodDataOops* ZNMethodDataOops::create(const GrowableArray<oop*>& immediates, bool has_non_immediates) {
+  // Allocate memory for the ZNMethodDataOops object
+  // plus the immediate oop* array that follows right after.
+  const size_t size = ZNMethodDataOops::header_size() + (sizeof(oop*) * immediates.length());
+  void* const mem = ZNMethodAllocator::allocate(size);
+  return ::new (mem) ZNMethodDataOops(immediates, has_non_immediates);
+}
+
+void ZNMethodDataOops::destroy(ZNMethodDataOops* oops) {
+  ZNMethodAllocator::free(oops);
+}
+
+ZNMethodDataOops::ZNMethodDataOops(const GrowableArray<oop*>& immediates, bool has_non_immediates) :
+    _nimmediates(immediates.length()),
+    _has_non_immediates(has_non_immediates) {
+  // Save all immediate oops
+  for (size_t i = 0; i < _nimmediates; i++) {
+    immediates_begin()[i] = immediates.at(i);
+  }
+}
+
+size_t ZNMethodDataOops::immediates_count() const {
+  return _nimmediates;
+}
+
+oop** ZNMethodDataOops::immediates_begin() const {
+  // The immediate oop* array starts immediately after this object
+  return (oop**)((uintptr_t)this + header_size());
+}
+
+oop** ZNMethodDataOops::immediates_end() const {
+  return immediates_begin() + immediates_count();
+}
+
+bool ZNMethodDataOops::has_non_immediates() const {
+  return _has_non_immediates;
+}
+
+ZNMethodData* ZNMethodData::create(nmethod* nm) {
+  void* const mem = ZNMethodAllocator::allocate(sizeof(ZNMethodData));
+  return ::new (mem) ZNMethodData(nm);
+}
+
+void ZNMethodData::destroy(ZNMethodData* data) {
+  ZNMethodAllocator::free(data->oops());
+  ZNMethodAllocator::free(data);
+}
+
+ZNMethodData::ZNMethodData(nmethod* nm) :
+    _lock(),
+    _oops(NULL) {}
+
+ZReentrantLock* ZNMethodData::lock() {
+  return &_lock;
+}
+
+ZNMethodDataOops* ZNMethodData::oops() const {
+  return OrderAccess::load_acquire(&_oops);
+}
+
+ZNMethodDataOops* ZNMethodData::swap_oops(ZNMethodDataOops* new_oops) {
+  return Atomic::xchg(new_oops, &_oops);
+}
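ZNMethodDataOops keeps the header and the immediate-oop array in one allocation: header_size() is asserted to be pointer-aligned so the array can start directly after the object. A minimal standalone illustration of that layout trick using placement new (hypothetical names):

  #include <cassert>
  #include <cstddef>
  #include <cstdint>
  #include <cstdlib>
  #include <new>
  #include <vector>

  class TrailingArray {
    size_t _count;

    static size_t header_size() {
      const size_t size = sizeof(TrailingArray);
      assert(size % sizeof(void*) == 0 && "header must keep the array aligned");
      return size;
    }

    explicit TrailingArray(const std::vector<void*>& elems) : _count(elems.size()) {
      for (size_t i = 0; i < _count; i++) {
        begin()[i] = elems[i];                 // fill the array that follows the header
      }
    }

  public:
    static TrailingArray* create(const std::vector<void*>& elems) {
      void* mem = std::malloc(header_size() + sizeof(void*) * elems.size());
      return ::new (mem) TrailingArray(elems); // header and array share one allocation
    }

    static void destroy(TrailingArray* t) { std::free(t); }  // trivially destructible

    void** begin() { return reinterpret_cast<void**>(reinterpret_cast<uintptr_t>(this) + header_size()); }
    void** end()   { return begin() + _count; }
    size_t count() const { return _count; }
  };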
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zNMethodData.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "gc/z/zLock.hpp"
+#include "oops/oopsHierarchy.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+#ifndef SHARE_GC_Z_ZNMETHODDATA_HPP
+#define SHARE_GC_Z_ZNMETHODDATA_HPP
+
+class nmethod;
+template <typename T> class GrowableArray;
+
+class ZNMethodDataOops {
+private:
+  const size_t _nimmediates;
+  bool         _has_non_immediates;
+
+  static size_t header_size();
+
+  ZNMethodDataOops(const GrowableArray<oop*>& immediates, bool has_non_immediates);
+
+public:
+  static ZNMethodDataOops* create(const GrowableArray<oop*>& immediates, bool has_non_immediates);
+  static void destroy(ZNMethodDataOops* oops);
+
+  size_t immediates_count() const;
+  oop** immediates_begin() const;
+  oop** immediates_end() const;
+
+  bool has_non_immediates() const;
+};
+
+class ZNMethodData {
+private:
+  ZReentrantLock             _lock;
+  ZNMethodDataOops* volatile _oops;
+
+  ZNMethodData(nmethod* nm);
+
+public:
+  static ZNMethodData* create(nmethod* nm);
+  static void destroy(ZNMethodData* data);
+
+  ZReentrantLock* lock();
+
+  ZNMethodDataOops* oops() const;
+  ZNMethodDataOops* swap_oops(ZNMethodDataOops* oops);
+};
+
+#endif // SHARE_GC_Z_ZNMETHODDATA_HPP
--- a/src/hotspot/share/gc/z/zNMethodTable.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/z/zNMethodTable.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,205 +27,38 @@
 #include "code/icBuffer.hpp"
 #include "gc/shared/barrierSet.hpp"
 #include "gc/shared/barrierSetNMethod.hpp"
-#include "gc/z/zArray.inline.hpp"
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zHash.inline.hpp"
 #include "gc/z/zLock.inline.hpp"
+#include "gc/z/zNMethodAllocator.hpp"
+#include "gc/z/zNMethodData.hpp"
 #include "gc/z/zNMethodTable.hpp"
+#include "gc/z/zNMethodTableEntry.hpp"
+#include "gc/z/zNMethodTableIteration.hpp"
 #include "gc/z/zOopClosures.inline.hpp"
 #include "gc/z/zTask.hpp"
 #include "gc/z/zWorkers.hpp"
 #include "logging/log.hpp"
-#include "memory/allocation.inline.hpp"
+#include "memory/allocation.hpp"
+#include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/orderAccess.hpp"
-#include "runtime/os.hpp"
 #include "utilities/debug.hpp"
 
-class ZNMethodDataImmediateOops {
-private:
-  const size_t _nimmediate_oops;
-
-  static size_t header_size();
-
-  ZNMethodDataImmediateOops(const GrowableArray<oop*>& immediate_oops);
-
-public:
-  static ZNMethodDataImmediateOops* create(const GrowableArray<oop*>& immediate_oops);
-  static void destroy(ZNMethodDataImmediateOops* data_immediate_oops);
-
-  size_t immediate_oops_count() const;
-  oop** immediate_oops_begin() const;
-  oop** immediate_oops_end() const;
-};
-
-size_t ZNMethodDataImmediateOops::header_size() {
-  const size_t size = sizeof(ZNMethodDataImmediateOops);
-  assert(is_aligned(size, sizeof(oop*)), "Header misaligned");
-  return size;
-}
-
-ZNMethodDataImmediateOops* ZNMethodDataImmediateOops::create(const GrowableArray<oop*>& immediate_oops) {
-  // Allocate memory for the ZNMethodDataImmediateOops object
-  // plus the immediate oop* array that follows right after.
-  const size_t size = ZNMethodDataImmediateOops::header_size() + (sizeof(oop*) * immediate_oops.length());
-  void* const data_immediate_oops = NEW_C_HEAP_ARRAY(uint8_t, size, mtGC);
-  return ::new (data_immediate_oops) ZNMethodDataImmediateOops(immediate_oops);
-}
-
-void ZNMethodDataImmediateOops::destroy(ZNMethodDataImmediateOops* data_immediate_oops) {
-  ZNMethodTable::safe_delete(data_immediate_oops);
-}
-
-ZNMethodDataImmediateOops::ZNMethodDataImmediateOops(const GrowableArray<oop*>& immediate_oops) :
-    _nimmediate_oops(immediate_oops.length()) {
-  // Save all immediate oops
-  for (size_t i = 0; i < _nimmediate_oops; i++) {
-    immediate_oops_begin()[i] = immediate_oops.at(i);
-  }
-}
-
-size_t ZNMethodDataImmediateOops::immediate_oops_count() const {
-  return _nimmediate_oops;
-}
-
-oop** ZNMethodDataImmediateOops::immediate_oops_begin() const {
-  // The immediate oop* array starts immediately after this object
-  return (oop**)((uintptr_t)this + header_size());
-}
-
-oop** ZNMethodDataImmediateOops::immediate_oops_end() const {
-  return immediate_oops_begin() + immediate_oops_count();
-}
-
-class ZNMethodData {
-private:
-  ZReentrantLock                      _lock;
-  ZNMethodDataImmediateOops* volatile _immediate_oops;
-
-  ZNMethodData(nmethod* nm);
-
-public:
-  static ZNMethodData* create(nmethod* nm);
-  static void destroy(ZNMethodData* data);
-
-  ZReentrantLock* lock();
-
-  ZNMethodDataImmediateOops* immediate_oops() const;
-  ZNMethodDataImmediateOops* swap_immediate_oops(const GrowableArray<oop*>& immediate_oops);
-};
-
-ZNMethodData* ZNMethodData::create(nmethod* nm) {
-  void* const method = NEW_C_HEAP_ARRAY(uint8_t, sizeof(ZNMethodData), mtGC);
-  return ::new (method) ZNMethodData(nm);
-}
-
-void ZNMethodData::destroy(ZNMethodData* data) {
-  ZNMethodDataImmediateOops::destroy(data->immediate_oops());
-  ZNMethodTable::safe_delete(data);
-}
-
-ZNMethodData::ZNMethodData(nmethod* nm) :
-    _lock(),
-    _immediate_oops(NULL) {}
-
-ZReentrantLock* ZNMethodData::lock() {
-  return &_lock;
-}
-
-ZNMethodDataImmediateOops* ZNMethodData::immediate_oops() const {
-  return OrderAccess::load_acquire(&_immediate_oops);
-}
-
-ZNMethodDataImmediateOops* ZNMethodData::swap_immediate_oops(const GrowableArray<oop*>& immediate_oops) {
-  ZNMethodDataImmediateOops* const data_immediate_oops =
-    immediate_oops.is_empty() ? NULL : ZNMethodDataImmediateOops::create(immediate_oops);
-  return Atomic::xchg(data_immediate_oops, &_immediate_oops);
-}
-
-static ZNMethodData* gc_data(const nmethod* nm) {
-  return nm->gc_data<ZNMethodData>();
-}
-
-static void set_gc_data(nmethod* nm, ZNMethodData* data) {
-  return nm->set_gc_data<ZNMethodData>(data);
-}
-
 ZNMethodTableEntry* ZNMethodTable::_table = NULL;
 size_t ZNMethodTable::_size = 0;
-ZLock ZNMethodTable::_iter_lock;
-ZNMethodTableEntry* ZNMethodTable::_iter_table = NULL;
-size_t ZNMethodTable::_iter_table_size = 0;
-ZArray<void*> ZNMethodTable::_iter_deferred_deletes;
 size_t ZNMethodTable::_nregistered = 0;
 size_t ZNMethodTable::_nunregistered = 0;
-volatile size_t ZNMethodTable::_claimed = 0;
+ZNMethodTableIteration ZNMethodTable::_iteration;
 
-void ZNMethodTable::safe_delete(void* data) {
-  if (data == NULL) {
-    return;
-  }
-
-  ZLocker<ZLock> locker(&_iter_lock);
-  if (_iter_table != NULL) {
-    // Iteration in progress, defer delete
-    _iter_deferred_deletes.add(data);
-  } else {
-    // Iteration not in progress, delete now
-    FREE_C_HEAP_ARRAY(uint8_t, data);
-  }
+ZNMethodTableEntry* ZNMethodTable::create(size_t size) {
+  void* const mem = ZNMethodAllocator::allocate(size * sizeof(ZNMethodTableEntry));
+  return ::new (mem) ZNMethodTableEntry[size];
 }
 
-ZNMethodTableEntry ZNMethodTable::create_entry(nmethod* nm) {
-  GrowableArray<oop*> immediate_oops;
-  bool non_immediate_oops = false;
-
-  // Find all oops relocations
-  RelocIterator iter(nm);
-  while (iter.next()) {
-    if (iter.type() != relocInfo::oop_type) {
-      // Not an oop
-      continue;
-    }
-
-    oop_Relocation* r = iter.oop_reloc();
-
-    if (!r->oop_is_immediate()) {
-      // Non-immediate oop found
-      non_immediate_oops = true;
-      continue;
-    }
-
-    if (r->oop_value() != NULL) {
-      // Non-NULL immediate oop found. NULL oops can safely be
-      // ignored since the method will be re-registered if they
-      // are later patched to be non-NULL.
-      immediate_oops.push(r->oop_addr());
-    }
-  }
-
-  // Attach GC data to nmethod
-  ZNMethodData* data = gc_data(nm);
-  if (data == NULL) {
-    data = ZNMethodData::create(nm);
-    set_gc_data(nm, data);
-  }
-
-  // Attach immediate oops in GC data
-  ZNMethodDataImmediateOops* const old_data_immediate_oops = data->swap_immediate_oops(immediate_oops);
-  ZNMethodDataImmediateOops::destroy(old_data_immediate_oops);
-
-  // Create entry
-  return ZNMethodTableEntry(nm, non_immediate_oops, !immediate_oops.is_empty());
-}
-
-ZReentrantLock* ZNMethodTable::lock_for_nmethod(nmethod* nm) {
-  ZNMethodData* const data = gc_data(nm);
-  if (data == NULL) {
-    return NULL;
-  }
-  return data->lock();
+void ZNMethodTable::destroy(ZNMethodTableEntry* table) {
+  ZNMethodAllocator::free(table);
 }
 
 size_t ZNMethodTable::first_index(const nmethod* nm, size_t size) {
@@ -241,8 +74,8 @@
   return (prev_index + 1) & mask;
 }
 
-bool ZNMethodTable::register_entry(ZNMethodTableEntry* table, size_t size, ZNMethodTableEntry entry) {
-  const nmethod* const nm = entry.method();
+bool ZNMethodTable::register_entry(ZNMethodTableEntry* table, size_t size, nmethod* nm) {
+  const ZNMethodTableEntry entry(nm);
   size_t index = first_index(nm, size);
 
   for (;;) {
@@ -265,11 +98,6 @@
 }
 
 void ZNMethodTable::unregister_entry(ZNMethodTableEntry* table, size_t size, nmethod* nm) {
-  if (size == 0) {
-    // Table is empty
-    return;
-  }
-
   size_t index = first_index(nm, size);
 
   for (;;) {
@@ -279,10 +107,6 @@
     if (table_entry.registered() && table_entry.method() == nm) {
       // Remove entry
       table[index] = ZNMethodTableEntry(true /* unregistered */);
-
-      // Destroy GC data
-      ZNMethodData::destroy(gc_data(nm));
-      set_gc_data(nm, NULL);
       return;
     }
 
@@ -291,7 +115,8 @@
 }
 
 void ZNMethodTable::rebuild(size_t new_size) {
-  ZLocker<ZLock> locker(&_iter_lock);
+  assert(CodeCache_lock->owned_by_self(), "Lock must be held");
+
   assert(is_power_of_2(new_size), "Invalid size");
 
   log_debug(gc, nmethod)("Rebuilding NMethod Table: "
@@ -303,20 +128,18 @@
                          _nunregistered, percent_of(_nunregistered, _size), 0.0);
 
   // Allocate new table
-  ZNMethodTableEntry* const new_table = new ZNMethodTableEntry[new_size];
+  ZNMethodTableEntry* const new_table = ZNMethodTable::create(new_size);
 
   // Transfer all registered entries
   for (size_t i = 0; i < _size; i++) {
     const ZNMethodTableEntry entry = _table[i];
     if (entry.registered()) {
-      register_entry(new_table, new_size, entry);
+      register_entry(new_table, new_size, entry.method());
     }
   }
 
-  if (_iter_table != _table) {
-    // Delete old table
-    delete [] _table;
-  }
+  // Free old table
+  ZNMethodTable::destroy(_table);
 
   // Install new table
   _table = new_table;
@@ -353,61 +176,6 @@
   }
 }
 
-void ZNMethodTable::log_register(const nmethod* nm, ZNMethodTableEntry entry) {
-  LogTarget(Trace, gc, nmethod) log;
-  if (!log.is_enabled()) {
-    return;
-  }
-
-  log.print("Register NMethod: %s.%s (" PTR_FORMAT "), "
-            "Compiler: %s, Oops: %d, ImmediateOops: " SIZE_FORMAT ", NonImmediateOops: %s",
-            nm->method()->method_holder()->external_name(),
-            nm->method()->name()->as_C_string(),
-            p2i(nm),
-            nm->compiler_name(),
-            nm->oops_count() - 1,
-            entry.immediate_oops() ? gc_data(nm)->immediate_oops()->immediate_oops_count() : 0,
-            entry.non_immediate_oops() ? "Yes" : "No");
-
-  LogTarget(Trace, gc, nmethod, oops) log_oops;
-  if (!log_oops.is_enabled()) {
-    return;
-  }
-
-  // Print nmethod oops table
-  oop* const begin = nm->oops_begin();
-  oop* const end = nm->oops_end();
-  for (oop* p = begin; p < end; p++) {
-    log_oops.print("           Oop[" SIZE_FORMAT "] " PTR_FORMAT " (%s)",
-                   (p - begin), p2i(*p), (*p)->klass()->external_name());
-  }
-
-  if (entry.immediate_oops()) {
-    // Print nmethod immediate oops
-    const ZNMethodDataImmediateOops* const nmi = gc_data(nm)->immediate_oops();
-    if (nmi != NULL) {
-      oop** const begin = nmi->immediate_oops_begin();
-      oop** const end = nmi->immediate_oops_end();
-      for (oop** p = begin; p < end; p++) {
-        log_oops.print("  ImmediateOop[" SIZE_FORMAT "] " PTR_FORMAT " @ " PTR_FORMAT " (%s)",
-                       (p - begin), p2i(**p), p2i(*p), (**p)->klass()->external_name());
-      }
-    }
-  }
-}
-
-void ZNMethodTable::log_unregister(const nmethod* nm) {
-  LogTarget(Debug, gc, nmethod) log;
-  if (!log.is_enabled()) {
-    return;
-  }
-
-  log.print("Unregister NMethod: %s.%s (" PTR_FORMAT ")",
-            nm->method()->method_holder()->external_name(),
-            nm->method()->name()->as_C_string(),
-            p2i(nm));
-}
-
 size_t ZNMethodTable::registered_nmethods() {
   return _nregistered;
 }
@@ -418,48 +186,29 @@
 
 void ZNMethodTable::register_nmethod(nmethod* nm) {
   assert(CodeCache_lock->owned_by_self(), "Lock must be held");
-  ResourceMark rm;
 
   // Grow/Shrink/Prune table if needed
   rebuild_if_needed();
 
-  // Create entry
-  const ZNMethodTableEntry entry = create_entry(nm);
-
-  log_register(nm, entry);
-
   // Insert new entry
-  if (register_entry(_table, _size, entry)) {
+  if (register_entry(_table, _size, nm)) {
     // New entry registered. When register_entry() instead returns
     // false the nmethod was already in the table so we do not want
     // to increase number of registered entries in that case.
     _nregistered++;
   }
-
-  // Disarm nmethod entry barrier
-  disarm_nmethod(nm);
 }
 
-void ZNMethodTable::sweeper_wait_for_iteration() {
-  // The sweeper must wait for any ongoing iteration to complete
-  // before it can unregister an nmethod.
-  if (!Thread::current()->is_Code_cache_sweeper_thread()) {
-    return;
-  }
+void ZNMethodTable::wait_until_iteration_done() {
+  assert(CodeCache_lock->owned_by_self(), "Lock must be held");
 
-  while (_iter_table != NULL) {
-    MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-    os::naked_short_sleep(1);
+  while (_iteration.in_progress()) {
+    CodeCache_lock->wait(Monitor::_no_safepoint_check_flag);
   }
 }
 
 void ZNMethodTable::unregister_nmethod(nmethod* nm) {
   assert(CodeCache_lock->owned_by_self(), "Lock must be held");
-  ResourceMark rm;
-
-  sweeper_wait_for_iteration();
-
-  log_unregister(nm);
 
   // Remove entry
   unregister_entry(_table, _size, nm);
@@ -467,248 +216,29 @@
   _nregistered--;
 }
 
-void ZNMethodTable::disarm_nmethod(nmethod* nm) {
-  BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod();
-  if (bs != NULL) {
-    bs->disarm(nm);
-  }
+void ZNMethodTable::nmethods_do_begin() {
+  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+
+  // Make sure we don't free data while iterating
+  ZNMethodAllocator::activate_deferred_frees();
+
+  // Prepare iteration
+  _iteration.nmethods_do_begin(_table, _size);
 }
 
-void ZNMethodTable::nmethod_entries_do_begin() {
+void ZNMethodTable::nmethods_do_end() {
   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-  ZLocker<ZLock> locker(&_iter_lock);
 
-  // Prepare iteration
-  _iter_table = _table;
-  _iter_table_size = _size;
-  _claimed = 0;
-  assert(_iter_deferred_deletes.is_empty(), "Should be emtpy");
+  // Finish iteration
+  _iteration.nmethods_do_end();
+
+  // Process deferred frees
+  ZNMethodAllocator::deactivate_and_process_deferred_frees();
+
+  // Notify iteration done
+  CodeCache_lock->notify_all();
 }
 
-void ZNMethodTable::nmethod_entries_do_end() {
-  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-  ZLocker<ZLock> locker(&_iter_lock);
-
-  // Finish iteration
-  if (_iter_table != _table) {
-    delete [] _iter_table;
-  }
-  _iter_table = NULL;
-  assert(_claimed >= _iter_table_size, "Failed to claim all table entries");
-
-  // Process deferred deletes
-  ZArrayIterator<void*> iter(&_iter_deferred_deletes);
-  for (void* data; iter.next(&data);) {
-    FREE_C_HEAP_ARRAY(uint8_t, data);
-  }
-  _iter_deferred_deletes.clear();
+void ZNMethodTable::nmethods_do(NMethodClosure* cl) {
+  _iteration.nmethods_do(cl);
 }
-
-void ZNMethodTable::entry_oops_do(ZNMethodTableEntry entry, OopClosure* cl) {
-  nmethod* const nm = entry.method();
-
-  // Process oops table
-  oop* const begin = nm->oops_begin();
-  oop* const end = nm->oops_end();
-  for (oop* p = begin; p < end; p++) {
-    if (*p != Universe::non_oop_word()) {
-      cl->do_oop(p);
-    }
-  }
-
-  // Process immediate oops
-  if (entry.immediate_oops()) {
-    const ZNMethodDataImmediateOops* const nmi = gc_data(nm)->immediate_oops();
-    if (nmi != NULL) {
-      oop** const begin = nmi->immediate_oops_begin();
-      oop** const end = nmi->immediate_oops_end();
-      for (oop** p = begin; p < end; p++) {
-        if (**p != Universe::non_oop_word()) {
-          cl->do_oop(*p);
-        }
-      }
-    }
-  }
-
-  // Process non-immediate oops
-  if (entry.non_immediate_oops()) {
-    nmethod* const nm = entry.method();
-    nm->fix_oop_relocations();
-  }
-}
-
-class ZNMethodTableEntryToOopsDo : public ZNMethodTableEntryClosure {
-private:
-  OopClosure* _cl;
-
-public:
-  ZNMethodTableEntryToOopsDo(OopClosure* cl) :
-      _cl(cl) {}
-
-  void do_nmethod_entry(ZNMethodTableEntry entry) {
-    ZNMethodTable::entry_oops_do(entry, _cl);
-  }
-};
-
-void ZNMethodTable::oops_do(OopClosure* cl) {
-  ZNMethodTableEntryToOopsDo entry_cl(cl);
-  nmethod_entries_do(&entry_cl);
-}
-
-void ZNMethodTable::nmethod_entries_do(ZNMethodTableEntryClosure* cl) {
-  for (;;) {
-    // Claim table partition. Each partition is currently sized to span
-    // two cache lines. This number is just a guess, but seems to work well.
-    const size_t partition_size = (ZCacheLineSize * 2) / sizeof(ZNMethodTableEntry);
-    const size_t partition_start = MIN2(Atomic::add(partition_size, &_claimed) - partition_size, _iter_table_size);
-    const size_t partition_end = MIN2(partition_start + partition_size, _iter_table_size);
-    if (partition_start == partition_end) {
-      // End of table
-      break;
-    }
-
-    // Process table partition
-    for (size_t i = partition_start; i < partition_end; i++) {
-      const ZNMethodTableEntry entry = _iter_table[i];
-      if (entry.registered()) {
-        cl->do_nmethod_entry(entry);
-      }
-    }
-  }
-}
-
-class ZNMethodTableUnlinkClosure : public ZNMethodTableEntryClosure {
-private:
-  bool          _unloading_occurred;
-  volatile bool _failed;
-
-  void set_failed() {
-    Atomic::store(true, &_failed);
-  }
-
-public:
-  ZNMethodTableUnlinkClosure(bool unloading_occurred) :
-      _unloading_occurred(unloading_occurred),
-      _failed(false) {}
-
-  virtual void do_nmethod_entry(ZNMethodTableEntry entry) {
-    if (failed()) {
-      return;
-    }
-
-    nmethod* const nm = entry.method();
-    if (!nm->is_alive()) {
-      return;
-    }
-
-    ZLocker<ZReentrantLock> locker(ZNMethodTable::lock_for_nmethod(nm));
-
-    if (nm->is_unloading()) {
-      // Unlinking of the dependencies must happen before the
-      // handshake separating unlink and purge.
-      nm->flush_dependencies(false /* delete_immediately */);
-
-      // We don't need to take the lock when unlinking nmethods from
-      // the Method, because it is only concurrently unlinked by
-      // the entry barrier, which acquires the per nmethod lock.
-      nm->unlink_from_method(false /* acquire_lock */);
-      return;
-    }
-
-    // Heal oops and disarm
-    ZNMethodOopClosure cl;
-    ZNMethodTable::entry_oops_do(entry, &cl);
-    ZNMethodTable::disarm_nmethod(nm);
-
-    // Clear compiled ICs and exception caches
-    if (!nm->unload_nmethod_caches(_unloading_occurred)) {
-      set_failed();
-    }
-  }
-
-  bool failed() const {
-    return Atomic::load(&_failed);
-  }
-};
-
-class ZNMethodTableUnlinkTask : public ZTask {
-private:
-  ZNMethodTableUnlinkClosure _cl;
-  ICRefillVerifier*          _verifier;
-
-public:
-  ZNMethodTableUnlinkTask(bool unloading_occurred, ICRefillVerifier* verifier) :
-      ZTask("ZNMethodTableUnlinkTask"),
-      _cl(unloading_occurred),
-      _verifier(verifier) {
-    ZNMethodTable::nmethod_entries_do_begin();
-  }
-
-  ~ZNMethodTableUnlinkTask() {
-    ZNMethodTable::nmethod_entries_do_end();
-  }
-
-  virtual void work() {
-    ICRefillVerifierMark mark(_verifier);
-    ZNMethodTable::nmethod_entries_do(&_cl);
-  }
-
-  bool success() const {
-    return !_cl.failed();
-  }
-};
-
-void ZNMethodTable::unlink(ZWorkers* workers, bool unloading_occurred) {
-  for (;;) {
-    ICRefillVerifier verifier;
-
-    {
-      ZNMethodTableUnlinkTask task(unloading_occurred, &verifier);
-      workers->run_concurrent(&task);
-      if (task.success()) {
-        return;
-      }
-    }
-
-    // Cleaning failed because we ran out of transitional IC stubs,
-    // so we have to refill and try again. Refilling requires taking
-    // a safepoint, so we temporarily leave the suspendible thread set.
-    SuspendibleThreadSetLeaver sts;
-    InlineCacheBuffer::refill_ic_stubs();
-  }
-}
-
-class ZNMethodTablePurgeClosure : public ZNMethodTableEntryClosure {
-public:
-  virtual void do_nmethod_entry(ZNMethodTableEntry entry) {
-    nmethod* const nm = entry.method();
-    if (nm->is_alive() && nm->is_unloading()) {
-      nm->make_unloaded();
-    }
-  }
-};
-
-class ZNMethodTablePurgeTask : public ZTask {
-private:
-  ZNMethodTablePurgeClosure _cl;
-
-public:
-  ZNMethodTablePurgeTask() :
-      ZTask("ZNMethodTablePurgeTask"),
-      _cl() {
-    ZNMethodTable::nmethod_entries_do_begin();
-  }
-
-  ~ZNMethodTablePurgeTask() {
-    ZNMethodTable::nmethod_entries_do_end();
-  }
-
-  virtual void work() {
-    ZNMethodTable::nmethod_entries_do(&_cl);
-  }
-};
-
-void ZNMethodTable::purge(ZWorkers* workers) {
-  ZNMethodTablePurgeTask task;
-  workers->run_concurrent(&task);
-}
--- a/src/hotspot/share/gc/z/zNMethodTable.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/z/zNMethodTable.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,66 +24,46 @@
 #ifndef SHARE_GC_Z_ZNMETHODTABLE_HPP
 #define SHARE_GC_Z_ZNMETHODTABLE_HPP
 
-#include "gc/z/zArray.hpp"
-#include "gc/z/zGlobals.hpp"
-#include "gc/z/zLock.hpp"
-#include "gc/z/zNMethodTableEntry.hpp"
+#include "gc/z/zNMethodTableIteration.hpp"
 #include "memory/allocation.hpp"
 
+class nmethod;
+class NMethodClosure;
+class ZNMethodTableEntry;
 class ZWorkers;
 
-class ZNMethodTableEntryClosure {
-public:
-  virtual void do_nmethod_entry(ZNMethodTableEntry entry) = 0;
-};
-
 class ZNMethodTable : public AllStatic {
 private:
-  static ZNMethodTableEntry* _table;
-  static size_t              _size;
-  static ZLock               _iter_lock;
-  static ZNMethodTableEntry* _iter_table;
-  static size_t              _iter_table_size;
-  static ZArray<void*>       _iter_deferred_deletes;
-  static size_t              _nregistered;
-  static size_t              _nunregistered;
-  static volatile size_t     _claimed ATTRIBUTE_ALIGNED(ZCacheLineSize);
+  static ZNMethodTableEntry*    _table;
+  static size_t                 _size;
+  static size_t                 _nregistered;
+  static size_t                 _nunregistered;
+  static ZNMethodTableIteration _iteration;
 
-  static ZNMethodTableEntry create_entry(nmethod* nm);
+  static ZNMethodTableEntry* create(size_t size);
+  static void destroy(ZNMethodTableEntry* table);
 
   static size_t first_index(const nmethod* nm, size_t size);
   static size_t next_index(size_t prev_index, size_t size);
 
-  static void sweeper_wait_for_iteration();
-
-  static bool register_entry(ZNMethodTableEntry* table, size_t size, ZNMethodTableEntry entry);
+  static bool register_entry(ZNMethodTableEntry* table, size_t size, nmethod* nm);
   static void unregister_entry(ZNMethodTableEntry* table, size_t size, nmethod* nm);
 
   static void rebuild(size_t new_size);
   static void rebuild_if_needed();
 
-  static void log_register(const nmethod* nm, ZNMethodTableEntry entry);
-  static void log_unregister(const nmethod* nm);
-
 public:
-  static void safe_delete(void* data);
-
   static size_t registered_nmethods();
   static size_t unregistered_nmethods();
 
   static void register_nmethod(nmethod* nm);
   static void unregister_nmethod(nmethod* nm);
-  static void disarm_nmethod(nmethod* nm);
 
-  static ZReentrantLock* lock_for_nmethod(nmethod* nm);
+  static void wait_until_iteration_done();
 
-  static void oops_do(OopClosure* cl);
-
-  static void entry_oops_do(ZNMethodTableEntry entry, OopClosure* cl);
-
-  static void nmethod_entries_do_begin();
-  static void nmethod_entries_do_end();
-  static void nmethod_entries_do(ZNMethodTableEntryClosure* cl);
+  static void nmethods_do_begin();
+  static void nmethods_do_end();
+  static void nmethods_do(NMethodClosure* cl);
 
   static void unlink(ZWorkers* workers, bool unloading_occurred);
   static void purge(ZWorkers* workers);
--- a/src/hotspot/share/gc/z/zNMethodTableEntry.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/z/zNMethodTableEntry.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -32,16 +32,16 @@
 // --------------------------
 //
 //   6
-//   3                                                                  3 2 1 0
-//  +--------------------------------------------------------------------+-+-+-+
-//  |11111111 11111111 11111111 11111111 11111111 11111111 11111111 11111|1|1|1|
-//  +--------------------------------------------------------------------+-+-+-+
-//  |                                                                    | | |
-//  |                               2-2 Non-immediate Oops Flag (1-bits) * | |
-//  |                                                                      | |
-//  |                        1-1 Immediate Oops/Unregistered Flag (1-bits) * |
-//  |                                                                        |
-//  |                                           0-0 Registered Flag (1-bits) *
+//   3                                                                   2 1 0
+//  +---------------------------------------------------------------------+-+-+
+//  |11111111 11111111 11111111 11111111 11111111 11111111 11111111 111111|1|1|
+//  +---------------------------------------------------------------------+-+-+
+//  |                                                                     | |
+//  |                                                                     | |
+//  |                                                                     | |
+//  |                                      1-1 Unregistered Flag (1-bits) * |
+//  |                                                                       |
+//  |                                          0-0 Registered Flag (1-bits) *
 //  |
 //  * 63-3 NMethod Address (61-bits)
 //
@@ -52,22 +52,20 @@
 private:
   typedef ZBitField<uint64_t, bool,     0,  1>    field_registered;
   typedef ZBitField<uint64_t, bool,     1,  1>    field_unregistered;
-  typedef ZBitField<uint64_t, bool,     1,  1>    field_immediate_oops;
-  typedef ZBitField<uint64_t, bool,     2,  1>    field_non_immediate_oops;
-  typedef ZBitField<uint64_t, nmethod*, 3, 61, 3> field_method;
+  typedef ZBitField<uint64_t, nmethod*, 2, 62, 2> field_method;
 
   uint64_t _entry;
 
 public:
   explicit ZNMethodTableEntry(bool unregistered = false) :
-      _entry(field_unregistered::encode(unregistered) |
-             field_registered::encode(false)) {}
+      _entry(field_registered::encode(false) |
+             field_unregistered::encode(unregistered) |
+             field_method::encode(NULL)) {}
 
-  ZNMethodTableEntry(nmethod* method, bool non_immediate_oops, bool immediate_oops) :
-      _entry(field_method::encode(method) |
-             field_non_immediate_oops::encode(non_immediate_oops) |
-             field_immediate_oops::encode(immediate_oops) |
-             field_registered::encode(true)) {}
+  explicit ZNMethodTableEntry(nmethod* method) :
+      _entry(field_registered::encode(true) |
+             field_unregistered::encode(false) |
+             field_method::encode(method)) {}
 
   bool registered() const {
     return field_registered::decode(_entry);
@@ -77,14 +75,6 @@
     return field_unregistered::decode(_entry);
   }
 
-  bool immediate_oops() const {
-    return field_immediate_oops::decode(_entry);
-  }
-
-  bool non_immediate_oops() const {
-    return field_non_immediate_oops::decode(_entry);
-  }
-
   nmethod* method() const {
     return field_method::decode(_entry);
   }
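The new entry layout keeps only two flag bits: bit 0 marks a registered entry, bit 1 marks an unregistered (tombstone) entry, and bits 2-63 hold the nmethod address. This relies on nmethod addresses being at least 4-byte aligned, so the low two address bits are always zero and can be reused. A standalone sketch of that encoding with plain shifts and masks (illustrative types, not the ZBitField template):

    #include <cassert>
    #include <cstdint>

    // Bit 0 = registered, bit 1 = unregistered, bits 2-63 = method address.
    // Only the "registered" constructor is modeled here.
    class EntrySketch {
    private:
      uint64_t _entry;

    public:
      explicit EntrySketch(void* method)
          : _entry(reinterpret_cast<uint64_t>(method) | 0x1) {
        // Alignment guarantees the flag bits never collide with the address.
        assert((reinterpret_cast<uint64_t>(method) & 0x3) == 0, "method must be 4-byte aligned");
      }

      bool registered()   const { return (_entry & 0x1) != 0; }
      bool unregistered() const { return (_entry & 0x2) != 0; }

      void* method() const {
        return reinterpret_cast<void*>(_entry & ~uint64_t(0x3)); // strip flag bits
      }
    };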
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zNMethodTableIteration.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zNMethodTableEntry.hpp"
+#include "gc/z/zNMethodTableIteration.hpp"
+#include "memory/iterator.hpp"
+#include "runtime/atomic.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+ZNMethodTableIteration::ZNMethodTableIteration() :
+    _table(NULL),
+    _size(0),
+    _claimed(0) {}
+
+bool ZNMethodTableIteration::in_progress() const {
+  return _table != NULL;
+}
+
+void ZNMethodTableIteration::nmethods_do_begin(ZNMethodTableEntry* table, size_t size) {
+  assert(!in_progress(), "precondition");
+
+  _table = table;
+  _size = size;
+  _claimed = 0;
+}
+
+void ZNMethodTableIteration::nmethods_do_end() {
+  assert(_claimed >= _size, "Failed to claim all table entries");
+
+  // Finish iteration
+  _table = NULL;
+}
+
+void ZNMethodTableIteration::nmethods_do(NMethodClosure* cl) {
+  for (;;) {
+    // Claim table partition. Each partition is currently sized to span
+    // two cache lines. This number is just a guess, but seems to work well.
+    const size_t partition_size = (ZCacheLineSize * 2) / sizeof(ZNMethodTableEntry);
+    const size_t partition_start = MIN2(Atomic::add(partition_size, &_claimed) - partition_size, _size);
+    const size_t partition_end = MIN2(partition_start + partition_size, _size);
+    if (partition_start == partition_end) {
+      // End of table
+      break;
+    }
+
+    // Process table partition
+    for (size_t i = partition_start; i < partition_end; i++) {
+      const ZNMethodTableEntry entry = _table[i];
+      if (entry.registered()) {
+        cl->do_nmethod(entry.method());
+      }
+    }
+  }
+}
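nmethods_do() hands out work by racing worker threads on a single atomic cursor: Atomic::add returns the updated value, so subtracting the partition size yields the claimed start, and both bounds are clamped to the table size. A compact sketch of the same claiming loop with std::atomic; note that fetch_add returns the previous value, so no subtraction is needed (names are illustrative):

    #include <algorithm>
    #include <atomic>
    #include <cstddef>
    #include <vector>

    // Workers share one atomic cursor for a single pass over the table and
    // each claims a fixed-size slice until the cursor runs past the end.
    template <typename T, typename Visitor>
    void parallel_visit(const std::vector<T>& table, std::atomic<size_t>& claimed,
                        size_t partition_size, Visitor visit) {
      for (;;) {
        // fetch_add returns the old value, i.e. the start of our partition.
        const size_t start = std::min(claimed.fetch_add(partition_size), table.size());
        const size_t end = std::min(start + partition_size, table.size());
        if (start == end) {
          break; // End of table
        }
        for (size_t i = start; i < end; i++) {
          visit(table[i]);
        }
      }
    }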
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zNMethodTableIteration.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZNMETHODTABLEITERATION_HPP
+#define SHARE_GC_Z_ZNMETHODTABLEITERATION_HPP
+
+#include "gc/z/zGlobals.hpp"
+
+class NMethodClosure;
+class ZNMethodTableEntry;
+
+class ZNMethodTableIteration {
+private:
+  ZNMethodTableEntry* _table;
+  size_t              _size;
+  volatile size_t     _claimed ATTRIBUTE_ALIGNED(ZCacheLineSize);
+
+public:
+  ZNMethodTableIteration();
+
+  bool in_progress() const;
+
+  void nmethods_do_begin(ZNMethodTableEntry* table, size_t size);
+  void nmethods_do_end();
+  void nmethods_do(NMethodClosure* cl);
+};
+
+#endif // SHARE_GC_Z_ZNMETHODTABLEITERATION_HPP
--- a/src/hotspot/share/gc/z/zRelocate.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/z/zRelocate.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,28 +31,20 @@
 #include "gc/z/zRelocationSet.inline.hpp"
 #include "gc/z/zRootsIterator.hpp"
 #include "gc/z/zTask.hpp"
+#include "gc/z/zThreadLocalAllocBuffer.hpp"
 #include "gc/z/zWorkers.hpp"
 
 ZRelocate::ZRelocate(ZWorkers* workers) :
     _workers(workers) {}
 
 class ZRelocateRootsIteratorClosure : public ZRootsIteratorClosure {
-private:
-  static void remap_address(HeapWord** p) {
-    *p = (HeapWord*)ZAddress::good_or_null((uintptr_t)*p);
-  }
-
 public:
   virtual void do_thread(Thread* thread) {
-    ZRootsIteratorClosure::do_thread(thread);
-
     // Update thread local address bad mask
     ZThreadLocalData::set_address_bad_mask(thread, ZAddressBadMask);
 
     // Remap TLAB
-    if (UseTLAB && thread->is_Java_thread()) {
-      thread->tlab().addresses_do(remap_address);
-    }
+    ZThreadLocalAllocBuffer::remap(thread);
   }
 
   virtual void do_oop(oop* p) {
--- a/src/hotspot/share/gc/z/zRootsIterator.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/z/zRootsIterator.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -33,7 +33,7 @@
 #include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/z/zBarrierSetNMethod.hpp"
 #include "gc/z/zGlobals.hpp"
-#include "gc/z/zNMethodTable.hpp"
+#include "gc/z/zNMethod.hpp"
 #include "gc/z/zOopClosures.inline.hpp"
 #include "gc/z/zRootsIterator.hpp"
 #include "gc/z/zStat.hpp"
@@ -135,29 +135,38 @@
   }
 }
 
-class ZCodeBlobClosure : public CodeBlobToOopClosure {
+class ZRootsIteratorCodeBlobClosure : public CodeBlobToOopClosure {
 private:
   BarrierSetNMethod* _bs;
 
 public:
-  ZCodeBlobClosure(OopClosure* cl) :
+  ZRootsIteratorCodeBlobClosure(OopClosure* cl) :
     CodeBlobToOopClosure(cl, true /* fix_relocations */),
     _bs(BarrierSet::barrier_set()->barrier_set_nmethod()) {}
 
   virtual void do_code_blob(CodeBlob* cb) {
     nmethod* const nm = cb->as_nmethod_or_null();
-    if (nm == NULL || nm->test_set_oops_do_mark()) {
-      return;
+    if (nm != NULL && !nm->test_set_oops_do_mark()) {
+      CodeBlobToOopClosure::do_code_blob(cb);
+      _bs->disarm(nm);
     }
-    CodeBlobToOopClosure::do_code_blob(cb);
-    _bs->disarm(nm);
   }
 };
 
-void ZRootsIteratorClosure::do_thread(Thread* thread) {
-  ZCodeBlobClosure code_cl(this);
-  thread->oops_do(this, ClassUnloading ? &code_cl : NULL);
-}
+class ZRootsIteratorThreadClosure : public ThreadClosure {
+private:
+  ZRootsIteratorClosure* _cl;
+
+public:
+  ZRootsIteratorThreadClosure(ZRootsIteratorClosure* cl) :
+      _cl(cl) {}
+
+  virtual void do_thread(Thread* thread) {
+    ZRootsIteratorCodeBlobClosure code_cl(_cl);
+    thread->oops_do(_cl, ClassUnloading ? &code_cl : NULL);
+    _cl->do_thread(thread);
+  }
+};
 
 ZRootsIterator::ZRootsIterator() :
     _universe(this),
@@ -175,7 +184,7 @@
   if (ClassUnloading) {
     nmethod::oops_do_marking_prologue();
   } else {
-    ZNMethodTable::nmethod_entries_do_begin();
+    ZNMethod::oops_do_begin();
   }
 }
 
@@ -185,7 +194,7 @@
   if (ClassUnloading) {
     nmethod::oops_do_marking_epilogue();
   } else {
-    ZNMethodTable::nmethod_entries_do_end();
+    ZNMethod::oops_do_end();
   }
   JvmtiExport::gc_epilogue();
 
@@ -227,12 +236,13 @@
 void ZRootsIterator::do_threads(ZRootsIteratorClosure* cl) {
   ZStatTimer timer(ZSubPhasePauseRootsThreads);
   ResourceMark rm;
-  Threads::possibly_parallel_threads_do(true, cl);
+  ZRootsIteratorThreadClosure thread_cl(cl);
+  Threads::possibly_parallel_threads_do(true, &thread_cl);
 }
 
 void ZRootsIterator::do_code_cache(ZRootsIteratorClosure* cl) {
   ZStatTimer timer(ZSubPhasePauseRootsCodeCache);
-  ZNMethodTable::oops_do(cl);
+  ZNMethod::oops_do(cl);
 }
 
 void ZRootsIterator::oops_do(ZRootsIteratorClosure* cl, bool visit_jvmti_weak_export) {
--- a/src/hotspot/share/gc/z/zRootsIterator.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/z/zRootsIterator.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -31,9 +31,9 @@
 #include "runtime/thread.hpp"
 #include "utilities/globalDefinitions.hpp"
 
-class ZRootsIteratorClosure : public OopClosure, public ThreadClosure {
+class ZRootsIteratorClosure : public OopClosure {
 public:
-  virtual void do_thread(Thread* thread);
+  virtual void do_thread(Thread* thread) {}
 };
 
 typedef OopStorage::ParState<true /* concurrent */, false /* is_const */> ZOopStorageIterator;
--- a/src/hotspot/share/gc/z/zStatTLAB.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#include "gc/z/zStatTLAB.hpp"
-
-ZPerWorker<ThreadLocalAllocStats>* ZStatTLAB::_stats = NULL;
-
-void ZStatTLAB::initialize() {
-  if (UseTLAB) {
-    assert(_stats == NULL, "Already initialized");
-    _stats = new ZPerWorker<ThreadLocalAllocStats>();
-    reset();
-  }
-}
-
-void ZStatTLAB::reset() {
-  if (UseTLAB) {
-    ZPerWorkerIterator<ThreadLocalAllocStats> iter(_stats);
-    for (ThreadLocalAllocStats* stats; iter.next(&stats);) {
-      stats->reset();
-    }
-  }
-}
-
-ThreadLocalAllocStats* ZStatTLAB::get() {
-  if (UseTLAB) {
-    return _stats->addr();
-  }
-
-  return NULL;
-}
-
-void ZStatTLAB::publish() {
-  if (UseTLAB) {
-    ThreadLocalAllocStats total;
-
-    ZPerWorkerIterator<ThreadLocalAllocStats> iter(_stats);
-    for (ThreadLocalAllocStats* stats; iter.next(&stats);) {
-      total.update(*stats);
-    }
-
-    total.publish();
-  }
-}
--- a/src/hotspot/share/gc/z/zStatTLAB.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,42 +0,0 @@
-/*
- * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
-
-#ifndef SHARE_GC_Z_ZSTATTLAB_HPP
-#define SHARE_GC_Z_ZSTATTLAB_HPP
-
-#include "gc/shared/threadLocalAllocBuffer.hpp"
-#include "gc/z/zValue.hpp"
-#include "memory/allocation.hpp"
-
-class ZStatTLAB : public AllStatic {
-private:
-  static ZPerWorker<ThreadLocalAllocStats>* _stats;
-
-public:
-  static void initialize();
-  static void reset();
-  static ThreadLocalAllocStats* get();
-  static void publish();
-};
-
-#endif // SHARE_GC_Z_ZSTATTLAB_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zThreadLocalAllocBuffer.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zAddress.inline.hpp"
+#include "gc/z/zThreadLocalAllocBuffer.hpp"
+#include "runtime/globals.hpp"
+#include "runtime/thread.hpp"
+
+ZPerWorker<ThreadLocalAllocStats>* ZThreadLocalAllocBuffer::_stats = NULL;
+
+void ZThreadLocalAllocBuffer::initialize() {
+  if (UseTLAB) {
+    assert(_stats == NULL, "Already initialized");
+    _stats = new ZPerWorker<ThreadLocalAllocStats>();
+    reset_statistics();
+  }
+}
+
+void ZThreadLocalAllocBuffer::reset_statistics() {
+  if (UseTLAB) {
+    ZPerWorkerIterator<ThreadLocalAllocStats> iter(_stats);
+    for (ThreadLocalAllocStats* stats; iter.next(&stats);) {
+      stats->reset();
+    }
+  }
+}
+
+void ZThreadLocalAllocBuffer::publish_statistics() {
+  if (UseTLAB) {
+    ThreadLocalAllocStats total;
+
+    ZPerWorkerIterator<ThreadLocalAllocStats> iter(_stats);
+    for (ThreadLocalAllocStats* stats; iter.next(&stats);) {
+      total.update(*stats);
+    }
+
+    total.publish();
+  }
+}
+
+static void fixup_address(HeapWord** p) {
+  *p = (HeapWord*)ZAddress::good_or_null((uintptr_t)*p);
+}
+
+void ZThreadLocalAllocBuffer::retire(Thread* thread) {
+  if (UseTLAB && thread->is_Java_thread()) {
+    ThreadLocalAllocStats* const stats = _stats->addr();
+    thread->tlab().addresses_do(fixup_address);
+    thread->tlab().retire(stats);
+    thread->tlab().resize();
+  }
+}
+
+void ZThreadLocalAllocBuffer::remap(Thread* thread) {
+  if (UseTLAB && thread->is_Java_thread()) {
+    thread->tlab().addresses_do(fixup_address);
+  }
+}
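The renamed ZThreadLocalAllocBuffer keeps one ThreadLocalAllocStats slot per GC worker and exposes reset_statistics()/publish_statistics() to clear the slots and to merge them into a single published total. A self-contained sketch of that reset/update/publish pattern, with a simplified stats struct and container standing in for the ZPerWorker template:

    #include <cstddef>
    #include <vector>

    struct AllocStats {
      size_t refills = 0;
      size_t wasted_bytes = 0;

      void reset() { refills = 0; wasted_bytes = 0; }
      void update(const AllocStats& other) {
        refills += other.refills;
        wasted_bytes += other.wasted_bytes;
      }
    };

    class PerWorkerStats {
    private:
      std::vector<AllocStats> _stats;

    public:
      explicit PerWorkerStats(size_t nworkers) : _stats(nworkers) {}

      AllocStats* addr(size_t worker_id) { return &_stats[worker_id]; }

      void reset_statistics() {
        for (AllocStats& s : _stats) {
          s.reset();
        }
      }

      AllocStats publish_statistics() const {
        AllocStats total;
        for (const AllocStats& s : _stats) {
          total.update(s);            // merge each worker's slot into the total
        }
        return total;                 // the VM hands this to the shared TLAB statistics
      }
    };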
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/z/zThreadLocalAllocBuffer.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZTHREADLOCALALLOCBUFFER_HPP
+#define SHARE_GC_Z_ZTHREADLOCALALLOCBUFFER_HPP
+
+#include "gc/shared/threadLocalAllocBuffer.hpp"
+#include "gc/z/zValue.hpp"
+#include "memory/allocation.hpp"
+
+class ZThreadLocalAllocBuffer : public AllStatic {
+private:
+  static ZPerWorker<ThreadLocalAllocStats>* _stats;
+
+public:
+  static void initialize();
+
+  static void reset_statistics();
+  static void publish_statistics();
+
+  static void retire(Thread* thread);
+  static void remap(Thread* thread);
+};
+
+#endif // SHARE_GC_Z_ZTHREADLOCALALLOCBUFFER_HPP
--- a/src/hotspot/share/gc/z/zUnload.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/gc/z/zUnload.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -30,7 +30,7 @@
 #include "gc/shared/gcBehaviours.hpp"
 #include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/z/zLock.inline.hpp"
-#include "gc/z/zNMethodTable.hpp"
+#include "gc/z/zNMethod.hpp"
 #include "gc/z/zOopClosures.hpp"
 #include "gc/z/zStat.hpp"
 #include "gc/z/zUnload.hpp"
@@ -75,7 +75,7 @@
 public:
   virtual bool is_unloading(CompiledMethod* method) const {
     nmethod* const nm = method->as_nmethod();
-    ZReentrantLock* const lock = ZNMethodTable::lock_for_nmethod(nm);
+    ZReentrantLock* const lock = ZNMethod::lock_for_nmethod(nm);
     if (lock == NULL) {
       return is_unloading(nm);
     } else {
@@ -89,7 +89,7 @@
 public:
   virtual bool lock(CompiledMethod* method) {
     nmethod* const nm = method->as_nmethod();
-    ZReentrantLock* const lock = ZNMethodTable::lock_for_nmethod(nm);
+    ZReentrantLock* const lock = ZNMethod::lock_for_nmethod(nm);
     if (lock != NULL) {
       lock->lock();
     }
@@ -98,7 +98,7 @@
 
   virtual void unlock(CompiledMethod* method) {
     nmethod* const nm = method->as_nmethod();
-    ZReentrantLock* const lock = ZNMethodTable::lock_for_nmethod(nm);
+    ZReentrantLock* const lock = ZNMethod::lock_for_nmethod(nm);
     if (lock != NULL) {
       lock->unlock();
     }
@@ -110,7 +110,7 @@
     }
 
     nmethod* const nm = method->as_nmethod();
-    ZReentrantLock* const lock = ZNMethodTable::lock_for_nmethod(nm);
+    ZReentrantLock* const lock = ZNMethod::lock_for_nmethod(nm);
     return lock == NULL || lock->is_owned();
   }
 };
@@ -149,7 +149,7 @@
 
   Klass::clean_weak_klass_links(unloading_occurred);
 
-  ZNMethodTable::unlink(_workers, unloading_occurred);
+  ZNMethod::unlink(_workers, unloading_occurred);
 
   DependencyContext::cleaning_end();
 }
@@ -157,7 +157,7 @@
 void ZUnload::purge() {
   {
     SuspendibleThreadSetJoiner sts;
-    ZNMethodTable::purge(_workers);
+    ZNMethod::purge(_workers);
   }
 
   ClassLoaderDataGraph::purge();
--- a/src/hotspot/share/interpreter/linkResolver.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/interpreter/linkResolver.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -39,6 +39,7 @@
 #include "logging/logStream.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
+#include "oops/constantPool.hpp"
 #include "oops/cpCache.inline.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/method.hpp"
--- a/src/hotspot/share/interpreter/rewriter.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/interpreter/rewriter.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -28,6 +28,7 @@
 #include "interpreter/rewriter.hpp"
 #include "memory/metadataFactory.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/constantPool.hpp"
 #include "oops/generateOopMap.hpp"
 #include "prims/methodHandles.hpp"
 #include "runtime/fieldDescriptor.inline.hpp"
--- a/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/jfr/instrumentation/jfrEventClassTransformer.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -43,6 +43,7 @@
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/array.hpp"
+#include "oops/constantPool.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/method.hpp"
 #include "prims/jvmtiRedefineClasses.hpp"
--- a/src/hotspot/share/jfr/jni/jfrJniMethod.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/jfr/jni/jfrJniMethod.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -114,7 +114,7 @@
 NO_TRANSITION_END
 
 NO_TRANSITION(void, jfr_set_file_notification(JNIEnv* env, jobject jvm, jlong threshold))
-  JfrChunkRotation::set_threshold((intptr_t)threshold);
+  JfrChunkRotation::set_threshold(threshold);
 NO_TRANSITION_END
 
 NO_TRANSITION(void, jfr_set_sample_threads(JNIEnv* env, jobject jvm, jboolean sampleThreads))
--- a/src/hotspot/share/jfr/metadata/metadata.xml	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/jfr/metadata/metadata.xml	Tue Feb 26 11:17:12 2019 +0530
@@ -533,12 +533,6 @@
     <Field type="int" name="iterations" label="Iterations" description="Number of state check iterations" />
   </Event>
 
-  <Event name="SafepointWaitBlocked" category="Java Virtual Machine, Runtime, Safepoint" label="Safepoint Wait Blocked" description="Safepointing begin waiting on running threads to block"
-    thread="true">
-    <Field type="ulong" name="safepointId" label="Safepoint Identifier" relation="SafepointId" />
-    <Field type="int" name="runningThreadCount" label="Running Threads" description="The number running of threads wait for safe point" />
-  </Event>
-
   <Event name="SafepointCleanup" category="Java Virtual Machine, Runtime, Safepoint" label="Safepoint Cleanup" description="Safepointing begin running cleanup tasks"
     thread="true">
     <Field type="ulong" name="safepointId" label="Safepoint Identifier" relation="SafepointId" />
--- a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -44,13 +44,13 @@
   }
 }
 
-static void write_checkpoint_header(u1* pos, jlong size, jlong time, bool flushpoint, juint type_count) {
+static void write_checkpoint_header(u1* pos, int64_t size, jlong time, bool flushpoint, u4 type_count) {
   assert(pos != NULL, "invariant");
   JfrBigEndianWriter be_writer(pos, sizeof(JfrCheckpointEntry));
   be_writer.write(size);
   be_writer.write(time);
   be_writer.write(JfrTicks::now().value() - time);
-  be_writer.write(flushpoint ? (juint)1 : (juint)0);
+  be_writer.write(flushpoint ? (u4)1 : (u4)0);
   be_writer.write(type_count);
   assert(be_writer.is_valid(), "invariant");
 }
@@ -71,7 +71,7 @@
   assert(this->is_valid(), "invariant");
   assert(count() > 0, "invariant");
   assert(this->used_size() > sizeof(JfrCheckpointEntry), "invariant");
-  const jlong size = this->current_offset();
+  const int64_t size = this->current_offset();
   assert(size + this->start_pos() == this->current_pos(), "invariant");
   write_checkpoint_header(const_cast<u1*>(this->start_pos()), size, _time, is_flushpoint(), count());
   release();
@@ -85,11 +85,11 @@
   return _flushpoint;
 }
 
-juint JfrCheckpointWriter::count() const {
+u4 JfrCheckpointWriter::count() const {
   return _count;
 }
 
-void JfrCheckpointWriter::set_count(juint count) {
+void JfrCheckpointWriter::set_count(u4 count) {
   _count = count;
 }
 
@@ -111,7 +111,7 @@
 }
 
 void JfrCheckpointWriter::write_key(u8 key) {
-  write<u8>(key);
+  write(key);
 }
 
 void JfrCheckpointWriter::increment() {
@@ -119,10 +119,10 @@
 }
 
 void JfrCheckpointWriter::write_count(u4 nof_entries) {
-  write<u4>((u4)nof_entries);
+  write(nof_entries);
 }
 
-void JfrCheckpointWriter::write_count(u4 nof_entries, jlong offset) {
+void JfrCheckpointWriter::write_count(u4 nof_entries, int64_t offset) {
   write_padded_at_offset(nof_entries, offset);
 }
 
--- a/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/jfr/recorder/checkpoint/jfrCheckpointWriter.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -49,21 +49,21 @@
 typedef EventWriterHost<BigEndianEncoder, CompressedIntegerEncoder, JfrTransactionalCheckpointWriter> JfrCheckpointWriterBase;
 
 struct JfrCheckpointContext {
-  jlong offset;
-  juint count;
+  int64_t offset;
+  u4 count;
 };
 
 class JfrCheckpointWriter : public JfrCheckpointWriterBase {
   friend class JfrSerializerRegistration;
  private:
   JfrTicks _time;
-  jlong _offset;
-  juint _count;
+  int64_t _offset;
+  u4 _count;
   bool _flushpoint;
   bool _header;
 
-  juint count() const;
-  void set_count(juint count);
+  u4 count() const;
+  void set_count(u4 count);
   void increment();
   void set_flushpoint(bool flushpoint);
   bool is_flushpoint() const;
@@ -75,7 +75,7 @@
   ~JfrCheckpointWriter();
   void write_type(JfrTypeId type_id);
   void write_count(u4 nof_entries);
-  void write_count(u4 nof_entries, jlong offset);
+  void write_count(u4 nof_entries, int64_t offset);
   void write_key(u8 key);
   const JfrCheckpointContext context() const;
   void set_context(const JfrCheckpointContext ctx);
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -65,7 +65,7 @@
  private:
   JfrCheckpointWriter& _writer;
   JfrCheckpointContext _ctx;
-  const intptr_t _count_position;
+  const int64_t _count_position;
   Thread* const _curthread;
   u4 _count;
 
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetWriter.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrTypeSetWriter.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -35,7 +35,7 @@
   WriterImpl _impl;
   JfrCheckpointWriter* _writer;
   JfrCheckpointContext _ctx;
-  jlong _count_offset;
+  int64_t _count_offset;
   int _count;
   bool _skip_header;
  public:
--- a/src/hotspot/share/jfr/recorder/repository/jfrChunkRotation.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/jfr/recorder/repository/jfrChunkRotation.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -29,7 +29,7 @@
 #include "runtime/handles.inline.hpp"
 
 static jobject chunk_monitor = NULL;
-static intptr_t threshold = 0;
+static int64_t threshold = 0;
 static bool rotate = false;
 
 static jobject install_chunk_monitor(Thread* thread) {
@@ -62,7 +62,6 @@
     // already in progress
     return;
   }
-  assert(!rotate, "invariant");
   if (writer.size_written() > threshold) {
     rotate = true;
     notify();
@@ -77,6 +76,6 @@
   rotate = false;
 }
 
-void JfrChunkRotation::set_threshold(intptr_t bytes) {
+void JfrChunkRotation::set_threshold(int64_t bytes) {
   threshold = bytes;
 }
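With the assert dropped, evaluate() simply ignores further requests while a rotation is pending and is re-armed when on_rotation() runs. A minimal sketch of that threshold trigger as an illustrative class, without the JNI chunk-monitor notification:

    #include <cstdint>

    class ChunkRotation {
    private:
      int64_t _threshold = 0;
      bool    _rotate = false;

    public:
      void set_threshold(int64_t bytes) { _threshold = bytes; }

      void evaluate(int64_t size_written) {
        if (_rotate) {
          return;                     // a rotation is already in progress
        }
        if (size_written > _threshold) {
          _rotate = true;             // in the VM this also notifies the chunk monitor
        }
      }

      bool should_rotate() const { return _rotate; }
      void on_rotation()         { _rotate = false; }
    };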
--- a/src/hotspot/share/jfr/recorder/repository/jfrChunkRotation.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/jfr/recorder/repository/jfrChunkRotation.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -36,7 +36,7 @@
 class JfrChunkRotation : AllStatic {
  public:
   static void evaluate(const JfrChunkWriter& writer);
-  static void set_threshold(intptr_t bytes);
+  static void set_threshold(int64_t bytes);
   static bool should_rotate();
   static void on_rotation();
 };
--- a/src/hotspot/share/jfr/recorder/repository/jfrChunkState.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/jfr/recorder/repository/jfrChunkState.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -53,19 +53,19 @@
   set_previous_checkpoint_offset(0);
 }
 
-void JfrChunkState::set_previous_checkpoint_offset(jlong offset) {
+void JfrChunkState::set_previous_checkpoint_offset(int64_t offset) {
   _previous_checkpoint_offset = offset;
 }
 
-jlong JfrChunkState::previous_checkpoint_offset() const {
+int64_t JfrChunkState::previous_checkpoint_offset() const {
   return _previous_checkpoint_offset;
 }
 
-jlong JfrChunkState::previous_start_ticks() const {
+int64_t JfrChunkState::previous_start_ticks() const {
   return _previous_start_ticks;
 }
 
-jlong JfrChunkState::previous_start_nanos() const {
+int64_t JfrChunkState::previous_start_nanos() const {
   return _previous_start_nanos;
 }
 
@@ -92,7 +92,7 @@
   save_current_and_update_start_ticks();
 }
 
-jlong JfrChunkState::last_chunk_duration() const {
+int64_t JfrChunkState::last_chunk_duration() const {
   return _start_nanos - _previous_start_nanos;
 }
 
--- a/src/hotspot/share/jfr/recorder/repository/jfrChunkState.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/jfr/recorder/repository/jfrChunkState.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -25,7 +25,6 @@
 #ifndef SHARE_JFR_RECORDER_REPOSITORY_JFRCHUNKSTATE_HPP
 #define SHARE_JFR_RECORDER_REPOSITORY_JFRCHUNKSTATE_HPP
 
-#include "jni.h"
 #include "jfr/utilities/jfrAllocation.hpp"
 #include "jfr/utilities/jfrTypes.hpp"
 
@@ -33,11 +32,11 @@
   friend class JfrChunkWriter;
  private:
   char* _path;
-  jlong _start_ticks;
-  jlong _start_nanos;
-  jlong _previous_start_ticks;
-  jlong _previous_start_nanos;
-  jlong _previous_checkpoint_offset;
+  int64_t _start_ticks;
+  int64_t _start_nanos;
+  int64_t _previous_start_ticks;
+  int64_t _previous_start_nanos;
+  int64_t _previous_checkpoint_offset;
 
   void update_start_ticks();
   void update_start_nanos();
@@ -47,11 +46,11 @@
   JfrChunkState();
   ~JfrChunkState();
   void reset();
-  jlong previous_checkpoint_offset() const;
-  void set_previous_checkpoint_offset(jlong offset);
-  jlong previous_start_ticks() const;
-  jlong previous_start_nanos() const;
-  jlong last_chunk_duration() const;
+  int64_t previous_checkpoint_offset() const;
+  void set_previous_checkpoint_offset(int64_t offset);
+  int64_t previous_start_ticks() const;
+  int64_t previous_start_nanos() const;
+  int64_t last_chunk_duration() const;
   void update_time_to_now();
   void set_path(const char* path);
   const char* path() const;
--- a/src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -32,9 +32,8 @@
 #include "runtime/os.hpp"
 #include "runtime/os.inline.hpp"
 
-const u2 JFR_VERSION_MAJOR = 2;
-const u2 JFR_VERSION_MINOR = 0;
-
+static const u2 JFR_VERSION_MAJOR = 2;
+static const u2 JFR_VERSION_MINOR = 0;
 static const size_t MAGIC_LEN = 4;
 static const size_t FILEHEADER_SLOT_SIZE = 8;
 static const size_t CHUNK_SIZE_OFFSET = 8;
@@ -79,14 +78,14 @@
   return is_open;
 }
 
-size_t JfrChunkWriter::close(intptr_t metadata_offset) {
+size_t JfrChunkWriter::close(int64_t metadata_offset) {
   write_header(metadata_offset);
   this->flush();
   this->close_fd();
-  return size_written();
+  return (size_t)size_written();
 }
 
-void JfrChunkWriter::write_header(intptr_t metadata_offset) {
+void JfrChunkWriter::write_header(int64_t metadata_offset) {
   assert(this->is_valid(), "invariant");
   // Chunk size
   this->write_be_at_offset(size_written(), CHUNK_SIZE_OFFSET);
@@ -106,15 +105,15 @@
   _chunkstate->set_path(chunk_path);
 }
 
-intptr_t JfrChunkWriter::size_written() const {
+int64_t JfrChunkWriter::size_written() const {
   return this->is_valid() ? this->current_offset() : 0;
 }
 
-intptr_t JfrChunkWriter::previous_checkpoint_offset() const {
+int64_t JfrChunkWriter::previous_checkpoint_offset() const {
   return _chunkstate->previous_checkpoint_offset();
 }
 
-void JfrChunkWriter::set_previous_checkpoint_offset(intptr_t offset) {
+void JfrChunkWriter::set_previous_checkpoint_offset(int64_t offset) {
   _chunkstate->set_previous_checkpoint_offset(offset);
 }
 
--- a/src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/jfr/recorder/repository/jfrChunkWriter.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -41,16 +41,16 @@
   JfrChunkState* _chunkstate;
 
   bool open();
-  size_t close(intptr_t metadata_offset);
-  void write_header(intptr_t metadata_offset);
+  size_t close(int64_t metadata_offset);
+  void write_header(int64_t metadata_offset);
   void set_chunk_path(const char* chunk_path);
 
  public:
   JfrChunkWriter();
   bool initialize();
-  intptr_t size_written() const;
-  intptr_t previous_checkpoint_offset() const;
-  void set_previous_checkpoint_offset(intptr_t offset);
+  int64_t size_written() const;
+  int64_t previous_checkpoint_offset() const;
+  void set_previous_checkpoint_offset(int64_t offset);
   void time_stamp_chunk_now();
 };
 
--- a/src/hotspot/share/jfr/recorder/repository/jfrRepository.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/jfr/recorder/repository/jfrRepository.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -147,10 +147,10 @@
   iso8601_to_date_time(buffer);
 }
 
-static jlong file_size(fio_fd fd) {
+static int64_t file_size(fio_fd fd) {
   assert(fd != invalid_fd, "invariant");
-  const jlong current_offset = os::current_file_offset(fd);
-  const jlong size = os::lseek(fd, 0, SEEK_END);
+  const int64_t current_offset = os::current_file_offset(fd);
+  const int64_t size = os::lseek(fd, 0, SEEK_END);
   os::seek_to_file_offset(fd, current_offset);
   return size;
 }
@@ -218,7 +218,7 @@
   if (invalid_fd == entry_fd) {
     return NULL;
   }
-  const jlong entry_size = file_size(entry_fd);
+  const int64_t entry_size = file_size(entry_fd);
   os::close(entry_fd);
   if (0 == entry_size) {
     return NULL;
@@ -260,6 +260,7 @@
   }
 }
 #endif
+
 bool RepositoryIterator::has_next() const {
   return (_files != NULL && _iterator < _files->length());
 }
@@ -275,21 +276,27 @@
   if (file_copy_block == NULL) {
     return;
   }
- jlong bytes_written_total = 0;
+ int64_t bytes_written_total = 0;
   while (iterator.has_next()) {
     fio_fd current_fd = invalid_fd;
     const char* const fqn = iterator.next();
     if (fqn != NULL) {
       current_fd = open_existing(fqn);
       if (current_fd != invalid_fd) {
-        const jlong current_filesize = file_size(current_fd);
+        const int64_t current_filesize = file_size(current_fd);
         assert(current_filesize > 0, "invariant");
-        jlong bytes_read = 0;
-        jlong bytes_written = 0;
+        int64_t bytes_read = 0;
+        int64_t bytes_written = 0;
         while (bytes_read < current_filesize) {
-          bytes_read += (jlong)os::read_at(current_fd, file_copy_block, size_of_file_copy_block, bytes_read);
-          assert(bytes_read - bytes_written <= (jlong)size_of_file_copy_block, "invariant");
-          bytes_written += (jlong)os::write(emergency_fd, file_copy_block, bytes_read - bytes_written);
+          const ssize_t read_result = os::read_at(current_fd, file_copy_block, size_of_file_copy_block, bytes_read);
+          if (-1 == read_result) {
+            // This message is for the user: log with the "jfr" tag, not "jfr, system".
+            log_info(jfr)("Unable to recover JFR data");
+            break;
+          }
+          bytes_read += (int64_t)read_result;
+          assert(bytes_read - bytes_written <= (int64_t)size_of_file_copy_block, "invariant");
+          bytes_written += (int64_t)os::write(emergency_fd, file_copy_block, bytes_read - bytes_written);
           assert(bytes_read == bytes_written, "invariant");
         }
         os::close(current_fd);
@@ -468,6 +475,6 @@
   return _chunkwriter->open();
 }
 
-size_t JfrRepository::close_chunk(jlong metadata_offset) {
+size_t JfrRepository::close_chunk(int64_t metadata_offset) {
   return _chunkwriter->close(metadata_offset);
 }
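Editorial note: the emergency-dump hunk above now checks the result of os::read_at before advancing bytes_read, so a failed read no longer corrupts the copy offsets. A minimal standalone sketch of the same copy pattern, not part of this changeset (plain POSIX read/write and the helper name copy_fd are assumptions for illustration):

    #include <stdint.h>
    #include <unistd.h>

    // Copy file_size bytes from in_fd to out_fd through a caller-supplied buffer,
    // giving up as soon as a read fails instead of advancing the counters.
    static bool copy_fd(int in_fd, int out_fd, char* buf, size_t buf_size, int64_t file_size) {
      int64_t copied = 0;
      while (copied < file_size) {
        const ssize_t n = read(in_fd, buf, buf_size);
        if (n <= 0) {
          return false;                  // read error or unexpected EOF
        }
        ssize_t off = 0;
        while (off < n) {                // a single write may be short
          const ssize_t m = write(out_fd, buf + off, (size_t)(n - off));
          if (m < 0) {
            return false;
          }
          off += m;
        }
        copied += n;
      }
      return true;
    }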
--- a/src/hotspot/share/jfr/recorder/repository/jfrRepository.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/jfr/recorder/repository/jfrRepository.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -55,7 +55,7 @@
   bool set_path(const char* path);
   void set_chunk_path(const char* path);
   bool open_chunk(bool vm_error = false);
-  size_t close_chunk(jlong metadata_offset);
+  size_t close_chunk(int64_t metadata_offset);
   void on_vm_error();
   static void notify_on_new_chunk_path();
   static JfrChunkWriter& chunkwriter();
--- a/src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/jfr/recorder/service/jfrRecorderService.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -130,18 +130,18 @@
   bool not_acquired() const { return !_acquired; }
 };
 
-static intptr_t write_checkpoint_event_prologue(JfrChunkWriter& cw, u8 type_id) {
-  const intptr_t prev_cp_offset = cw.previous_checkpoint_offset();
-  const intptr_t prev_cp_relative_offset = 0 == prev_cp_offset ? 0 : prev_cp_offset - cw.current_offset();
+static int64_t write_checkpoint_event_prologue(JfrChunkWriter& cw, u8 type_id) {
+  const int64_t prev_cp_offset = cw.previous_checkpoint_offset();
+  const int64_t prev_cp_relative_offset = 0 == prev_cp_offset ? 0 : prev_cp_offset - cw.current_offset();
   cw.reserve(sizeof(u4));
   cw.write<u8>(EVENT_CHECKPOINT);
   cw.write(JfrTicks::now());
-  cw.write<jlong>((jlong)0);
+  cw.write((int64_t)0);
   cw.write(prev_cp_relative_offset); // write previous checkpoint offset delta
   cw.write<bool>(false); // flushpoint
-  cw.write<u4>((u4)1); // nof types in this checkpoint
-  cw.write<u8>(type_id);
-  const intptr_t number_of_elements_offset = cw.current_offset();
+  cw.write((u4)1); // nof types in this checkpoint
+  cw.write(type_id);
+  const int64_t number_of_elements_offset = cw.current_offset();
   cw.reserve(sizeof(u4));
   return number_of_elements_offset;
 }
@@ -161,8 +161,8 @@
   }
   bool process() {
     // current_cp_offset is also offset for the event size header field
-    const intptr_t current_cp_offset = _cw.current_offset();
-    const intptr_t num_elements_offset = write_checkpoint_event_prologue(_cw, _type_id);
+    const int64_t current_cp_offset = _cw.current_offset();
+    const int64_t num_elements_offset = write_checkpoint_event_prologue(_cw, _type_id);
     // invocation
     _content_functor.process();
     const u4 number_of_elements = (u4)_content_functor.processed();
@@ -468,9 +468,9 @@
   JfrMetadataEvent::lock();
 }
 
-static jlong write_metadata_event(JfrChunkWriter& chunkwriter) {
+static int64_t write_metadata_event(JfrChunkWriter& chunkwriter) {
   assert(chunkwriter.is_valid(), "invariant");
-  const jlong metadata_offset = chunkwriter.current_offset();
+  const int64_t metadata_offset = chunkwriter.current_offset();
   JfrMetadataEvent::write(chunkwriter, metadata_offset);
   return metadata_offset;
 }
--- a/src/hotspot/share/jfr/writers/jfrEventWriterHost.inline.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/jfr/writers/jfrEventWriterHost.inline.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -49,7 +49,7 @@
 inline intptr_t EventWriterHost<BE, IE, WriterPolicyImpl>::end_write(void) {
   assert(this->is_acquired(),
     "state corruption, calling end with writer with non-acquired state!");
-  return this->is_valid() ? this->used_offset() : 0;
+  return this->is_valid() ? (intptr_t)this->used_offset() : 0;
 }
 
 template <typename BE, typename IE, typename WriterPolicyImpl>
--- a/src/hotspot/share/jfr/writers/jfrPosition.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/jfr/writers/jfrPosition.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -48,8 +48,8 @@
 
  public:
   size_t available_size() const;
-  intptr_t used_offset() const;
-  intptr_t current_offset() const;
+  int64_t used_offset() const;
+  int64_t current_offset() const;
   size_t used_size() const;
   void reset();
 };
--- a/src/hotspot/share/jfr/writers/jfrPosition.inline.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/jfr/writers/jfrPosition.inline.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -80,12 +80,12 @@
 }
 
 template <typename AP>
-inline intptr_t Position<AP>::used_offset() const {
+inline int64_t Position<AP>::used_offset() const {
   return _current_pos - _start_pos;
 }
 
 template <typename AP>
-inline intptr_t Position<AP>::current_offset() const {
+inline int64_t Position<AP>::current_offset() const {
   return this->used_offset();
 }
 
--- a/src/hotspot/share/jfr/writers/jfrStreamWriterHost.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/jfr/writers/jfrStreamWriterHost.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -33,9 +33,9 @@
  public:
   typedef typename Adapter::StorageType StorageType;
  private:
-  intptr_t _stream_pos;
+  int64_t _stream_pos;
   fio_fd _fd;
-  intptr_t current_stream_position() const;
+  int64_t current_stream_position() const;
 
  protected:
   StreamWriterHost(StorageType* storage, Thread* thread);
@@ -47,8 +47,8 @@
   bool has_valid_fd() const;
 
  public:
-  intptr_t current_offset() const;
-  void seek(intptr_t offset);
+  int64_t current_offset() const;
+  void seek(int64_t offset);
   void flush();
   void write_unbuffered(const void* src, size_t len);
   bool is_valid() const;
--- a/src/hotspot/share/jfr/writers/jfrStreamWriterHost.inline.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/jfr/writers/jfrStreamWriterHost.inline.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -44,7 +44,7 @@
 }
 
 template <typename Adapter, typename AP>
-inline intptr_t StreamWriterHost<Adapter, AP>::current_stream_position() const {
+inline int64_t StreamWriterHost<Adapter, AP>::current_stream_position() const {
   return this->used_offset() + _stream_pos;
 }
 
@@ -73,7 +73,7 @@
 inline void StreamWriterHost<Adapter, AP>::flush(size_t size) {
   assert(size > 0, "invariant");
   assert(this->is_valid(), "invariant");
-  _stream_pos += os::write(_fd, this->start_pos(), (int)size);
+  _stream_pos += os::write(_fd, this->start_pos(), (unsigned int)size);
   StorageHost<Adapter, AP>::reset();
   assert(0 == this->used_offset(), "invariant");
 }
@@ -84,12 +84,12 @@
 }
 
 template <typename Adapter, typename AP>
-inline intptr_t StreamWriterHost<Adapter, AP>::current_offset() const {
+inline int64_t StreamWriterHost<Adapter, AP>::current_offset() const {
   return current_stream_position();
 }
 
 template <typename Adapter, typename AP>
-void StreamWriterHost<Adapter, AP>::seek(intptr_t offset) {
+void StreamWriterHost<Adapter, AP>::seek(int64_t offset) {
   this->flush();
   assert(0 == this->used_offset(), "can only seek from beginning");
   _stream_pos = os::seek_to_file_offset(_fd, offset);
@@ -110,7 +110,7 @@
   this->flush();
   assert(0 == this->used_offset(), "can only seek from beginning");
   while (len > 0) {
-    const int n = MIN2<int>((int)len, INT_MAX);
+    const unsigned int n = MIN2((unsigned int)len, (unsigned int)INT_MAX);
     _stream_pos += os::write(_fd, buf, n);
     len -= n;
   }
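Editorial note: write_unbuffered above splits a size_t-sized request into pieces of at most INT_MAX bytes because the underlying write primitive takes an int-sized length. A sketch of that chunking, not part of this changeset (POSIX write stands in for os::write; error handling is elided):

    #include <climits>
    #include <unistd.h>

    // Issue a large request as a series of writes no larger than INT_MAX each.
    static void write_all(int fd, const char* buf, size_t len) {
      while (len > 0) {
        const unsigned int n = (unsigned int)(len < (size_t)INT_MAX ? len : (size_t)INT_MAX);
        const ssize_t written = write(fd, buf, n);
        if (written <= 0) {
          return;                        // error handling elided in this sketch
        }
        buf += written;
        len -= (size_t)written;
      }
    }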
--- a/src/hotspot/share/jfr/writers/jfrWriterHost.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/jfr/writers/jfrWriterHost.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -91,12 +91,12 @@
   void bytes(const void* buf, size_t len);
   void write_utf8_u2_len(const char* value);
   template <typename T>
-  void write_padded_at_offset(T value, intptr_t offset);
+  void write_padded_at_offset(T value, int64_t offset);
   template <typename T>
-  void write_at_offset(T value, intptr_t offset);
+  void write_at_offset(T value, int64_t offset);
   template <typename T>
-  void write_be_at_offset(T value, intptr_t offset);
-  intptr_t reserve(size_t size);
+  void write_be_at_offset(T value, int64_t offset);
+  int64_t reserve(size_t size);
 };
 
 #endif // SHARE_JFR_WRITERS_JFRWRITERHOST_HPP
--- a/src/hotspot/share/jfr/writers/jfrWriterHost.inline.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/jfr/writers/jfrWriterHost.inline.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -196,7 +196,7 @@
 
 template <typename BE, typename IE, typename WriterPolicyImpl>
 inline void WriterHost<BE, IE, WriterPolicyImpl>::write(double value) {
-  be_write(*(uintptr_t*)&(value));
+  be_write(*(u8*)&(value));
 }
 
 template <typename BE, typename IE, typename WriterPolicyImpl>
@@ -317,9 +317,9 @@
 }
 
 template <typename BE, typename IE, typename WriterPolicyImpl>
-inline intptr_t WriterHost<BE, IE, WriterPolicyImpl>::reserve(size_t size) {
+inline int64_t WriterHost<BE, IE, WriterPolicyImpl>::reserve(size_t size) {
   if (ensure_size(size) != NULL) {
-    intptr_t reserved_offset = this->current_offset();
+    const int64_t reserved_offset = this->current_offset();
     this->set_current_pos(size);
     return reserved_offset;
   }
@@ -329,9 +329,9 @@
 
 template <typename BE, typename IE, typename WriterPolicyImpl>
 template <typename T>
-inline void WriterHost<BE, IE, WriterPolicyImpl>::write_padded_at_offset(T value, intptr_t offset) {
+inline void WriterHost<BE, IE, WriterPolicyImpl>::write_padded_at_offset(T value, int64_t offset) {
   if (this->is_valid()) {
-    const intptr_t current = this->current_offset();
+    const int64_t current = this->current_offset();
     this->seek(offset);
     write_padded(value);
     this->seek(current); // restore
@@ -340,9 +340,9 @@
 
 template <typename BE, typename IE, typename WriterPolicyImpl>
 template <typename T>
-inline void WriterHost<BE, IE, WriterPolicyImpl>::write_at_offset(T value, intptr_t offset) {
+inline void WriterHost<BE, IE, WriterPolicyImpl>::write_at_offset(T value, int64_t offset) {
   if (this->is_valid()) {
-    const intptr_t current = this->current_offset();
+    const int64_t current = this->current_offset();
     this->seek(offset);
     write(value);
     this->seek(current); // restore
@@ -351,9 +351,9 @@
 
 template <typename BE, typename IE, typename WriterPolicyImpl>
 template <typename T>
-inline void WriterHost<BE, IE, WriterPolicyImpl>::write_be_at_offset(T value, intptr_t offset) {
+inline void WriterHost<BE, IE, WriterPolicyImpl>::write_be_at_offset(T value, int64_t offset) {
   if (this->is_valid()) {
-    const intptr_t current = this->current_offset();
+    const int64_t current = this->current_offset();
     this->seek(offset);
     be_write(value);
     this->seek(current); // restore
--- a/src/hotspot/share/logging/logFileOutput.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/logging/logFileOutput.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -51,6 +51,14 @@
   _file_name = make_file_name(name + strlen(Prefix), _pid_str, _vm_start_time_str);
 }
 
+const char* LogFileOutput::cur_log_file_name() {
+  if (strlen(_archive_name) == 0) {
+    return _file_name;
+  } else {
+    return _archive_name;
+  }
+}
+
 void LogFileOutput::set_file_name_parameters(jlong vm_start_time) {
   int res = jio_snprintf(_pid_str, sizeof(_pid_str), "%d", os::current_process_id());
   assert(res > 0, "PID buffer too small");
@@ -234,6 +242,7 @@
     _file_count_max_digits = number_of_digits(_file_count - 1);
     _archive_name_len = 2 + strlen(_file_name) + _file_count_max_digits;
     _archive_name = NEW_C_HEAP_ARRAY(char, _archive_name_len, mtLogging);
+    _archive_name[0] = 0;
   }
 
   log_trace(logging)("Initializing logging to file '%s' (filecount: %u"
--- a/src/hotspot/share/logging/logFileOutput.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/logging/logFileOutput.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -92,6 +92,7 @@
     return _name;
   }
 
+  const char* cur_log_file_name();
   static const char* const Prefix;
   static void set_file_name_parameters(jlong start_time);
 };
--- a/src/hotspot/share/memory/allocation.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/memory/allocation.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -132,6 +132,7 @@
   f(mtArguments,     "Arguments")                                                   \
   f(mtModule,        "Module")                                                      \
   f(mtSafepoint,     "Safepoint")                                                   \
+  f(mtSynchronizer,  "Synchronization")                                             \
   f(mtNone,          "Unknown")                                                     \
   //end
 
--- a/src/hotspot/share/memory/filemap.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/memory/filemap.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -45,6 +45,7 @@
 #include "prims/jvmtiExport.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/java.hpp"
+#include "runtime/mutexLocker.hpp"
 #include "runtime/os.inline.hpp"
 #include "runtime/vm_version.hpp"
 #include "services/memTracker.hpp"
@@ -501,6 +502,16 @@
   }
 
   _validating_shared_path_table = false;
+
+#if INCLUDE_JVMTI
+  if (_classpath_entries_for_jvmti != NULL) {
+    os::free(_classpath_entries_for_jvmti);
+  }
+  size_t sz = sizeof(ClassPathEntry*) *  _shared_path_table_size;
+  _classpath_entries_for_jvmti = (ClassPathEntry**)os::malloc(sz, mtClass);
+  memset(_classpath_entries_for_jvmti, 0, sz);
+#endif
+
   return true;
 }
 
@@ -550,7 +561,7 @@
 // Read the FileMapInfo information from the file.
 bool FileMapInfo::open_for_read() {
   _full_path = Arguments::GetSharedArchivePath();
-  int fd = open(_full_path, O_RDONLY | O_BINARY, 0);
+  int fd = os::open(_full_path, O_RDONLY | O_BINARY, 0);
   if (fd < 0) {
     if (errno == ENOENT) {
       // Not locating the shared archive is ok.
@@ -585,7 +596,7 @@
   // Use remove() to delete the existing file because, on Unix, this will
   // allow processes that have it open continued access to the file.
   remove(_full_path);
-  int fd = open(_full_path, O_RDWR | O_CREAT | O_TRUNC | O_BINARY, 0444);
+  int fd = os::open(_full_path, O_RDWR | O_CREAT | O_TRUNC | O_BINARY, 0444);
   if (fd < 0) {
     fail_stop("Unable to create shared archive file %s: (%s).", _full_path,
               os::strerror(errno));
@@ -1440,3 +1451,57 @@
     fail_stop("%s", msg);
   }
 }
+
+#if INCLUDE_JVMTI
+ClassPathEntry** FileMapInfo::_classpath_entries_for_jvmti = NULL;
+
+ClassPathEntry* FileMapInfo::get_classpath_entry_for_jvmti(int i, TRAPS) {
+  ClassPathEntry* ent = _classpath_entries_for_jvmti[i];
+  if (ent == NULL) {
+    if (i == 0) {
+      ent = ClassLoader::get_jrt_entry();
+      assert(ent != NULL, "must be");
+    } else {
+      SharedClassPathEntry* scpe = shared_path(i);
+      assert(scpe->is_jar(), "must be"); // other types of scpe will not produce archived classes
+
+      const char* path = scpe->name();
+      struct stat st;
+      if (os::stat(path, &st) != 0) {
+        char* msg = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, strlen(path) + 128);
+        jio_snprintf(msg, strlen(path) + 127, "error in opening JAR file %s", path);
+        THROW_MSG_(vmSymbols::java_io_IOException(), msg, NULL);
+      } else {
+        ent = ClassLoader::create_class_path_entry(path, &st, /*throw_exception=*/true, false, CHECK_NULL);
+      }
+    }
+
+    MutexLocker mu(CDSClassFileStream_lock, THREAD);
+    if (_classpath_entries_for_jvmti[i] == NULL) {
+      _classpath_entries_for_jvmti[i] = ent;
+    } else {
+      // Another thread has beat me to creating this entry
+      delete ent;
+      ent = _classpath_entries_for_jvmti[i];
+    }
+  }
+
+  return ent;
+}
+
+ClassFileStream* FileMapInfo::open_stream_for_jvmti(InstanceKlass* ik, TRAPS) {
+  int path_index = ik->shared_classpath_index();
+  assert(path_index >= 0, "should be called for shared built-in classes only");
+  assert(path_index < (int)_shared_path_table_size, "sanity");
+
+  ClassPathEntry* cpe = get_classpath_entry_for_jvmti(path_index, CHECK_NULL);
+  assert(cpe != NULL, "must be");
+
+  Symbol* name = ik->name();
+  const char* const class_name = name->as_C_string();
+  const char* const file_name = ClassLoader::file_name_for_class_name(class_name,
+                                                                      name->utf8_length());
+  return cpe->open_stream(file_name, THREAD);
+}
+
+#endif
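Editorial note: get_classpath_entry_for_jvmti above lazily creates a ClassPathEntry outside the lock, publishes it under CDSClassFileStream_lock, and has the losing thread delete its duplicate. A generic sketch of that shape, not part of this changeset (Entry and the std::mutex stand-in for the HotSpot lock are hypothetical):

    #include <mutex>

    // Lazily create slot i; if another thread published first, discard ours.
    template <typename Entry>
    Entry* lazy_entry_at(Entry** table, int i, std::mutex& lock) {
      Entry* ent = table[i];             // unlocked fast path, as in the original
      if (ent == nullptr) {
        Entry* created = new Entry();    // potentially slow work, no lock held
        std::lock_guard<std::mutex> guard(lock);
        if (table[i] == nullptr) {
          table[i] = created;            // we won the race: publish our entry
          ent = created;
        } else {
          delete created;                // another thread beat us to it
          ent = table[i];
        }
      }
      return ent;
    }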
--- a/src/hotspot/share/memory/filemap.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/memory/filemap.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -302,6 +302,10 @@
   bool validate_shared_path_table();
   static void update_shared_classpath(ClassPathEntry *cpe, SharedClassPathEntry* ent, TRAPS);
 
+#if INCLUDE_JVMTI
+  static ClassFileStream* open_stream_for_jvmti(InstanceKlass* ik, TRAPS);
+#endif
+
   static SharedClassPathEntry* shared_path(int index) {
     if (index < 0) {
       return NULL;
@@ -348,6 +352,11 @@
   }
 
   address decode_start_address(CDSFileMapRegion* spc, bool with_current_oop_encoding_mode);
+
+#if INCLUDE_JVMTI
+  static ClassPathEntry** _classpath_entries_for_jvmti;
+  static ClassPathEntry* get_classpath_entry_for_jvmti(int i, TRAPS);
+#endif
 };
 
 #endif // SHARE_MEMORY_FILEMAP_HPP
--- a/src/hotspot/share/memory/iterator.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/memory/iterator.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -263,6 +263,11 @@
   virtual void do_code_blob(CodeBlob* cb);
 };
 
+class NMethodClosure : public Closure {
+ public:
+  virtual void do_nmethod(nmethod* n) = 0;
+};
+
 // MonitorClosure is used for iterating over monitors in the monitors cache
 
 class ObjectMonitor;
--- a/src/hotspot/share/memory/metaspaceShared.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/memory/metaspaceShared.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -207,7 +207,7 @@
 };
 
 
-DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md"), _od_region("od");
+DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _md_region("md");
 size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;
 
 char* MetaspaceShared::misc_code_space_alloc(size_t num_bytes) {
@@ -598,23 +598,6 @@
   }
 }
 
-static void relocate_cached_class_file() {
-  for (int i = 0; i < _global_klass_objects->length(); i++) {
-    Klass* k = _global_klass_objects->at(i);
-    if (k->is_instance_klass()) {
-      InstanceKlass* ik = InstanceKlass::cast(k);
-      JvmtiCachedClassFileData* p = ik->get_archived_class_data();
-      if (p != NULL) {
-        int size = offset_of(JvmtiCachedClassFileData, data) + p->length;
-        JvmtiCachedClassFileData* q = (JvmtiCachedClassFileData*)_od_region.allocate(size);
-        q->length = p->length;
-        memcpy(q->data, p->data, p->length);
-        ik->set_archived_class_data(q);
-      }
-    }
-  }
-}
-
 // Objects of the Metadata types (such as Klass and ConstantPool) have C++ vtables.
 // (In GCC this is the field <Type>::_vptr, i.e., first word in the object.)
 //
@@ -1438,15 +1421,11 @@
 
   char* vtbl_list = _md_region.top();
   MetaspaceShared::allocate_cpp_vtable_clones();
-  _md_region.pack(&_od_region);
+  _md_region.pack();
 
-  // Relocate the archived class file data into the od region
-  relocate_cached_class_file();
-  _od_region.pack();
-
-  // The 5 core spaces are allocated consecutively mc->rw->ro->md->od, so there total size
+  // The 4 core spaces are allocated consecutively mc->rw->ro->md, so their total size
   // is just the spaces between the two ends.
-  size_t core_spaces_size = _od_region.end() - _mc_region.base();
+  size_t core_spaces_size = _md_region.end() - _mc_region.base();
   assert(core_spaces_size == (size_t)align_up(core_spaces_size, Metaspace::reserve_alignment()),
          "should already be aligned");
 
@@ -1488,7 +1467,6 @@
     write_region(mapinfo, MetaspaceShared::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
     write_region(mapinfo, MetaspaceShared::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
     write_region(mapinfo, MetaspaceShared::md, &_md_region, /*read_only=*/false,/*allow_exec=*/false);
-    write_region(mapinfo, MetaspaceShared::od, &_od_region, /*read_only=*/true, /*allow_exec=*/false);
 
     _total_closed_archive_region_size = mapinfo->write_archive_heap_regions(
                                         _closed_archive_heap_regions,
@@ -1535,12 +1513,10 @@
   // Print statistics of all the regions
   const size_t total_reserved = _ro_region.reserved()  + _rw_region.reserved() +
                                 _mc_region.reserved()  + _md_region.reserved() +
-                                _od_region.reserved()  +
                                 _total_closed_archive_region_size +
                                 _total_open_archive_region_size;
   const size_t total_bytes = _ro_region.used()  + _rw_region.used() +
                              _mc_region.used()  + _md_region.used() +
-                             _od_region.used()  +
                              _total_closed_archive_region_size +
                              _total_open_archive_region_size;
   const double total_u_perc = percent_of(total_bytes, total_reserved);
@@ -1549,7 +1525,6 @@
   _rw_region.print(total_reserved);
   _ro_region.print(total_reserved);
   _md_region.print(total_reserved);
-  _od_region.print(total_reserved);
   print_heap_region_stats(_closed_archive_heap_regions, "ca", total_reserved);
   print_heap_region_stats(_open_archive_heap_regions, "oa", total_reserved);
 
@@ -1931,33 +1906,30 @@
   char* rw_base = NULL; char* rw_top;
   char* mc_base = NULL; char* mc_top;
   char* md_base = NULL; char* md_top;
-  char* od_base = NULL; char* od_top;
 
   // Map each shared region
   if ((mc_base = mapinfo->map_region(mc, &mc_top)) != NULL &&
       (rw_base = mapinfo->map_region(rw, &rw_top)) != NULL &&
       (ro_base = mapinfo->map_region(ro, &ro_top)) != NULL &&
       (md_base = mapinfo->map_region(md, &md_top)) != NULL &&
-      (od_base = mapinfo->map_region(od, &od_top)) != NULL &&
       (image_alignment == (size_t)os::vm_allocation_granularity()) &&
       mapinfo->validate_shared_path_table()) {
     // Success -- set up MetaspaceObj::_shared_metaspace_{base,top} for
     // fast checking in MetaspaceShared::is_in_shared_metaspace() and
     // MetaspaceObj::is_shared().
     //
-    // We require that mc->rw->ro->md->od to be laid out consecutively, with no
+    // We require that mc->rw->ro->md to be laid out consecutively, with no
     // gaps between them. That way, we can ensure that the OS won't be able to
     // allocate any new memory spaces inside _shared_metaspace_{base,top}, which
     // would mess up the simple comparision in MetaspaceShared::is_in_shared_metaspace().
-    assert(mc_base < ro_base && mc_base < rw_base && mc_base < md_base && mc_base < od_base, "must be");
-    assert(od_top  > ro_top  && od_top  > rw_top  && od_top  > md_top  && od_top  > mc_top , "must be");
+    assert(mc_base < ro_base && mc_base < rw_base && mc_base < md_base, "must be");
+    assert(md_top  > ro_top  && md_top  > rw_top  && md_top  > mc_top , "must be");
     assert(mc_top == rw_base, "must be");
     assert(rw_top == ro_base, "must be");
     assert(ro_top == md_base, "must be");
-    assert(md_top == od_base, "must be");
 
     _core_spaces_size = mapinfo->core_spaces_size();
-    MetaspaceObj::set_shared_metaspace_range((void*)mc_base, (void*)od_top);
+    MetaspaceObj::set_shared_metaspace_range((void*)mc_base, (void*)md_top);
     return true;
   } else {
     // If there was a failure in mapping any of the spaces, unmap the ones
@@ -1966,7 +1938,6 @@
     if (rw_base != NULL) mapinfo->unmap_region(rw);
     if (mc_base != NULL) mapinfo->unmap_region(mc);
     if (md_base != NULL) mapinfo->unmap_region(md);
-    if (od_base != NULL) mapinfo->unmap_region(od);
 #ifndef _WINDOWS
     // Release the entire mapped region
     shared_rs.release();
@@ -2049,7 +2020,6 @@
   _rw_region.print_out_of_space_msg(name, needed_bytes);
   _ro_region.print_out_of_space_msg(name, needed_bytes);
   _md_region.print_out_of_space_msg(name, needed_bytes);
-  _od_region.print_out_of_space_msg(name, needed_bytes);
 
   vm_exit_during_initialization(err_msg("Unable to allocate from '%s' region", name),
                                 "Please reduce the number of shared classes.");
--- a/src/hotspot/share/memory/metaspaceShared.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/memory/metaspaceShared.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -69,14 +69,10 @@
     ro = 2,  // read-only shared space in the heap
     md = 3,  // miscellaneous data for initializing tables, etc.
     num_core_spaces = 4, // number of non-string regions
-
-    // optional mapped spaces
-    // Currently it only contains class file data.
-    od = num_core_spaces,
-    num_non_heap_spaces = od + 1,
+    num_non_heap_spaces = 4,
 
     // mapped java heap regions
-    first_closed_archive_heap_region = od + 1,
+    first_closed_archive_heap_region = md + 1,
     max_closed_archive_heap_region = 2,
     last_closed_archive_heap_region = first_closed_archive_heap_region + max_closed_archive_heap_region - 1,
     first_open_archive_heap_region = last_closed_archive_heap_region + 1,
--- a/src/hotspot/share/oops/array.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/oops/array.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -27,6 +27,7 @@
 
 #include "memory/allocation.hpp"
 #include "memory/metaspace.hpp"
+#include "runtime/orderAccess.hpp"
 #include "utilities/align.hpp"
 
 // Array for metadata allocation
@@ -121,8 +122,8 @@
   T*   adr_at(const int i)             { assert(i >= 0 && i< _length, "oob: 0 <= %d < %d", i, _length); return &_data[i]; }
   int  find(const T& x)                { return index_of(x); }
 
-  T at_acquire(const int which);
-  void release_at_put(int which, T contents);
+  T at_acquire(const int i)            { return OrderAccess::load_acquire(adr_at(i)); }
+  void release_at_put(int i, T x)      { OrderAccess::release_store(adr_at(i), x); }
 
   static int size(int length) {
     size_t bytes = align_up(byte_sizeof(length), BytesPerWord);
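Editorial note: the at_acquire / release_at_put pair above relies on acquire/release ordering so that a reader who observes a published element also observes everything written before the store. A portable analogue of that contract, not part of this changeset (HotSpot uses OrderAccess, not std::atomic):

    #include <atomic>

    // Release store publishes a slot; the matching acquire load sees the slot
    // together with all writes that happened before the store.
    template <typename T>
    T load_acquire_at(const std::atomic<T>* slot) {
      return slot->load(std::memory_order_acquire);
    }

    template <typename T>
    void release_store_at(std::atomic<T>* slot, T value) {
      slot->store(value, std::memory_order_release);
    }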
--- a/src/hotspot/share/oops/array.inline.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,37 +0,0 @@
-/*
- * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_OOPS_ARRAY_INLINE_HPP
-#define SHARE_OOPS_ARRAY_INLINE_HPP
-
-#include "oops/array.hpp"
-#include "runtime/orderAccess.hpp"
-
-template <typename T>
-inline T Array<T>::at_acquire(const int which) { return OrderAccess::load_acquire(adr_at(which)); }
-
-template <typename T>
-inline void Array<T>::release_at_put(int which, T contents) { OrderAccess::release_store(adr_at(which), contents); }
-
-#endif // SHARE_OOPS_ARRAY_INLINE_HPP
--- a/src/hotspot/share/oops/constantPool.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/oops/constantPool.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -39,7 +39,7 @@
 #include "memory/metaspaceShared.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
-#include "oops/array.inline.hpp"
+#include "oops/array.hpp"
 #include "oops/constantPool.inline.hpp"
 #include "oops/cpCache.inline.hpp"
 #include "oops/instanceKlass.hpp"
@@ -56,10 +56,6 @@
 #include "runtime/vframe.inline.hpp"
 #include "utilities/copy.hpp"
 
-constantTag ConstantPool::tag_at(int which) const { return (constantTag)tags()->at_acquire(which); }
-
-void ConstantPool::release_tag_at_put(int which, jbyte t) { tags()->release_at_put(which, t); }
-
 ConstantPool* ConstantPool::allocate(ClassLoaderData* loader_data, int length, TRAPS) {
   Array<u1>* tags = MetadataFactory::new_array<u1>(loader_data, length, 0, CHECK_NULL);
   int size = ConstantPool::size(length);
--- a/src/hotspot/share/oops/constantPool.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/oops/constantPool.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -131,7 +131,7 @@
 
   void set_tags(Array<u1>* tags)               { _tags = tags; }
   void tag_at_put(int which, jbyte t)          { tags()->at_put(which, t); }
-  void release_tag_at_put(int which, jbyte t);
+  void release_tag_at_put(int which, jbyte t)  { tags()->release_at_put(which, t); }
 
   u1* tag_addr_at(int which) const             { return tags()->adr_at(which); }
 
@@ -379,7 +379,7 @@
 
   // Tag query
 
-  constantTag tag_at(int which) const;
+  constantTag tag_at(int which) const { return (constantTag)tags()->at_acquire(which); }
 
   // Fetching constants
 
--- a/src/hotspot/share/oops/cpCache.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/oops/cpCache.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -591,7 +591,7 @@
 
 // a constant pool cache entry should never contain old or obsolete methods
 bool ConstantPoolCacheEntry::check_no_old_or_obsolete_entries() {
-  Method* m = get_interesting_method_entry(NULL);
+  Method* m = get_interesting_method_entry();
   // return false if m refers to a non-deleted old or obsolete method
   if (m != NULL) {
     assert(m->is_valid() && m->is_method(), "m is a valid method");
@@ -601,7 +601,7 @@
   }
 }
 
-Method* ConstantPoolCacheEntry::get_interesting_method_entry(Klass* k) {
+Method* ConstantPoolCacheEntry::get_interesting_method_entry() {
   if (!is_method_entry()) {
     // not a method entry so not interesting by default
     return NULL;
@@ -622,12 +622,9 @@
     }
   }
   assert(m != NULL && m->is_method(), "sanity check");
-  if (m == NULL || !m->is_method() || (k != NULL && m->method_holder() != k)) {
-    // robustness for above sanity checks or method is not in
-    // the interesting class
+  if (m == NULL || !m->is_method()) {
     return NULL;
   }
-  // the method is in the interesting class so the entry is interesting
   return m;
 }
 #endif // INCLUDE_JVMTI
@@ -777,10 +774,10 @@
 // RedefineClasses() API support:
 // If any entry of this ConstantPoolCache points to any of
 // old_methods, replace it with the corresponding new_method.
-void ConstantPoolCache::adjust_method_entries(InstanceKlass* holder, bool * trace_name_printed) {
+void ConstantPoolCache::adjust_method_entries(bool * trace_name_printed) {
   for (int i = 0; i < length(); i++) {
     ConstantPoolCacheEntry* entry = entry_at(i);
-    Method* old_method = entry->get_interesting_method_entry(holder);
+    Method* old_method = entry->get_interesting_method_entry();
     if (old_method == NULL || !old_method->is_old()) {
       continue; // skip uninteresting entries
     }
@@ -789,11 +786,7 @@
       entry->initialize_entry(entry->constant_pool_index());
       continue;
     }
-    Method* new_method = holder->method_with_idnum(old_method->orig_method_idnum());
-
-    assert(new_method != NULL, "method_with_idnum() should not be NULL");
-    assert(old_method != new_method, "sanity check");
-
+    Method* new_method = old_method->get_new_method();
     entry_at(i)->adjust_method_entry(old_method, new_method, trace_name_printed);
   }
 }
@@ -801,7 +794,7 @@
 // the constant pool cache should never contain old or obsolete methods
 bool ConstantPoolCache::check_no_old_or_obsolete_entries() {
   for (int i = 1; i < length(); i++) {
-    if (entry_at(i)->get_interesting_method_entry(NULL) != NULL &&
+    if (entry_at(i)->get_interesting_method_entry() != NULL &&
         !entry_at(i)->check_no_old_or_obsolete_entries()) {
       return false;
     }
@@ -811,7 +804,7 @@
 
 void ConstantPoolCache::dump_cache() {
   for (int i = 1; i < length(); i++) {
-    if (entry_at(i)->get_interesting_method_entry(NULL) != NULL) {
+    if (entry_at(i)->get_interesting_method_entry() != NULL) {
       entry_at(i)->print(tty, i);
     }
   }
--- a/src/hotspot/share/oops/cpCache.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/oops/cpCache.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -376,7 +376,7 @@
   void adjust_method_entry(Method* old_method, Method* new_method,
          bool* trace_name_printed);
   bool check_no_old_or_obsolete_entries();
-  Method* get_interesting_method_entry(Klass* k);
+  Method* get_interesting_method_entry();
 #endif // INCLUDE_JVMTI
 
   // Debugging & Printing
@@ -496,7 +496,7 @@
   // trace_name_printed is set to true if the current call has
   // printed the klass name so that other routines in the adjust_*
   // group don't print the klass name.
-  void adjust_method_entries(InstanceKlass* holder, bool* trace_name_printed);
+  void adjust_method_entries(bool* trace_name_printed);
   bool check_no_old_or_obsolete_entries();
   void dump_cache();
 #endif // INCLUDE_JVMTI
--- a/src/hotspot/share/oops/generateOopMap.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/oops/generateOopMap.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -27,6 +27,7 @@
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/allocation.inline.hpp"
+#include "oops/constantPool.hpp"
 #include "oops/generateOopMap.hpp"
 #include "oops/oop.inline.hpp"
 #include "oops/symbol.hpp"
--- a/src/hotspot/share/oops/instanceKlass.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/oops/instanceKlass.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -53,6 +53,7 @@
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/fieldStreams.hpp"
+#include "oops/constantPool.hpp"
 #include "oops/instanceClassLoaderKlass.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceMirrorKlass.hpp"
@@ -182,8 +183,14 @@
       if (name == k->name()) {
         log_trace(class, nestmates)("- Found it at nest_members[%d] => cp[%d]", i, cp_index);
 
-        // names match so check actual klass - this may trigger class loading if
-        // it doesn't match (but that should be impossible)
+        // Names match so check actual klass - this may trigger class loading if
+        // it doesn't match (though that should be impossible). But to be safe we
+        // have to check for a compiler thread executing here.
+        if (!THREAD->can_call_java() && !_constants->tag_at(cp_index).is_klass()) {
+          log_trace(class, nestmates)("- validation required resolution in an unsuitable thread");
+          return false;
+        }
+
         Klass* k2 = _constants->klass_at(cp_index, CHECK_false);
         if (k2 == k) {
           log_trace(class, nestmates)("- class is listed as a nest member");
@@ -295,7 +302,7 @@
            error);
       }
 
-      if (validationException != NULL) {
+      if (validationException != NULL && THREAD->can_call_java()) {
         ResourceMark rm(THREAD);
         Exceptions::fthrow(THREAD_AND_LOCATION,
                            validationException,
@@ -2346,6 +2353,7 @@
 #if INCLUDE_JVMTI
   guarantee(_breakpoints == NULL, "must be");
   guarantee(_previous_versions == NULL, "must be");
+  _cached_class_file = NULL;
 #endif
 
   _init_thread = NULL;
@@ -2502,7 +2510,7 @@
   }
 
   // deallocate the cached class file
-  if (_cached_class_file != NULL && !MetaspaceShared::is_in_shared_metaspace(_cached_class_file)) {
+  if (_cached_class_file != NULL) {
     os::free(_cached_class_file);
     _cached_class_file = NULL;
   }
@@ -2909,22 +2917,18 @@
 // not yet in the vtable due to concurrent subclass define and superinterface
 // redefinition
 // Note: those in the vtable, should have been updated via adjust_method_entries
-void InstanceKlass::adjust_default_methods(InstanceKlass* holder, bool* trace_name_printed) {
+void InstanceKlass::adjust_default_methods(bool* trace_name_printed) {
   // search the default_methods for uses of either obsolete or EMCP methods
   if (default_methods() != NULL) {
     for (int index = 0; index < default_methods()->length(); index ++) {
       Method* old_method = default_methods()->at(index);
-      if (old_method == NULL || old_method->method_holder() != holder || !old_method->is_old()) {
+      if (old_method == NULL || !old_method->is_old()) {
         continue; // skip uninteresting entries
       }
       assert(!old_method->is_deleted(), "default methods may not be deleted");
-
-      Method* new_method = holder->method_with_idnum(old_method->orig_method_idnum());
-
-      assert(new_method != NULL, "method_with_idnum() should not be NULL");
-      assert(old_method != new_method, "sanity check");
-
+      Method* new_method = old_method->get_new_method();
       default_methods()->at_put(index, new_method);
+
       if (log_is_enabled(Info, redefine, class, update)) {
         ResourceMark rm;
         if (!(*trace_name_printed)) {
@@ -3963,12 +3967,7 @@
 
 #if INCLUDE_JVMTI
 JvmtiCachedClassFileData* InstanceKlass::get_cached_class_file() {
-  if (MetaspaceShared::is_in_shared_metaspace(_cached_class_file)) {
-    // Ignore the archived class stream data
-    return NULL;
-  } else {
-    return _cached_class_file;
-  }
+  return _cached_class_file;
 }
 
 jint InstanceKlass::get_cached_class_file_len() {
@@ -3978,19 +3977,4 @@
 unsigned char * InstanceKlass::get_cached_class_file_bytes() {
   return VM_RedefineClasses::get_cached_class_file_bytes(_cached_class_file);
 }
-
-#if INCLUDE_CDS
-JvmtiCachedClassFileData* InstanceKlass::get_archived_class_data() {
-  if (DumpSharedSpaces) {
-    return _cached_class_file;
-  } else {
-    assert(this->is_shared(), "class should be shared");
-    if (MetaspaceShared::is_in_shared_metaspace(_cached_class_file)) {
-      return _cached_class_file;
-    } else {
-      return NULL;
-    }
-  }
-}
 #endif
-#endif
--- a/src/hotspot/share/oops/instanceKlass.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/oops/instanceKlass.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -847,14 +847,6 @@
   JvmtiCachedClassFieldMap* jvmti_cached_class_field_map() const {
     return _jvmti_cached_class_field_map;
   }
-
-#if INCLUDE_CDS
-  void set_archived_class_data(JvmtiCachedClassFileData* data) {
-    _cached_class_file = data;
-  }
-
-  JvmtiCachedClassFileData * get_archived_class_data();
-#endif // INCLUDE_CDS
 #else // INCLUDE_JVMTI
 
   static void purge_previous_versions(InstanceKlass* ik) { return; };
@@ -1149,7 +1141,7 @@
   Method* method_at_itable(Klass* holder, int index, TRAPS);
 
 #if INCLUDE_JVMTI
-  void adjust_default_methods(InstanceKlass* holder, bool* trace_name_printed);
+  void adjust_default_methods(bool* trace_name_printed);
 #endif // INCLUDE_JVMTI
 
   void clean_weak_instanceklass_links();
--- a/src/hotspot/share/oops/klassVtable.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/oops/klassVtable.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -942,21 +942,18 @@
 }
 
 // search the vtable for uses of either obsolete or EMCP methods
-void klassVtable::adjust_method_entries(InstanceKlass* holder, bool * trace_name_printed) {
+void klassVtable::adjust_method_entries(bool * trace_name_printed) {
   int prn_enabled = 0;
   for (int index = 0; index < length(); index++) {
     Method* old_method = unchecked_method_at(index);
-    if (old_method == NULL || old_method->method_holder() != holder || !old_method->is_old()) {
+    if (old_method == NULL || !old_method->is_old()) {
       continue; // skip uninteresting entries
     }
     assert(!old_method->is_deleted(), "vtable methods may not be deleted");
 
-    Method* new_method = holder->method_with_idnum(old_method->orig_method_idnum());
+    Method* new_method = old_method->get_new_method();
+    put_method_at(new_method, index);
 
-    assert(new_method != NULL, "method_with_idnum() should not be NULL");
-    assert(old_method != new_method, "sanity check");
-
-    put_method_at(new_method, index);
     // For default methods, need to update the _default_methods array
     // which can only have one method entry for a given signature
     bool updated_default = false;
@@ -1272,21 +1269,16 @@
 
 #if INCLUDE_JVMTI
 // search the itable for uses of either obsolete or EMCP methods
-void klassItable::adjust_method_entries(InstanceKlass* holder, bool * trace_name_printed) {
+void klassItable::adjust_method_entries(bool * trace_name_printed) {
 
   itableMethodEntry* ime = method_entry(0);
   for (int i = 0; i < _size_method_table; i++, ime++) {
     Method* old_method = ime->method();
-    if (old_method == NULL || old_method->method_holder() != holder || !old_method->is_old()) {
+    if (old_method == NULL || !old_method->is_old()) {
       continue; // skip uninteresting entries
     }
     assert(!old_method->is_deleted(), "itable methods may not be deleted");
-
-    Method* new_method = holder->method_with_idnum(old_method->orig_method_idnum());
-
-    assert(new_method != NULL, "method_with_idnum() should not be NULL");
-    assert(old_method != new_method, "sanity check");
-
+    Method* new_method = old_method->get_new_method();
     ime->initialize(new_method);
 
     if (log_is_enabled(Info, redefine, class, update)) {
--- a/src/hotspot/share/oops/klassVtable.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/oops/klassVtable.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -103,7 +103,7 @@
   // printed the klass name so that other routines in the adjust_*
   // group don't print the klass name.
   bool adjust_default_method(int vtable_index, Method* old_method, Method* new_method);
-  void adjust_method_entries(InstanceKlass* holder, bool * trace_name_printed);
+  void adjust_method_entries(bool* trace_name_printed);
   bool check_no_old_or_obsolete_entries();
   void dump_vtable();
 #endif // INCLUDE_JVMTI
@@ -322,7 +322,7 @@
   // trace_name_printed is set to true if the current call has
   // printed the klass name so that other routines in the adjust_*
   // group don't print the klass name.
-  void adjust_method_entries(InstanceKlass* holder, bool * trace_name_printed);
+  void adjust_method_entries(bool* trace_name_printed);
   bool check_no_old_or_obsolete_entries();
   void dump_itable();
 #endif // INCLUDE_JVMTI
--- a/src/hotspot/share/oops/method.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/oops/method.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -42,6 +42,7 @@
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/constMethod.hpp"
+#include "oops/constantPool.hpp"
 #include "oops/method.inline.hpp"
 #include "oops/methodData.hpp"
 #include "oops/objArrayOop.inline.hpp"
--- a/src/hotspot/share/oops/method.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/oops/method.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -974,6 +974,15 @@
   // Deallocation function for redefine classes or if an error occurs
   void deallocate_contents(ClassLoaderData* loader_data);
 
+  Method* get_new_method() const {
+    InstanceKlass* holder = method_holder();
+    Method* new_method = holder->method_with_idnum(orig_method_idnum());
+
+    assert(new_method != NULL, "method_with_idnum() should not be NULL");
+    assert(this != new_method, "sanity check");
+    return new_method;
+  }
+
   // Printing
 #ifndef PRODUCT
   void print_on(outputStream* st) const;
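Editorial note: get_new_method() above centralizes the "old method to replacement by original idnum" lookup that the adjust_method_entries routines previously duplicated, so callers no longer need the redefined holder. An illustrative call site, not part of this changeset (fixup_slot is a hypothetical helper relying on the HotSpot Method API shown above):

    // Given a slot already known to hold an old (non-deleted) method,
    // swap in its replacement via the helper above.
    void fixup_slot(Method** slot) {
      Method* old_method = *slot;
      if (old_method != NULL && old_method->is_old()) {
        assert(!old_method->is_deleted(), "deleted methods are not expected here");
        *slot = old_method->get_new_method();
      }
    }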
--- a/src/hotspot/share/oops/oopHandle.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/oops/oopHandle.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -36,6 +36,7 @@
 // future uses for read barriers.
 
 class OopHandle {
+  friend class VMStructs;
 private:
   oop* _obj;
 
--- a/src/hotspot/share/oops/reflectionAccessorImplKlassHelper.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/oops/reflectionAccessorImplKlassHelper.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -26,6 +26,7 @@
 #include "precompiled.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/constantPool.hpp"
 #include "oops/reflectionAccessorImplKlassHelper.hpp"
 #include "utilities/constantTag.hpp"
 #include "utilities/debug.hpp"
--- a/src/hotspot/share/opto/escape.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/opto/escape.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -1726,6 +1726,18 @@
     // access its field since the field value is unknown after it.
     //
     Node* n = field->ideal_node();
+
+    // Test for an unsafe access that was parsed as maybe off heap
+    // (with a CheckCastPP to raw memory).
+    assert(n->is_AddP(), "expect an address computation");
+    if (n->in(AddPNode::Base)->is_top() &&
+        n->in(AddPNode::Address)->Opcode() == Op_CheckCastPP) {
+      assert(n->in(AddPNode::Address)->bottom_type()->isa_rawptr(), "raw address so raw cast expected");
+      assert(_igvn->type(n->in(AddPNode::Address)->in(1))->isa_oopptr(), "cast pattern at unsafe access expected");
+      jobj->set_scalar_replaceable(false);
+      return;
+    }
+
     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
       Node* u = n->fast_out(i);
       if (u->is_LoadStore() || (u->is_Mem() && u->as_Mem()->is_mismatched_access())) {
--- a/src/hotspot/share/prims/jni.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/prims/jni.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -3953,9 +3953,6 @@
     }
 #endif
 
-    // Tracks the time application was running before GC
-    RuntimeService::record_application_start();
-
     // Notify JVMTI
     if (JvmtiExport::should_post_thread_life()) {
        JvmtiExport::post_thread_start(thread);
--- a/src/hotspot/share/prims/jvm.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/prims/jvm.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -45,6 +45,7 @@
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
 #include "oops/access.inline.hpp"
+#include "oops/constantPool.hpp"
 #include "oops/fieldStreams.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/method.hpp"
--- a/src/hotspot/share/prims/jvmtiRedefineClasses.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/prims/jvmtiRedefineClasses.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -39,6 +39,7 @@
 #include "memory/metaspaceShared.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
+#include "oops/constantPool.hpp"
 #include "oops/fieldStreams.hpp"
 #include "oops/klassVtable.hpp"
 #include "oops/oop.inline.hpp"
@@ -61,10 +62,11 @@
 Method**  VM_RedefineClasses::_matching_new_methods = NULL;
 Method**  VM_RedefineClasses::_deleted_methods      = NULL;
 Method**  VM_RedefineClasses::_added_methods        = NULL;
-int         VM_RedefineClasses::_matching_methods_length = 0;
-int         VM_RedefineClasses::_deleted_methods_length  = 0;
-int         VM_RedefineClasses::_added_methods_length    = 0;
-Klass*      VM_RedefineClasses::_the_class = NULL;
+int       VM_RedefineClasses::_matching_methods_length = 0;
+int       VM_RedefineClasses::_deleted_methods_length  = 0;
+int       VM_RedefineClasses::_added_methods_length    = 0;
+bool      VM_RedefineClasses::_has_redefined_Object = false;
+bool      VM_RedefineClasses::_has_null_class_loader = false;
 
 
 VM_RedefineClasses::VM_RedefineClasses(jint class_count,
@@ -75,6 +77,9 @@
   _class_load_kind = class_load_kind;
   _any_class_has_resolved_methods = false;
   _res = JVMTI_ERROR_NONE;
+  _the_class = NULL;
+  _has_redefined_Object = false;
+  _has_null_class_loader = false;
 }
 
 static inline InstanceKlass* get_ik(jclass def) {
@@ -213,11 +218,12 @@
   // Flush all compiled code that depends on the classes redefined.
   flush_dependent_code();
 
-  // Clean out MethodData pointing to old Method*
+  // Adjust constantpool caches and vtables for all classes
+  // that reference methods of the evolved classes.
   // Have to do this after all classes are redefined and all methods that
   // are redefined are marked as old.
-  MethodDataCleaner clean_weak_method_links;
-  ClassLoaderDataGraph::classes_do(&clean_weak_method_links);
+  AdjustAndCleanMetadata adjust_and_clean_metadata(thread);
+  ClassLoaderDataGraph::classes_do(&adjust_and_clean_metadata);
 
   // JSR-292 support
   if (_any_class_has_resolved_methods) {
@@ -3414,25 +3420,35 @@
 // Unevolving classes may point to methods of the_class directly
 // from their constant pool caches, itables, and/or vtables. We
 // use the ClassLoaderDataGraph::classes_do() facility and this helper
-// to fix up these pointers.
+// to fix up these pointers.  MethodData also points to old methods and
+// must be cleaned.
 
 // Adjust cpools and vtables closure
-void VM_RedefineClasses::AdjustCpoolCacheAndVtable::do_klass(Klass* k) {
+void VM_RedefineClasses::AdjustAndCleanMetadata::do_klass(Klass* k) {
 
   // This is a very busy routine. We don't want too much tracing
   // printed out.
   bool trace_name_printed = false;
-  InstanceKlass *the_class = InstanceKlass::cast(_the_class);
 
   // If the class being redefined is java.lang.Object, we need to fix all
   // array class vtables also
-  if (k->is_array_klass() && _the_class == SystemDictionary::Object_klass()) {
-    k->vtable().adjust_method_entries(the_class, &trace_name_printed);
+  if (k->is_array_klass() && _has_redefined_Object) {
+    k->vtable().adjust_method_entries(&trace_name_printed);
 
   } else if (k->is_instance_klass()) {
     HandleMark hm(_thread);
     InstanceKlass *ik = InstanceKlass::cast(k);
 
+    // Clean MethodData of this class's methods so they don't refer to
+    // old methods that are no longer running.
+    Array<Method*>* methods = ik->methods();
+    int num_methods = methods->length();
+    for (int index = 0; index < num_methods; ++index) {
+      if (methods->at(index)->method_data() != NULL) {
+        methods->at(index)->method_data()->clean_weak_method_links();
+      }
+    }
+
     // HotSpot specific optimization! HotSpot does not currently
     // support delegation from the bootstrap class loader to a
     // user-defined class loader. This means that if the bootstrap
@@ -3445,57 +3461,29 @@
     // If the current class being redefined has a user-defined class
     // loader as its defining class loader, then we can skip all
     // classes loaded by the bootstrap class loader.
-    bool is_user_defined = (_the_class->class_loader() != NULL);
-    if (is_user_defined && ik->class_loader() == NULL) {
+    if (!_has_null_class_loader && ik->class_loader() == NULL) {
       return;
     }
 
-    // Fix the vtable embedded in the_class and subclasses of the_class,
-    // if one exists. We discard scratch_class and we don't keep an
-    // InstanceKlass around to hold obsolete methods so we don't have
-    // any other InstanceKlass embedded vtables to update. The vtable
-    // holds the Method*s for virtual (but not final) methods.
-    // Default methods, or concrete methods in interfaces are stored
-    // in the vtable, so if an interface changes we need to check
-    // adjust_method_entries() for every InstanceKlass, which will also
-    // adjust the default method vtable indices.
-    // We also need to adjust any default method entries that are
-    // not yet in the vtable, because the vtable setup is in progress.
-    // This must be done after we adjust the default_methods and
-    // default_vtable_indices for methods already in the vtable.
-    // If redefining Unsafe, walk all the vtables looking for entries.
-    if (ik->vtable_length() > 0 && (_the_class->is_interface()
-        || _the_class == SystemDictionary::internal_Unsafe_klass()
-        || ik->is_subtype_of(_the_class))) {
-      // ik->vtable() creates a wrapper object; rm cleans it up
-      ResourceMark rm(_thread);
-
-      ik->vtable().adjust_method_entries(the_class, &trace_name_printed);
-      ik->adjust_default_methods(the_class, &trace_name_printed);
+    // Adjust all vtables, default methods and itables, to clean out old methods.
+    ResourceMark rm(_thread);
+    if (ik->vtable_length() > 0) {
+      ik->vtable().adjust_method_entries(&trace_name_printed);
+      ik->adjust_default_methods(&trace_name_printed);
     }
 
-    // If the current class has an itable and we are either redefining an
-    // interface or if the current class is a subclass of the_class, then
-    // we potentially have to fix the itable. If we are redefining an
-    // interface, then we have to call adjust_method_entries() for
-    // every InstanceKlass that has an itable since there isn't a
-    // subclass relationship between an interface and an InstanceKlass.
-    // If redefining Unsafe, walk all the itables looking for entries.
-    if (ik->itable_length() > 0 && (_the_class->is_interface()
-        || _the_class == SystemDictionary::internal_Unsafe_klass()
-        || ik->is_subclass_of(_the_class))) {
-      ResourceMark rm(_thread);
-      ik->itable().adjust_method_entries(the_class, &trace_name_printed);
+    if (ik->itable_length() > 0) {
+      ik->itable().adjust_method_entries(&trace_name_printed);
     }
 
     // The constant pools in other classes (other_cp) can refer to
-    // methods in the_class. We have to update method information in
+    // old methods.  We have to update method information in
     // other_cp's cache. If other_cp has a previous version, then we
     // have to repeat the process for each previous version. The
     // constant pool cache holds the Method*s for non-virtual
     // methods and for virtual, final methods.
     //
-    // Special case: if the current class is the_class, then new_cp
+    // Special case: if the current class is being redefined, then new_cp
     // has already been attached to the_class and old_cp has already
     // been added as a previous version. The new_cp doesn't have any
     // cached references to old methods so it doesn't need to be
@@ -3504,12 +3492,12 @@
     constantPoolHandle other_cp;
     ConstantPoolCache* cp_cache;
 
-    if (ik != _the_class) {
+    if (!ik->is_being_redefined()) {
       // this klass' constant pool cache may need adjustment
       other_cp = constantPoolHandle(ik->constants());
       cp_cache = other_cp->cache();
       if (cp_cache != NULL) {
-        cp_cache->adjust_method_entries(the_class, &trace_name_printed);
+        cp_cache->adjust_method_entries(&trace_name_printed);
       }
     }
 
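Editorial note: the comments removed in the hunks above tied the vtable/itable and constant-pool-cache fix-ups to relationships with a single `the_class` (subtype checks, the interface case, the Unsafe special case). After this change the adjustment runs for every visited class and relies on per-method obsolescence state instead, which is why `adjust_method_entries()` no longer takes the redefined class as an argument. The following is a hypothetical, self-contained model of that kind of forwarding, with made-up types; it is not HotSpot code.

// Hypothetical, simplified sketch (not HotSpot code): each cached Method
// pointer can be tested and forwarded on its own, so the walk no longer
// needs to filter by the redefined class.
#include <cstdio>
#include <vector>

struct Method {
  bool    is_old;         // marked obsolete by redefinition
  Method* newer_version;  // replacement installed by redefinition
};

// Model of a vtable/itable/cp-cache slot array holding Method pointers.
static void adjust_method_entries(std::vector<Method*>& table,
                                  bool* trace_name_printed) {
  for (Method*& m : table) {
    if (m != nullptr && m->is_old) {
      if (!*trace_name_printed) {
        std::puts("adjusting obsolete method entries");
        *trace_name_printed = true;
      }
      m = m->newer_version;  // forward the slot to the current version
    }
  }
}

int main() {
  Method fresh = {false, nullptr};
  Method stale = {true, &fresh};
  std::vector<Method*> vtable = {&stale, &fresh};
  bool traced = false;
  adjust_method_entries(vtable, &traced);
  return vtable[0] == &fresh ? 0 : 1;
}

The real adjust_method_entries variants operate on vtables, itables and constant-pool caches and use full Method metadata; the toy struct above only illustrates the old-flag-plus-forwarding idea.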
@@ -3519,23 +3507,7 @@
          pv_node = pv_node->previous_versions()) {
       cp_cache = pv_node->constants()->cache();
       if (cp_cache != NULL) {
-        cp_cache->adjust_method_entries(pv_node, &trace_name_printed);
-      }
-    }
-  }
-}
-
-// Clean method data for this class
-void VM_RedefineClasses::MethodDataCleaner::do_klass(Klass* k) {
-  if (k->is_instance_klass()) {
-    InstanceKlass *ik = InstanceKlass::cast(k);
-    // Clean MethodData of this class's methods so they don't refer to
-    // old methods that are no longer running.
-    Array<Method*>* methods = ik->methods();
-    int num_methods = methods->length();
-    for (int index = 0; index < num_methods; ++index) {
-      if (methods->at(index)->method_data() != NULL) {
-        methods->at(index)->method_data()->clean_weak_method_links();
+        cp_cache->adjust_method_entries(&trace_name_printed);
       }
     }
   }
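Editorial note: this hunk also folds the former MethodDataCleaner pass into the same walk; the loop now at the top of do_klass (the first added lines of this excerpt) clears profiling links to methods that have become obsolete, so the old Method metadata is not kept alive. A hypothetical, simplified model of that cleaning step follows (made-up types, not the real MethodData API).

// Hypothetical, simplified sketch (not HotSpot code) of cleaning weak
// method links out of per-method profiling data.
#include <vector>

struct Method;

struct MethodData {
  std::vector<Method*> callee_links;  // e.g. call-profile targets
};

struct Method {
  bool        is_old;
  MethodData* method_data;  // may be null if never profiled
};

static void clean_weak_method_links(MethodData* md) {
  for (Method*& callee : md->callee_links) {
    if (callee != nullptr && callee->is_old) {
      callee = nullptr;  // drop the stale reference
    }
  }
}

// Mirrors the shape of the added loop: only methods that actually have
// profiling data need cleaning.
static void clean_class_method_data(std::vector<Method*>& methods) {
  for (Method* m : methods) {
    if (m->method_data != nullptr) {
      clean_weak_method_links(m->method_data);
    }
  }
}

int main() {
  Method stale = {true, nullptr};
  MethodData md = {{&stale}};
  Method caller = {false, &md};
  std::vector<Method*> methods = {&caller, &stale};
  clean_class_method_data(methods);
  return md.callee_links[0] == nullptr ? 0 : 1;
}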
@@ -3971,6 +3943,10 @@
 
   InstanceKlass* the_class = get_ik(the_jclass);
 
+  // Set some flags to control and optimize adjusting method entries
+  _has_redefined_Object |= the_class == SystemDictionary::Object_klass();
+  _has_null_class_loader |= the_class->class_loader() == NULL;
+
   // Remove all breakpoints in methods of this class
   JvmtiBreakpoints& jvmti_breakpoints = JvmtiCurrentBreakpoints::get_jvmti_breakpoints();
   jvmti_breakpoints.clearall_in_class_at_safepoint(the_class);
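Editorial note: the two flag assignments added above OR-accumulate state across every class in the redefinition operation; `_has_null_class_loader` is what later allows the walk to skip bootstrap-loaded classes (the early return near the top of this excerpt). A hypothetical sketch of how such accumulated flags gate that skip, with made-up types rather than HotSpot's:

// Hypothetical, simplified sketch (not HotSpot code) of OR-accumulated
// flags driving the bootstrap-class-loader skip.
#include <vector>

struct ClassInfo {
  bool is_object_class;
  bool has_null_class_loader;  // loaded by the bootstrap loader
};

struct RedefineFlags {
  bool has_redefined_Object  = false;
  bool has_null_class_loader = false;

  void record(const ClassInfo& c) {
    has_redefined_Object  |= c.is_object_class;
    has_null_class_loader |= c.has_null_class_loader;
  }

  // A bootstrap-loaded class can only reference methods of other
  // bootstrap-loaded classes, so it can be skipped when no redefined
  // class lives in the bootstrap loader.
  bool can_skip(const ClassInfo& visited) const {
    return !has_null_class_loader && visited.has_null_class_loader;
  }
};

int main() {
  RedefineFlags flags;
  flags.record({false, false});        // a user-defined class is redefined
  ClassInfo boot_class = {false, true};
  return flags.can_skip(boot_class) ? 0 : 1;
}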
@@ -4192,11 +4168,6 @@
     _timer_rsc_phase2.start();
   }
 
-  // Adjust constantpool caches and vtables for all classes
-  // that reference methods of the evolved class.
-  AdjustCpoolCacheAndVtable adjust_cpool_cache_and_vtable(THREAD);
-  ClassLoaderDataGraph::classes_do(&adjust_cpool_cache_and_vtable);
-
   if (the_class->oop_map_cache() != NULL) {
     // Flush references to any obsolete methods from the oop map cache
     // so that obsolete methods are not pinned.
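Editorial note: the removed call above used to re-walk all loaded classes once per redefined class. Together with the header changes below, this suggests the fix-up is now issued once for the whole batch; the call site that performs that single walk is not part of this excerpt, so the following before/after shape is a hypothetical sketch only, not the actual HotSpot code path.

// Hypothetical, simplified sketch (not HotSpot code): record per-class
// state while redefining, then do one walk over all loaded classes for
// the whole operation instead of one walk per redefined class.
#include <cstdio>
#include <vector>

struct LoadedClass { const char* name; };

static void adjust_and_clean(LoadedClass* k) {
  std::printf("adjusting %s\n", k->name);
}

static void redefine_classes(const std::vector<const char*>& to_redefine,
                             std::vector<LoadedClass>& all_loaded) {
  for (const char* name : to_redefine) {
    // Per-class work: install the new version, set flags, etc.
    std::printf("redefining %s\n", name);
    // Previously: a full walk of all_loaded happened here, per class.
  }
  // Now: one walk over all loaded classes for the whole operation.
  for (LoadedClass& k : all_loaded) {
    adjust_and_clean(&k);
  }
}

int main() {
  std::vector<LoadedClass> loaded = {{"A"}, {"B"}, {"C"}};
  redefine_classes({"A", "B"}, loaded);
  return 0;
}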
--- a/src/hotspot/share/prims/jvmtiRedefineClasses.hpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/prims/jvmtiRedefineClasses.hpp	Tue Feb 26 11:17:12 2019 +0530
@@ -338,20 +338,22 @@
 class VM_RedefineClasses: public VM_Operation {
  private:
   // These static fields are needed by ClassLoaderDataGraph::classes_do()
-  // facility and the AdjustCpoolCacheAndVtable helper:
+  // facility and the CheckClass and AdjustAndCleanMetadata helpers.
   static Array<Method*>* _old_methods;
   static Array<Method*>* _new_methods;
-  static Method**      _matching_old_methods;
-  static Method**      _matching_new_methods;
-  static Method**      _deleted_methods;
-  static Method**      _added_methods;
+  static Method**        _matching_old_methods;
+  static Method**        _matching_new_methods;
+  static Method**        _deleted_methods;
+  static Method**        _added_methods;
   static int             _matching_methods_length;
   static int             _deleted_methods_length;
   static int             _added_methods_length;
-  static Klass*          _the_class;
+  static bool            _has_redefined_Object;
+  static bool            _has_null_class_loader;
 
   // The instance fields are used to pass information from
   // doit_prologue() to doit() and doit_epilogue().
+  Klass*                      _the_class;
   jint                        _class_count;
   const jvmtiClassDefinition *_class_defs;  // ptr to _class_count defs
 
@@ -513,20 +515,14 @@
   // Unevolving classes may point to methods of the_class directly
   // from their constant pool caches, itables, and/or vtables. We
   // use the ClassLoaderDataGraph::classes_do() facility and this helper
-  // to fix up these pointers.
-  class AdjustCpoolCacheAndVtable : public KlassClosure {
+  // to fix up these pointers and clean MethodData out.
+  class AdjustAndCleanMetadata : public KlassClosure {
     Thread* _thread;
    public:
-    AdjustCpoolCacheAndVtable(Thread* t) : _thread(t) {}
+    AdjustAndCleanMetadata(Thread* t) : _thread(t) {}
     void do_klass(Klass* k);
   };
 
-  // Clean MethodData out
-  class MethodDataCleaner : public KlassClosure {
-   public:
-    MethodDataCleaner() {}
-    void do_klass(Klass* k);
-  };
  public:
   VM_RedefineClasses(jint class_count,
                      const jvmtiClassDefinition *class_defs,
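Editorial note: for reference, here is a hypothetical, self-contained sketch of the KlassClosure pattern the merged AdjustAndCleanMetadata helper above follows — one visitor applied by a classes_do()-style walk, doing both the method-entry adjustment and the MethodData cleaning that previously required two separate closures. Types are simplified stand-ins, not HotSpot code.

// Hypothetical, simplified sketch (not HotSpot code) of a single merged
// closure applied to every loaded class in one pass.
#include <cstdio>
#include <vector>

struct Klass { const char* name; };

class KlassClosure {
 public:
  virtual ~KlassClosure() = default;
  virtual void do_klass(Klass* k) = 0;
};

// Stand-in for ClassLoaderDataGraph::classes_do(): apply one closure to
// every loaded class.
static void classes_do(std::vector<Klass*>& loaded, KlassClosure* cl) {
  for (Klass* k : loaded) {
    cl->do_klass(k);
  }
}

class AdjustAndCleanMetadata : public KlassClosure {
 public:
  void do_klass(Klass* k) override {
    // In the real code: clean MethodData, then adjust vtable/itable and
    // constant-pool-cache entries that still point at obsolete methods.
    std::printf("adjust + clean: %s\n", k->name);
  }
};

int main() {
  Klass a = {"A"}, b = {"B"};
  std::vector<Klass*> loaded = {&a, &b};
  AdjustAndCleanMetadata closure;
  classes_do(loaded, &closure);  // one pass instead of two
  return 0;
}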
--- a/src/hotspot/share/prims/resolvedMethodTable.cpp	Tue Feb 26 11:08:07 2019 +0530
+++ b/src/hotspot/share/prims/resolvedMethodTable.cpp	Tue Feb 26 11:17:12 2019 +0530
@@ -1,5 +1,5 @@