OpenJDK / amber / amber
changeset 55709:eab05125102c (branch: patterns)
Automatic merge with patterns-stage-1
author | mcimadamore |
date | Thu, 04 Apr 2019 22:07:49 +0200 |
parents | 3a3ab2f576c7 981bafcc57e1 |
children | 01c894e7fb2c |
files | src/jdk.compiler/share/classes/com/sun/tools/javac/main/JavaCompiler.java src/jdk.compiler/share/classes/com/sun/tools/javac/resources/compiler.properties src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/AllClassesFrameWriter.java src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/FrameOutputWriter.java src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/ModuleFrameWriter.java src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/ModuleIndexFrameWriter.java src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/ModulePackageIndexFrameWriter.java src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/PackageFrameWriter.java src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/PackageIndexFrameWriter.java test/hotspot/jtreg/applications/ctw/modules/jdk_incubator_httpclient.java test/hotspot/jtreg/applications/ctw/modules/jdk_packager.java test/hotspot/jtreg/applications/ctw/modules/jdk_packager_services.java test/jdk/sun/security/ssl/rsa/BrokenRSAPrivateCrtKey.java test/langtools/jdk/javadoc/doclet/AccessFrameTitle/AccessFrameTitle.java test/langtools/jdk/javadoc/doclet/AccessFrameTitle/p1/C1.java test/langtools/jdk/javadoc/doclet/AccessFrameTitle/p2/C2.java test/langtools/jdk/javadoc/doclet/PackagesHeader/PackagesHeader.java test/langtools/jdk/javadoc/doclet/PackagesHeader/p1/C1.java test/langtools/jdk/javadoc/doclet/PackagesHeader/p2/C2.java test/langtools/jdk/javadoc/doclet/testClassDocCatalog/TestClassDocCatalog.java test/langtools/jdk/javadoc/doclet/testClassDocCatalog/pkg1/EmptyAnnotation.java test/langtools/jdk/javadoc/doclet/testClassDocCatalog/pkg1/EmptyClass.java test/langtools/jdk/javadoc/doclet/testClassDocCatalog/pkg1/EmptyEnum.java test/langtools/jdk/javadoc/doclet/testClassDocCatalog/pkg1/EmptyError.java test/langtools/jdk/javadoc/doclet/testClassDocCatalog/pkg1/EmptyException.java test/langtools/jdk/javadoc/doclet/testClassDocCatalog/pkg1/EmptyInterface.java test/langtools/jdk/javadoc/doclet/testClassDocCatalog/pkg2/EmptyAnnotation.java test/langtools/jdk/javadoc/doclet/testClassDocCatalog/pkg2/EmptyClass.java test/langtools/jdk/javadoc/doclet/testClassDocCatalog/pkg2/EmptyEnum.java test/langtools/jdk/javadoc/doclet/testClassDocCatalog/pkg2/EmptyError.java test/langtools/jdk/javadoc/doclet/testClassDocCatalog/pkg2/EmptyException.java test/langtools/jdk/javadoc/doclet/testClassDocCatalog/pkg2/EmptyInterface.java test/langtools/jdk/javadoc/doclet/testFramesNoFrames/TestFramesNoFrames.java test/langtools/jdk/javadoc/doclet/testWindowTitle/TestWindowTitle.java test/langtools/jdk/javadoc/doclet/testWindowTitle/p1/C1.java test/langtools/jdk/javadoc/doclet/testWindowTitle/p2/C2.java |
diffstat | 543 files changed, 6552 insertions(+), 9092 deletions(-) |
line diff
--- a/.hgtags Thu Mar 28 22:08:15 2019 +0100 +++ b/.hgtags Thu Apr 04 22:07:49 2019 +0200 @@ -552,3 +552,4 @@ b67884871b5fff79c5ef3eb8ac74dd48d71ea9b1 jdk-12-ga 83cace4142c8563b6a921787db02388e1bc48d01 jdk-13+13 46cf212cdccaf4fb064d913b12004007d3322b67 jdk-13+14 +f855ec13aa2501ae184c8b3e0626a8cec9966116 jdk-13+15
--- a/make/Bundles.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/Bundles.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,7 @@ include MakeBase.gmk PRODUCT_TARGETS := +LEGACY_TARGETS := TEST_TARGETS := DOCS_TARGETS := @@ -71,7 +72,7 @@ $(BUNDLES_OUTPUTDIR)/$$($1_BUNDLE_NAME): $$($1_FILES) $$(call MakeTargetDir) - # If any of the files contain a space in the file name, CacheFind + # If any of the files contain a space in the file name, FindFiles # will have replaced it with ?. Tar does not accept that so need to # switch it back. $$(foreach d, $$($1_BASE_DIRS), \ @@ -146,24 +147,31 @@ # correct base directories. ifeq ($(call isTargetOs, macosx)+$(DEBUG_LEVEL), true+release) JDK_IMAGE_DIR := $(JDK_MACOSX_BUNDLE_DIR) + JRE_IMAGE_DIR := $(JRE_MACOSX_BUNDLE_DIR) JDK_IMAGE_HOMEDIR := $(JDK_MACOSX_CONTENTS_DIR)/Home + JRE_IMAGE_HOMEDIR := $(JRE_MACOSX_CONTENTS_DIR)/Home JDK_BUNDLE_SUBDIR := + JRE_BUNDLE_SUBDIR := else JDK_IMAGE_HOMEDIR := $(JDK_IMAGE_DIR) + JRE_IMAGE_HOMEDIR := $(JRE_IMAGE_DIR) JDK_BUNDLE_SUBDIR := jdk-$(VERSION_NUMBER) + JRE_BUNDLE_SUBDIR := jre-$(VERSION_NUMBER) ifneq ($(DEBUG_LEVEL), release) JDK_BUNDLE_SUBDIR := $(JDK_BUNDLE_SUBDIR)/$(DEBUG_LEVEL) + JRE_BUNDLE_SUBDIR := $(JRE_BUNDLE_SUBDIR)/$(DEBUG_LEVEL) endif endif ################################################################################ -ifneq ($(filter product-bundles, $(MAKECMDGOALS)), ) - $(eval $(call FillCacheFind, $(IMAGES_OUTPUTDIR))) +ifneq ($(filter product-bundles legacy-bundles, $(MAKECMDGOALS)), ) SYMBOLS_EXCLUDE_PATTERN := %.debuginfo %.diz %.pdb %.map - ALL_JDK_FILES := $(call CacheFind, $(JDK_IMAGE_DIR)) + # There may be files with spaces in the names, so use ShellFindFiles + # explicitly. 
+ ALL_JDK_FILES := $(call ShellFindFiles, $(JDK_IMAGE_DIR)) # Create special filter rules when dealing with unzipped .dSYM directories on # macosx @@ -194,10 +202,26 @@ $(ALL_JDK_FILES) \ ) \ ) \ - $(call CacheFind, $(SYMBOLS_IMAGE_DIR)) + $(call FindFiles, $(SYMBOLS_IMAGE_DIR)) TEST_DEMOS_BUNDLE_FILES := $(filter $(JDK_IMAGE_HOMEDIR)/demo/%, $(ALL_JDK_FILES)) + ALL_JRE_FILES := $(call ShellFindFiles, $(JRE_IMAGE_DIR)) + + # Create special filter rules when dealing with unzipped .dSYM directories on + # macosx + ifeq ($(OPENJDK_TARGET_OS), macosx) + ifeq ($(ZIP_EXTERNAL_DEBUG_SYMBOLS), false) + JRE_SYMBOLS_EXCLUDE_PATTERN := $(addprefix %, \ + $(call containing, .dSYM/, $(patsubst $(JRE_IMAGE_DIR)/%, %, $(ALL_JRE_FILES)))) + endif + endif + + JRE_BUNDLE_FILES := $(filter-out \ + $(JRE_SYMBOLS_EXCLUDE_PATTERN) \ + $(SYMBOLS_EXCLUDE_PATTERN), \ + $(ALL_JRE_FILES)) + $(eval $(call SetupBundleFile, BUILD_JDK_BUNDLE, \ BUNDLE_NAME := $(JDK_BUNDLE_NAME), \ FILES := $(JDK_BUNDLE_FILES), \ @@ -208,6 +232,15 @@ PRODUCT_TARGETS += $(BUILD_JDK_BUNDLE) + $(eval $(call SetupBundleFile, BUILD_JRE_BUNDLE, \ + BUNDLE_NAME := $(JRE_BUNDLE_NAME), \ + FILES := $(JRE_BUNDLE_FILES), \ + BASE_DIRS := $(JRE_IMAGE_DIR), \ + SUBDIR := $(JRE_BUNDLE_SUBDIR), \ + )) + + LEGACY_TARGETS += $(BUILD_JRE_BUNDLE) + $(eval $(call SetupBundleFile, BUILD_JDK_SYMBOLS_BUNDLE, \ BUNDLE_NAME := $(JDK_SYMBOLS_BUNDLE_NAME), \ FILES := $(JDK_SYMBOLS_BUNDLE_FILES), \ @@ -234,7 +267,7 @@ ################################################################################ ifneq ($(filter test-bundles, $(MAKECMDGOALS)), ) - TEST_BUNDLE_FILES := $(call CacheFind, $(TEST_IMAGE_DIR)) + TEST_BUNDLE_FILES := $(call FindFiles, $(TEST_IMAGE_DIR)) $(eval $(call SetupBundleFile, BUILD_TEST_BUNDLE, \ BUNDLE_NAME := $(TEST_BUNDLE_NAME), \ @@ -248,7 +281,7 @@ ################################################################################ ifneq ($(filter docs-bundles, $(MAKECMDGOALS)), ) - DOCS_BUNDLE_FILES := $(call CacheFind, $(DOCS_IMAGE_DIR)) + DOCS_BUNDLE_FILES := $(call FindFiles, $(DOCS_IMAGE_DIR)) $(eval $(call SetupBundleFile, BUILD_DOCS_BUNDLE, \ BUNDLE_NAME := $(DOCS_BUNDLE_NAME), \ @@ -263,7 +296,7 @@ ################################################################################ ifneq ($(filter jcov-bundles, $(MAKECMDGOALS)), ) - JCOV_BUNDLE_FILES := $(call CacheFind, $(JCOV_IMAGE_DIR)) + JCOV_BUNDLE_FILES := $(call FindFiles, $(JCOV_IMAGE_DIR)) $(eval $(call SetupBundleFile, BUILD_JCOV_BUNDLE, \ BUNDLE_NAME := $(JCOV_BUNDLE_NAME), \ @@ -283,6 +316,7 @@ ################################################################################ product-bundles: $(PRODUCT_TARGETS) +legacy-bundles: $(LEGACY_TARGETS) test-bundles: $(TEST_TARGETS) docs-bundles: $(DOCS_TARGETS) jcov-bundles: $(JCOV_TARGETS)
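The new JRE bundle above reuses the same SetupBundleFile plumbing as the existing JDK bundle. As a rough sketch of the pattern, with hypothetical EXAMPLE_* names standing in for the real image variables:

    # Hypothetical image directory; the real bundles use JDK_IMAGE_DIR / JRE_IMAGE_DIR.
    EXAMPLE_IMAGE_DIR := $(IMAGES_OUTPUTDIR)/example
    # ShellFindFiles is used because image trees may contain files with spaces.
    EXAMPLE_BUNDLE_FILES := $(call ShellFindFiles, $(EXAMPLE_IMAGE_DIR))

    $(eval $(call SetupBundleFile, BUILD_EXAMPLE_BUNDLE, \
        BUNDLE_NAME := example-$(VERSION_NUMBER)_bin.tar.gz, \
        FILES := $(EXAMPLE_BUNDLE_FILES), \
        BASE_DIRS := $(EXAMPLE_IMAGE_DIR), \
        SUBDIR := example-$(VERSION_NUMBER), \
    ))

    # Adding the result to LEGACY_TARGETS ties it into the new legacy-bundles goal.
    LEGACY_TARGETS += $(BUILD_EXAMPLE_BUNDLE)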
--- a/make/CompileDemos.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/CompileDemos.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -43,7 +43,7 @@ # Prepare the find cache. DEMO_SRC_DIRS += $(TOPDIR)/src/demo -$(eval $(call FillCacheFind, $(wildcard $(DEMO_SRC_DIRS)))) +$(call FillFindCache, $(DEMO_SRC_DIRS)) # Append demo goals to this variable. TARGETS = @@ -237,11 +237,11 @@ ifeq ($(call isTargetOs, solaris), true) TARGETS += $(patsubst $(DEMO_SHARE_SRC)/nbproject/%, \ $(SUPPORT_OUTPUTDIR)/demos/image/nbproject/%, \ - $(call CacheFind, $(DEMO_SHARE_SRC)/nbproject)) + $(call FindFiles, $(DEMO_SHARE_SRC)/nbproject)) else TARGETS += $(patsubst $(DEMO_SHARE_SRC)/nbproject/%, \ $(SUPPORT_OUTPUTDIR)/demos/image/nbproject/%, \ - $(call CacheFind, $(DEMO_SHARE_SRC)/nbproject)) + $(call FindFiles, $(DEMO_SHARE_SRC)/nbproject)) endif ################################################################################ @@ -250,7 +250,7 @@ $(eval $(call SetupCopyFiles, COPY_TO_TEST_IMAGE, \ SRC := $(SUPPORT_OUTPUTDIR)/demos/image, \ DEST := $(TEST_IMAGE_DIR)/jdk/demos, \ - FILES := $(call CacheFind, $(SUPPORT_OUTPUTDIR)/demos/image), \ + FILES := $(call FindFiles, $(SUPPORT_OUTPUTDIR)/demos/image), \ )) IMAGES_TARGETS := $(COPY_TO_TEST_IMAGE)
--- a/make/CompileJavaModules.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/CompileJavaModules.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -79,7 +79,7 @@ ################################################################################ -java.desktop_ADD_JAVAC_FLAGS += -Xdoclint:all/protected,-reference,-accessibility \ +java.desktop_ADD_JAVAC_FLAGS += -Xdoclint:all/protected,-reference \ '-Xdoclint/package:java.*,javax.*' -Xlint:exports \ --doclint-format html4 java.desktop_COPY += .gif .png .wav .txt .xml .css .pf @@ -653,7 +653,7 @@ ifneq ($(wildcard $(IMPORT_MODULE_DIR)), ) $(JDK_OUTPUTDIR)/modules/$(MODULE)/_imported.marker: \ - $(call CacheFind, $(IMPORT_MODULE_DIR)) + $(call FindFiles, $(IMPORT_MODULE_DIR)) $(call MakeDir, $(@D)) # Do not delete marker and build meta data files $(RM) -r $(filter-out $(@D)/_%, $(wildcard $(@D)/*))
--- a/make/CopyImportModules.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/CopyImportModules.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -35,7 +35,7 @@ CMDS_DIR := $(wildcard $(addsuffix /$(MODULE), $(IMPORT_MODULES_CMDS))) CONF_DIR := $(wildcard $(addsuffix /$(MODULE), $(IMPORT_MODULES_CONF))) -$(eval $(call FillCacheFind, $(LIBS_DIR) $(CMDS_DIR) $(CONF_DIR))) +$(call FillFindCache, $(LIBS_DIR) $(CMDS_DIR) $(CONF_DIR)) ifneq ($(LIBS_DIR), ) ifeq ($(call isTargetOs, windows), true) @@ -45,21 +45,21 @@ SRC := $(LIBS_DIR), \ DEST := $(JDK_OUTPUTDIR)/bin, \ FILES := $(filter $(TO_BIN_FILTER), \ - $(call CacheFind, $(LIBS_DIR))) \ + $(call FindFiles, $(LIBS_DIR))) \ )) $(eval $(call SetupCopyFiles, COPY_LIBS_TO_LIB, \ SRC := $(LIBS_DIR), \ DEST := $(JDK_OUTPUTDIR)/lib, \ FILES := $(filter-out $(TO_BIN_FILTER), \ - $(call CacheFind, $(LIBS_DIR))) \ + $(call FindFiles, $(LIBS_DIR))) \ )) TARGETS += $(COPY_LIBS_TO_BIN) $(COPY_LIBS_TO_LIB) else $(eval $(call SetupCopyFiles, COPY_LIBS, \ SRC := $(LIBS_DIR), \ DEST := $(JDK_OUTPUTDIR)/lib, \ - FILES := $(filter %$(SHARED_LIBRARY_SUFFIX), $(call CacheFind, $(LIBS_DIR))), \ + FILES := $(filter %$(SHARED_LIBRARY_SUFFIX), $(call FindFiles, $(LIBS_DIR))), \ )) # Use relative links if the import dir is inside the OUTPUTDIR, otherwise @@ -75,7 +75,7 @@ $(eval $(call SetupCopyFiles, LINK_LIBS, \ SRC := $(LIBS_DIR), \ DEST := $(JDK_OUTPUTDIR)/lib, \ - FILES := $(filter-out %$(SHARED_LIBRARY_SUFFIX), $(call CacheFind, $(LIBS_DIR))), \ + FILES := $(filter-out %$(SHARED_LIBRARY_SUFFIX), $(call FindFiles, $(LIBS_DIR))), \ MACRO := $(LINK_MACRO), \ LOG_ACTION := $(LOG_ACTION), \ )) @@ -87,7 +87,7 @@ $(eval $(call SetupCopyFiles, COPY_CMDS, \ SRC := $(CMDS_DIR), \ DEST := $(JDK_OUTPUTDIR)/bin, \ - FILES := $(call CacheFind, $(CMDS_DIR)), \ + FILES := $(call FindFiles, $(CMDS_DIR)), \ )) TARGETS += $(COPY_CMDS) endif @@ -96,7 +96,7 @@ $(eval $(call SetupCopyFiles, COPY_CONF, \ SRC := $(CONF_DIR), \ DEST := $(JDK_OUTPUTDIR)/lib, \ - FILES := $(call CacheFind, $(CONF_DIR)), \ + FILES := $(call FindFiles, $(CONF_DIR)), \ )) TARGETS += $(COPY_CONF) endif
--- a/make/CreateBuildJdkCopy.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/CreateBuildJdkCopy.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -39,7 +39,7 @@ COPY_CLASSES_TARGET := $(BUILDJDK_OUTPUTDIR)/jdk/modules/java.base/_the.buildjdk-copy-marker -$(COPY_CLASSES_TARGET): $(call CacheFind, $(wildcard \ +$(COPY_CLASSES_TARGET): $(call FindFiles, $(wildcard \ $(addprefix $(JDK_OUTPUTDIR)/modules/, $(MODULES_TO_COPY)))) $(call LogInfo, Copying java modules to buildjdk: $(MODULES_TO_COPY)) $(RM) -r $(BUILDJDK_OUTPUTDIR)/jdk/modules @@ -56,7 +56,7 @@ $(eval $(call SetupCopyFiles, COPY_SUPPORT_HEADERS, \ SRC := $(OUTPUTDIR), \ DEST := $(BUILDJDK_OUTPUTDIR), \ - FILES := $(call CacheFind, $(wildcard \ + FILES := $(call FindFiles, $(wildcard \ $(addprefix $(SUPPORT_OUTPUTDIR)/headers/, $(MODULES_TO_COPY)))), \ ))
--- a/make/CreateJmods.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/CreateJmods.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -53,33 +53,33 @@ MAN_DIR ?= $(firstword $(wildcard $(addsuffix /$(MODULE), \ $(SUPPORT_OUTPUTDIR)/modules_man $(IMPORT_MODULES_MAN)))) -$(eval $(call FillCacheFind, \ +$(call FillFindCache, \ $(LIBS_DIR) $(CMDS_DIR) $(CONF_DIR) $(CLASSES_DIR) \ -)) +) ifneq ($(LIBS_DIR), ) JMOD_FLAGS += --libs $(LIBS_DIR) - DEPS += $(call CacheFind, $(LIBS_DIR)) + DEPS += $(call FindFiles, $(LIBS_DIR)) endif ifneq ($(CMDS_DIR), ) JMOD_FLAGS += --cmds $(CMDS_DIR) - DEPS += $(call CacheFind, $(CMDS_DIR)) + DEPS += $(call FindFiles, $(CMDS_DIR)) endif ifneq ($(CONF_DIR), ) JMOD_FLAGS += --config $(CONF_DIR) - DEPS += $(call CacheFind, $(CONF_DIR)) + DEPS += $(call FindFiles, $(CONF_DIR)) endif ifneq ($(CLASSES_DIR), ) JMOD_FLAGS += --class-path $(CLASSES_DIR) - DEPS += $(call CacheFind, $(CLASSES_DIR)) + DEPS += $(call FindFiles, $(CLASSES_DIR)) endif ifneq ($(INCLUDE_HEADERS_DIR), ) JMOD_FLAGS += --header-files $(INCLUDE_HEADERS_DIR) - DEPS += $(call CacheFind, $(INCLUDE_HEADERS_DIR)) + DEPS += $(call FindFiles, $(INCLUDE_HEADERS_DIR)) endif ifneq ($(MAN_DIR), ) JMOD_FLAGS += --man-pages $(MAN_DIR) - DEPS += $(call CacheFind, $(MAN_DIR)) + DEPS += $(call FindFiles, $(MAN_DIR)) endif # If a specific modules_legal dir exists for this module, only pick up files @@ -93,7 +93,7 @@ ) LEGAL_NOTICES_PATH := $(call PathList, $(LEGAL_NOTICES)) -DEPS += $(call CacheFind, $(LEGAL_NOTICES)) +DEPS += $(call FindFiles, $(LEGAL_NOTICES)) JMOD_FLAGS += --legal-notices $(LEGAL_NOTICES_PATH) @@ -147,7 +147,7 @@ # the actual command. Filter that out using wildcard before adding to DEPS. DEPS += $(wildcard $(JMOD_CMD)) ifeq ($(EXTERNAL_BUILDJDK), false) - DEPS += $(call CacheFind, $(JDK_OUTPUTDIR)/modules/jdk.jlink/jdk/tools/jmod) + DEPS += $(call FindFiles, $(JDK_OUTPUTDIR)/modules/jdk.jlink/jdk/tools/jmod) endif # If creating interim versions of jmods, certain files need to be filtered out
--- a/make/Docs.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/Docs.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -337,7 +337,7 @@ $$(SUPPORT_OUTPUTDIR)/docs/$1.vardeps) # Get a list of all files in all the source dirs for all included modules - $1_SOURCE_DEPS := $$(call CacheFind, $$(wildcard $$(foreach module, \ + $1_SOURCE_DEPS := $$(call FindFiles, $$(wildcard $$(foreach module, \ $$($1_ALL_MODULES), $$(call FindModuleSrcDirs, $$(module))))) $$(eval $$(call SetupExecute, javadoc_$1, \ @@ -484,7 +484,7 @@ GLOBAL_SPECS_RESOURCES_DIR := $(TOPDIR)/make/data/docs-resources/ $(eval $(call SetupCopyFiles, COPY_GLOBAL_RESOURCES, \ SRC := $(GLOBAL_SPECS_RESOURCES_DIR), \ - FILES := $(call CacheFind, $(GLOBAL_SPECS_RESOURCES_DIR)), \ + FILES := $(call FindFiles, $(GLOBAL_SPECS_RESOURCES_DIR)), \ DEST := $(DOCS_OUTPUTDIR), \ )) JDK_INDEX_TARGETS += $(COPY_GLOBAL_RESOURCES) @@ -509,10 +509,10 @@ $(foreach m, $(ALL_MODULES), \ $(eval SPECS_$m := $(call FindModuleSpecsDirs, $m)) \ $(foreach d, $(SPECS_$m), \ - $(if $(filter $(COPY_SPEC_FILTER), $(call CacheFind, $d)), \ + $(if $(filter $(COPY_SPEC_FILTER), $(call FindFiles, $d)), \ $(eval $(call SetupCopyFiles, COPY_$m, \ SRC := $d, \ - FILES := $(filter $(COPY_SPEC_FILTER), $(call CacheFind, $d)), \ + FILES := $(filter $(COPY_SPEC_FILTER), $(call FindFiles, $d)), \ DEST := $(DOCS_OUTPUTDIR)/specs/, \ )) \ $(eval JDK_SPECS_TARGETS += $(COPY_$m)) \ @@ -529,11 +529,11 @@ $(foreach m, $(ALL_MODULES), \ $(eval SPECS_$m := $(call FindModuleSpecsDirs, $m)) \ $(foreach d, $(SPECS_$m), \ - $(if $(filter %.md, $(call CacheFind, $d)), \ + $(if $(filter %.md, $(call FindFiles, $d)), \ $(eval $m_$d_NAME := SPECS_TO_HTML_$m_$(strip $(call RelativePath, $d, $(TOPDIR)))) \ $(eval $(call SetupProcessMarkdown, $($m_$d_NAME), \ SRC := $d, \ - FILES := $(filter %.md, $(call CacheFind, $d)), \ + FILES := $(filter %.md, $(call FindFiles, $d)), \ DEST := $(DOCS_OUTPUTDIR)/specs/, \ CSS := $(GLOBAL_SPECS_DEFAULT_CSS_FILE), \ )) \ @@ -556,11 +556,11 @@ $(foreach m, $(ALL_MODULES), \ $(eval MAN_$m := $(call FindModuleManDirs, $m)) \ $(foreach d, $(MAN_$m), \ - $(if $(filter %.md, $(call CacheFind, $d)), \ + $(if $(filter %.md, $(call FindFiles, $d)), \ $(eval $m_$d_NAME := MAN_TO_HTML_$m_$(strip $(call RelativePath, $d, $(TOPDIR)))) \ $(eval $(call SetupProcessMarkdown, $($m_$d_NAME), \ SRC := $d, \ - FILES := $(filter %.md, $(call CacheFind, $d)), \ + FILES := $(filter %.md, $(call FindFiles, $d)), \ DEST := $(DOCS_OUTPUTDIR)/specs/man, \ FILTER := $(PANDOC_HTML_MANPAGE_FILTER), \ CSS := $(GLOBAL_SPECS_DEFAULT_CSS_FILE), \
--- a/make/Images.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/Images.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -183,21 +183,14 @@ ifeq ($(GCOV_ENABLED), true) - GCOV_FIND_EXPR := -type f -name "*.gcno" - - $(eval $(call SetupCopyFiles,COPY_HOTSPOT_GCOV_GCNO, \ + $(eval $(call SetupCopyFiles,COPY_GCOV_GCNO, \ SRC := $(OUTPUTDIR), \ DEST := $(SYMBOLS_IMAGE_DIR)/gcov, \ - FILES := $(shell $(FIND) $(HOTSPOT_OUTPUTDIR) $(GCOV_FIND_EXPR)))) + FILES := $(call FindFiles, $(HOTSPOT_OUTPUTDIR) \ + $(SUPPORT_OUTPUTDIR)/native, *.gcno) \ + )) - SYMBOLS_TARGETS += $(COPY_HOTSPOT_GCOV_GCNO) - - $(eval $(call SetupCopyFiles,COPY_JDK_GCOV_GCNO, \ - SRC := $(OUTPUTDIR), \ - DEST := $(SYMBOLS_IMAGE_DIR)/gcov, \ - FILES := $(shell $(FIND) $(SUPPORT_OUTPUTDIR)/native $(GCOV_FIND_EXPR)))) - - SYMBOLS_TARGETS += $(COPY_JDK_GCOV_GCNO) + SYMBOLS_TARGETS += $(COPY_GCOV_GCNO) endif @@ -230,10 +223,10 @@ # On Macosx, if debug symbols have not been zipped, find all files inside *.dSYM # dirs. ifeq ($(call isTargetOs, macosx), true) - $(eval $(call FillCacheFind, \ - $(SUPPORT_OUTPUTDIR)/modules_cmds $(SUPPORT_OUTPUTDIR)/modules_libs)) + $(call FillFindCache, \ + $(SUPPORT_OUTPUTDIR)/modules_cmds $(SUPPORT_OUTPUTDIR)/modules_libs) FindDebuginfoFiles = \ - $(if $(wildcard $1), $(call containing, .dSYM/, $(call CacheFind, $1))) + $(if $(wildcard $1), $(call containing, .dSYM/, $(call FindFiles, $1))) endif endif
--- a/make/Init.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/Init.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -238,11 +238,13 @@ ifeq ($(LOG_NOFILE), true) # Disable build log if LOG=[level,]nofile was given override BUILD_LOG_PIPE := + override BUILD_LOG_PIPE_SIMPLE := endif ifeq ($(filter dist-clean, $(SEQUENTIAL_TARGETS)), dist-clean) # We can't have a log file if we're about to remove it. override BUILD_LOG_PIPE := + override BUILD_LOG_PIPE_SIMPLE := endif ifeq ($(OUTPUT_SYNC_SUPPORTED), true) @@ -303,7 +305,7 @@ main: $(INIT_TARGETS) ifneq ($(SEQUENTIAL_TARGETS)$(PARALLEL_TARGETS), ) $(call RotateLogFiles) - $(PRINTF) "Building $(TARGET_DESCRIPTION)\n" $(BUILD_LOG_PIPE) + $(PRINTF) "Building $(TARGET_DESCRIPTION)\n" $(BUILD_LOG_PIPE_SIMPLE) ifneq ($(SEQUENTIAL_TARGETS), ) # Don't touch build output dir since we might be cleaning. That # means no log pipe. @@ -325,7 +327,7 @@ $(PARALLEL_TARGETS) $(COMPARE_BUILD_MAKE) $(BUILD_LOG_PIPE) || \ ( exitcode=$$? && \ $(PRINTF) "\nERROR: Build failed for $(TARGET_DESCRIPTION) (exit code $$exitcode) \n" \ - $(BUILD_LOG_PIPE) && \ + $(BUILD_LOG_PIPE_SIMPLE) && \ cd $(TOPDIR) && $(MAKE) $(MAKE_ARGS) -j 1 -f make/Init.gmk \ HAS_SPEC=true on-failure ; \ exit $$exitcode ) ) @@ -336,7 +338,7 @@ if test -f $(MAKESUPPORT_OUTPUTDIR)/exit-with-error ; then \ exit 1 ; \ fi - $(PRINTF) "Finished building $(TARGET_DESCRIPTION)\n" $(BUILD_LOG_PIPE) + $(PRINTF) "Finished building $(TARGET_DESCRIPTION)\n" $(BUILD_LOG_PIPE_SIMPLE) $(call ReportProfileTimes) endif
--- a/make/InitSupport.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/InitSupport.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -264,13 +264,9 @@ endif endif - # The --no-print-directory is needed to make the call from - # FindTest.gmk to Test.gmk work with LOG=debug/trace. See - # JDK-8213736 $$(main_targets_file): @( cd $$(topdir) && \ - $$(MAKE) $$(MAKE_LOG_FLAGS) -r -R --no-print-directory \ - -f $$(topdir)/make/Main.gmk \ + $$(MAKE) $$(MAKE_LOG_FLAGS) -r -R -f $$(topdir)/make/Main.gmk \ -I $$(topdir)/make/common SPEC=$(strip $2) NO_RECIPES=true \ $$(MAKE_LOG_VARS) \ create-main-targets-include ) @@ -300,6 +296,9 @@ BUILD_PROFILE_LOG := $(OUTPUTDIR)/build-profile.log BUILD_LOG_PIPE := > >($(TEE) -a $(BUILD_LOG)) 2> >($(TEE) -a $(BUILD_LOG) >&2) && wait + # Use this for simple echo/printf commands that are never expected to print + # to stderr. + BUILD_LOG_PIPE_SIMPLE := | $(TEE) -a $(BUILD_LOG) ifneq ($(CUSTOM_ROOT), ) topdir=$(CUSTOM_ROOT) @@ -518,7 +517,7 @@ "`$(LS) $(BUILDTIMESDIR)/build_time_diff_* | $(GREP) -v _TOTAL | \ $(XARGS) $(CAT) | $(SORT) -k 2`" \ "`$(CAT) $(BUILDTIMESDIR)/build_time_diff_TOTAL`" \ - $(BUILD_LOG_PIPE) + $(BUILD_LOG_PIPE_SIMPLE) endef define ReportProfileTimes @@ -528,7 +527,7 @@ $(CAT) $(BUILD_PROFILE_LOG) && \ $(ECHO) End $(notdir $(BUILD_PROFILE_LOG)); \ } \ - $(BUILD_LOG_PIPE) + $(BUILD_LOG_PIPE_SIMPLE) ) endef
--- a/make/MacBundles.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/MacBundles.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -52,13 +52,13 @@ $(eval $(call SetupCopyFiles, COPY_JDK_IMAGE, \ SRC := $(JDK_IMAGE_DIR), \ DEST := $(JDK_MACOSX_CONTENTS_DIR)/Home, \ - FILES := $(call CacheFind, $(JDK_IMAGE_DIR)), \ + FILES := $(call FindFiles, $(JDK_IMAGE_DIR)), \ )) $(eval $(call SetupCopyFiles, COPY_JRE_IMAGE, \ SRC := $(JRE_IMAGE_DIR), \ DEST := $(JRE_MACOSX_CONTENTS_DIR)/Home, \ - FILES := $(call CacheFind, $(JRE_IMAGE_DIR)), \ + FILES := $(call FindFiles, $(JRE_IMAGE_DIR)), \ )) $(JDK_MACOSX_CONTENTS_DIR)/MacOS/libjli.dylib:
--- a/make/Main.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/Main.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -581,6 +581,9 @@ product-bundles: +($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f Bundles.gmk product-bundles) +legacy-bundles: + +($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f Bundles.gmk legacy-bundles) + test-bundles: +($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f Bundles.gmk test-bundles) @@ -592,7 +595,7 @@ +($(CD) $(TOPDIR)/make && $(MAKE) $(MAKE_ARGS) -f Bundles.gmk jcov-bundles) endif -ALL_TARGETS += product-bundles test-bundles docs-bundles jcov-bundles +ALL_TARGETS += product-bundles legacy-bundles test-bundles docs-bundles jcov-bundles ################################################################################ # Install targets @@ -647,7 +650,6 @@ # Declare dependencies between hotspot-<variant>* targets $(foreach v, $(JVM_VARIANTS), \ - $(eval hotspot-$v: hotspot-$v-gensrc hotspot-$v-libs) \ $(eval hotspot-$v-gensrc: java.base-copy) \ $(eval hotspot-$v-libs: hotspot-$v-gensrc java.base-copy) \ ) @@ -918,6 +920,8 @@ product-bundles: product-images + legacy-bundles: legacy-images + test-bundles: test-image docs-bundles: docs-image @@ -938,6 +942,10 @@ buildtools: buildtools-langtools interim-langtools interim-rmic \ buildtools-jdk $(JVM_TOOLS_TARGETS) +# Declare dependencies from hotspot-<variant> targets +$(foreach v, $(JVM_VARIANTS), \ + $(eval hotspot-$v: hotspot-$v-gensrc hotspot-$v-libs) \ +) hotspot: $(HOTSPOT_VARIANT_TARGETS) # Create targets hotspot-libs and hotspot-gensrc. @@ -1020,6 +1028,9 @@ # (and possibly other, more specific versions) product-images: jdk-image symbols-image exploded-image +# This target builds the legacy images, e.g. the legacy JRE image +legacy-images: legacy-jre-image + # zip-security is actually a bundle, but for now it needs to be considered # an image until this can be cleaned up properly. product-images: zip-security @@ -1036,6 +1047,8 @@ ifeq ($(call isTargetOs, macosx), true) product-images: mac-jdk-bundle + + legacy-images: mac-legacy-jre-bundle endif # This target builds the documentation image @@ -1070,7 +1083,7 @@ jdk.jdwp.agent-gensrc $(ALL_MODULES) demos \ exploded-image-base exploded-image \ create-buildjdk docs-jdk-api docs-javase-api docs-reference-api docs-jdk \ - docs-javase docs-reference docs-javadoc mac-bundles product-images \ + docs-javase docs-reference docs-javadoc mac-bundles product-images legacy-images \ docs-image test-image all-images \ all-bundles
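Together with the Bundles.gmk change above, the new goal boils down to roughly this dependency chain (a simplified sketch; the delegation into Bundles.gmk is not shown):

    legacy-bundles: legacy-images           # bundle the legacy images
    legacy-images: legacy-jre-image         # currently only the legacy JRE image
    ifeq ($(call isTargetOs, macosx), true)
      legacy-images: mac-legacy-jre-bundle  # plus the macOS bundle layout
    endif
    # so that a plain "make legacy-bundles" produces the JRE bundle.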
--- a/make/RunTestsPrebuilt.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/RunTestsPrebuilt.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -338,6 +338,12 @@ run-test-prebuilt: @$(RM) -f $(MAKESUPPORT_OUTPUTDIR)/exit-with-error + # The lazy initialization of the cache file in FindTests.gmk does not + # always work with RunTests.gmk. To guarantee that the jtreg test groups + # are always found and parsed, call FindTests.gmk stand alone once + # before calling RunTests.gmk. + @cd $(TOPDIR) && $(MAKE) $(MAKE_ARGS) -f make/common/FindTests.gmk \ + SPEC=$(SPEC) FINDTESTS_STAND_ALONE=true @cd $(TOPDIR) && $(MAKE) $(MAKE_ARGS) -f make/RunTests.gmk run-test \ TEST="$(TEST)"
--- a/make/ZipSource.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/ZipSource.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -87,6 +87,7 @@ EXCLUDE_FILES := $(SRC_ZIP_EXCLUDE_FILES), \ SUFFIXES := .java, \ ZIP := $(SUPPORT_OUTPUTDIR)/src.zip, \ + FOLLOW_SYMLINKS := true, \ )) do-zip: $(BUILD_SRC_ZIP)
--- a/make/autoconf/spec.gmk.in Thu Mar 28 22:08:15 2019 +0100 +++ b/make/autoconf/spec.gmk.in Thu Apr 04 22:07:49 2019 +0200 @@ -906,6 +906,7 @@ JDK_BUNDLE_EXTENSION := tar.gz endif JDK_BUNDLE_NAME := jdk-$(BASE_NAME)_bin$(DEBUG_PART).$(JDK_BUNDLE_EXTENSION) +JRE_BUNDLE_NAME := jre-$(BASE_NAME)_bin$(DEBUG_PART).$(JDK_BUNDLE_EXTENSION) JDK_SYMBOLS_BUNDLE_NAME := jdk-$(BASE_NAME)_bin$(DEBUG_PART)-symbols.tar.gz TEST_DEMOS_BUNDLE_NAME := jdk-$(BASE_NAME)_bin-tests-demos$(DEBUG_PART).tar.gz TEST_BUNDLE_NAME := jdk-$(BASE_NAME)_bin-tests$(DEBUG_PART).tar.gz @@ -913,6 +914,7 @@ JCOV_BUNDLE_NAME := jdk-jcov-$(BASE_NAME)_bin$(DEBUG_PART).$(JDK_BUNDLE_EXTENSION) JDK_BUNDLE := $(BUNDLES_OUTPUTDIR)/$(JDK_BUNDLE_NAME) +JRE_BUNDLE := $(BUNDLES_OUTPUTDIR)/$(JRE_BUNDLE_NAME) JDK_SYMBOLS_BUNDLE := $(BUNDLES_OUTPUTDIR)/$(JDK_SYMBOLS_BUNDLE_NAME) TEST_DEMOS_BUNDLE := $(BUNDLES_OUTPUTDIR)/$(TEST_DEMOS_BUNDLE_NAME) TEST_BUNDLE := $(BUNDLES_OUTPUTDIR)/$(TEST_BUNDLE_NAME)
--- a/make/common/FindTests.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/common/FindTests.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -26,6 +26,16 @@ ifndef _FIND_TESTS_GMK _FIND_TESTS_GMK := 1 +# This makefile can be called directly to just trigger generation of the cache +# makefile. If so, SPEC and MakeBase.gmk need to be included. +ifeq ($(FINDTESTS_STAND_ALONE), true) + include $(SPEC) + include MakeBase.gmk +endif + +# Make sure this variable is not lazy evaled. +ALL_NAMED_TESTS := + # Hook to include the corresponding custom file, if present. $(eval $(call IncludeCustomExtension, common/FindTests.gmk)) @@ -35,31 +45,40 @@ # JTREG_TESTROOTS might have been set by a custom extension JTREG_TESTROOTS += $(addprefix $(TOPDIR)/test/, hotspot/jtreg jdk langtools nashorn jaxp) -################################################################################ -# Find the Jtreg test groups for the given component. +# Extract the names of the Jtreg group files from the TEST.ROOT files. The +# TEST.ROOT files being properties files can be interpreted as makefiles so +# use include to get at the contents instead of expensive shell calls. We are +# looking for the "groups" property in each file. +JTREG_ROOT_FILES := $(addsuffix /TEST.ROOT, $(JTREG_TESTROOTS)) +JTREG_GROUP_FILES := +$(foreach root, $(JTREG_TESTROOTS), \ + $(eval include $(root)/TEST.ROOT) \ + $(eval $(root)_JTREG_GROUP_FILES := $$(addprefix $(root)/, $$(groups))) \ + $(eval JTREG_GROUP_FILES += $$($(root)_JTREG_GROUP_FILES)) \ +) + +# Cache the expensive to calculate test names in a generated makefile. +FIND_TESTS_CACHE_FILE := $(MAKESUPPORT_OUTPUTDIR)/find-tests.gmk + +# If this file is deemed outdated, it will automatically get regenerated +# by this rule before being included below. # -# Parameter 1 is the jtreg root dir. This name is used as variable prefix. -# -# After this macro has been evaluated, the following variables are defined for -# the component: -# JTREG_TESTROOT - The path to the root of the test directory -# JTREG_GROUP_FILES - The file(s) containing the group definitions -# JTREG_TEST_GROUPS - The name of the test groups that the component defines -# -FindJtregGroups = $(NamedParamsMacroTemplate) -define FindJtregGroupsBody - ifneq ($$(wildcard $1), ) - $1_JTREG_GROUP_FILENAMES := $$(shell $$(SED) -n -e 's/\[\(.*\)\]/\1/g' \ - -e 's/^groups\w*=//p' $1/TEST.ROOT) - $1_JTREG_GROUP_FILES := $$(addprefix $1/, $$($1_JTREG_GROUP_FILENAMES)) - $1_JTREG_TEST_GROUPS := $$(strip $$(shell $$(SED) -n \ - -e 's/\([^ ]*\)\w*=.*/\1/gp' $$(wildcard $$($1_JTREG_GROUP_FILES)) \ - | $$(SORT) -u)) - endif -endef +# When calling TestMake.gmk, override the log level to avoid any kind of debug +# output being captured into the generated makefile. 
+$(FIND_TESTS_CACHE_FILE): $(JTREG_ROOT_FILES) $(JTREG_GROUP_FILES) + $(call MakeTargetDir) + ( $(foreach root, $(JTREG_TESTROOTS), \ + $(PRINTF) "\n$(root)_JTREG_TEST_GROUPS := " ; \ + $(SED) -n -e 's/^\#.*//g' -e 's/\([^ ]*\)\w*=.*/\1/gp' \ + $($(root)_JTREG_GROUP_FILES) \ + | $(SORT) -u | $(TR) '\n' ' ' ; \ + ) \ + $(PRINTF) "\nMAKE_TEST_TARGETS := " ; \ + $(MAKE) -s --no-print-directory $(MAKE_ARGS) LOG_LEVEL=warn \ + SPEC=$(SPEC) -f $(TOPDIR)/test/make/TestMake.gmk print-targets \ + ) > $@ -# Configure definitions for all available test components -$(foreach root, $(JTREG_TESTROOTS), $(eval $(call FindJtregGroups, $(root)))) +-include $(FIND_TESTS_CACHE_FILE) # Create a list of all available Jtreg test groups in all components JTREG_TEST_GROUPS += $(sort $(foreach root, $(JTREG_TESTROOTS), \ @@ -69,18 +88,15 @@ # ALL_NAMED_TESTS might have been set by a custom extension ALL_NAMED_TESTS += $(JTREG_TEST_GROUPS) +# Add the make test targets +ALL_NAMED_TESTS += $(addprefix make-, $(MAKE_TEST_TARGETS)) + # Add Gtest ALL_NAMED_TESTS += gtest # Add microbenchmarks ALL_NAMED_TESTS += micro -# Find make test targets -MAKE_TEST_TARGETS := $(shell $(MAKE) -s --no-print-directory $(MAKE_ARGS) \ - SPEC=$(SPEC) -f $(TOPDIR)/test/make/TestMake.gmk print-targets) - -ALL_NAMED_TESTS += $(addprefix make-, $(MAKE_TEST_TARGETS)) - # Add special tests ALL_NAMED_TESTS += failure-handler make
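The generated find-tests.gmk cache is itself just a makefile of plain assignments, so later builds only pay for an include. Its contents look roughly like this (paths and group names are illustrative, not taken from an actual build):

    # $(MAKESUPPORT_OUTPUTDIR)/find-tests.gmk, as written by the rule above
    /path/to/topdir/test/hotspot/jtreg_JTREG_TEST_GROUPS := hotspot_all tier1 ...
    /path/to/topdir/test/jdk_JTREG_TEST_GROUPS := jdk_core tier1 ...
    /path/to/topdir/test/langtools_JTREG_TEST_GROUPS := langtools_all tier1 ...
    MAKE_TEST_TARGETS := ...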
--- a/make/common/JarArchive.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/common/JarArchive.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -124,9 +124,9 @@ ifeq ($$($1_DEPENDENCIES), ) # Add all source roots to the find cache since we are likely going to run find # on these more than once. The cache will only be updated if necessary. - $$(eval $$(call FillCacheFind, $$($1_FIND_LIST))) + $$(call FillFindCache, $$($1_FIND_LIST)) $1_DEPENDENCIES:=$$(filter $$(addprefix %,$$($1_SUFFIXES)), \ - $$(call CacheFind,$$($1_SRCS))) + $$(call FindFiles,$$($1_SRCS))) ifneq (,$$($1_GREP_INCLUDE_PATTERNS)) $1_DEPENDENCIES:=$$(filter $$(addsuffix %,$$($1_GREP_INCLUDE_PATTERNS)),$$($1_DEPENDENCIES)) endif @@ -137,7 +137,7 @@ $1_DEPENDENCIES+=$$(wildcard $$(foreach src, $$($1_SRCS), \ $$(addprefix $$(src)/, $$($1_EXTRA_FILES))) $$($1_EXTRA_FILES)) ifeq (,$$($1_SKIP_METAINF)) - $1_DEPENDENCIES+=$$(call CacheFind,$$(wildcard $$(addsuffix /META-INF,$$($1_SRCS)))) + $1_DEPENDENCIES+=$$(call FindFiles,$$(wildcard $$(addsuffix /META-INF,$$($1_SRCS)))) endif endif # The dependency list should never be empty
--- a/make/common/JavaCompilation.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/common/JavaCompilation.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -222,13 +222,12 @@ ) \ ) $$(call MakeDir,$$($1_BIN)) - # Add all source roots to the find cache since we are likely going to run find - # on these more than once. The cache will only be updated if necessary. - $$(eval $$(call FillCacheFind, $$($1_SRC))) - # Find all files in the source trees. Preserve order of source roots so that - # the first version in case of multiple instances of the same file is selected. - # CacheFind does not preserve order so need to call it for each root. - $1_ALL_SRCS += $$($1_EXTRA_FILES) $$(foreach s, $$($1_SRC), $$(call CacheFind, $$s)) + # Order src files according to the order of the src dirs. Correct odering is + # needed for correct overriding between different source roots. + $1_ALL_SRC_RAW := $$(call FindFiles, $$($1_SRC)) + $1_ALL_SRCS := $$($1_EXTRA_FILES) \ + $$(foreach d, $$($1_SRC), $$(filter $$d%, $$($1_ALL_SRC_RAW))) + # Extract the java files. $1_SRCS := $$(filter %.java, $$($1_ALL_SRCS))
--- a/make/common/MakeBase.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/common/MakeBase.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -64,10 +64,12 @@ endef -# In GNU Make 4.0 and higher, there is a file function for writing to files. +# Certain features only work in newer version of GNU Make. The build will still +# function in 3.81, but will be less performant. ifeq (4.0, $(firstword $(sort 4.0 $(MAKE_VERSION)))) HAS_FILE_FUNCTION := true CORRECT_FUNCTION_IN_RECIPE_EVALUATION := true + RWILDCARD_WORKS := true endif @@ -341,74 +343,116 @@ ################################################################################ -ifneq ($(DISABLE_CACHE_FIND), true) - # In Cygwin, finds are very costly, both because of expensive forks and because - # of bad file system caching. Find is used extensively in $(shell) commands to - # find source files. This makes rerunning make with no or few changes rather - # expensive. To speed this up, these two macros are used to cache the results - # of simple find commands for reuse. - # - # Runs a find and stores both the directories where it was run and the results. - # This macro can be called multiple times to add to the cache. Only finds files - # with no filters. - # - # Files containing space will get spaces replaced with ? because GNU Make - # cannot handle lists of files with space in them. By using ?, make will match - # the wildcard to space in many situations so we don't need to replace back - # to space on every use. While not a complete solution it does allow some uses - # of CacheFind to function with spaces in file names, including for - # SetupCopyFiles. - # - # Needs to be called with $(eval ) - # - # Even if the performance benifit is negligible on other platforms, keep the - # functionality active unless explicitly disabled to exercise it more. - # - # Initialize FIND_CACHE_DIRS with := to make it a non recursively-expanded variable - FIND_CACHE_DIRS := - # Param 1 - Dirs to find in - # Param 2 - (optional) specialization. Normally "-a \( ... \)" expression. - define FillCacheFind - # Filter out already cached dirs. The - is needed when FIND_CACHE_DIRS is empty - # since filter out will then return empty. - FIND_CACHE_NEW_DIRS := $$(filter-out $$(addsuffix /%,\ - - $(FIND_CACHE_DIRS)) $(FIND_CACHE_DIRS), $1) - ifneq ($$(FIND_CACHE_NEW_DIRS), ) - # Remove any trailing slash from dirs in the cache dir list - FIND_CACHE_DIRS += $$(patsubst %/,%, $$(FIND_CACHE_NEW_DIRS)) - FIND_CACHE := $$(sort $$(FIND_CACHE) \ - $$(shell $(FIND) $$(wildcard $$(FIND_CACHE_NEW_DIRS)) \ - \( -type f -o -type l \) $2 | $(TR) ' ' '?')) - endif - endef +# Recursive wildcard function. Walks down directories recursively and matches +# files with the search patterns. Patterns use standard file wildcards (* and +# ?). +# +# $1 - Directories to start search in +# $2 - Search patterns +rwildcard = \ + $(strip \ + $(foreach d, \ + $(patsubst %/,%,$(sort $(dir $(wildcard $(addsuffix /*/*, $(strip $1)))))), \ + $(call rwildcard,$d,$2) \ + ) \ + $(call DoubleDollar, $(wildcard $(foreach p, $2, $(addsuffix /$(strip $p), $(strip $1))))) \ + ) - # Mimics find by looking in the cache if all of the directories have been cached. - # Otherwise reverts to shell find. This is safe to call on all platforms, even if - # cache is deactivated. - # - # $1 can be either a directory or a file. If it's a directory, make - # sure we have exactly one trailing slash before the wildcard. - # The extra - is needed when FIND_CACHE_DIRS is empty but should be harmless. 
- # - # Param 1 - Dirs to find in - # Param 2 - (optional) specialization. Normally "-a \( ... \)" expression. - # Param 3 - (optional) options to find. - define CacheFind - $(if $(filter-out $(addsuffix /%,- $(FIND_CACHE_DIRS)) $(FIND_CACHE_DIRS),$1), \ - $(if $(wildcard $1), $(shell $(FIND) $3 $(wildcard $1) \( -type f -o -type l \) $2 \ - | $(TR) ' ' '?')), \ - $(filter $(addsuffix /%,$(patsubst %/,%,$1)) $1,$(FIND_CACHE))) - endef +# Find non directories using recursive wildcard function. This function may +# be used directly when a small amount of directories is expected to be +# searched and caching is not expected to be of use. +# +# $1 - Directory to start search in +# $2 - Optional search patterns, defaults to '*'. +WildcardFindFiles = \ + $(sort $(strip \ + $(eval WildcardFindFiles_result := $(call rwildcard,$(patsubst %/,%,$1),$(if $(strip $2),$2,*))) \ + $(filter-out $(patsubst %/,%,$(sort $(dir $(WildcardFindFiles_result)))), \ + $(WildcardFindFiles_result) \ + ) \ + )) +# Find non directories using the find utility in the shell. Safe to call for +# non existing directories, or directories containing wildcards. +# +# Files containing space will get spaces replaced with ? because GNU Make +# cannot handle lists of files with space in them. By using ?, make will match +# the wildcard to space in many situations so we don't need to replace back +# to space on every use. While not a complete solution it does allow some uses +# of FindFiles to function with spaces in file names, including for +# SetupCopyFiles. Unfortunately this does not work for WildcardFindFiles so +# if files with spaces are anticipated, use ShellFindFiles directly. +# +# $1 - Directories to start search in. +# $2 - Optional search patterns, empty means find everything. Patterns use +# standard file wildcards (* and ?) and should not be quoted. +# $3 - Optional options to find. +ShellFindFiles = \ + $(if $(wildcard $1), \ + $(sort \ + $(shell $(FIND) $3 $(patsubst %/,%,$(wildcard $1)) \( -type f -o -type l \) \ + $(if $(strip $2), -a \( -name "$(firstword $2)" \ + $(foreach p, $(filter-out $(firstword $2), $2), -o -name "$(p)") \)) \ + | $(TR) ' ' '?' \ + ) \ + ) \ + ) + +# Find non directories using the method most likely to work best for the +# current build host +# +# $1 - Directory to start search in +# $2 - Optional search patterns, defaults to '*'. +ifeq ($(OPENJDK_BUILD_OS)-$(RWILDCARD_WORKS), windows-true) + DirectFindFiles = $(WildcardFindFiles) else - # If CacheFind is disabled, just run the find command. - # Param 1 - Dirs to find in - # Param 2 - (optional) specialization. Normally "-a \( ... \)" expression. - define CacheFind - $(if $(wildcard $1, \ - $(shell $(FIND) $(wildcard $1) \( -type f -o -type l \) $2 | $(TR) ' ' '?') \ + DirectFindFiles = $(ShellFindFiles) +endif + +# Finds files using a cache that is populated by FillFindCache below. If any of +# the directories given have not been cached, DirectFindFiles is used for +# everything. Caching is especially useful in Cygwin, where file finds are very +# costly. +# +# $1 - Directories to start search in. +# $2 - Optional search patterns. If used, no caching is done. 
+CacheFindFiles_CACHED_DIRS := +CacheFindFiles_CACHED_FILES := +CacheFindFiles = \ + $(if $2, \ + $(call DirectFindFiles, $1, $2) \ + , \ + $(if $(filter-out $(addsuffix /%, $(CacheFindFiles_CACHED_DIRS)) \ + $(CacheFindFiles_CACHED_DIRS), $1), \ + $(call DirectFindFiles, $1) \ + , \ + $(filter $(addsuffix /%,$(patsubst %/,%,$1)) $1,$(CacheFindFiles_CACHED_FILES)) \ + ) \ ) - endef + +# Explicitly adds files to the find cache used by CacheFindFiles. +# +# $1 - Directories to start search in +FillFindCache = \ + $(eval CacheFindFiles_NEW_DIRS := $$(filter-out $$(addsuffix /%,\ + $$(CacheFindFiles_CACHED_DIRS)) $$(CacheFindFiles_CACHED_DIRS), $1)) \ + $(if $(CacheFindFiles_NEW_DIRS), \ + $(eval CacheFindFiles_CACHED_DIRS += $$(patsubst %/,%,$$(CacheFindFiles_NEW_DIRS))) \ + $(eval CacheFindFiles_CACHED_FILES := $$(sort $$(CacheFindFiles_CACHED_FILES) \ + $$(call DirectFindFiles, $$(CacheFindFiles_NEW_DIRS)))) \ + ) + +# Findfiles is the default macro that should be used to find files in the file +# system. This function does not always support files with spaces in the names. +# If files with spaces are anticipated, use ShellFindFiles directly. +# +# $1 - Directories to start search in. +# $2 - Optional search patterns, empty means find everything. Patterns use +# standard file wildcards (* and ?) and should not be quoted. +ifeq ($(DISABLE_CACHE_FIND), true) + FindFiles = $(DirectFindFiles) +else + FindFiles = $(CacheFindFiles) endif ################################################################################
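For the reader, a minimal usage sketch of the new find API, assuming MakeBase.gmk has been included (the directory is just an example):

    EXAMPLE_DIR := $(TOPDIR)/src/demo

    # Prime the cache once; later plain FindFiles calls on this directory are
    # answered from the cache instead of hitting the file system again.
    $(call FillFindCache, $(EXAMPLE_DIR))

    # All files and symlinks below the directory (served from the cache).
    ALL_FILES := $(call FindFiles, $(EXAMPLE_DIR))

    # With search patterns the cache is bypassed and DirectFindFiles is used.
    C_FILES := $(call FindFiles, $(EXAMPLE_DIR), *.c *.cpp)

    # Only ShellFindFiles tolerates spaces in file names (spaces come back as ?).
    SPACED_FILES := $(call ShellFindFiles, $(EXAMPLE_DIR))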
--- a/make/common/NativeCompilation.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/common/NativeCompilation.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -231,8 +231,8 @@ # Only continue if this object file hasn't been processed already. This lets # the first found source file override any other with the same name. - ifeq ($$(findstring $$($1_OBJ), $$($$($1_BASE)_OBJS_SO_FAR)), ) - $$($1_BASE)_OBJS_SO_FAR += $$($1_OBJ) + ifeq ($$($1_OBJ_PROCESSED), ) + $1_OBJ_PROCESSED := true # This is the definite source file to use for $1_FILENAME. $1_SRC_FILE := $$($1_FILE) @@ -308,14 +308,18 @@ ifeq ($$(filter %.s %.S, $$($1_FILENAME)), ) # And this is the dependency file for this obj file. - $1_DEP := $$(patsubst %$(OBJ_SUFFIX),%.d,$$($1_OBJ)) + $1_DEPS_FILE := $$(patsubst %$(OBJ_SUFFIX),%.d,$$($1_OBJ)) # The dependency target file lists all dependencies as empty targets to # avoid make error "No rule to make target" for removed files - $1_DEP_TARGETS := $$(patsubst %$(OBJ_SUFFIX),%.d.targets,$$($1_OBJ)) + $1_DEPS_TARGETS_FILE := $$(patsubst %$(OBJ_SUFFIX),%.d.targets,$$($1_OBJ)) - # Include previously generated dependency information. (if it exists) - -include $$($1_DEP) - -include $$($1_DEP_TARGETS) + # Only try to load individual dependency information files if the global + # file hasn't been loaded (could happen if make was interrupted). + ifneq ($$($$($1_BASE)_DEPS_FILE_LOADED), true) + # Include previously generated dependency information. (if it exists) + -include $$($1_DEPS_FILE) + -include $$($1_DEPS_TARGETS_FILE) + endif endif ifneq ($$(strip $$($1_CFLAGS) $$($1_CXXFLAGS) $$($1_OPTIMIZATION)), ) @@ -340,16 +344,16 @@ # object file in the generated deps files. Fixing it with sed. If # compiling assembly, don't try this. $$(call ExecuteWithLog, $$@, \ - $$($1_COMPILER) $$($1_DEP_FLAG) $$($1_DEP).tmp $$($1_COMPILE_OPTIONS)) - $(SED) 's|^$$(@F):|$$@:|' $$($1_DEP).tmp > $$($1_DEP) + $$($1_COMPILER) $$($1_DEP_FLAG) $$($1_DEPS_FILE).tmp $$($1_COMPILE_OPTIONS)) + $(SED) 's|^$$(@F):|$$@:|' $$($1_DEPS_FILE).tmp > $$($1_DEPS_FILE) else $$(call ExecuteWithLog, $$@, \ - $$($1_COMPILER) $$($1_DEP_FLAG) $$($1_DEP) $$($1_COMPILE_OPTIONS)) + $$($1_COMPILER) $$($1_DEP_FLAG) $$($1_DEPS_FILE) $$($1_COMPILE_OPTIONS)) endif # Create a dependency target file from the dependency file. # Solution suggested by http://make.mad-scientist.net/papers/advanced-auto-dependency-generation/ - ifneq ($$($1_DEP), ) - $(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_DEP) > $$($1_DEP_TARGETS) + ifneq ($$($1_DEPS_FILE), ) + $(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_DEPS_FILE) > $$($1_DEPS_TARGETS_FILE) endif else # The Visual Studio compiler lacks a feature for generating make @@ -363,10 +367,10 @@ $$($1_COMPILER) -showIncludes $$($1_COMPILE_OPTIONS)) \ | $(TR) -d '\r' | $(GREP) -v -e "^Note: including file:" \ -e "^$$($1_FILENAME)$$$$" || test "$$$$?" 
= "1" ; \ - $(ECHO) $$@: \\ > $$($1_DEP) ; \ + $(ECHO) $$@: \\ > $$($1_DEPS_FILE) ; \ $(SED) $(WINDOWS_SHOWINCLUDE_SED_PATTERN) $$($1_OBJ).log \ - | $(SORT) -u >> $$($1_DEP) ; \ - $(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_DEP) > $$($1_DEP_TARGETS) + | $(SORT) -u >> $$($1_DEPS_FILE) ; \ + $(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_DEPS_FILE) > $$($1_DEPS_TARGETS_FILE) endif endif endef @@ -486,6 +490,9 @@ $1_NOSUFFIX := $$($1_PREFIX)$$($1_NAME) $1_SAFE_NAME := $$(strip $$(subst /,_, $1)) +# Need to make sure TARGET is first on list + $1 := $$($1_TARGET) + # Setup the toolchain to be used $$(call SetIfEmpty, $1_TOOLCHAIN, TOOLCHAIN_DEFAULT) $$(call SetIfEmpty, $1_CC, $$($$($1_TOOLCHAIN)_CC)) @@ -505,7 +512,7 @@ $$(foreach d, $$($1_SRC), $$(if $$(wildcard $$d), , \ $$(error SRC specified to SetupNativeCompilation $1 contains missing directory $$d))) - $1_SRCS_RAW = $$(call CacheFind, $$($1_SRC)) + $1_SRCS_RAW := $$(call FindFiles, $$($1_SRC)) # Order src files according to the order of the src dirs $1_SRCS := $$(foreach d, $$($1_SRC), $$(filter $$d%, $$($1_SRCS_RAW))) $1_SRCS := $$(filter $$(NATIVE_SOURCE_EXTENSIONS), $$($1_SRCS)) @@ -719,20 +726,21 @@ $1_PCH_FILE := $$($1_OBJECT_DIR)/precompiled/$$(notdir $$($1_PRECOMPILED_HEADER)).pch $1_USE_PCH_FLAGS := -include-pch $$($1_PCH_FILE) endif - $1_PCH_DEP := $$($1_PCH_FILE).d - $1_PCH_DEP_TARGETS := $$($1_PCH_FILE).d.targets + $1_PCH_DEPS_FILE := $$($1_PCH_FILE).d + $1_PCH_DEPS_TARGETS_FILE := $$($1_PCH_FILE).d.targets - -include $$($1_PCH_DEP) - -include $$($1_PCH_DEP_TARGETS) + -include $$($1_PCH_DEPS_FILE) + -include $$($1_PCH_DEPS_TARGETS_FILE) $1_PCH_COMMAND := $$($1_CC) $$($1_CFLAGS) $$($1_EXTRA_CFLAGS) $$($1_SYSROOT_CFLAGS) \ - $$($1_OPT_CFLAGS) -x c++-header -c $(C_FLAG_DEPS) $$($1_PCH_DEP) + $$($1_OPT_CFLAGS) -x c++-header -c $(C_FLAG_DEPS) $$($1_PCH_DEPS_FILE) $$($1_PCH_FILE): $$($1_PRECOMPILED_HEADER) $$($1_COMPILE_VARDEPS_FILE) $$(call LogInfo, Generating precompiled header) $$(call MakeDir, $$(@D)) $$(call ExecuteWithLog, $$@, $$($1_PCH_COMMAND) $$< -o $$@) - $(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_PCH_DEP) > $$($1_PCH_DEP_TARGETS) + $(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_PCH_DEPS_FILE) \ + > $$($1_PCH_DEPS_TARGETS_FILE) $$($1_ALL_OBJS): $$($1_PCH_FILE) @@ -748,6 +756,34 @@ endif endif + # Create a rule to collect all the individual make dependency files into a + # single makefile. + $1_DEPS_FILE := $$($1_OBJECT_DIR)/$1.d + + $$($1_DEPS_FILE): $$($1_ALL_OBJS) + $(RM) $$@ + # CD into dir to reduce risk of hitting command length limits, which + # could otherwise happen if TOPDIR is a very long path. + $(CD) $$($1_OBJECT_DIR) && $(CAT) *.d > $$@.tmp + $(CD) $$($1_OBJECT_DIR) && $(CAT) *.d.targets | $(SORT) -u >> $$@.tmp + # After generating the file, which happens after all objects have been + # compiled, copy it to .old extension. On the next make invocation, this + # .old file will be included by make. + $(CP) $$@.tmp $$@.old + $(MV) $$@.tmp $$@ + + $1 += $$($1_DEPS_FILE) + + # The include must be on the .old file, which represents the state from the + # previous invocation of make. The file being included must not have a rule + # defined for it as otherwise make will think it has to run the rule before + # being able to include the file, which would be wrong since we specifically + # need the file as it was generated by a previous make invocation. 
+ ifneq ($$(wildcard $$($1_DEPS_FILE).old), ) + $1_DEPS_FILE_LOADED := true + -include $$($1_DEPS_FILE).old + endif + # Now call SetupCompileNativeFile for each source file we are going to compile. $$(foreach file, $$($1_SRCS), \ $$(eval $$(call SetupCompileNativeFile, $1_$$(notdir $$(file)),\ @@ -774,10 +810,10 @@ ifeq ($(call isTargetOs, windows), true) ifneq ($$($1_VERSIONINFO_RESOURCE), ) $1_RES := $$($1_OBJECT_DIR)/$$($1_BASENAME).res - $1_RES_DEP := $$($1_RES).d - $1_RES_DEP_TARGETS := $$($1_RES).d.targets - -include $$($1_RES_DEP) - -include $$($1_RES_DEP_TARGETS) + $1_RES_DEPS_FILE := $$($1_RES).d + $1_RES_DEPS_TARGETS_FILE := $$($1_RES).d.targets + -include $$($1_RES_DEPS_FILE) + -include $$($1_RES_DEPS_TARGETS_FILE) $1_RES_VARDEPS := $$($1_RC) $$($1_RC_FLAGS) $1_RES_VARDEPS_FILE := $$(call DependOnVariable, $1_RES_VARDEPS, \ @@ -794,16 +830,18 @@ # For some unknown reason, in this case CL actually outputs the show # includes to stderr so need to redirect it to hide the output from the # main log. - $$(call ExecuteWithLog, $$($1_RES_DEP).obj, \ + $$(call ExecuteWithLog, $$($1_RES_DEPS_FILE).obj, \ $$($1_CC) $$(filter-out -l%, $$($1_RC_FLAGS)) \ $$($1_SYSROOT_CFLAGS) -showIncludes -nologo -TC \ - $(CC_OUT_OPTION)$$($1_RES_DEP).obj -P -Fi$$($1_RES_DEP).pp \ + $(CC_OUT_OPTION)$$($1_RES_DEPS_FILE).obj -P -Fi$$($1_RES_DEPS_FILE).pp \ $$($1_VERSIONINFO_RESOURCE)) 2>&1 \ | $(TR) -d '\r' | $(GREP) -v -e "^Note: including file:" \ -e "^$$(notdir $$($1_VERSIONINFO_RESOURCE))$$$$" || test "$$$$?" = "1" ; \ - $(ECHO) $$($1_RES): \\ > $$($1_RES_DEP) ; \ - $(SED) $(WINDOWS_SHOWINCLUDE_SED_PATTERN) $$($1_RES_DEP).obj.log >> $$($1_RES_DEP) ; \ - $(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_RES_DEP) > $$($1_RES_DEP_TARGETS) + $(ECHO) $$($1_RES): \\ > $$($1_RES_DEPS_FILE) ; \ + $(SED) $(WINDOWS_SHOWINCLUDE_SED_PATTERN) $$($1_RES_DEPS_FILE).obj.log \ + >> $$($1_RES_DEPS_FILE) ; \ + $(SED) $(DEPENDENCY_TARGET_SED_PATTERN) $$($1_RES_DEPS_FILE) \ + > $$($1_RES_DEPS_TARGETS_FILE) endif endif @@ -830,9 +868,6 @@ $1_EXTRA_LDFLAGS += $(call SET_SHARED_LIBRARY_MAPFILE,$$($1_REAL_MAPFILE)) endif - # Need to make sure TARGET is first on list - $1 := $$($1_TARGET) - ifneq ($$($1_COPY_DEBUG_SYMBOLS), false) $1_COPY_DEBUG_SYMBOLS := $(COPY_DEBUG_SYMBOLS) endif
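Stripped of the build-system variable names, the dependency-file aggregation above follows this idiom (a generic sketch; recipe lines must be tab-indented in a real makefile):

    DEPS_FILE := $(OBJECT_DIR)/mylib.d

    # After all objects are built, concatenate their .d files into one, and keep
    # a .old copy for the next make invocation to include.
    $(DEPS_FILE): $(ALL_OBJS)
        $(RM) $@
        $(CD) $(OBJECT_DIR) && $(CAT) *.d > $@.tmp
        $(CD) $(OBJECT_DIR) && $(CAT) *.d.targets | $(SORT) -u >> $@.tmp
        $(CP) $@.tmp $@.old
        $(MV) $@.tmp $@

    # Include the state from the previous run. The .old file has no rule of its
    # own, so make will not try to rebuild it before including it.
    ifneq ($(wildcard $(DEPS_FILE).old), )
      -include $(DEPS_FILE).old
    endif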
--- a/make/common/RMICompilation.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/common/RMICompilation.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -35,6 +35,7 @@ # RUN_V11:=Set to run rmic with -v1.1 # RUN_V12:=Set to run rmic with -v1.2 # KEEP_GENERATED:=Set to keep generated sources around +# STUB_SOURCES_DIR:=Directory to put generated sources in SetupRMICompilation = $(NamedParamsMacroTemplate) define SetupRMICompilationBody @@ -58,9 +59,16 @@ $1_TIE_FILES := $$(addprefix $$($1_STUB_CLASSES_DIR)/org/omg/stub/,$$(addsuffix _Tie.class,$$($1_TIE_BASE_FILES))) $1_TIE_STDPKG_FILES := $$(addprefix $$($1_STUB_CLASSES_DIR)/,$$(addsuffix _Tie.class,$$($1_TIE_BASE_FILES))) - ifneq (,$$($1_KEEP_GENERATED)) + ifneq ($$($1_KEEP_GENERATED), ) $1_ARGS += -keepgenerated - $1_TARGETS += $$(subst .class,.java,$$($1_TARGETS)) + $1_JAVA_TARGETS := $$(subst .class,.java,$$($1_TARGETS)) + ifneq ($$($1_STUB_SOURCES_DIR), ) + # This is where the java files are created by rmic + $1_JAVA_TARGETS_REL := $$(subst $$($1_STUB_CLASSES_DIR),, $$($1_JAVA_TARGETS)) + # This is where the caller wants the java files + $1_JAVA_TARGETS := $$(addprefix $$($1_STUB_SOURCES_DIR), $$($1_JAVA_TARGETS_REL)) + endif + $1_TARGETS += $$($1_JAVA_TARGETS) endif $1_DOLLAR_SAFE_CLASSES := $$(subst $$$$,\$$$$,$$($1_CLASSES)) @@ -72,11 +80,12 @@ $$(call MakeDir, $$($1_STUB_CLASSES_DIR)) $(RMIC) $$($1_ARGS) -classpath "$$($1_CLASSES_DIR)" \ -d $$($1_STUB_CLASSES_DIR) $$($1_DOLLAR_SAFE_CLASSES); \ - if [ "x$$($1_ARGS2)" != "x" ]; then \ - $(ECHO) $(LOG_INFO) Running rmic $$($1_ARGS2) for $$($1_DOLLAR_SAFE_CLASSES) && \ - $(RMIC) $$($1_ARGS2) -classpath "$$($1_CLASSES_DIR)" \ - -d $$($1_STUB_CLASSES_DIR) $$($1_DOLLAR_SAFE_CLASSES); \ - fi; \ + $$(if $$($1_STUB_SOURCES_DIR), \ + $$(foreach f, $$($1_JAVA_TARGETS_REL), \ + $(MKDIR) -p $$(dir $$($1_STUB_SOURCES_DIR)/$$f) ; \ + $(MV) $$($1_STUB_CLASSES_DIR)/$$f $$($1_STUB_SOURCES_DIR)/$$f ; \ + ) \ + ) \ $(TOUCH) $$@
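A hedged sketch of how the new STUB_SOURCES_DIR parameter combines with KEEP_GENERATED; the class name and directories are placeholders rather than the real callers:

    $(eval $(call SetupRMICompilation, RMI_EXAMPLE, \
        CLASSES := com.example.rmi.ExampleImpl, \
        CLASSES_DIR := $(EXAMPLE_CLASSES_DIR), \
        STUB_CLASSES_DIR := $(EXAMPLE_STUB_CLASSES_DIR), \
        RUN_V12 := true, \
        KEEP_GENERATED := true, \
        STUB_SOURCES_DIR := $(EXAMPLE_GENSRC_DIR), \
    ))
    # With both options set, rmic's -keepgenerated .java files are moved out of
    # STUB_CLASSES_DIR into STUB_SOURCES_DIR, keeping generated sources apart
    # from the compiled stub classes.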
--- a/make/common/TestFilesCompilation.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/common/TestFilesCompilation.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -77,8 +77,7 @@ # Locate all files with the matching prefix $1_FILE_LIST := \ - $$(shell $$(FIND) $$($1_SOURCE_DIRS) -type f \( -name "$$($1_PREFIX)*.c" \ - -o -name "$$($1_PREFIX)*.cpp" \)) + $$(call FindFiles, $$($1_SOURCE_DIRS), $$($1_PREFIX)*.c $$($1_PREFIX)*.cpp) $1_EXCLUDE_PATTERN := $$(addprefix %/, $$($1_EXCLUDE)) $1_FILTERED_FILE_LIST := $$(filter-out $$($1_EXCLUDE_PATTERN), $$($1_FILE_LIST))
--- a/make/common/TextFileProcessing.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/common/TextFileProcessing.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -103,7 +103,7 @@ $$(error SOURCE_DIRS contains directory $$(src) outside \ SOURCE_BASE_DIR $$($1_SOURCE_BASE_DIR) (in $1)))) endif - $1_SOURCE_FILES := $$(sort $$(call CacheFind,$$($1_SOURCE_DIRS))) + $1_SOURCE_FILES := $$(sort $$(call FindFiles,$$($1_SOURCE_DIRS))) $1_EXCLUDE_FILES:=$$(foreach i,$$($1_SOURCE_DIRS),$$(addprefix $$i/,$$($1_EXCLUDE_FILES))) $1_INCLUDE_FILES:=$$(foreach i,$$($1_SOURCE_DIRS),$$(addprefix $$i/,$$($1_INCLUDE_FILES))) $1_SOURCE_FILES := $$(filter-out $$($1_EXCLUDE_FILES),$$($1_SOURCE_FILES))
--- a/make/common/Utils.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/common/Utils.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -70,8 +70,8 @@ ################################################################################ # Replace question marks with space in string. This macro needs to be called on -# files from CacheFind in case any of them contains space in their file name, -# since CacheFind replaces space with ?. +# files from FindFiles in case any of them contains space in their file name, +# since FindFiles replaces space with ?. # Param 1 - String to replace in DecodeSpace = \ $(subst ?,$(SPACE),$(strip $1))
--- a/make/common/ZipArchive.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/common/ZipArchive.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -48,6 +48,8 @@ # src dir # SUFFIXES # EXTRA_DEPS +# FOLLOW_SYMLINKS - Set to explicitly follow symlinks. Affects performance of +# finding files. # ZIP_OPTIONS extra options to pass to zip SetupZipArchive = $(NamedParamsMacroTemplate) define SetupZipArchiveBody @@ -62,9 +64,14 @@ $1_FIND_LIST := $$($1_SRC) endif - # Find all files in the source tree. Follow symlinks in this find since that is - # what zip does. - $1_ALL_SRCS := $$(call not-containing,_the.,$$(call CacheFind,$$($1_FIND_LIST), , -L)) + # Find all files in the source tree. + # If asked to, follow symlinks in this find since that is what zip does. To do + # this, we need to call ShellFindFiles directly. + ifeq ($$($1_FOLLOW_SYMLINKS), true) + $1_ALL_SRCS := $$(call not-containing,_the.,$$(call ShellFindFiles,$$($1_FIND_LIST), , -L)) + else + $1_ALL_SRCS := $$(call not-containing,_the.,$$(call FindFiles,$$($1_FIND_LIST))) + endif # Filter on suffixes if set ifneq ($$($1_SUFFIXES),)
--- a/make/copy/CopyCommon.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/copy/CopyCommon.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -40,7 +40,7 @@ $(eval $(call SetupCopyFiles, COPY_EXPORTED_INCLUDE, \ SRC := $(INCLUDE_SOURCE_DIR), \ DEST := $(INCLUDE_TARGET_DIR), \ - FILES := $(filter %.h, $(call CacheFind, $(INCLUDE_SOURCE_DIR))), \ + FILES := $(filter %.h, $(call FindFiles, $(INCLUDE_SOURCE_DIR))), \ )) TARGETS += $(COPY_EXPORTED_INCLUDE) @@ -56,7 +56,7 @@ $(eval $(call SetupCopyFiles, COPY_EXPORTED_INCLUDE_OS, \ SRC := $(INCLUDE_SOURCE_OS_DIR), \ DEST := $(INCLUDE_TARGET_DIR)/$(OPENJDK_TARGET_OS_INCLUDE_SUBDIR), \ - FILES := $(filter %.h, $(call CacheFind, $(INCLUDE_SOURCE_OS_DIR))), \ + FILES := $(filter %.h, $(call FindFiles, $(INCLUDE_SOURCE_OS_DIR))), \ )) TARGETS += $(COPY_EXPORTED_INCLUDE_OS)
--- a/make/data/cldr/common/main/ja.xml Thu Mar 28 22:08:15 2019 +0100 +++ b/make/data/cldr/common/main/ja.xml Thu Apr 04 22:07:49 2019 +0200 @@ -3633,7 +3633,7 @@ <era type="233">大正</era> <era type="234">昭和</era> <era type="235">平成</era> - <era type="236">元号</era> <!-- NewEra --> + <era type="236">令和</era> </eraAbbr> <eraNarrow> <era type="0">大化</era> @@ -3872,7 +3872,7 @@ <era type="233">T</era> <era type="234">S</era> <era type="235">H</era> - <era type="236">N</era> <!-- NewEra --> + <era type="236">R</era> </eraNarrow> </eras> <dateFormats>
--- a/make/data/cldr/common/main/root.xml Thu Mar 28 22:08:15 2019 +0100 +++ b/make/data/cldr/common/main/root.xml Thu Apr 04 22:07:49 2019 +0200 @@ -2030,7 +2030,7 @@ <era type="233">Taishō</era> <era type="234">Shōwa</era> <era type="235">Heisei</era> - <era type="236">NewEra</era> <!-- NewEra --> + <era type="236">Reiwa</era> </eraAbbr> <eraNarrow> <era type="0">Taika (645–650)</era> @@ -2269,7 +2269,7 @@ <era type="233">T</era> <era type="234">S</era> <era type="235">H</era> - <era type="236">N</era> <!-- NewEra --> + <era type="236">R</era> </eraNarrow> </eras> <dateFormats>
--- a/make/data/unicodedata/UnicodeData.txt Thu Mar 28 22:08:15 2019 +0100 +++ b/make/data/unicodedata/UnicodeData.txt Thu Apr 04 22:07:49 2019 +0200 @@ -11836,7 +11836,7 @@ 32FC;CIRCLED KATAKANA WI;So;0;L;<circle> 30F0;;;;N;;;;; 32FD;CIRCLED KATAKANA WE;So;0;L;<circle> 30F1;;;;N;;;;; 32FE;CIRCLED KATAKANA WO;So;0;L;<circle> 30F2;;;;N;;;;; -32FF;SQUARE ERA NAME NEWERA;So;0;L;<square> 5143 53F7;;;;N;SQUARED TWO IDEOGRAPHS ERA NAME NEWERA;;;; +32FF;SQUARE ERA NAME REIWA;So;0;L;<square> 4EE4 548C;;;;N;SQUARED TWO IDEOGRAPHS ERA NAME REIWA;;;; 3300;SQUARE APAATO;So;0;L;<square> 30A2 30D1 30FC 30C8;;;;N;SQUARED APAATO;;;; 3301;SQUARE ARUHUA;So;0;L;<square> 30A2 30EB 30D5 30A1;;;;N;SQUARED ARUHUA;;;; 3302;SQUARE ANPEA;So;0;L;<square> 30A2 30F3 30DA 30A2;;;;N;SQUARED ANPEA;;;;
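These era-data hunks (ja.xml, root.xml, UnicodeData.txt) replace the NewEra placeholders with Reiwa: CLDR gets 令和 with the narrow form R, and U+32FF becomes SQUARE ERA NAME REIWA, decomposing to <square> 4EE4 548C. A quick standalone check of those code points and their UTF-8 encodings (plain C++, not part of the JDK build itself):

// Print the UTF-8 encodings of the code points named in the UnicodeData.txt
// hunk above: U+32FF and its <square> decomposition U+4EE4 U+548C (令和).
#include <cstdint>
#include <cstdio>
#include <string>

static std::string to_utf8(uint32_t cp) {
    std::string out;
    if (cp < 0x80) {
        out += static_cast<char>(cp);
    } else if (cp < 0x800) {
        out += static_cast<char>(0xC0 | (cp >> 6));
        out += static_cast<char>(0x80 | (cp & 0x3F));
    } else if (cp < 0x10000) {
        out += static_cast<char>(0xE0 | (cp >> 12));
        out += static_cast<char>(0x80 | ((cp >> 6) & 0x3F));
        out += static_cast<char>(0x80 | (cp & 0x3F));
    } else {
        out += static_cast<char>(0xF0 | (cp >> 18));
        out += static_cast<char>(0x80 | ((cp >> 12) & 0x3F));
        out += static_cast<char>(0x80 | ((cp >> 6) & 0x3F));
        out += static_cast<char>(0x80 | (cp & 0x3F));
    }
    return out;
}

int main() {
    const uint32_t cps[] = { 0x32FF, 0x4EE4, 0x548C };  // square ligature, 令, 和
    for (uint32_t cp : cps) {
        std::string s = to_utf8(cp);
        std::printf("U+%04X ->", cp);
        for (unsigned char b : s) std::printf(" %02X", b);
        std::printf("  (%s)\n", s.c_str());
    }
    return 0;
}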
--- a/make/gensrc/Gensrc-jdk.internal.vm.compiler.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/gensrc/Gensrc-jdk.internal.vm.compiler.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -65,7 +65,7 @@ PROC_SRC_DIRS := $(patsubst %, $(SRC_DIR)/%/src, $(PROC_SRC_SUBDIRS)) -PROC_SRCS := $(filter %.java, $(call CacheFind, $(PROC_SRC_DIRS))) +PROC_SRCS := $(filter %.java, $(call FindFiles, $(PROC_SRC_DIRS))) ALL_SRC_DIRS := $(SRC_DIR) $(wildcard $(SRC_DIR)/*/src) SOURCEPATH := $(call PathList, $(ALL_SRC_DIRS))
--- a/make/gensrc/Gensrc-jdk.internal.vm.compiler.management.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/gensrc/Gensrc-jdk.internal.vm.compiler.management.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -39,7 +39,7 @@ PROC_SRC_DIRS := $(patsubst %, $(SRC_DIR)/%/src, $(PROC_SRC_SUBDIRS)) -PROC_SRCS := $(filter %.java, $(call CacheFind, $(PROC_SRC_DIRS))) +PROC_SRCS := $(filter %.java, $(call FindFiles, $(PROC_SRC_DIRS))) ALL_SRC_DIRS := $(SRC_DIR) $(wildcard $(SRC_DIR)/*/src) SOURCEPATH := $(call PathList, $(ALL_SRC_DIRS))
--- a/make/gensrc/GensrcCommonLangtools.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/gensrc/GensrcCommonLangtools.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -64,7 +64,7 @@ define SetupCompileProperties # Lookup the properties that need to be compiled into resource bundles. PROPSOURCES := $2 \ - $$(shell $(FIND) $(TOPDIR)/src/$(MODULE)/share/classes -name "*.properties") + $$(call FindFiles, $(TOPDIR)/src/$(MODULE)/share/classes, *.properties) # Filter out any excluded translations PROPSOURCES := $$(call FilterExcludedTranslations, $$(PROPSOURCES), .properties)
--- a/make/gensrc/GensrcLocaleData.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/gensrc/GensrcLocaleData.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -28,16 +28,16 @@ # into LocaleDataMetaInfo.java # First go look for all locale files -LOCALE_FILES := $(shell $(FIND) \ +LOCALE_FILES := $(call FindFiles, \ $(TOPDIR)/src/$(MODULE)/share/classes/sun/text/resources \ - $(TOPDIR)/src/$(MODULE)/share/classes/sun/util/resources \ - -name "FormatData_*.java" -o -name "FormatData_*.properties" -o \ - -name "CollationData_*.java" -o -name "CollationData_*.properties" -o \ - -name "TimeZoneNames_*.java" -o -name "TimeZoneNames_*.properties" -o \ - -name "LocaleNames_*.java" -o -name "LocaleNames_*.properties" -o \ - -name "CurrencyNames_*.java" -o -name "CurrencyNames_*.properties" -o \ - -name "CalendarData_*.java" -o -name "CalendarData_*.properties" -o \ - -name "BreakIteratorInfo_*.java" -o -name "BreakIteratorRules_*.java") + $(TOPDIR)/src/$(MODULE)/share/classes/sun/util/resources, \ + FormatData_*.java FormatData_*.properties \ + CollationData_*.java CollationData_*.properties \ + TimeZoneNames_*.java TimeZoneNames_*.properties \ + LocaleNames_*.java LocaleNames_*.properties \ + CurrencyNames_*.java CurrencyNames_*.properties \ + CalendarData_*.java CalendarData_*.properties \ + BreakIteratorInfo_*.java BreakIteratorRules_*.java) # Then translate the locale files into for example: FormatData_sv LOCALE_RESOURCES := $(sort $(subst .properties,,$(subst .java,,$(notdir $(LOCALE_FILES)))))
--- a/make/gensrc/GensrcProperties.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/gensrc/GensrcProperties.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -68,7 +68,7 @@ endif # Locate all properties files in the given source dirs. - $1_SRC_FILES := $$(filter %.properties, $$(call CacheFind, $$($1_SRC_DIRS))) + $1_SRC_FILES := $$(call FindFiles, $$($1_SRC_DIRS), *.properties) ifneq ($$($1_EXCLUDE), ) $1_SRC_FILES := $$(filter-out $$($1_EXCLUDE), $$($1_SRC_FILES))
--- a/make/hotspot/lib/CompileJvm.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/hotspot/lib/CompileJvm.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -167,7 +167,7 @@ JVM_STRIPFLAGS ?= $(STRIPFLAGS) # This source set is reused so save in cache. -$(eval $(call FillCacheFind, $(JVM_SRC_DIRS))) +$(call FillFindCache, $(JVM_SRC_DIRS)) ################################################################################ # Now set up the actual compilation of the main hotspot native library
--- a/make/hotspot/lib/JvmFeatures.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/hotspot/lib/JvmFeatures.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -172,8 +172,6 @@ ifneq ($(call check-jvm-feature, shenandoahgc), true) JVM_CFLAGS_FEATURES += -DINCLUDE_SHENANDOAHGC=0 JVM_EXCLUDE_PATTERNS += gc/shenandoah -else - JVM_CFLAGS_FEATURES += -DSUPPORT_BARRIER_ON_PRIMITIVES -DSUPPORT_NOT_TO_SPACE_INVARIANT endif ifneq ($(call check-jvm-feature, jfr), true)
--- a/make/hotspot/lib/JvmOverrideFiles.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/hotspot/lib/JvmOverrideFiles.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -70,7 +70,7 @@ # significantly reduce the GC pause time on 32 bit Linux/Unix platforms by # compiling without the PIC flag (-fPIC on linux). # See 6454213 for more details. - ALL_SRC := $(filter %.cpp, $(call CacheFind, $(TOPDIR)/src/hotspot/share)) + ALL_SRC := $(call FindFiles, $(TOPDIR)/src/hotspot/share, *.cpp) NONPIC_FILTER := $(addsuffix %, $(addprefix $(TOPDIR)/src/hotspot/share/, \ memory oops gc)) # Due to what looks like a bug in the old build implementation of this, add a
--- a/make/lib/Lib-java.base.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/lib/Lib-java.base.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -29,7 +29,7 @@ $(eval $(call IncludeCustomExtension, lib/Lib-java.base.gmk)) # Prepare the find cache. -$(eval $(call FillCacheFind, $(wildcard $(TOPDIR)/src/java.base/*/native))) +$(call FillFindCache, $(wildcard $(TOPDIR)/src/java.base/*/native)) ################################################################################ # Create all the core libraries
--- a/make/lib/Lib-java.desktop.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/lib/Lib-java.desktop.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -1,5 +1,5 @@ # -# Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved. # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. # # This code is free software; you can redistribute it and/or modify it @@ -29,7 +29,7 @@ $(eval $(call IncludeCustomExtension, lib/Lib-java.desktop.gmk)) # Prepare the find cache. -$(eval $(call FillCacheFind, $(wildcard $(TOPDIR)/src/java.desktop/*/native))) +$(call FillFindCache, $(wildcard $(TOPDIR)/src/java.desktop/*/native)) ################################################################################ # Create the AWT/2D libraries
--- a/make/rmic/Rmic-java.management.rmi.gmk Thu Mar 28 22:08:15 2019 +0100 +++ b/make/rmic/Rmic-java.management.rmi.gmk Thu Apr 04 22:07:49 2019 +0200 @@ -40,23 +40,14 @@ $(eval $(call SetupRMICompilation,RMI_GEN, \ CLASSES := $(JMX_RMI_CLASSES), \ CLASSES_DIR := $(CLASSES_DIR)/java.management.rmi, \ - STUB_CLASSES_DIR := $(RMIC_GENSRC_DIR)/java.management.rmi, \ + STUB_CLASSES_DIR := $(STUB_CLASSES_DIR)/java.management.rmi, \ RUN_V12 := true, \ KEEP_GENERATED := true, \ + STUB_SOURCES_DIR := $(RMIC_GENSRC_DIR)/java.management.rmi, \ )) -# Find all classes generated and move them from the gensrc dir to the stub classes dir -$(RMIC_GENSRC_DIR)/_classes.moved: $(RMI_GEN) - $(eval classfiles := $(shell $(FIND) $(RMIC_GENSRC_DIR) -name "*.class")) - $(foreach src, $(classfiles), \ - $(eval target := $(patsubst $(RMIC_GENSRC_DIR)/%, \ - $(STUB_CLASSES_DIR)/%, $(src))) \ - $(call MakeDir, $(dir $(target))) \ - $(MV) $(src) $(target) $(NEWLINE)) - $(TOUCH) $@ - ########################################################################################## -all: $(RMIC_GENSRC_DIR)/_classes.moved $(RMI_GEN) +all: $(RMI_GEN) .PHONY: all
--- a/src/hotspot/cpu/aarch64/aarch64.ad Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/cpu/aarch64/aarch64.ad Thu Apr 04 22:07:49 2019 +0200 @@ -3445,7 +3445,7 @@ // markOop of object (disp_hdr) with the stack pointer. __ mov(rscratch1, sp); __ sub(disp_hdr, disp_hdr, rscratch1); - __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place)); + __ mov(tmp, (address) (~(os::vm_page_size()-1) | (uintptr_t)markOopDesc::lock_mask_in_place)); // If condition is true we are cont and hence we can store 0 as the // displaced header in the box, which indicates that it is a recursive lock. __ ands(tmp/*==0?*/, disp_hdr, tmp); // Sets flags for result
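The one-liner in aarch64.ad adds an explicit (uintptr_t) cast so the bitwise OR with markOopDesc::lock_mask_in_place is evaluated in pointer-width arithmetic rather than int before the result is cast to an address. A standalone sketch of the promotions involved (the constants below are illustrative stand-ins, not the real HotSpot definitions):

// Sketch of the integer promotions behind the (uintptr_t) cast above.
#include <cstdint>
#include <cstdio>

enum { lock_mask_in_place = 3 };   // stand-in for markOopDesc::lock_mask_in_place

int main() {
    int page_size = 4096;              // os::vm_page_size() returns an int
    int low = ~(page_size - 1);        // 0xfffff000 as a negative 32-bit int

    // Without a cast both operands are int, so the OR happens in 32 bits and
    // the result is only widened (sign-extended) afterwards.
    intptr_t narrow = low | lock_mask_in_place;

    // With the cast the int operand is converted to uintptr_t first, so the
    // whole expression is evaluated in pointer-width arithmetic.
    uintptr_t wide = low | (uintptr_t)lock_mask_in_place;

    std::printf("narrow = 0x%016jx\n", (uintmax_t)(uintptr_t)narrow);
    std::printf("wide   = 0x%016jx\n", (uintmax_t)wide);
    return 0;
}

On LP64 both forms happen to produce the same bit pattern here, because the negative int sign-extends; the explicit cast mainly makes the intended pointer-width arithmetic unambiguous.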
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -40,13 +40,24 @@ #define __ masm-> -address ShenandoahBarrierSetAssembler::_shenandoah_wb = NULL; +address ShenandoahBarrierSetAssembler::_shenandoah_lrb = NULL; void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop, Register addr, Register count, RegSet saved_regs) { if (is_oop) { bool dest_uninitialized = (decorators & IS_DEST_UNINITIALIZED) != 0; - if (!dest_uninitialized && !ShenandoahHeap::heap()->heuristics()->can_do_traversal_gc()) { + if (ShenandoahSATBBarrier && !dest_uninitialized && !ShenandoahHeap::heap()->heuristics()->can_do_traversal_gc()) { + + Label done; + + // Avoid calling runtime if count == 0 + __ cbz(count, done); + + // Is marking active? + Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset())); + __ ldrb(rscratch1, gc_state); + __ tbz(rscratch1, ShenandoahHeap::MARKING_BITPOS, done); + __ push(saved_regs, sp); if (count == c_rarg0) { if (addr == c_rarg1) { @@ -68,6 +79,7 @@ __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_pre_oop_entry), 2); } __ pop(saved_regs, sp); + __ bind(done); } } } @@ -75,6 +87,16 @@ void ShenandoahBarrierSetAssembler::arraycopy_epilogue(MacroAssembler* masm, DecoratorSet decorators, bool is_oop, Register start, Register count, Register scratch, RegSet saved_regs) { if (is_oop) { + Label done; + + // Avoid calling runtime if count == 0 + __ cbz(count, done); + + // Is updating references? + Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset())); + __ ldrb(rscratch1, gc_state); + __ tbz(rscratch1, ShenandoahHeap::UPDATEREFS_BITPOS, done); + __ push(saved_regs, sp); assert_different_registers(start, count, scratch); assert_different_registers(c_rarg0, count); @@ -82,6 +104,8 @@ __ mov(c_rarg1, count); __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_ref_array_post_entry), 2); __ pop(saved_regs, sp); + + __ bind(done); } } @@ -186,60 +210,31 @@ __ bind(done); } -void ShenandoahBarrierSetAssembler::read_barrier(MacroAssembler* masm, Register dst) { - if (ShenandoahReadBarrier) { - read_barrier_impl(masm, dst); - } -} - -void ShenandoahBarrierSetAssembler::read_barrier_impl(MacroAssembler* masm, Register dst) { - assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier || ShenandoahCASBarrier), "should be enabled"); +void ShenandoahBarrierSetAssembler::resolve_forward_pointer(MacroAssembler* masm, Register dst) { + assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled"); Label is_null; __ cbz(dst, is_null); - read_barrier_not_null_impl(masm, dst); + resolve_forward_pointer_not_null(masm, dst); __ bind(is_null); } -void ShenandoahBarrierSetAssembler::read_barrier_not_null(MacroAssembler* masm, Register dst) { - if (ShenandoahReadBarrier) { - read_barrier_not_null_impl(masm, dst); - } -} - - -void ShenandoahBarrierSetAssembler::read_barrier_not_null_impl(MacroAssembler* masm, Register dst) { - assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier || ShenandoahCASBarrier), "should be enabled"); +// IMPORTANT: This must preserve all registers, even rscratch1 and rscratch2. 
+void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst) { + assert(ShenandoahLoadRefBarrier || ShenandoahCASBarrier, "Should be enabled"); __ ldr(dst, Address(dst, ShenandoahBrooksPointer::byte_offset())); } -void ShenandoahBarrierSetAssembler::write_barrier(MacroAssembler* masm, Register dst) { - if (ShenandoahWriteBarrier) { - write_barrier_impl(masm, dst); - } -} - -void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Register dst) { - assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier), "Should be enabled"); - assert(dst != rscratch1, "need rscratch1"); +void ShenandoahBarrierSetAssembler::load_reference_barrier_not_null(MacroAssembler* masm, Register dst, Register tmp) { + assert(ShenandoahLoadRefBarrier, "Should be enabled"); assert(dst != rscratch2, "need rscratch2"); Label done; - + __ enter(); Address gc_state(rthread, in_bytes(ShenandoahThreadLocalData::gc_state_offset())); - __ ldrb(rscratch1, gc_state); + __ ldrb(rscratch2, gc_state); // Check for heap stability - __ mov(rscratch2, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL); - __ tst(rscratch1, rscratch2); - __ br(Assembler::EQ, done); - - // Heap is unstable, need to perform the read-barrier even if WB is inactive - __ ldr(dst, Address(dst, ShenandoahBrooksPointer::byte_offset())); - - // Check for evacuation-in-progress and jump to WB slow-path if needed - __ mov(rscratch2, ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL); - __ tst(rscratch1, rscratch2); - __ br(Assembler::EQ, done); + __ tbz(rscratch2, ShenandoahHeap::HAS_FORWARDED_BITPOS, done); RegSet to_save = RegSet::of(r0); if (dst != r0) { @@ -247,7 +242,7 @@ __ mov(r0, dst); } - __ far_call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahBarrierSetAssembler::shenandoah_wb()))); + __ far_call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahBarrierSetAssembler::shenandoah_lrb()))); if (dst != r0) { __ mov(dst, r0); @@ -255,14 +250,11 @@ } __ bind(done); + __ leave(); } void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) { if (ShenandoahStoreValEnqueueBarrier) { - Label is_null; - __ cbz(dst, is_null); - write_barrier_impl(masm, dst); - __ bind(is_null); // Save possibly live regs. 
RegSet live_regs = RegSet::range(r0, r4) - dst; __ push(live_regs, sp); @@ -274,44 +266,45 @@ __ ldrd(v0, __ post(sp, 2 * wordSize)); __ pop(live_regs, sp); } - if (ShenandoahStoreValReadBarrier) { - read_barrier_impl(masm, dst); +} + +void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst, Register tmp) { + if (ShenandoahLoadRefBarrier) { + Label is_null; + __ cbz(dst, is_null); + load_reference_barrier_not_null(masm, dst, tmp); + __ bind(is_null); } } void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Register dst, Address src, Register tmp1, Register tmp_thread) { bool on_oop = type == T_OBJECT || type == T_ARRAY; - bool in_heap = (decorators & IN_HEAP) != 0; bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0; bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0; bool on_reference = on_weak || on_phantom; - if (in_heap) { - read_barrier_not_null(masm, src.base()); - } + BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread); + if (on_oop) { + load_reference_barrier(masm, dst, tmp1); - BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread); - if (ShenandoahKeepAliveBarrier && on_oop && on_reference) { - __ enter(); - satb_write_barrier_pre(masm /* masm */, - noreg /* obj */, - dst /* pre_val */, - rthread /* thread */, - tmp1 /* tmp */, - true /* tosca_live */, - true /* expand_call */); - __ leave(); + if (ShenandoahKeepAliveBarrier && on_reference) { + __ enter(); + satb_write_barrier_pre(masm /* masm */, + noreg /* obj */, + dst /* pre_val */, + rthread /* thread */, + tmp1 /* tmp */, + true /* tosca_live */, + true /* expand_call */); + __ leave(); + } } } void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Address dst, Register val, Register tmp1, Register tmp2) { bool on_oop = type == T_OBJECT || type == T_ARRAY; - bool in_heap = (decorators & IN_HEAP) != 0; - if (in_heap) { - write_barrier(masm, dst.base()); - } if (!on_oop) { BarrierSetAssembler::store_at(masm, decorators, type, dst, val, tmp1, tmp2); return; @@ -349,21 +342,6 @@ } -void ShenandoahBarrierSetAssembler::obj_equals(MacroAssembler* masm, Register op1, Register op2) { - __ cmp(op1, op2); - if (ShenandoahAcmpBarrier) { - Label done; - __ br(Assembler::EQ, done); - // The object may have been evacuated, but we won't see it without a - // membar here. 
- __ membar(Assembler::LoadStore| Assembler::LoadLoad); - read_barrier(masm, op1); - read_barrier(masm, op2); - __ cmp(op1, op2); - __ bind(done); - } -} - void ShenandoahBarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register obj, Register var_size_in_bytes, int con_size_in_bytes, @@ -398,27 +376,6 @@ } } -void ShenandoahBarrierSetAssembler::resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj) { - bool oop_not_null = (decorators & IS_NOT_NULL) != 0; - bool is_write = (decorators & ACCESS_WRITE) != 0; - if (is_write) { - if (oop_not_null) { - write_barrier(masm, obj); - } else { - Label done; - __ cbz(obj, done); - write_barrier(masm, obj); - __ bind(done); - } - } else { - if (oop_not_null) { - read_barrier_not_null(masm, obj); - } else { - read_barrier(masm, obj); - } - } -} - void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm, Register addr, Register expected, Register new_val, bool acquire, bool release, bool weak, bool is_cae, Register result) { @@ -457,8 +414,8 @@ __ decode_heap_oop(tmp1, tmp1); __ decode_heap_oop(tmp2, tmp2); } - read_barrier_impl(masm, tmp1); - read_barrier_impl(masm, tmp2); + resolve_forward_pointer(masm, tmp1); + resolve_forward_pointer(masm, tmp2); __ cmp(tmp1, tmp2); // Retry with expected now being the value we just loaded from addr. __ br(Assembler::EQ, retry); @@ -503,7 +460,7 @@ __ b(*stub->continuation()); } -void ShenandoahBarrierSetAssembler::gen_write_barrier_stub(LIR_Assembler* ce, ShenandoahWriteBarrierStub* stub) { +void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) { Register obj = stub->obj()->as_register(); Register res = stub->result()->as_register(); @@ -520,7 +477,7 @@ __ cbz(res, done); } - write_barrier(ce->masm(), res); + load_reference_barrier_not_null(ce->masm(), res, rscratch1); __ bind(done); __ b(*stub->continuation()); @@ -580,14 +537,14 @@ #endif // COMPILER1 -address ShenandoahBarrierSetAssembler::shenandoah_wb() { - assert(_shenandoah_wb != NULL, "need write barrier stub"); - return _shenandoah_wb; +address ShenandoahBarrierSetAssembler::shenandoah_lrb() { + assert(_shenandoah_lrb != NULL, "need load reference barrier stub"); + return _shenandoah_lrb; } #define __ cgen->assembler()-> -// Shenandoah write barrier. +// Shenandoah load reference barrier. // // Input: // r0: OOP to evacuate. Not null. @@ -596,13 +553,13 @@ // r0: Pointer to evacuated OOP. // // Trash rscratch1, rscratch2. Preserve everything else. 
-address ShenandoahBarrierSetAssembler::generate_shenandoah_wb(StubCodeGenerator* cgen) { +address ShenandoahBarrierSetAssembler::generate_shenandoah_lrb(StubCodeGenerator* cgen) { __ align(6); - StubCodeMark mark(cgen, "StubRoutines", "shenandoah_wb"); + StubCodeMark mark(cgen, "StubRoutines", "shenandoah_lrb"); address start = __ pc(); - Label work; + Label work, done; __ mov(rscratch2, ShenandoahHeap::in_cset_fast_test_addr()); __ lsr(rscratch1, r0, ShenandoahHeapRegion::region_size_bytes_shift_jint()); __ ldrb(rscratch2, Address(rscratch2, rscratch1)); @@ -610,19 +567,23 @@ __ ret(lr); __ bind(work); - Register obj = r0; + __ mov(rscratch2, r0); + resolve_forward_pointer_not_null(cgen->assembler(), r0); + __ cmp(rscratch2, r0); + __ br(Assembler::NE, done); __ enter(); // required for proper stackwalking of RuntimeStub frame __ push_call_clobbered_registers(); - __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_JRT)); + __ mov(lr, CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_JRT)); __ blrt(lr, 1, 0, MacroAssembler::ret_type_integral); - __ mov(rscratch1, obj); + __ mov(rscratch1, r0); __ pop_call_clobbered_registers(); - __ mov(obj, rscratch1); + __ mov(r0, rscratch1); __ leave(); // required for proper stackwalking of RuntimeStub frame + __ bind(done); __ ret(lr); return start; @@ -631,12 +592,12 @@ #undef __ void ShenandoahBarrierSetAssembler::barrier_stubs_init() { - if (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier) { + if (ShenandoahLoadRefBarrier) { int stub_code_size = 2048; ResourceMark rm; BufferBlob* bb = BufferBlob::create("shenandoah_barrier_stubs", stub_code_size); CodeBuffer buf(bb); StubCodeGenerator cgen(&buf); - _shenandoah_wb = generate_shenandoah_wb(&cgen); + _shenandoah_lrb = generate_shenandoah_lrb(&cgen); } }
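Throughout this file the old read/write-barrier entry points are renamed: read_barrier* becomes resolve_forward_pointer*, write_barrier becomes load_reference_barrier, and the slow-path stub shenandoah_wb becomes shenandoah_lrb. The common primitive is following the Brooks forwarding pointer kept with each object (in HotSpot it sits one word before the object; the sketch below just uses a leading field). A conceptual, standalone illustration of that idea, not the HotSpot data layout or the generated AArch64 code:

// Conceptual sketch only: a Brooks-style forwarding word that either points
// back at the object itself or at its to-space copy, and a resolve step that
// follows it -- the idea behind resolve_forward_pointer_not_null.
#include <cstdio>

struct obj {
    obj* forwardee;   // Brooks pointer: self, or the to-space copy
    int  payload;
};

static obj* resolve_forward_pointer_not_null(obj* o) {
    return o->forwardee;          // corresponds to: ldr dst, [dst, brooks_offset]
}

int main() {
    obj to_space   = { nullptr, 42 };
    to_space.forwardee = &to_space;       // to-space copy points to itself
    obj from_space = { &to_space, 41 };   // stale copy forwards to the new one

    std::printf("via stale ref: %d\n", resolve_forward_pointer_not_null(&from_space)->payload);
    std::printf("via fresh ref: %d\n", resolve_forward_pointer_not_null(&to_space)->payload);
    return 0;
}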
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.hpp Thu Apr 04 22:07:49 2019 +0200 @@ -29,7 +29,7 @@ #ifdef COMPILER1 class LIR_Assembler; class ShenandoahPreBarrierStub; -class ShenandoahWriteBarrierStub; +class ShenandoahLoadReferenceBarrierStub; class StubAssembler; class StubCodeGenerator; #endif @@ -37,7 +37,7 @@ class ShenandoahBarrierSetAssembler: public BarrierSetAssembler { private: - static address _shenandoah_wb; + static address _shenandoah_lrb; void satb_write_barrier_pre(MacroAssembler* masm, Register obj, @@ -54,24 +54,21 @@ bool tosca_live, bool expand_call); - void read_barrier(MacroAssembler* masm, Register dst); - void read_barrier_impl(MacroAssembler* masm, Register dst); - void read_barrier_not_null(MacroAssembler* masm, Register dst); - void read_barrier_not_null_impl(MacroAssembler* masm, Register dst); - void write_barrier(MacroAssembler* masm, Register dst); - void write_barrier_impl(MacroAssembler* masm, Register dst); - void asm_acmp_barrier(MacroAssembler* masm, Register op1, Register op2); + void resolve_forward_pointer(MacroAssembler* masm, Register dst); + void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst); + void load_reference_barrier(MacroAssembler* masm, Register dst, Register tmp); + void load_reference_barrier_not_null(MacroAssembler* masm, Register dst, Register tmp); - address generate_shenandoah_wb(StubCodeGenerator* cgen); + address generate_shenandoah_lrb(StubCodeGenerator* cgen); public: - static address shenandoah_wb(); + static address shenandoah_lrb(); void storeval_barrier(MacroAssembler* masm, Register dst, Register tmp); #ifdef COMPILER1 void gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub); - void gen_write_barrier_stub(LIR_Assembler* ce, ShenandoahWriteBarrierStub* stub); + void gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub); void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm); #endif @@ -83,8 +80,6 @@ Register dst, Address src, Register tmp1, Register tmp_thread); virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Address dst, Register val, Register tmp1, Register tmp2); - virtual void obj_equals(MacroAssembler* masm, Register src1, Register src2); - virtual void resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj); virtual void tlab_allocate(MacroAssembler* masm, Register obj, Register var_size_in_bytes, int con_size_in_bytes,
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetC1_aarch64.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetC1_aarch64.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -99,6 +99,7 @@ __ xchg(access.resolved_addr(), value_opr, result, tmp); if (access.is_oop()) { + result = load_reference_barrier(access.gen(), result, access.access_emit_info(), true); if (ShenandoahSATBBarrier) { pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), LIR_OprFact::illegalOpr, result /* pre_val */);
--- a/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoah_aarch64.ad Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/cpu/aarch64/gc/shenandoah/shenandoah_aarch64.ad Thu Apr 04 22:07:49 2019 +0200 @@ -45,18 +45,6 @@ %} %} -instruct shenandoahRB(iRegPNoSp dst, iRegP src, rFlagsReg cr) %{ - match(Set dst (ShenandoahReadBarrier src)); - format %{ "shenandoah_rb $dst,$src" %} - ins_encode %{ - Register s = $src$$Register; - Register d = $dst$$Register; - __ ldr(d, Address(s, ShenandoahBrooksPointer::byte_offset())); - %} - ins_pipe(pipe_class_memory); -%} - - instruct compareAndSwapP_shenandoah(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, iRegPNoSp tmp, rFlagsReg cr) %{ match(Set res (ShenandoahCompareAndSwapP mem (Binary oldval newval)));
--- a/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/cpu/ppc/gc/shared/barrierSetAssembler_ppc.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -27,6 +27,7 @@ #include "asm/macroAssembler.inline.hpp" #include "gc/shared/barrierSetAssembler.hpp" #include "interpreter/interp_masm.hpp" +#include "runtime/jniHandles.hpp" #define __ masm->
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -41,7 +41,7 @@ #define __ masm-> -address ShenandoahBarrierSetAssembler::_shenandoah_wb = NULL; +address ShenandoahBarrierSetAssembler::_shenandoah_lrb = NULL; void ShenandoahBarrierSetAssembler::arraycopy_prologue(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Register src, Register dst, Register count) { @@ -66,26 +66,22 @@ } #endif - if (!dest_uninitialized && !ShenandoahHeap::heap()->heuristics()->can_do_traversal_gc()) { + if (ShenandoahSATBBarrier && !dest_uninitialized && !ShenandoahHeap::heap()->heuristics()->can_do_traversal_gc()) { Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread); #ifndef _LP64 __ push(thread); __ get_thread(thread); #endif - Label filtered; - Address in_progress(thread, in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset())); - // Is marking active? - if (in_bytes(SATBMarkQueue::byte_width_of_active()) == 4) { - __ cmpl(in_progress, 0); - } else { - assert(in_bytes(SATBMarkQueue::byte_width_of_active()) == 1, "Assumption"); - __ cmpb(in_progress, 0); - } + Label done; + // Short-circuit if count == 0. + __ testptr(count, count); + __ jcc(Assembler::zero, done); - NOT_LP64(__ pop(thread);) - - __ jcc(Assembler::equal, filtered); + // Avoid runtime call when not marking. + Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset())); + __ testb(gc_state, ShenandoahHeap::MARKING); + __ jcc(Assembler::zero, done); __ pusha(); // push registers #ifdef _LP64 @@ -111,7 +107,8 @@ dst, count); #endif __ popa(); - __ bind(filtered); + __ bind(done); + NOT_LP64(__ pop(thread);) } } @@ -141,6 +138,22 @@ } #endif + Register thread = NOT_LP64(rax) LP64_ONLY(r15_thread); +#ifndef _LP64 + __ push(thread); + __ get_thread(thread); +#endif + + // Short-circuit if count == 0. + Label done; + __ testptr(count, count); + __ jcc(Assembler::zero, done); + + // Skip runtime call if no forwarded objects. 
+ Address gc_state(thread, in_bytes(ShenandoahThreadLocalData::gc_state_offset())); + __ testb(gc_state, ShenandoahHeap::UPDATEREFS); + __ jcc(Assembler::zero, done); + __ pusha(); // push registers (overkill) #ifdef _LP64 if (c_rarg0 == count) { // On win64 c_rarg0 == rcx @@ -158,6 +171,9 @@ dst, count); #endif __ popa(); + + __ bind(done); + NOT_LP64(__ pop(thread);) } } @@ -296,41 +312,23 @@ __ bind(done); } -void ShenandoahBarrierSetAssembler::read_barrier(MacroAssembler* masm, Register dst) { - if (ShenandoahReadBarrier) { - read_barrier_impl(masm, dst); - } -} - -void ShenandoahBarrierSetAssembler::read_barrier_impl(MacroAssembler* masm, Register dst) { - assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier || ShenandoahCASBarrier), "should be enabled"); +void ShenandoahBarrierSetAssembler::resolve_forward_pointer(MacroAssembler* masm, Register dst) { + assert(ShenandoahCASBarrier, "should be enabled"); Label is_null; __ testptr(dst, dst); __ jcc(Assembler::zero, is_null); - read_barrier_not_null_impl(masm, dst); + resolve_forward_pointer_not_null(masm, dst); __ bind(is_null); } -void ShenandoahBarrierSetAssembler::read_barrier_not_null(MacroAssembler* masm, Register dst) { - if (ShenandoahReadBarrier) { - read_barrier_not_null_impl(masm, dst); - } -} - -void ShenandoahBarrierSetAssembler::read_barrier_not_null_impl(MacroAssembler* masm, Register dst) { - assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier || ShenandoahCASBarrier), "should be enabled"); +void ShenandoahBarrierSetAssembler::resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst) { + assert(ShenandoahCASBarrier || ShenandoahLoadRefBarrier, "should be enabled"); __ movptr(dst, Address(dst, ShenandoahBrooksPointer::byte_offset())); } -void ShenandoahBarrierSetAssembler::write_barrier(MacroAssembler* masm, Register dst) { - if (ShenandoahWriteBarrier) { - write_barrier_impl(masm, dst); - } -} - -void ShenandoahBarrierSetAssembler::write_barrier_impl(MacroAssembler* masm, Register dst) { - assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier), "Should be enabled"); +void ShenandoahBarrierSetAssembler::load_reference_barrier_not_null(MacroAssembler* masm, Register dst) { + assert(ShenandoahLoadRefBarrier, "Should be enabled"); #ifdef _LP64 Label done; @@ -338,8 +336,8 @@ __ testb(gc_state, ShenandoahHeap::HAS_FORWARDED | ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL); __ jccb(Assembler::zero, done); - // Heap is unstable, need to perform the read-barrier even if WB is inactive - read_barrier_not_null(masm, dst); + // Heap is unstable, need to perform the resolve even if LRB is inactive + resolve_forward_pointer_not_null(masm, dst); __ testb(gc_state, ShenandoahHeap::EVACUATION | ShenandoahHeap::TRAVERSAL); __ jccb(Assembler::zero, done); @@ -348,7 +346,7 @@ __ xchgptr(dst, rax); // Move obj into rax and save rax into obj. } - __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahBarrierSetAssembler::shenandoah_wb()))); + __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, ShenandoahBarrierSetAssembler::shenandoah_lrb()))); if (dst != rax) { __ xchgptr(rax, dst); // Swap back obj with rax. 
@@ -361,24 +359,18 @@ } void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) { - if (ShenandoahStoreValReadBarrier || ShenandoahStoreValEnqueueBarrier) { + if (ShenandoahStoreValEnqueueBarrier) { storeval_barrier_impl(masm, dst, tmp); } } void ShenandoahBarrierSetAssembler::storeval_barrier_impl(MacroAssembler* masm, Register dst, Register tmp) { - assert(UseShenandoahGC && (ShenandoahStoreValReadBarrier || ShenandoahStoreValEnqueueBarrier), "should be enabled"); + assert(ShenandoahStoreValEnqueueBarrier, "should be enabled"); if (dst == noreg) return; #ifdef _LP64 if (ShenandoahStoreValEnqueueBarrier) { - Label is_null; - __ testptr(dst, dst); - __ jcc(Assembler::zero, is_null); - write_barrier_impl(masm, dst); - __ bind(is_null); - // The set of registers to be saved+restored is the same as in the write-barrier above. // Those are the commonly used registers in the interpreter. __ pusha(); @@ -392,50 +384,54 @@ //__ pop_callee_saved_registers(); __ popa(); } - if (ShenandoahStoreValReadBarrier) { - read_barrier_impl(masm, dst); - } #else Unimplemented(); #endif } +void ShenandoahBarrierSetAssembler::load_reference_barrier(MacroAssembler* masm, Register dst) { + if (ShenandoahLoadRefBarrier) { + Label done; + __ testptr(dst, dst); + __ jcc(Assembler::zero, done); + load_reference_barrier_not_null(masm, dst); + __ bind(done); + } +} + void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Register dst, Address src, Register tmp1, Register tmp_thread) { bool on_oop = type == T_OBJECT || type == T_ARRAY; - bool in_heap = (decorators & IN_HEAP) != 0; bool on_weak = (decorators & ON_WEAK_OOP_REF) != 0; bool on_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0; bool on_reference = on_weak || on_phantom; - if (in_heap) { - read_barrier_not_null(masm, src.base()); - } - BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread); - if (ShenandoahKeepAliveBarrier && on_oop && on_reference) { - const Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread); - NOT_LP64(__ get_thread(thread)); + BarrierSetAssembler::load_at(masm, decorators, type, dst, src, tmp1, tmp_thread); + if (on_oop) { + load_reference_barrier(masm, dst); - // Generate the SATB pre-barrier code to log the value of - // the referent field in an SATB buffer. - shenandoah_write_barrier_pre(masm /* masm */, - noreg /* obj */, - dst /* pre_val */, - thread /* thread */, - tmp1 /* tmp */, - true /* tosca_live */, - true /* expand_call */); + if (ShenandoahKeepAliveBarrier && on_reference) { + const Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread); + NOT_LP64(__ get_thread(thread)); + // Generate the SATB pre-barrier code to log the value of + // the referent field in an SATB buffer. 
+ shenandoah_write_barrier_pre(masm /* masm */, + noreg /* obj */, + dst /* pre_val */, + thread /* thread */, + tmp1 /* tmp */, + true /* tosca_live */, + true /* expand_call */); + } } } void ShenandoahBarrierSetAssembler::store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Address dst, Register val, Register tmp1, Register tmp2) { + bool on_oop = type == T_OBJECT || type == T_ARRAY; bool in_heap = (decorators & IN_HEAP) != 0; bool as_normal = (decorators & AS_NORMAL) != 0; - if (in_heap) { - write_barrier(masm, dst.base()); - } - if (type == T_OBJECT || type == T_ARRAY) { + if (on_oop && in_heap) { bool needs_pre_barrier = as_normal; Register tmp3 = LP64_ONLY(r8) NOT_LP64(rsi); @@ -478,44 +474,6 @@ } } -#ifndef _LP64 -void ShenandoahBarrierSetAssembler::obj_equals(MacroAssembler* masm, - Address obj1, jobject obj2) { - Unimplemented(); -} - -void ShenandoahBarrierSetAssembler::obj_equals(MacroAssembler* masm, - Register obj1, jobject obj2) { - Unimplemented(); -} -#endif - - -void ShenandoahBarrierSetAssembler::obj_equals(MacroAssembler* masm, Register op1, Register op2) { - __ cmpptr(op1, op2); - if (ShenandoahAcmpBarrier) { - Label done; - __ jccb(Assembler::equal, done); - read_barrier(masm, op1); - read_barrier(masm, op2); - __ cmpptr(op1, op2); - __ bind(done); - } -} - -void ShenandoahBarrierSetAssembler::obj_equals(MacroAssembler* masm, Register src1, Address src2) { - __ cmpptr(src1, src2); - if (ShenandoahAcmpBarrier) { - Label done; - __ jccb(Assembler::equal, done); - __ movptr(rscratch2, src2); - read_barrier(masm, src1); - read_barrier(masm, rscratch2); - __ cmpptr(src1, rscratch2); - __ bind(done); - } -} - void ShenandoahBarrierSetAssembler::tlab_allocate(MacroAssembler* masm, Register thread, Register obj, Register var_size_in_bytes, @@ -565,28 +523,6 @@ __ verify_tlab(); } -void ShenandoahBarrierSetAssembler::resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj) { - bool oop_not_null = (decorators & IS_NOT_NULL) != 0; - bool is_write = (decorators & ACCESS_WRITE) != 0; - if (is_write) { - if (oop_not_null) { - write_barrier(masm, obj); - } else { - Label done; - __ testptr(obj, obj); - __ jcc(Assembler::zero, done); - write_barrier(masm, obj); - __ bind(done); - } - } else { - if (oop_not_null) { - read_barrier_not_null(masm, obj); - } else { - read_barrier(masm, obj); - } - } -} - // Special Shenandoah CAS implementation that handles false negatives // due to concurrent evacuation. #ifndef _LP64 @@ -625,14 +561,14 @@ // Step 2. CAS had failed. This may be a false negative. // // The trouble comes when we compare the to-space pointer with the from-space - // pointer to the same object. To resolve this, it will suffice to read both - // oldval and the value from memory through the read barriers -- this will give - // both to-space pointers. If they mismatch, then it was a legitimate failure. + // pointer to the same object. To resolve this, it will suffice to resolve both + // oldval and the value from memory -- this will give both to-space pointers. + // If they mismatch, then it was a legitimate failure. 
// if (UseCompressedOops) { __ decode_heap_oop(tmp1); } - read_barrier_impl(masm, tmp1); + resolve_forward_pointer(masm, tmp1); if (UseCompressedOops) { __ movl(tmp2, oldval); @@ -640,7 +576,7 @@ } else { __ movptr(tmp2, oldval); } - read_barrier_impl(masm, tmp2); + resolve_forward_pointer(masm, tmp2); __ cmpptr(tmp1, tmp2); __ jcc(Assembler::notEqual, done, true); @@ -649,8 +585,8 @@ // // Corner case: it may happen that somebody stored the from-space pointer // to memory while we were preparing for retry. Therefore, we can fail again - // on retry, and so need to do this in loop, always re-reading the failure - // witness through the read barrier. + // on retry, and so need to do this in loop, always resolving the failure + // witness. __ bind(retry); if (os::is_MP()) __ lock(); if (UseCompressedOops) { @@ -666,7 +602,7 @@ } else { __ movptr(tmp2, oldval); } - read_barrier_impl(masm, tmp2); + resolve_forward_pointer(masm, tmp2); __ cmpptr(tmp1, tmp2); __ jcc(Assembler::equal, retry, true); @@ -814,7 +750,7 @@ } -void ShenandoahBarrierSetAssembler::gen_write_barrier_stub(LIR_Assembler* ce, ShenandoahWriteBarrierStub* stub) { +void ShenandoahBarrierSetAssembler::gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub) { __ bind(*stub->entry()); Label done; @@ -831,7 +767,7 @@ __ jcc(Assembler::zero, done); } - write_barrier(ce->masm(), res); + load_reference_barrier_not_null(ce->masm(), res); __ bind(done); __ jmp(*stub->continuation()); @@ -901,16 +837,16 @@ #endif // COMPILER1 -address ShenandoahBarrierSetAssembler::shenandoah_wb() { - assert(_shenandoah_wb != NULL, "need write barrier stub"); - return _shenandoah_wb; +address ShenandoahBarrierSetAssembler::shenandoah_lrb() { + assert(_shenandoah_lrb != NULL, "need load reference barrier stub"); + return _shenandoah_lrb; } #define __ cgen->assembler()-> -address ShenandoahBarrierSetAssembler::generate_shenandoah_wb(StubCodeGenerator* cgen) { +address ShenandoahBarrierSetAssembler::generate_shenandoah_lrb(StubCodeGenerator* cgen) { __ align(CodeEntryAlignment); - StubCodeMark mark(cgen, "StubRoutines", "shenandoah_wb"); + StubCodeMark mark(cgen, "StubRoutines", "shenandoah_lrb"); address start = __ pc(); #ifdef _LP64 @@ -958,7 +894,7 @@ __ push(r15); save_vector_registers(cgen->assembler()); __ movptr(rdi, rax); - __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_JRT), rdi); + __ call_VM_leaf(CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_JRT), rdi); restore_vector_registers(cgen->assembler()); __ pop(r15); __ pop(r14); @@ -985,12 +921,12 @@ #undef __ void ShenandoahBarrierSetAssembler::barrier_stubs_init() { - if (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier) { + if (ShenandoahLoadRefBarrier) { int stub_code_size = 4096; ResourceMark rm; BufferBlob* bb = BufferBlob::create("shenandoah_barrier_stubs", stub_code_size); CodeBuffer buf(bb); StubCodeGenerator cgen(&buf); - _shenandoah_wb = generate_shenandoah_wb(&cgen); + _shenandoah_lrb = generate_shenandoah_lrb(&cgen); } }
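The comments kept in this hunk describe why a plain CAS can fail spuriously under Shenandoah: the field may hold the to-space copy while the expected value is a from-space pointer to the same object, so the failure witness and the expected value are resolved through their forwarding pointers and the CAS is retried. A rough standalone sketch of that retry scheme with std::atomic (an illustration of the idea, not the HotSpot or generated x86 code):

// Conceptual sketch of the "CAS false negative" handling described above.
#include <atomic>

struct obj { obj* forwardee; int payload; };

static obj* resolve(obj* o) { return o == nullptr ? nullptr : o->forwardee; }

// Returns true if the field was updated from (any copy of) 'expected' to 'newval'.
static bool cas_oop(std::atomic<obj*>& field, obj* expected, obj* newval) {
    obj* witness = expected;
    if (field.compare_exchange_strong(witness, newval)) {
        return true;                                   // fast path: plain CAS worked
    }
    // Possible false negative: compare the two values modulo forwarding.
    while (resolve(witness) == resolve(expected)) {
        obj* retry_expected = witness;                 // retry with the value just seen
        if (field.compare_exchange_strong(retry_expected, newval)) {
            return true;
        }
        witness = retry_expected;                      // raced again; re-check
    }
    return false;                                      // legitimately a different object
}

int main() {
    obj to_space   = { nullptr, 2 };  to_space.forwardee = &to_space;
    obj from_space = { &to_space, 1 };                  // stale, forwarded copy
    obj newer      = { nullptr, 3 };  newer.forwardee   = &newer;

    std::atomic<obj*> field(&to_space);
    // Expected is the stale from-space pointer while memory holds the to-space
    // copy: a plain CAS fails, the resolved comparison lets the retry succeed.
    return cas_oop(field, &from_space, &newer) ? 0 : 1;
}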
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetAssembler_x86.hpp Thu Apr 04 22:07:49 2019 +0200 @@ -29,7 +29,7 @@ #ifdef COMPILER1 class LIR_Assembler; class ShenandoahPreBarrierStub; -class ShenandoahWriteBarrierStub; +class ShenandoahLoadReferenceBarrierStub; class StubAssembler; class StubCodeGenerator; #endif @@ -37,7 +37,7 @@ class ShenandoahBarrierSetAssembler: public BarrierSetAssembler { private: - static address _shenandoah_wb; + static address _shenandoah_lrb; void satb_write_barrier_pre(MacroAssembler* masm, Register obj, @@ -55,32 +55,30 @@ bool tosca_live, bool expand_call); - void read_barrier(MacroAssembler* masm, Register dst); - void read_barrier_impl(MacroAssembler* masm, Register dst); + void resolve_forward_pointer(MacroAssembler* masm, Register dst); + void resolve_forward_pointer_not_null(MacroAssembler* masm, Register dst); - void read_barrier_not_null(MacroAssembler* masm, Register dst); - void read_barrier_not_null_impl(MacroAssembler* masm, Register dst); - - void write_barrier(MacroAssembler* masm, Register dst); - void write_barrier_impl(MacroAssembler* masm, Register dst); + void load_reference_barrier_not_null(MacroAssembler* masm, Register dst); void storeval_barrier_impl(MacroAssembler* masm, Register dst, Register tmp); - address generate_shenandoah_wb(StubCodeGenerator* cgen); + address generate_shenandoah_lrb(StubCodeGenerator* cgen); void save_vector_registers(MacroAssembler* masm); void restore_vector_registers(MacroAssembler* masm); public: - static address shenandoah_wb(); + static address shenandoah_lrb(); void storeval_barrier(MacroAssembler* masm, Register dst, Register tmp); #ifdef COMPILER1 void gen_pre_barrier_stub(LIR_Assembler* ce, ShenandoahPreBarrierStub* stub); - void gen_write_barrier_stub(LIR_Assembler* ce, ShenandoahWriteBarrierStub* stub); + void gen_load_reference_barrier_stub(LIR_Assembler* ce, ShenandoahLoadReferenceBarrierStub* stub); void generate_c1_pre_barrier_runtime_stub(StubAssembler* sasm); #endif + void load_reference_barrier(MacroAssembler* masm, Register dst); + void cmpxchg_oop(MacroAssembler* masm, Register res, Address addr, Register oldval, Register newval, bool exchange, Register tmp1, Register tmp2); @@ -93,16 +91,6 @@ virtual void store_at(MacroAssembler* masm, DecoratorSet decorators, BasicType type, Address dst, Register val, Register tmp1, Register tmp2); -#ifndef _LP64 - virtual void obj_equals(MacroAssembler* masm, - Address obj1, jobject obj2); - virtual void obj_equals(MacroAssembler* masm, - Register obj1, jobject obj2); -#endif - - virtual void obj_equals(MacroAssembler* masm, Register src1, Register src2); - virtual void obj_equals(MacroAssembler* masm, Register src1, Address src2); - virtual void tlab_allocate(MacroAssembler* masm, Register thread, Register obj, Register var_size_in_bytes, @@ -110,8 +98,6 @@ Register t1, Register t2, Label& slow_case); - virtual void resolve(MacroAssembler* masm, DecoratorSet decorators, Register obj); - virtual void barrier_stubs_init(); };
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetC1_x86.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoahBarrierSetC1_x86.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -107,6 +107,7 @@ __ xchg(access.resolved_addr(), result, result, LIR_OprFact::illegalOpr); if (access.is_oop()) { + result = load_reference_barrier(access.gen(), result, access.access_emit_info(), true); if (ShenandoahSATBBarrier) { pre_barrier(access.gen(), access.access_emit_info(), access.decorators(), LIR_OprFact::illegalOpr, result /* pre_val */);
--- a/src/hotspot/cpu/x86/gc/shenandoah/shenandoah_x86_64.ad Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/cpu/x86/gc/shenandoah/shenandoah_x86_64.ad Thu Apr 04 22:07:49 2019 +0200 @@ -23,47 +23,7 @@ source_hpp %{ #include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp" -%} - -instruct shenandoahRB(rRegP dst, rRegP src, rFlagsReg cr) %{ - match(Set dst (ShenandoahReadBarrier src)); - effect(DEF dst, USE src); - ins_cost(125); // XXX - format %{ "shenandoah_rb $dst, $src" %} - ins_encode %{ - Register d = $dst$$Register; - Register s = $src$$Register; - __ movptr(d, Address(s, ShenandoahBrooksPointer::byte_offset())); - %} - ins_pipe(ialu_reg_mem); -%} - -instruct shenandoahRBNarrow(rRegP dst, rRegN src) %{ - predicate(UseCompressedOops && (Universe::narrow_oop_shift() == 0)); - match(Set dst (ShenandoahReadBarrier (DecodeN src))); - effect(DEF dst, USE src); - ins_cost(125); // XXX - format %{ "shenandoah_rb $dst, $src" %} - ins_encode %{ - Register d = $dst$$Register; - Register s = $src$$Register; - __ movptr(d, Address(r12, s, Address::times_1, ShenandoahBrooksPointer::byte_offset())); - %} - ins_pipe(ialu_reg_mem); -%} - -instruct shenandoahRBNarrowShift(rRegP dst, rRegN src) %{ - predicate(UseCompressedOops && (Universe::narrow_oop_shift() == Address::times_8)); - match(Set dst (ShenandoahReadBarrier (DecodeN src))); - effect(DEF dst, USE src); - ins_cost(125); // XXX - format %{ "shenandoah_rb $dst, $src" %} - ins_encode %{ - Register d = $dst$$Register; - Register s = $src$$Register; - __ movptr(d, Address(r12, s, Address::times_8, ShenandoahBrooksPointer::byte_offset())); - %} - ins_pipe(ialu_reg_mem); +#include "gc/shenandoah/c2/shenandoahSupport.hpp" %} instruct compareAndSwapP_shenandoah(rRegI res,
--- a/src/hotspot/os/bsd/os_bsd.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/os/bsd/os_bsd.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -1598,6 +1598,8 @@ } void os::print_memory_info(outputStream* st) { + xsw_usage swap_usage; + size_t size = sizeof(swap_usage); st->print("Memory:"); st->print(" %dk page", os::vm_page_size()>>10); @@ -1606,6 +1608,16 @@ os::physical_memory() >> 10); st->print("(" UINT64_FORMAT "k free)", os::available_memory() >> 10); + + if((sysctlbyname("vm.swapusage", &swap_usage, &size, NULL, 0) == 0) || (errno == ENOMEM)) { + if (size >= offset_of(xsw_usage, xsu_used)) { + st->print(", swap " UINT64_FORMAT "k", + ((julong) swap_usage.xsu_total) >> 10); + st->print("(" UINT64_FORMAT "k free)", + ((julong) swap_usage.xsu_avail) >> 10); + } + } + st->cr(); }
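The os_bsd.cpp hunk adds swap figures to print_memory_info by querying the vm.swapusage sysctl. A minimal standalone version of the same query (macOS sysctlbyname; error handling trimmed to the essentials):

// Minimal standalone query of the "vm.swapusage" sysctl used above.
// Prints total and available swap in KB.
#include <sys/sysctl.h>
#include <cstdio>

int main() {
    xsw_usage swap_usage;
    size_t size = sizeof(swap_usage);
    if (sysctlbyname("vm.swapusage", &swap_usage, &size, nullptr, 0) != 0) {
        std::perror("sysctlbyname(vm.swapusage)");
        return 1;
    }
    std::printf("swap %lluk (%lluk free)\n",
                (unsigned long long)(swap_usage.xsu_total >> 10),
                (unsigned long long)(swap_usage.xsu_avail >> 10));
    return 0;
}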
--- a/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/os_cpu/windows_x86/os_windows_x86.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -465,6 +465,9 @@ } #ifndef AMD64 +// Ignore "C4172: returning address of local variable or temporary" on 32bit +PRAGMA_DIAG_PUSH +PRAGMA_DISABLE_MSVC_WARNING(4172) // Returns an estimate of the current stack pointer. Result must be guaranteed // to point into the calling threads stack, and be no lower than the current // stack pointer. @@ -473,6 +476,7 @@ address sp = (address)&dummy; return sp; } +PRAGMA_DIAG_POP #else // Returns the current stack pointer. Accurate value needed for // os::verify_stack_alignment().
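PRAGMA_DIAG_PUSH / PRAGMA_DISABLE_MSVC_WARNING / PRAGMA_DIAG_POP are HotSpot convenience macros; on Visual C++ they presumably expand to the standard warning pragmas. The bare pattern, for reference (plain MSVC pragmas and a hypothetical function, not the HotSpot macros):

// Push the warning state, disable C4172 for one deliberate offender, restore.
#include <cstdio>

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4172)   // returning address of local variable or temporary
#endif

static void* current_stack_pointer_estimate() {
  int dummy;
  return &dummy;                  // deliberately returns a stack address
}

#ifdef _MSC_VER
#pragma warning(pop)
#endif

int main() {
  std::printf("%p\n", current_stack_pointer_estimate());
  return 0;
}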
--- a/src/hotspot/share/adlc/formssel.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/adlc/formssel.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -777,8 +777,7 @@ !strcmp(_matrule->_rChild->_opType,"CompareAndExchangeP") || !strcmp(_matrule->_rChild->_opType,"CompareAndExchangeN") || !strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeP") || - !strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeN") || - !strcmp(_matrule->_rChild->_opType,"ShenandoahReadBarrier"))) return true; + !strcmp(_matrule->_rChild->_opType,"ShenandoahCompareAndExchangeN"))) return true; else if ( is_ideal_load() == Form::idealP ) return true; else if ( is_ideal_store() != Form::none ) return true; @@ -3506,7 +3505,6 @@ "ClearArray", "GetAndSetB", "GetAndSetS", "GetAndAddI", "GetAndSetI", "GetAndSetP", "GetAndAddB", "GetAndAddS", "GetAndAddL", "GetAndSetL", "GetAndSetN", - "ShenandoahReadBarrier", "LoadBarrierSlowReg", "LoadBarrierWeakSlowReg" }; int cnt = sizeof(needs_ideal_memory_list)/sizeof(char*);
--- a/src/hotspot/share/aot/aotCompiledMethod.hpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/aot/aotCompiledMethod.hpp Thu Apr 04 22:07:49 2019 +0200 @@ -206,8 +206,6 @@ // AOT compiled methods do not get into zombie state virtual bool can_convert_to_zombie() { return false; } - // Evol dependent methods already marked. - virtual bool is_evol_dependent() { return false; } virtual bool is_dependent_on_method(Method* dependee) { return true; } virtual void clear_inline_caches();
--- a/src/hotspot/share/ci/ciReplay.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/ci/ciReplay.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -40,6 +40,7 @@ #include "runtime/handles.inline.hpp" #include "utilities/copy.hpp" #include "utilities/macros.hpp" +#include "utilities/utf8.hpp" #ifndef PRODUCT
--- a/src/hotspot/share/classfile/classFileParser.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/classfile/classFileParser.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -32,6 +32,7 @@ #include "classfile/dictionary.hpp" #include "classfile/javaClasses.inline.hpp" #include "classfile/moduleEntry.hpp" +#include "classfile/packageEntry.hpp" #include "classfile/symbolTable.hpp" #include "classfile/systemDictionary.hpp" #include "classfile/verificationType.hpp" @@ -77,6 +78,8 @@ #include "utilities/macros.hpp" #include "utilities/ostream.hpp" #include "utilities/resourceHash.hpp" +#include "utilities/utf8.hpp" + #if INCLUDE_CDS #include "classfile/systemDictionaryShared.hpp" #endif @@ -312,7 +315,7 @@ const char* const str = java_lang_String::as_utf8_string(patch()); // (could use java_lang_String::as_symbol instead, but might as well batch them) utf8_buffer = (const u1*) str; - utf8_length = (int) strlen(str); + utf8_length = (u2) strlen(str); } unsigned int hash;
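The classFileParser.cpp change narrows the patched-string length from int to u2. HotSpot's u2 is a 16-bit unsigned type, which matches the class-file format (a CONSTANT_Utf8_info length is itself a u2), so the cast is consistent with the surrounding code, though anything longer than 0xFFFF bytes would silently wrap. A standalone illustration of that narrowing, with u2 spelled as uint16_t:

// Narrowing a string length to a 16-bit u2, as in the hunk above.
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <string>

typedef uint16_t u2;   // HotSpot's u2 is a 16-bit unsigned type

int main() {
    std::string small(100, 'a');
    std::string big(70000, 'b');          // longer than any legal CONSTANT_Utf8

    u2 small_len = (u2) std::strlen(small.c_str());
    u2 big_len   = (u2) std::strlen(big.c_str());

    std::printf("small: %u\n", (unsigned) small_len);                    // 100
    std::printf("big:   %u (wrapped from 70000)\n", (unsigned) big_len); // 4464
    return 0;
}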
--- a/src/hotspot/share/classfile/classLoader.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/classfile/classLoader.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -396,6 +396,10 @@ } } +ClassFileStream* ClassPathImageEntry::open_stream(const char* name, TRAPS) { + return open_stream_for_loader(name, ClassLoaderData::the_null_class_loader_data(), THREAD); +} + // For a class in a named module, look it up in the jimage file using this syntax: // /<module-name>/<package-name>/<base-class> // @@ -403,7 +407,7 @@ // 1. There are no unnamed modules in the jimage file. // 2. A package is in at most one module in the jimage file. // -ClassFileStream* ClassPathImageEntry::open_stream(const char* name, TRAPS) { +ClassFileStream* ClassPathImageEntry::open_stream_for_loader(const char* name, ClassLoaderData* loader_data, TRAPS) { jlong size; JImageLocationRef location = (*JImageFindResource)(_jimage, "", get_jimage_version_string(), name, &size); @@ -414,20 +418,8 @@ if (pkg_name != NULL) { if (!Universe::is_module_initialized()) { location = (*JImageFindResource)(_jimage, JAVA_BASE_NAME, get_jimage_version_string(), name, &size); -#if INCLUDE_CDS - // CDS uses the boot class loader to load classes whose packages are in - // modules defined for other class loaders. So, for now, get their module - // names from the "modules" jimage file. - if (DumpSharedSpaces && location == 0) { - const char* module_name = (*JImagePackageToModule)(_jimage, pkg_name); - if (module_name != NULL) { - location = (*JImageFindResource)(_jimage, module_name, get_jimage_version_string(), name, &size); - } - } -#endif - } else { - PackageEntry* package_entry = ClassLoader::get_package_entry(name, ClassLoaderData::the_null_class_loader_data(), CHECK_NULL); + PackageEntry* package_entry = ClassLoader::get_package_entry(name, loader_data, CHECK_NULL); if (package_entry != NULL) { ResourceMark rm; // Get the module name
--- a/src/hotspot/share/classfile/classLoader.hpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/classfile/classLoader.hpp Thu Apr 04 22:07:49 2019 +0200 @@ -61,6 +61,10 @@ // Attempt to locate file_name through this class path entry. // Returns a class file parsing stream if successfull. virtual ClassFileStream* open_stream(const char* name, TRAPS) = 0; + // Open the stream for a specific class loader + virtual ClassFileStream* open_stream_for_loader(const char* name, ClassLoaderData* loader_data, TRAPS) { + return open_stream(name, THREAD); + } }; class ClassPathDirEntry: public ClassPathEntry { @@ -125,6 +129,7 @@ ClassPathImageEntry(JImageFile* jimage, const char* name); virtual ~ClassPathImageEntry(); ClassFileStream* open_stream(const char* name, TRAPS); + ClassFileStream* open_stream_for_loader(const char* name, ClassLoaderData* loader_data, TRAPS); }; // ModuleClassPathList contains a linked list of ClassPathEntry's
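For readers skimming the classLoader changes above: the new open_stream_for_loader entry point keeps the old single-argument open_stream as its default, while ClassPathImageEntry reverses the delegation so that the boot loader is simply the default argument. The following standalone C++ sketch models only that delegation shape; Stream, LoaderData and the entry classes are simplified stand-ins, not the HotSpot types.

#include <cstdio>
#include <string>

// Hypothetical stand-ins for ClassFileStream / ClassLoaderData, kept only for shape.
struct Stream { std::string origin; };
struct LoaderData { const char* name; };
static LoaderData boot_loader{"boot"};

struct PathEntry {
  virtual ~PathEntry() {}
  // Original single-argument lookup.
  virtual Stream* open_stream(const char* name) = 0;
  // New loader-aware lookup; the default ignores the loader, so existing
  // entry types keep their old behaviour unchanged.
  virtual Stream* open_stream_for_loader(const char* name, LoaderData* loader) {
    (void)loader;
    return open_stream(name);
  }
};

// A jimage-like entry flips the delegation: the old entry point forwards to
// the loader-aware one with the boot loader as the default.
struct ImageEntry : PathEntry {
  Stream* open_stream(const char* name) override {
    return open_stream_for_loader(name, &boot_loader);
  }
  Stream* open_stream_for_loader(const char* name, LoaderData* loader) override {
    printf("looking up %s for loader %s\n", name, loader->name);
    return new Stream{std::string(loader->name) + ":" + name};
  }
};

int main() {
  ImageEntry e;
  LoaderData app{"app"};
  delete e.open_stream("java/lang/Object.class");        // boot loader by default
  delete e.open_stream_for_loader("p/Foo.class", &app);  // explicit loader
}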
--- a/src/hotspot/share/classfile/classLoaderData.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/classfile/classLoaderData.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2012, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -288,7 +288,7 @@ // it is being defined, therefore _keep_alive is not volatile or atomic. void ClassLoaderData::inc_keep_alive() { if (is_unsafe_anonymous()) { - assert(_keep_alive >= 0, "Invalid keep alive increment count"); + assert(_keep_alive > 0, "Invalid keep alive increment count"); _keep_alive++; } }
--- a/src/hotspot/share/classfile/classLoaderDataGraph.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/classfile/classLoaderDataGraph.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -163,7 +163,7 @@ // TODO: have redefinition clean old methods out of the code cache. They still exist in some places. bool walk_all_metadata = InstanceKlass::has_previous_versions_and_reset(); - MetadataOnStackMark md_on_stack(walk_all_metadata); + MetadataOnStackMark md_on_stack(walk_all_metadata, /*redefinition_walk*/false); clean_deallocate_lists(walk_all_metadata); }
--- a/src/hotspot/share/classfile/javaClasses.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/classfile/javaClasses.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -65,6 +65,7 @@ #include "runtime/vframe.inline.hpp" #include "utilities/align.hpp" #include "utilities/preserveException.hpp" +#include "utilities/utf8.hpp" #if INCLUDE_JVMCI #include "jvmci/jvmciJavaClasses.hpp" #endif
--- a/src/hotspot/share/classfile/javaClasses.hpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/classfile/javaClasses.hpp Thu Apr 04 22:07:49 2019 +0200 @@ -29,7 +29,6 @@ #include "jvmtifiles/jvmti.h" #include "oops/oop.hpp" #include "runtime/os.hpp" -#include "utilities/utf8.hpp" // Interface for manipulating the basic Java classes. //
--- a/src/hotspot/share/classfile/klassFactory.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/classfile/klassFactory.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -58,7 +58,7 @@ // Post the CFLH JvmtiCachedClassFileData* cached_class_file = NULL; if (cfs == NULL) { - cfs = FileMapInfo::open_stream_for_jvmti(ik, CHECK_NULL); + cfs = FileMapInfo::open_stream_for_jvmti(ik, class_loader, CHECK_NULL); } unsigned char* ptr = (unsigned char*)cfs->buffer(); unsigned char* end_ptr = ptr + cfs->length();
--- a/src/hotspot/share/classfile/metadataOnStackMark.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/classfile/metadataOnStackMark.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -50,18 +50,25 @@ // it. Class unloading only deletes in-error class files, methods created by // the relocator and dummy constant pools. None of these appear anywhere except // in metadata Handles. -MetadataOnStackMark::MetadataOnStackMark(bool redefinition_walk) { +MetadataOnStackMark::MetadataOnStackMark(bool walk_all_metadata, bool redefinition_walk) { assert(SafepointSynchronize::is_at_safepoint(), "sanity check"); assert(_used_buffers == NULL, "sanity check"); assert(!_is_active, "MetadataOnStackMarks do not nest"); + assert(!redefinition_walk || walk_all_metadata, + "walk_all_metadata must be true for redefinition_walk"); NOT_PRODUCT(_is_active = true;) Threads::metadata_handles_do(Metadata::mark_on_stack); - if (redefinition_walk) { + if (walk_all_metadata) { MetadataOnStackClosure md_on_stack; Threads::metadata_do(&md_on_stack); - CodeCache::metadata_do(&md_on_stack); + if (redefinition_walk) { + // We have to walk the whole code cache during redefinition. + CodeCache::metadata_do(&md_on_stack); + } else { + CodeCache::old_nmethods_do(&md_on_stack); + } CompileBroker::mark_on_stack(); JvmtiCurrentBreakpoints::metadata_do(Metadata::mark_on_stack); ThreadService::metadata_do(Metadata::mark_on_stack);
--- a/src/hotspot/share/classfile/metadataOnStackMark.hpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/classfile/metadataOnStackMark.hpp Thu Apr 04 22:07:49 2019 +0200 @@ -48,7 +48,7 @@ static void retire_buffer(MetadataOnStackBuffer* buffer); public: - MetadataOnStackMark(bool redefinition_walk); + MetadataOnStackMark(bool walk_all_metadata, bool redefinition_walk); ~MetadataOnStackMark(); static void record(Metadata* m);
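The MetadataOnStackMark constructor now takes two flags with the contract that redefinition_walk implies walk_all_metadata, and the narrower walk only visits the "old" nmethod table kept by the code cache. A minimal sketch of that contract, with placeholder walk functions standing in for the CodeCache calls:

#include <cassert>
#include <cstdio>

// Illustrative stand-ins for the CodeCache walks.
static void walk_whole_code_cache()  { puts("walk every compiled method"); }
static void walk_old_nmethods_only() { puts("walk only the 'old' nmethod table"); }

struct MetadataOnStackMarkSketch {
  MetadataOnStackMarkSketch(bool walk_all_metadata, bool redefinition_walk) {
    // redefinition_walk implies walk_all_metadata
    assert(!redefinition_walk || walk_all_metadata);
    if (walk_all_metadata) {
      if (redefinition_walk) {
        walk_whole_code_cache();   // redefinition must see every nmethod
      } else {
        walk_old_nmethods_only();  // class unloading only needs the old ones
      }
    }
  }
};

int main() {
  MetadataOnStackMarkSketch unloading(/*walk_all_metadata=*/true,  /*redefinition_walk=*/false);
  MetadataOnStackMarkSketch redefine (/*walk_all_metadata=*/true,  /*redefinition_walk=*/true);
  MetadataOnStackMarkSketch cheap    (/*walk_all_metadata=*/false, /*redefinition_walk=*/false);
}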
--- a/src/hotspot/share/classfile/moduleEntry.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/classfile/moduleEntry.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "jni.h" +#include "classfile/classLoader.hpp" #include "classfile/classLoaderData.inline.hpp" #include "classfile/javaClasses.inline.hpp" #include "classfile/moduleEntry.hpp"
--- a/src/hotspot/share/classfile/protectionDomainCache.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/classfile/protectionDomainCache.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -45,7 +45,7 @@ } ProtectionDomainCacheTable::ProtectionDomainCacheTable(int table_size) - : Hashtable<ClassLoaderWeakHandle, mtClass>(table_size, sizeof(ProtectionDomainCacheEntry)) + : Hashtable<WeakHandle<vm_class_loader_data>, mtClass>(table_size, sizeof(ProtectionDomainCacheEntry)) { _dead_entries = false; _total_oops_removed = 0; } @@ -180,8 +180,8 @@ protection_domain->print_value_on(&ls); ls.cr(); } - ClassLoaderWeakHandle w = ClassLoaderWeakHandle::create(protection_domain); + WeakHandle<vm_class_loader_data> w = WeakHandle<vm_class_loader_data>::create(protection_domain); ProtectionDomainCacheEntry* p = new_entry(hash, w); - Hashtable<ClassLoaderWeakHandle, mtClass>::add_entry(index, p); + Hashtable<WeakHandle<vm_class_loader_data>, mtClass>::add_entry(index, p); return p; }
--- a/src/hotspot/share/classfile/protectionDomainCache.hpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/classfile/protectionDomainCache.hpp Thu Apr 04 22:07:49 2019 +0200 @@ -35,18 +35,18 @@ // to dictionary.hpp pd_set for more information about how protection domain entries // are used. // This table is walked during GC, rather than the class loader data graph dictionaries. -class ProtectionDomainCacheEntry : public HashtableEntry<ClassLoaderWeakHandle, mtClass> { +class ProtectionDomainCacheEntry : public HashtableEntry<WeakHandle<vm_class_loader_data>, mtClass> { friend class VMStructs; public: oop object(); oop object_no_keepalive(); ProtectionDomainCacheEntry* next() { - return (ProtectionDomainCacheEntry*)HashtableEntry<ClassLoaderWeakHandle, mtClass>::next(); + return (ProtectionDomainCacheEntry*)HashtableEntry<WeakHandle<vm_class_loader_data>, mtClass>::next(); } ProtectionDomainCacheEntry** next_addr() { - return (ProtectionDomainCacheEntry**)HashtableEntry<ClassLoaderWeakHandle, mtClass>::next_addr(); + return (ProtectionDomainCacheEntry**)HashtableEntry<WeakHandle<vm_class_loader_data>, mtClass>::next_addr(); } void verify(); @@ -61,21 +61,21 @@ // we only need to iterate over this set. // The amount of different protection domains used is typically magnitudes smaller // than the number of system dictionary entries (loaded classes). -class ProtectionDomainCacheTable : public Hashtable<ClassLoaderWeakHandle, mtClass> { +class ProtectionDomainCacheTable : public Hashtable<WeakHandle<vm_class_loader_data>, mtClass> { friend class VMStructs; private: ProtectionDomainCacheEntry* bucket(int i) const { - return (ProtectionDomainCacheEntry*) Hashtable<ClassLoaderWeakHandle, mtClass>::bucket(i); + return (ProtectionDomainCacheEntry*) Hashtable<WeakHandle<vm_class_loader_data>, mtClass>::bucket(i); } // The following method is not MT-safe and must be done under lock. ProtectionDomainCacheEntry** bucket_addr(int i) { - return (ProtectionDomainCacheEntry**) Hashtable<ClassLoaderWeakHandle, mtClass>::bucket_addr(i); + return (ProtectionDomainCacheEntry**) Hashtable<WeakHandle<vm_class_loader_data>, mtClass>::bucket_addr(i); } - ProtectionDomainCacheEntry* new_entry(unsigned int hash, ClassLoaderWeakHandle protection_domain) { + ProtectionDomainCacheEntry* new_entry(unsigned int hash, WeakHandle<vm_class_loader_data> protection_domain) { ProtectionDomainCacheEntry* entry = (ProtectionDomainCacheEntry*) - Hashtable<ClassLoaderWeakHandle, mtClass>::new_entry(hash, protection_domain); + Hashtable<WeakHandle<vm_class_loader_data>, mtClass>::new_entry(hash, protection_domain); return entry; }
--- a/src/hotspot/share/classfile/stringTable.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/classfile/stringTable.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -52,6 +52,7 @@ #include "utilities/concurrentHashTable.inline.hpp" #include "utilities/concurrentHashTableTasks.inline.hpp" #include "utilities/macros.hpp" +#include "utilities/utf8.hpp" // We prefer short chains of avg 2 const double PREF_AVG_LIST_LEN = 2.0; @@ -760,10 +761,6 @@ return true; } unsigned int hash = java_lang_String::hash_code(s); - if (hash == 0) { - // We do not archive Strings with a 0 hashcode because ...... - return true; - } java_lang_String::set_hash(s, hash); oop new_s = StringTable::create_archived_string(s, Thread::current());
--- a/src/hotspot/share/classfile/symbolTable.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/classfile/symbolTable.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -38,6 +38,7 @@ #include "services/diagnosticCommand.hpp" #include "utilities/concurrentHashTable.inline.hpp" #include "utilities/concurrentHashTableTasks.inline.hpp" +#include "utilities/utf8.hpp" // We used to not resize at all, so let's be conservative // and not set it too short before we decide to resize,
--- a/src/hotspot/share/classfile/systemDictionary.hpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/classfile/systemDictionary.hpp Thu Apr 04 22:07:49 2019 +0200 @@ -25,11 +25,12 @@ #ifndef SHARE_CLASSFILE_SYSTEMDICTIONARY_HPP #define SHARE_CLASSFILE_SYSTEMDICTIONARY_HPP -#include "classfile/classLoader.hpp" +#include "classfile/classLoaderData.hpp" #include "jvmci/systemDictionary_jvmci.hpp" #include "oops/objArrayOop.hpp" #include "oops/symbol.hpp" #include "runtime/java.hpp" +#include "runtime/mutexLocker.hpp" #include "runtime/reflectionUtils.hpp" #include "runtime/signature.hpp" #include "utilities/hashtable.hpp"
--- a/src/hotspot/share/classfile/systemDictionaryShared.hpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/classfile/systemDictionaryShared.hpp Thu Apr 04 22:07:49 2019 +0200 @@ -27,6 +27,7 @@ #include "oops/klass.hpp" #include "classfile/dictionary.hpp" +#include "classfile/packageEntry.hpp" #include "classfile/systemDictionary.hpp" #include "memory/filemap.hpp"
--- a/src/hotspot/share/classfile/verifier.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/classfile/verifier.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "jvm.h" #include "classfile/classFileStream.hpp" +#include "classfile/classLoader.hpp" #include "classfile/javaClasses.hpp" #include "classfile/stackMapTable.hpp" #include "classfile/stackMapFrame.hpp"
--- a/src/hotspot/share/classfile/vmSymbols.hpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/classfile/vmSymbols.hpp Thu Apr 04 22:07:49 2019 +0200 @@ -25,7 +25,6 @@ #ifndef SHARE_CLASSFILE_VMSYMBOLS_HPP #define SHARE_CLASSFILE_VMSYMBOLS_HPP -#include "classfile/moduleEntry.hpp" #include "jfr/support/jfrIntrinsics.hpp" #include "jvmci/vmSymbols_jvmci.hpp" #include "memory/iterator.hpp" @@ -52,7 +51,7 @@ #define VM_SYMBOLS_DO(template, do_alias) \ /* commonly used class, package, module names */ \ - template(java_base, JAVA_BASE_NAME) \ + template(java_base, "java.base") \ template(java_lang_System, "java/lang/System") \ template(java_lang_Object, "java/lang/Object") \ template(java_lang_Class, "java/lang/Class") \
--- a/src/hotspot/share/code/codeCache.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/code/codeCache.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -1032,43 +1032,77 @@ #endif } +#ifdef INCLUDE_JVMTI +// RedefineClasses support for unloading nmethods that are dependent on "old" methods. +// We don't really expect this table to grow very large. If it does, it can become a hashtable. +static GrowableArray<CompiledMethod*>* old_compiled_method_table = NULL; + +static void add_to_old_table(CompiledMethod* c) { + if (old_compiled_method_table == NULL) { + old_compiled_method_table = new (ResourceObj::C_HEAP, mtCode) GrowableArray<CompiledMethod*>(100, true); + } + old_compiled_method_table->push(c); +} + +static void reset_old_method_table() { + if (old_compiled_method_table != NULL) { + delete old_compiled_method_table; + old_compiled_method_table = NULL; + } +} + +// Remove this method when zombied or unloaded. +void CodeCache::unregister_old_nmethod(CompiledMethod* c) { + assert_locked_or_safepoint(CodeCache_lock); + if (old_compiled_method_table != NULL) { + int index = old_compiled_method_table->find(c); + if (index != -1) { + old_compiled_method_table->delete_at(index); + } + } +} + +void CodeCache::old_nmethods_do(MetadataClosure* f) { + // Walk old method table and mark those on stack. + int length = 0; + if (old_compiled_method_table != NULL) { + length = old_compiled_method_table->length(); + for (int i = 0; i < length; i++) { + old_compiled_method_table->at(i)->metadata_do(f); + } + } + log_debug(redefine, class, nmethod)("Walked %d nmethods for mark_on_stack", length); +} + // Just marks the methods in this class as needing deoptimization void CodeCache::mark_for_evol_deoptimization(InstanceKlass* dependee) { - MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); - - // Deoptimize all methods of the evolving class itself - Array<Method*>* old_methods = dependee->methods(); - for (int i = 0; i < old_methods->length(); i++) { - ResourceMark rm; - Method* old_method = old_methods->at(i); - CompiledMethod* nm = old_method->code(); - if (nm != NULL) { - nm->mark_for_deoptimization(); - } - } + assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!"); // Mark dependent AOT nmethods, which are only found via the class redefined. + // TODO: add dependencies to aotCompiledMethod's metadata section so this isn't + // needed. AOTLoader::mark_evol_dependent_methods(dependee); } + // Walk compiled methods and mark dependent methods for deoptimization. int CodeCache::mark_dependents_for_evol_deoptimization() { + assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!"); + // Each redefinition creates a new set of nmethods that have references to "old" Methods + // So delete old method table and create a new one. + reset_old_method_table(); + int number_of_marked_CodeBlobs = 0; CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading); while(iter.next()) { CompiledMethod* nm = iter.method(); - if (nm->is_marked_for_deoptimization()) { - // ...Already marked in the previous pass; count it here. - // Also counts AOT compiled methods, already marked. + // Walk all alive nmethods to check for old Methods. + // This includes methods whose inline caches point to old methods, so + // inline cache clearing is unnecessary. 
+ if (nm->has_evol_metadata()) { + nm->mark_for_deoptimization(); + add_to_old_table(nm); number_of_marked_CodeBlobs++; - } else if (nm->has_evol_metadata()) { - ResourceMark rm; - nm->mark_for_deoptimization(); - number_of_marked_CodeBlobs++; - } else { - // Inline caches that refer to an nmethod are deoptimized already, because - // the Method* is walked in the metadata section of the nmethod. - assert(!nm->is_evol_dependent(), "should no longer be necessary"); } } @@ -1077,6 +1111,46 @@ return number_of_marked_CodeBlobs; } +void CodeCache::mark_all_nmethods_for_evol_deoptimization() { + assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!"); + CompiledMethodIterator iter(CompiledMethodIterator::only_alive_and_not_unloading); + while(iter.next()) { + CompiledMethod* nm = iter.method(); + if (!nm->method()->is_method_handle_intrinsic()) { + nm->mark_for_deoptimization(); + if (nm->has_evol_metadata()) { + add_to_old_table(nm); + } + } + } +} + +// Flushes compiled methods dependent on redefined classes, that have already been +// marked for deoptimization. +void CodeCache::flush_evol_dependents() { + assert(SafepointSynchronize::is_at_safepoint(), "Can only do this at a safepoint!"); + + // CodeCache can only be updated by a thread_in_VM and they will all be + // stopped during the safepoint so CodeCache will be safe to update without + // holding the CodeCache_lock. + + // At least one nmethod has been marked for deoptimization + + // All this already happens inside a VM_Operation, so we'll do all the work here. + // Stuff copied from VM_Deoptimize and modified slightly. + + // We do not want any GCs to happen while we are in the middle of this VM operation + ResourceMark rm; + DeoptimizationMarker dm; + + // Deoptimize all activations depending on marked nmethods + Deoptimization::deoptimize_dependents(); + + // Make the dependent methods not entrant + make_marked_nmethods_not_entrant(); +} +#endif // INCLUDE_JVMTI + // Deoptimize all methods void CodeCache::mark_all_nmethods_for_deoptimization() { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); @@ -1137,32 +1211,6 @@ } } -// Flushes compiled methods dependent on redefined classes, that have already been -// marked for deoptimization. -void CodeCache::flush_evol_dependents() { - // --- Compile_lock is not held. However we are at a safepoint. - assert_locked_or_safepoint(Compile_lock); - - // CodeCache can only be updated by a thread_in_VM and they will all be - // stopped during the safepoint so CodeCache will be safe to update without - // holding the CodeCache_lock. - - // At least one nmethod has been marked for deoptimization - - // All this already happens inside a VM_Operation, so we'll do all the work here. - // Stuff copied from VM_Deoptimize and modified slightly. - - // We do not want any GCs to happen while we are in the middle of this VM operation - ResourceMark rm; - DeoptimizationMarker dm; - - // Deoptimize all activations depending on marked nmethods - Deoptimization::deoptimize_dependents(); - - // Make the dependent methods not entrant - make_marked_nmethods_not_entrant(); -} - // Flushes compiled methods dependent on dependee void CodeCache::flush_dependents_on_method(const methodHandle& m_h) { // --- Compile_lock is not held. However we are at a safepoint.
--- a/src/hotspot/share/code/codeCache.hpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/code/codeCache.hpp Thu Apr 04 22:07:49 2019 +0200 @@ -270,10 +270,16 @@ // Flushing and deoptimization static void flush_dependents_on(InstanceKlass* dependee); + + // RedefineClasses support // Flushing and deoptimization in case of evolution static void mark_for_evol_deoptimization(InstanceKlass* dependee); static int mark_dependents_for_evol_deoptimization(); + static void mark_all_nmethods_for_evol_deoptimization(); static void flush_evol_dependents(); + static void old_nmethods_do(MetadataClosure* f); + static void unregister_old_nmethod(CompiledMethod* c); + // Support for fullspeed debugging static void flush_dependents_on_method(const methodHandle& dependee);
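The RedefineClasses bookkeeping added to the code cache boils down to a side table of compiled methods that reference old Methods: entries are appended when a redefinition pass finds them, removed when the nmethod is zombied or flushed, and iterated when metadata has to be marked on stack. A self-contained sketch of that lifecycle, using a std::vector in place of the GrowableArray and a dummy Method type (names are illustrative, not the HotSpot API):

#include <algorithm>
#include <cstdio>
#include <vector>

// Stand-in for CompiledMethod; only pointer identity matters for the table.
struct Method { const char* name; };

// Lazily created side table, mirroring the add / delete_at / iterate usage in the patch.
static std::vector<Method*>* old_method_table = nullptr;

static void add_to_old_table(Method* m) {
  if (old_method_table == nullptr) {
    old_method_table = new std::vector<Method*>();
  }
  old_method_table->push_back(m);
}

static void unregister_old_method(Method* m) {
  if (old_method_table != nullptr) {
    auto it = std::find(old_method_table->begin(), old_method_table->end(), m);
    if (it != old_method_table->end()) {
      old_method_table->erase(it);   // method got zombied or flushed
    }
  }
}

template <typename F>
static void old_methods_do(F f) {
  if (old_method_table != nullptr) {
    for (Method* m : *old_method_table) f(m);
  }
}

int main() {
  Method a{"a"}, b{"b"};
  add_to_old_table(&a);
  add_to_old_table(&b);
  unregister_old_method(&a);
  old_methods_do([](Method* m) { printf("marking %s\n", m->name); });
  delete old_method_table;
}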
--- a/src/hotspot/share/code/compiledMethod.hpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/code/compiledMethod.hpp Thu Apr 04 22:07:49 2019 +0200 @@ -368,7 +368,6 @@ int verify_icholder_relocations(); void verify_oop_relocations(); - virtual bool is_evol_dependent() = 0; bool has_evol_metadata(); // Fast breakpoint support. Tells if this compiled method is
--- a/src/hotspot/share/code/nmethod.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/code/nmethod.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -1106,6 +1106,7 @@ MutexLockerEx ml(SafepointSynchronize::is_at_safepoint() ? NULL : CodeCache_lock, Mutex::_no_safepoint_check_flag); Universe::heap()->unregister_nmethod(this); + CodeCache::unregister_old_nmethod(this); } // Clear the method of this dead nmethod @@ -1291,6 +1292,7 @@ MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); if (nmethod_needs_unregister) { Universe::heap()->unregister_nmethod(this); + CodeCache::unregister_old_nmethod(this); } flush_dependencies(/*delete_immediately*/true); } @@ -1988,32 +1990,6 @@ return found_check; } -bool nmethod::is_evol_dependent() { - for (Dependencies::DepStream deps(this); deps.next(); ) { - if (deps.type() == Dependencies::evol_method) { - Method* method = deps.method_argument(0); - if (method->is_old()) { - if (log_is_enabled(Debug, redefine, class, nmethod)) { - ResourceMark rm; - log_debug(redefine, class, nmethod) - ("Found evol dependency of nmethod %s.%s(%s) compile_id=%d on method %s.%s(%s)", - _method->method_holder()->external_name(), - _method->name()->as_C_string(), - _method->signature()->as_C_string(), - compile_id(), - method->method_holder()->external_name(), - method->name()->as_C_string(), - method->signature()->as_C_string()); - } - if (TraceDependencies || LogCompilation) - deps.log_dependency(method->method_holder()); - return true; - } - } - } - return false; -} - // Called from mark_for_deoptimization, when dependee is invalidated. bool nmethod::is_dependent_on_method(Method* dependee) { for (Dependencies::DepStream deps(this); deps.next(); ) {
--- a/src/hotspot/share/code/nmethod.hpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/code/nmethod.hpp Thu Apr 04 22:07:49 2019 +0200 @@ -565,11 +565,6 @@ // and the changes have invalidated it bool check_dependency_on(DepChange& changes); - // Evolution support. Tells if this compiled method is dependent on any of - // redefined methods, such that if m() is replaced, - // this compiled method will have to be deoptimized. - bool is_evol_dependent(); - // Fast breakpoint support. Tells if this compiled method is // dependent on the given method. Returns true if this nmethod // corresponds to the given method as well.
--- a/src/hotspot/share/compiler/compilerOracle.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/compiler/compilerOracle.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1998, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -738,30 +738,6 @@ parse_line(token); } -void CompilerOracle::append_comment_to_file(const char* message) { - assert(has_command_file(), "command file must be specified"); - fileStream stream(fopen(cc_file(), "at")); - stream.print("# "); - for (int index = 0; message[index] != '\0'; index++) { - stream.put(message[index]); - if (message[index] == '\n') stream.print("# "); - } - stream.cr(); -} - -void CompilerOracle::append_exclude_to_file(const methodHandle& method) { - assert(has_command_file(), "command file must be specified"); - fileStream stream(fopen(cc_file(), "at")); - stream.print("exclude "); - method->method_holder()->name()->print_symbol_on(&stream); - stream.print("."); - method->name()->print_symbol_on(&stream); - method->signature()->print_symbol_on(&stream); - stream.cr(); - stream.cr(); -} - - void compilerOracle_init() { CompilerOracle::parse_from_string(CompileCommand, CompilerOracle::parse_from_line); CompilerOracle::parse_from_string(CompileOnly, CompilerOracle::parse_compile_only);
--- a/src/hotspot/share/compiler/compilerOracle.hpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/compiler/compilerOracle.hpp Thu Apr 04 22:07:49 2019 +0200 @@ -83,10 +83,6 @@ static void parse_from_line(char* line); static void parse_compile_only(char * line); - // For updating the oracle file - static void append_comment_to_file(const char* message); - static void append_exclude_to_file(const methodHandle& method); - // Tells whether there are any methods to print for print_method_statistics() static bool should_print_methods(); };
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -24,8 +24,6 @@ #include "precompiled.hpp" #include "classfile/classLoaderDataGraph.hpp" -#include "classfile/stringTable.hpp" -#include "classfile/symbolTable.hpp" #include "classfile/systemDictionary.hpp" #include "code/codeCache.hpp" #include "gc/cms/cmsCollectorPolicy.hpp" @@ -54,7 +52,6 @@ #include "gc/shared/genCollectedHeap.hpp" #include "gc/shared/genOopClosures.inline.hpp" #include "gc/shared/isGCActiveMark.hpp" -#include "gc/shared/oopStorageParState.hpp" #include "gc/shared/owstTaskTerminator.hpp" #include "gc/shared/referencePolicy.hpp" #include "gc/shared/referenceProcessorPhaseTimes.hpp" @@ -2771,12 +2768,10 @@ protected: CMSCollector* _collector; uint _n_workers; - OopStorage::ParState<false, false> _par_state_string; CMSParMarkTask(const char* name, CMSCollector* collector, uint n_workers) : AbstractGangTask(name), _collector(collector), - _n_workers(n_workers), - _par_state_string(StringTable::weak_storage()) {} + _n_workers(n_workers) {} // Work method in support of parallel rescan ... of young gen spaces void do_young_space_rescan(OopsInGenClosure* cl, ContiguousSpace* space,
--- a/src/hotspot/share/gc/cms/parNewGeneration.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/gc/cms/parNewGeneration.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -582,8 +582,7 @@ _young_gen(young_gen), _old_gen(old_gen), _young_old_boundary(young_old_boundary), _state_set(state_set), - _strong_roots_scope(strong_roots_scope), - _par_state_string(StringTable::weak_storage()) + _strong_roots_scope(strong_roots_scope) {} void ParNewGenTask::work(uint worker_id) {
--- a/src/hotspot/share/gc/cms/parNewGeneration.hpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/gc/cms/parNewGeneration.hpp Thu Apr 04 22:07:49 2019 +0200 @@ -235,7 +235,6 @@ HeapWord* _young_old_boundary; class ParScanThreadStateSet* _state_set; StrongRootsScope* _strong_roots_scope; - OopStorage::ParState<false, false> _par_state_string; public: ParNewGenTask(ParNewGeneration* young_gen,
--- a/src/hotspot/share/gc/epsilon/epsilonHeap.hpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/gc/epsilon/epsilonHeap.hpp Thu Apr 04 22:07:49 2019 +0200 @@ -129,6 +129,12 @@ virtual void print_gc_threads_on(outputStream* st) const {} virtual void gc_threads_do(ThreadClosure* tc) const {} + // No nmethod handling + virtual void register_nmethod(nmethod* nm) {} + virtual void unregister_nmethod(nmethod* nm) {} + virtual void flush_nmethod(nmethod* nm) {} + virtual void verify_nmethod(nmethod* nm) {} + // No heap verification virtual void prepare_for_verify() {} virtual void verify(VerifyOption option) {}
--- a/src/hotspot/share/gc/g1/g1BarrierSet.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/gc/g1/g1BarrierSet.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -148,24 +148,7 @@ // If we are creating the thread during a marking cycle, we should // set the active field of the SATB queue to true. That involves - // copying the global is_active value to this thread's queue, which - // is done without any direct synchronization here. - // - // The activation and deactivation of the SATB queues occurs at the - // beginning / end of a marking cycle, and is done during - // safepoints. This function is called just before a thread is - // added to its corresponding threads list (for Java or non-Java - // threads, respectively). - // - // For Java threads, that's done while holding the Threads_lock, - // which ensures we're not at a safepoint, so reading the global - // is_active state is synchronized against update. - assert(!thread->is_Java_thread() || !SafepointSynchronize::is_at_safepoint(), - "Should not be at a safepoint"); - // For non-Java threads, thread creation (and list addition) may, - // and indeed usually does, occur during a safepoint. But such - // creation isn't concurrent with updating the global SATB active - // state. + // copying the global is_active value to this thread's queue. bool is_satb_active = _satb_mark_queue_set.is_active(); G1ThreadLocalData::satb_mark_queue(thread).set_active(is_satb_active); }
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -1575,7 +1575,10 @@ // And ReservedSpace calls it 'special'. If we failed to set 'special', // we reserved memory without large page. if (os::can_commit_large_page_memory() || rs.special()) { - page_size = rs.alignment(); + // An alignment at ReservedSpace comes from preferred page size or + // heap alignment, and if the alignment came from heap alignment, it could be + // larger than large pages size. So need to cap with the large page size. + page_size = MIN2(rs.alignment(), os::large_page_size()); } } @@ -2728,7 +2731,7 @@ // The remembered set might contain references to already freed // regions. Filter out such entries to avoid failing card table // verification. - if (g1h->is_in_closed_subset(ct->addr_for(card_ptr))) { + if (g1h->is_in(ct->addr_for(card_ptr))) { if (*card_ptr != G1CardTable::dirty_card_val()) { *card_ptr = G1CardTable::dirty_card_val(); _dcq.enqueue(card_ptr); @@ -4605,11 +4608,6 @@ used(), recalculate_used()); } -bool G1CollectedHeap::is_in_closed_subset(const void* p) const { - HeapRegion* hr = heap_region_containing(p); - return hr->is_in(p); -} - // Methods for the mutator alloc region HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
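On the page-size fix in g1CollectedHeap.cpp: the reservation's alignment may be derived from the heap alignment and can therefore exceed the large page size, so it has to be capped before being reported as the page size. A toy calculation with assumed sizes (2M large pages, 32M heap alignment) illustrates the cap:

#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  // Assumed example values, not taken from the patch.
  const size_t large_page_size    = 2u * 1024 * 1024;   // 2M pages
  const size_t heap_alignment     = 32u * 1024 * 1024;  // alignment from heap sizing
  const size_t reserved_alignment = std::max(large_page_size, heap_alignment);
  // The alignment alone would overstate the page size; cap it.
  const size_t page_size = std::min(reserved_alignment, large_page_size);
  printf("page_size = %zu (capped from %zu)\n", page_size, reserved_alignment);
}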
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp Thu Apr 04 22:07:49 2019 +0200 @@ -1119,8 +1119,6 @@ return _hrm->reserved(); } - virtual bool is_in_closed_subset(const void* p) const; - G1HotCardCache* g1_hot_card_cache() const { return _hot_card_cache; } G1CardTable* card_table() const { @@ -1322,6 +1320,12 @@ // Unregister the given nmethod from the G1 heap. virtual void unregister_nmethod(nmethod* nm); + // No nmethod flushing needed. + virtual void flush_nmethod(nmethod* nm) {} + + // No nmethod verification implemented. + virtual void verify_nmethod(nmethod* nm) {} + // Free up superfluous code root memory. void purge_code_root_memory();
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -1766,17 +1766,17 @@ G1CMSATBBufferClosure _cm_satb_cl; G1CMOopClosure _cm_cl; MarkingCodeBlobClosure _code_cl; - int _thread_parity; + uintx _claim_token; public: G1RemarkThreadsClosure(G1CollectedHeap* g1h, G1CMTask* task) : _cm_satb_cl(task, g1h), _cm_cl(g1h, task), _code_cl(&_cm_cl, !CodeBlobToOopClosure::FixRelocations), - _thread_parity(Threads::thread_claim_parity()) {} + _claim_token(Threads::thread_claim_token()) {} void do_thread(Thread* thread) { - if (thread->claim_oops_do(true, _thread_parity)) { + if (thread->claim_threads_do(true, _claim_token)) { SATBMarkQueue& queue = G1ThreadLocalData::satb_mark_queue(thread); queue.apply_closure_and_empty(&_cm_satb_cl); if (thread->is_Java_thread()) {
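The remark closure above moves from the two-valued claim parity to a monotonically increasing claim token (see also the StrongRootsScope change further down, which now calls change_thread_claim_token). The sketch below models only the claiming idea: a global counter is bumped per claiming round and each thread is claimed by the first caller that installs the current token. Names and the atomics layout are illustrative, not the HotSpot implementation.

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <vector>

// The old parity scheme alternated between two values; a growing token never repeats.
static std::atomic<uintptr_t> global_claim_token{1};

struct Thread {
  std::atomic<uintptr_t> claimed_token{0};
  // Returns true for exactly one successful claim per thread per round.
  bool try_claim(uintptr_t token) {
    uintptr_t seen = claimed_token.load(std::memory_order_relaxed);
    return seen != token &&
           claimed_token.compare_exchange_strong(seen, token);
  }
};

static uintptr_t start_new_round() {   // roughly: change_thread_claim_token()
  return global_claim_token.fetch_add(1) + 1;
}

int main() {
  std::vector<Thread> threads(3);
  uintptr_t token = start_new_round();
  int claimed = 0;
  for (auto& t : threads) {
    if (t.try_claim(token)) claimed++;  // first claim succeeds
    if (t.try_claim(token)) claimed++;  // repeated attempt is rejected
  }
  printf("claimed %d of %zu threads once each\n", claimed, threads.size());
}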
--- a/src/hotspot/share/gc/g1/g1FullGCOopClosures.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/gc/g1/g1FullGCOopClosures.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -61,7 +61,7 @@ _cc++; oop obj = CompressedOops::decode_not_null(heap_oop); bool failed = false; - if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _verify_option)) { + if (!_g1h->is_in(obj) || _g1h->is_obj_dead_cond(obj, _verify_option)) { MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); LogStreamHandle(Error, gc, verify) yy; @@ -69,7 +69,7 @@ yy.cr(); yy.print_cr("----------"); } - if (!_g1h->is_in_closed_subset(obj)) { + if (!_g1h->is_in(obj)) { HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p); yy.print_cr("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region " HR_FORMAT, p2i(p), p2i(_containing_obj), HR_FORMAT_PARAMS(from));
--- a/src/hotspot/share/gc/g1/g1RootProcessor.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/gc/g1/g1RootProcessor.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -38,7 +38,6 @@ #include "gc/g1/g1RootClosures.hpp" #include "gc/g1/g1RootProcessor.hpp" #include "gc/g1/heapRegion.inline.hpp" -#include "gc/shared/oopStorageParState.hpp" #include "gc/shared/referenceProcessor.hpp" #include "memory/allocation.inline.hpp" #include "runtime/mutex.hpp" @@ -71,7 +70,6 @@ _g1h(g1h), _process_strong_tasks(G1RP_PS_NumElements), _srs(n_workers), - _par_state_string(StringTable::weak_storage()), _lock(Mutex::leaf, "G1 Root Scanning barrier lock", false, Monitor::_safepoint_check_never), _n_workers_discovered_strong_classes(0) {}
--- a/src/hotspot/share/gc/g1/g1RootProcessor.hpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/gc/g1/g1RootProcessor.hpp Thu Apr 04 22:07:49 2019 +0200 @@ -25,7 +25,6 @@ #ifndef SHARE_GC_G1_G1ROOTPROCESSOR_HPP #define SHARE_GC_G1_G1ROOTPROCESSOR_HPP -#include "gc/shared/oopStorageParState.hpp" #include "gc/shared/strongRootsScope.hpp" #include "memory/allocation.hpp" #include "runtime/mutex.hpp" @@ -50,7 +49,6 @@ G1CollectedHeap* _g1h; SubTasksDone _process_strong_tasks; StrongRootsScope _srs; - OopStorage::ParState<false, false> _par_state_string; // Used to implement the Thread work barrier. Monitor _lock;
--- a/src/hotspot/share/gc/g1/heapRegion.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/gc/g1/heapRegion.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -514,7 +514,7 @@ if (!CompressedOops::is_null(heap_oop)) { oop obj = CompressedOops::decode_not_null(heap_oop); bool failed = false; - if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) { + if (!_g1h->is_in(obj) || _g1h->is_obj_dead_cond(obj, _vo)) { MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag); @@ -522,7 +522,7 @@ log.error("----------"); } ResourceMark rm; - if (!_g1h->is_in_closed_subset(obj)) { + if (!_g1h->is_in(obj)) { HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p); log.error("Field " PTR_FORMAT " of live obj " PTR_FORMAT " in region " HR_FORMAT, p2i(p), p2i(_containing_obj), HR_FORMAT_PARAMS(from));
--- a/src/hotspot/share/gc/serial/serialHeap.hpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/gc/serial/serialHeap.hpp Thu Apr 04 22:07:49 2019 +0200 @@ -59,11 +59,6 @@ virtual GrowableArray<GCMemoryManager*> memory_managers(); virtual GrowableArray<MemoryPool*> memory_pools(); - // override - virtual bool is_in_closed_subset(const void* p) const { - return is_in(p); - } - DefNewGeneration* young_gen() const { assert(_young_gen->kind() == Generation::DefNew, "Wrong generation type"); return static_cast<DefNewGeneration*>(_young_gen);
--- a/src/hotspot/share/gc/shared/barrierSet.hpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/gc/shared/barrierSet.hpp Thu Apr 04 22:07:49 2019 +0200 @@ -130,8 +130,18 @@ virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {} virtual void on_thread_create(Thread* thread) {} virtual void on_thread_destroy(Thread* thread) {} + + // These perform BarrierSet-related initialization/cleanup before the thread + // is added to or removed from the corresponding set of threads. The + // argument thread is the current thread. These are called either holding + // the Threads_lock (for a JavaThread) and so not at a safepoint, or holding + // the NonJavaThreadsList_lock (for a NonJavaThread) locked by the + // caller. That locking ensures the operation is "atomic" with the list + // modification wrto operations that hold the NJTList_lock and either also + // hold the Threads_lock or are at a safepoint. virtual void on_thread_attach(Thread* thread) {} virtual void on_thread_detach(Thread* thread) {} + virtual void make_parsable(JavaThread* thread) {} #ifdef CHECK_UNHANDLED_OOPS
--- a/src/hotspot/share/gc/shared/collectedHeap.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/gc/shared/collectedHeap.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -575,3 +575,8 @@ size_t CollectedHeap::obj_size(oop obj) const { return obj->size(); } + +uint32_t CollectedHeap::hash_oop(oop obj) const { + const uintptr_t addr = cast_from_oop<uintptr_t>(obj); + return static_cast<uint32_t>(addr >> LogMinObjAlignment); +}
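The new CollectedHeap::hash_oop default hashes the object's address after shifting out the alignment bits that are always zero. A standalone illustration, assuming 8-byte minimum object alignment (LogMinObjAlignment == 3):

#include <cstdint>
#include <cstdio>

// Assumed 8-byte alignment: the low three bits of any object address are zero,
// so shifting them out before truncating to 32 bits keeps the hash well spread.
static const int kLogMinObjAlignment = 3;

static uint32_t hash_address(const void* obj) {
  uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
  return static_cast<uint32_t>(addr >> kLogMinObjAlignment);
}

int main() {
  alignas(8) static int dummy[4];
  printf("hash = 0x%08x\n", hash_address(&dummy[0]));
}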
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/gc/shared/collectedHeap.hpp Thu Apr 04 22:07:49 2019 +0200 @@ -239,37 +239,7 @@ DEBUG_ONLY(bool is_in_or_null(const void* p) const { return p == NULL || is_in(p); }) - // Let's define some terms: a "closed" subset of a heap is one that - // - // 1) contains all currently-allocated objects, and - // - // 2) is closed under reference: no object in the closed subset - // references one outside the closed subset. - // - // Membership in a heap's closed subset is useful for assertions. - // Clearly, the entire heap is a closed subset, so the default - // implementation is to use "is_in_reserved". But this may not be too - // liberal to perform useful checking. Also, the "is_in" predicate - // defines a closed subset, but may be too expensive, since "is_in" - // verifies that its argument points to an object head. The - // "closed_subset" method allows a heap to define an intermediate - // predicate, allowing more precise checking than "is_in_reserved" at - // lower cost than "is_in." - - // One important case is a heap composed of disjoint contiguous spaces, - // such as the Garbage-First collector. Such heaps have a convenient - // closed subset consisting of the allocated portions of those - // contiguous spaces. - - // Return "TRUE" iff the given pointer points into the heap's defined - // closed subset (which defaults to the entire heap). - virtual bool is_in_closed_subset(const void* p) const { - return is_in_reserved(p); - } - - bool is_in_closed_subset_or_null(const void* p) const { - return p == NULL || is_in_closed_subset(p); - } + virtual uint32_t hash_oop(oop obj) const; void set_gc_cause(GCCause::Cause v) { if (UsePerfData) { @@ -510,11 +480,11 @@ void print_heap_after_gc(); // Registering and unregistering an nmethod (compiled code) with the heap. - // Override with specific mechanism for each specialized heap type. - virtual void register_nmethod(nmethod* nm) {} - virtual void unregister_nmethod(nmethod* nm) {} - virtual void flush_nmethod(nmethod* nm) {} - virtual void verify_nmethod(nmethod* nmethod) {} + virtual void register_nmethod(nmethod* nm) = 0; + virtual void unregister_nmethod(nmethod* nm) = 0; + // Callback for when nmethod is about to be deleted. + virtual void flush_nmethod(nmethod* nm) = 0; + virtual void verify_nmethod(nmethod* nm) = 0; void trace_heap_before_gc(const GCTracer* gc_tracer); void trace_heap_after_gc(const GCTracer* gc_tracer);
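Besides removing is_in_closed_subset, the collectedHeap.hpp hunk makes the nmethod hooks pure virtual, so each collector must now state its policy explicitly, even when that policy is empty (as in the Epsilon and G1 hunks earlier). A compact model of that pattern, with made-up Heap and NMethod types:

#include <cstdio>

struct NMethod { const char* name; };

// The base class forces every collector to declare its nmethod policy.
struct Heap {
  virtual ~Heap() {}
  virtual void register_nmethod(NMethod* nm)   = 0;
  virtual void unregister_nmethod(NMethod* nm) = 0;
  virtual void flush_nmethod(NMethod* nm)      = 0;
  virtual void verify_nmethod(NMethod* nm)     = 0;
};

// An Epsilon-like collector opts out explicitly with empty overrides.
struct NoOpHeap : Heap {
  void register_nmethod(NMethod*)   override {}
  void unregister_nmethod(NMethod*) override {}
  void flush_nmethod(NMethod*)      override {}
  void verify_nmethod(NMethod*)     override {}
};

int main() {
  NoOpHeap heap;
  NMethod nm{"foo"};
  heap.register_nmethod(&nm);   // compiles and does nothing, by design
  puts("no-op nmethod hooks exercised");
}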
--- a/src/hotspot/share/gc/shared/genCollectedHeap.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/gc/shared/genCollectedHeap.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -824,7 +824,6 @@ CLDClosure* weak_cld_closure, CodeBlobToOopClosure* code_roots) { // General roots. - assert(Threads::thread_claim_parity() != 0, "must have called prologue code"); assert(code_roots != NULL, "code root closure should always be set"); // _n_termination for _process_strong_tasks should be set up stream // in a method not running in a GC worker. Otherwise the GC worker
--- a/src/hotspot/share/gc/shared/genCollectedHeap.hpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/gc/shared/genCollectedHeap.hpp Thu Apr 04 22:07:49 2019 +0200 @@ -234,10 +234,9 @@ void collect(GCCause::Cause cause, GenerationType max_generation); // Returns "TRUE" iff "p" points into the committed areas of the heap. - // The methods is_in(), is_in_closed_subset() and is_in_youngest() may - // be expensive to compute in general, so, to prevent - // their inadvertent use in product jvm's, we restrict their use to - // assertion checking or verification only. + // The methods is_in() and is_in_youngest() may be expensive to compute + // in general, so, to prevent their inadvertent use in product jvm's, we + // restrict their use to assertion checking or verification only. bool is_in(const void* p) const; // Returns true if the reference is to an object in the reserved space
--- a/src/hotspot/share/gc/shared/satbMarkQueue.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/gc/shared/satbMarkQueue.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -170,7 +170,11 @@ #ifdef ASSERT verify_active_states(expected_active); #endif // ASSERT - _all_active = active; + // Update the global state, synchronized with threads list management. + { + MutexLockerEx ml(NonJavaThreadsList_lock, Mutex::_no_safepoint_check_flag); + _all_active = active; + } class SetThreadActiveClosure : public ThreadClosure { SATBMarkQueueSet* _qset;
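The satbMarkQueue.cpp hunk takes NonJavaThreadsList_lock around the _all_active update so that it is ordered with thread-list modification, matching the simplified comment in g1BarrierSet.cpp and the new BarrierSet on_thread_attach/detach documentation. The sketch below is a deliberately simplified model of that ordering; it folds the per-thread activation into the same critical section for clarity, whereas the real code applies the per-thread closure separately at a safepoint.

#include <cstdio>
#include <mutex>
#include <vector>

struct ThreadLocalQueue { bool active = false; };

static std::mutex list_lock;                 // roughly: NonJavaThreadsList_lock
static bool global_active = false;
static std::vector<ThreadLocalQueue*> threads;

static void set_active_all(bool active) {
  std::lock_guard<std::mutex> g(list_lock);
  global_active = active;
  for (ThreadLocalQueue* q : threads) q->active = active;
}

static void attach_thread(ThreadLocalQueue* q) {
  std::lock_guard<std::mutex> g(list_lock);
  q->active = global_active;                 // copy the global state ...
  threads.push_back(q);                      // ... consistently with list insertion
}

int main() {
  ThreadLocalQueue a, b;
  attach_thread(&a);
  set_active_all(true);
  attach_thread(&b);
  printf("a=%d b=%d\n", a.active, b.active); // both observe the activation
}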
--- a/src/hotspot/share/gc/shared/strongRootsScope.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/gc/shared/strongRootsScope.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -37,7 +37,7 @@ } StrongRootsScope::StrongRootsScope(uint n_threads) : _n_threads(n_threads) { - Threads::change_thread_claim_parity(); + Threads::change_thread_claim_token(); } StrongRootsScope::~StrongRootsScope() {
--- a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -46,9 +46,9 @@ bs->gen_pre_barrier_stub(ce, this); } -void ShenandoahWriteBarrierStub::emit_code(LIR_Assembler* ce) { +void ShenandoahLoadReferenceBarrierStub::emit_code(LIR_Assembler* ce) { ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler(); - bs->gen_write_barrier_stub(ce, this); + bs->gen_load_reference_barrier_stub(ce, this); } void ShenandoahBarrierSetC1::pre_barrier(LIRGenerator* gen, CodeEmitInfo* info, DecoratorSet decorators, LIR_Opr addr_opr, LIR_Opr pre_val) { @@ -105,40 +105,16 @@ __ branch_destination(slow->continuation()); } -LIR_Opr ShenandoahBarrierSetC1::read_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) { - if (UseShenandoahGC && ShenandoahReadBarrier) { - return read_barrier_impl(gen, obj, info, need_null_check); +LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) { + if (ShenandoahLoadRefBarrier) { + return load_reference_barrier_impl(gen, obj, info, need_null_check); } else { return obj; } } -LIR_Opr ShenandoahBarrierSetC1::read_barrier_impl(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) { - assert(UseShenandoahGC && (ShenandoahReadBarrier || ShenandoahStoreValReadBarrier), "Should be enabled"); - LabelObj* done = new LabelObj(); - LIR_Opr result = gen->new_register(T_OBJECT); - __ move(obj, result); - if (need_null_check) { - __ cmp(lir_cond_equal, result, LIR_OprFact::oopConst(NULL)); - __ branch(lir_cond_equal, T_LONG, done->label()); - } - LIR_Address* brooks_ptr_address = gen->generate_address(result, ShenandoahBrooksPointer::byte_offset(), T_ADDRESS); - __ load(brooks_ptr_address, result, info ? new CodeEmitInfo(info) : NULL, lir_patch_none); - - __ branch_destination(done->label()); - return result; -} - -LIR_Opr ShenandoahBarrierSetC1::write_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) { - if (UseShenandoahGC && ShenandoahWriteBarrier) { - return write_barrier_impl(gen, obj, info, need_null_check); - } else { - return obj; - } -} - -LIR_Opr ShenandoahBarrierSetC1::write_barrier_impl(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) { - assert(UseShenandoahGC && (ShenandoahWriteBarrier || ShenandoahStoreValEnqueueBarrier), "Should be enabled"); +LIR_Opr ShenandoahBarrierSetC1::load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check) { + assert(ShenandoahLoadRefBarrier, "Should be enabled"); obj = ensure_in_register(gen, obj); assert(obj->is_register(), "must be a register at this point"); @@ -168,7 +144,7 @@ } __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0)); - CodeStub* slow = new ShenandoahWriteBarrierStub(obj, result, info ? new CodeEmitInfo(info) : NULL, need_null_check); + CodeStub* slow = new ShenandoahLoadReferenceBarrierStub(obj, result, info ? 
new CodeEmitInfo(info) : NULL, need_null_check); __ branch(lir_cond_notEqual, T_INT, slow); __ branch_destination(slow->continuation()); @@ -189,58 +165,13 @@ } LIR_Opr ShenandoahBarrierSetC1::storeval_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, DecoratorSet decorators) { - bool need_null_check = (decorators & IS_NOT_NULL) == 0; if (ShenandoahStoreValEnqueueBarrier) { - obj = write_barrier_impl(gen, obj, info, need_null_check); + obj = ensure_in_register(gen, obj); pre_barrier(gen, info, decorators, LIR_OprFact::illegalOpr, obj); } - if (ShenandoahStoreValReadBarrier) { - obj = read_barrier_impl(gen, obj, info, true /*need_null_check*/); - } return obj; } -LIR_Opr ShenandoahBarrierSetC1::resolve_address(LIRAccess& access, bool resolve_in_register) { - DecoratorSet decorators = access.decorators(); - bool is_array = (decorators & IS_ARRAY) != 0; - bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0; - - bool is_write = (decorators & ACCESS_WRITE) != 0; - bool needs_null_check = (decorators & IS_NOT_NULL) == 0; - - LIR_Opr base = access.base().item().result(); - LIR_Opr offset = access.offset().opr(); - LIRGenerator* gen = access.gen(); - - if (is_write) { - base = write_barrier(gen, base, access.access_emit_info(), needs_null_check); - } else { - base = read_barrier(gen, base, access.access_emit_info(), needs_null_check); - } - - LIR_Opr addr_opr; - if (is_array) { - addr_opr = LIR_OprFact::address(gen->emit_array_address(base, offset, access.type())); - } else if (needs_patching) { - // we need to patch the offset in the instruction so don't allow - // generate_address to try to be smart about emitting the -1. - // Otherwise the patching code won't know how to find the - // instruction to patch. - addr_opr = LIR_OprFact::address(new LIR_Address(base, PATCHED_ADDR, access.type())); - } else { - addr_opr = LIR_OprFact::address(gen->generate_address(base, offset, 0, 0, access.type())); - } - - if (resolve_in_register) { - LIR_Opr resolved_addr = gen->new_pointer_register(); - __ leal(addr_opr, resolved_addr); - resolved_addr = LIR_OprFact::address(new LIR_Address(resolved_addr, access.type())); - return resolved_addr; - } else { - return addr_opr; - } -} - void ShenandoahBarrierSetC1::store_at_resolved(LIRAccess& access, LIR_Opr value) { if (access.is_oop()) { if (ShenandoahSATBBarrier) { @@ -252,15 +183,28 @@ } void ShenandoahBarrierSetC1::load_at_resolved(LIRAccess& access, LIR_Opr result) { - BarrierSetC1::load_at_resolved(access, result); + if (!access.is_oop()) { + BarrierSetC1::load_at_resolved(access, result); + return; + } + + LIRGenerator *gen = access.gen(); + + if (ShenandoahLoadRefBarrier) { + LIR_Opr tmp = gen->new_register(T_OBJECT); + BarrierSetC1::load_at_resolved(access, tmp); + tmp = load_reference_barrier(access.gen(), tmp, access.access_emit_info(), true); + __ move(tmp, result); + } else { + BarrierSetC1::load_at_resolved(access, result); + } if (ShenandoahKeepAliveBarrier) { DecoratorSet decorators = access.decorators(); bool is_weak = (decorators & ON_WEAK_OOP_REF) != 0; bool is_phantom = (decorators & ON_PHANTOM_OOP_REF) != 0; bool is_anonymous = (decorators & ON_UNKNOWN_OOP_REF) != 0; - LIRGenerator *gen = access.gen(); - if (access.is_oop() && (is_weak || is_phantom || is_anonymous)) { + if (is_weak || is_phantom || is_anonymous) { // Register the value in the referent field with the pre-barrier LabelObj *Lcont_anonymous; if (is_anonymous) { @@ -276,19 +220,6 @@ } } -LIR_Opr ShenandoahBarrierSetC1::atomic_add_at_resolved(LIRAccess& access, 
LIRItem& value) { - return BarrierSetC1::atomic_add_at_resolved(access, value); -} - -LIR_Opr ShenandoahBarrierSetC1::resolve(LIRGenerator* gen, DecoratorSet decorators, LIR_Opr obj) { - bool is_write = decorators & ACCESS_WRITE; - if (is_write) { - return write_barrier(gen, obj, NULL, (decorators & IS_NOT_NULL) == 0); - } else { - return read_barrier(gen, obj, NULL, (decorators & IS_NOT_NULL) == 0); - } -} - class C1ShenandoahPreBarrierCodeGenClosure : public StubAssemblerCodeGenClosure { virtual OopMapSet* generate_code(StubAssembler* sasm) { ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)BarrierSet::barrier_set()->barrier_set_assembler();
--- a/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.hpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/gc/shenandoah/c1/shenandoahBarrierSetC1.hpp Thu Apr 04 22:07:49 2019 +0200 @@ -85,7 +85,7 @@ #endif // PRODUCT }; -class ShenandoahWriteBarrierStub: public CodeStub { +class ShenandoahLoadReferenceBarrierStub: public CodeStub { friend class ShenandoahBarrierSetC1; private: LIR_Opr _obj; @@ -94,7 +94,7 @@ bool _needs_null_check; public: - ShenandoahWriteBarrierStub(LIR_Opr obj, LIR_Opr result, CodeEmitInfo* info, bool needs_null_check) : + ShenandoahLoadReferenceBarrierStub(LIR_Opr obj, LIR_Opr result, CodeEmitInfo* info, bool needs_null_check) : _obj(obj), _result(result), _info(info), _needs_null_check(needs_null_check) { assert(_obj->is_register(), "should be register"); @@ -113,7 +113,7 @@ visitor->do_temp(_result); } #ifndef PRODUCT - virtual void print_name(outputStream* out) const { out->print("ShenandoahWritePreBarrierStub"); } + virtual void print_name(outputStream* out) const { out->print("ShenandoahLoadReferenceBarrierStub"); } #endif // PRODUCT }; @@ -181,12 +181,10 @@ void pre_barrier(LIRGenerator* gen, CodeEmitInfo* info, DecoratorSet decorators, LIR_Opr addr_opr, LIR_Opr pre_val); - LIR_Opr read_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check); - LIR_Opr write_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check); + LIR_Opr load_reference_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check); LIR_Opr storeval_barrier(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, DecoratorSet decorators); - LIR_Opr read_barrier_impl(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check); - LIR_Opr write_barrier_impl(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check); + LIR_Opr load_reference_barrier_impl(LIRGenerator* gen, LIR_Opr obj, CodeEmitInfo* info, bool need_null_check); LIR_Opr ensure_in_register(LIRGenerator* gen, LIR_Opr obj); @@ -194,7 +192,6 @@ CodeBlob* pre_barrier_c1_runtime_code_blob() { return _pre_barrier_c1_runtime_code_blob; } protected: - virtual LIR_Opr resolve_address(LIRAccess& access, bool resolve_in_register); virtual void store_at_resolved(LIRAccess& access, LIR_Opr value); virtual void load_at_resolved(LIRAccess& access, LIR_Opr result); @@ -202,10 +199,8 @@ virtual LIR_Opr atomic_cmpxchg_at_resolved(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value); virtual LIR_Opr atomic_xchg_at_resolved(LIRAccess& access, LIRItem& value); - virtual LIR_Opr atomic_add_at_resolved(LIRAccess& access, LIRItem& value); public: - virtual LIR_Opr resolve(LIRGenerator* gen, DecoratorSet decorators, LIR_Opr obj); virtual void generate_c1_runtime_stubs(BufferBlob* buffer_blob); };
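The C1 Shenandoah changes replace the old read/write barriers on the access base with a single load-reference barrier applied to the loaded value: load into a temporary, barrier it, then move it to the result. The toy model below captures only that structure; load_reference_barrier here is a hypothetical forwarding check, not Shenandoah's actual slow path.

#include <cstdio>

// Instead of fixing up the object we are about to read from (the old
// read/write barrier on the base address), the barrier runs on every
// reference value produced by a load.
struct Obj { Obj* forwardee; int payload; };

// Hypothetical stand-in for the load-reference-barrier: if the object has
// been evacuated, return its new copy, otherwise return it unchanged.
static Obj* load_reference_barrier(Obj* v) {
  return (v != nullptr && v->forwardee != nullptr) ? v->forwardee : v;
}

// The access itself is a plain load; the barrier is applied to the result,
// mirroring the shape of the new load_at_resolved (load tmp, barrier tmp,
// move tmp to result).
static Obj* load_field(Obj** addr) {
  Obj* tmp = *addr;
  return load_reference_barrier(tmp);
}

int main() {
  Obj to_space{nullptr, 42};
  Obj from_space{&to_space, 0};
  Obj* field = &from_space;
  printf("payload after barrier: %d\n", load_field(&field)->payload);  // prints 42
}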
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -43,121 +43,56 @@ } ShenandoahBarrierSetC2State::ShenandoahBarrierSetC2State(Arena* comp_arena) - : _shenandoah_barriers(new (comp_arena) GrowableArray<ShenandoahWriteBarrierNode*>(comp_arena, 8, 0, NULL)) { + : _enqueue_barriers(new (comp_arena) GrowableArray<ShenandoahEnqueueBarrierNode*>(comp_arena, 8, 0, NULL)), + _load_reference_barriers(new (comp_arena) GrowableArray<ShenandoahLoadReferenceBarrierNode*>(comp_arena, 8, 0, NULL)) { } -int ShenandoahBarrierSetC2State::shenandoah_barriers_count() const { - return _shenandoah_barriers->length(); +int ShenandoahBarrierSetC2State::enqueue_barriers_count() const { + return _enqueue_barriers->length(); } -ShenandoahWriteBarrierNode* ShenandoahBarrierSetC2State::shenandoah_barrier(int idx) const { - return _shenandoah_barriers->at(idx); +ShenandoahEnqueueBarrierNode* ShenandoahBarrierSetC2State::enqueue_barrier(int idx) const { + return _enqueue_barriers->at(idx); } -void ShenandoahBarrierSetC2State::add_shenandoah_barrier(ShenandoahWriteBarrierNode * n) { - assert(!_shenandoah_barriers->contains(n), "duplicate entry in barrier list"); - _shenandoah_barriers->append(n); +void ShenandoahBarrierSetC2State::add_enqueue_barrier(ShenandoahEnqueueBarrierNode * n) { + assert(!_enqueue_barriers->contains(n), "duplicate entry in barrier list"); + _enqueue_barriers->append(n); } -void ShenandoahBarrierSetC2State::remove_shenandoah_barrier(ShenandoahWriteBarrierNode * n) { - if (_shenandoah_barriers->contains(n)) { - _shenandoah_barriers->remove(n); +void ShenandoahBarrierSetC2State::remove_enqueue_barrier(ShenandoahEnqueueBarrierNode * n) { + if (_enqueue_barriers->contains(n)) { + _enqueue_barriers->remove(n); } } -#define __ kit-> +int ShenandoahBarrierSetC2State::load_reference_barriers_count() const { + return _load_reference_barriers->length(); +} -Node* ShenandoahBarrierSetC2::shenandoah_read_barrier(GraphKit* kit, Node* obj) const { - if (ShenandoahReadBarrier) { - obj = shenandoah_read_barrier_impl(kit, obj, false, true, true); +ShenandoahLoadReferenceBarrierNode* ShenandoahBarrierSetC2State::load_reference_barrier(int idx) const { + return _load_reference_barriers->at(idx); +} + +void ShenandoahBarrierSetC2State::add_load_reference_barrier(ShenandoahLoadReferenceBarrierNode * n) { + assert(!_load_reference_barriers->contains(n), "duplicate entry in barrier list"); + _load_reference_barriers->append(n); +} + +void ShenandoahBarrierSetC2State::remove_load_reference_barrier(ShenandoahLoadReferenceBarrierNode * n) { + if (_load_reference_barriers->contains(n)) { + _load_reference_barriers->remove(n); + } +} + +Node* ShenandoahBarrierSetC2::shenandoah_storeval_barrier(GraphKit* kit, Node* obj) const { + if (ShenandoahStoreValEnqueueBarrier) { + obj = shenandoah_enqueue_barrier(kit, obj); } return obj; } -Node* ShenandoahBarrierSetC2::shenandoah_storeval_barrier(GraphKit* kit, Node* obj) const { - if (ShenandoahStoreValEnqueueBarrier) { - obj = shenandoah_write_barrier(kit, obj); - obj = shenandoah_enqueue_barrier(kit, obj); - } - if (ShenandoahStoreValReadBarrier) { - obj = shenandoah_read_barrier_impl(kit, obj, true, false, false); - } - return obj; -} - -Node* ShenandoahBarrierSetC2::shenandoah_read_barrier_impl(GraphKit* kit, Node* obj, bool use_ctrl, bool use_mem, bool allow_fromspace) const { - const Type* obj_type = obj->bottom_type(); - if 
(obj_type->higher_equal(TypePtr::NULL_PTR)) { - return obj; - } - const TypePtr* adr_type = ShenandoahBarrierNode::brooks_pointer_type(obj_type); - Node* mem = use_mem ? __ memory(adr_type) : __ immutable_memory(); - - if (! ShenandoahBarrierNode::needs_barrier(&__ gvn(), NULL, obj, mem, allow_fromspace)) { - // We know it is null, no barrier needed. - return obj; - } - - if (obj_type->meet(TypePtr::NULL_PTR) == obj_type->remove_speculative()) { - - // We don't know if it's null or not. Need null-check. - enum { _not_null_path = 1, _null_path, PATH_LIMIT }; - RegionNode* region = new RegionNode(PATH_LIMIT); - Node* phi = new PhiNode(region, obj_type); - Node* null_ctrl = __ top(); - Node* not_null_obj = __ null_check_oop(obj, &null_ctrl); - - region->init_req(_null_path, null_ctrl); - phi ->init_req(_null_path, __ zerocon(T_OBJECT)); - - Node* ctrl = use_ctrl ? __ control() : NULL; - ShenandoahReadBarrierNode* rb = new ShenandoahReadBarrierNode(ctrl, mem, not_null_obj, allow_fromspace); - Node* n = __ gvn().transform(rb); - - region->init_req(_not_null_path, __ control()); - phi ->init_req(_not_null_path, n); - - __ set_control(__ gvn().transform(region)); - __ record_for_igvn(region); - return __ gvn().transform(phi); - - } else { - // We know it is not null. Simple barrier is sufficient. - Node* ctrl = use_ctrl ? __ control() : NULL; - ShenandoahReadBarrierNode* rb = new ShenandoahReadBarrierNode(ctrl, mem, obj, allow_fromspace); - Node* n = __ gvn().transform(rb); - __ record_for_igvn(n); - return n; - } -} - -Node* ShenandoahBarrierSetC2::shenandoah_write_barrier_helper(GraphKit* kit, Node* obj, const TypePtr* adr_type) const { - ShenandoahWriteBarrierNode* wb = new ShenandoahWriteBarrierNode(kit->C, kit->control(), kit->memory(adr_type), obj); - Node* n = __ gvn().transform(wb); - if (n == wb) { // New barrier needs memory projection. - Node* proj = __ gvn().transform(new ShenandoahWBMemProjNode(n)); - __ set_memory(proj, adr_type); - } - return n; -} - -Node* ShenandoahBarrierSetC2::shenandoah_write_barrier(GraphKit* kit, Node* obj) const { - if (ShenandoahWriteBarrier) { - obj = shenandoah_write_barrier_impl(kit, obj); - } - return obj; -} - -Node* ShenandoahBarrierSetC2::shenandoah_write_barrier_impl(GraphKit* kit, Node* obj) const { - if (! 
ShenandoahBarrierNode::needs_barrier(&__ gvn(), NULL, obj, NULL, true)) { - return obj; - } - const Type* obj_type = obj->bottom_type(); - const TypePtr* adr_type = ShenandoahBarrierNode::brooks_pointer_type(obj_type); - Node* n = shenandoah_write_barrier_helper(kit, obj, adr_type); - __ record_for_igvn(n); - return n; -} +#define __ kit-> bool ShenandoahBarrierSetC2::satb_can_remove_pre_barrier(GraphKit* kit, PhaseTransform* phase, Node* adr, BasicType bt, uint adr_idx) const { @@ -304,7 +239,7 @@ Node* gc_state = __ AddP(no_base, tls, __ ConX(in_bytes(ShenandoahThreadLocalData::gc_state_offset()))); Node* ld = __ load(__ ctrl(), gc_state, TypeInt::BYTE, T_BYTE, Compile::AliasIdxRaw); marking = __ AndI(ld, __ ConI(ShenandoahHeap::MARKING)); - assert(ShenandoahWriteBarrierNode::is_gc_state_load(ld), "Should match the shape"); + assert(ShenandoahBarrierC2Support::is_gc_state_load(ld), "Should match the shape"); // if (!marking) __ if_then(marking, BoolTest::ne, zero, unlikely); { @@ -361,7 +296,7 @@ bool ShenandoahBarrierSetC2::is_shenandoah_wb_call(Node* call) { return call->is_CallLeaf() && - call->as_CallLeaf()->entry_point() == CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_JRT); + call->as_CallLeaf()->entry_point() == CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_JRT); } bool ShenandoahBarrierSetC2::is_shenandoah_marking_if(PhaseTransform *phase, Node* n) { @@ -549,88 +484,6 @@ return TypeFunc::make(domain, range); } -void ShenandoahBarrierSetC2::resolve_address(C2Access& access) const { - const TypePtr* adr_type = access.addr().type(); - - if ((access.decorators() & IN_NATIVE) == 0 && (adr_type->isa_instptr() || adr_type->isa_aryptr())) { - int off = adr_type->is_ptr()->offset(); - int base_off = adr_type->isa_instptr() ? instanceOopDesc::base_offset_in_bytes() : - arrayOopDesc::base_offset_in_bytes(adr_type->is_aryptr()->elem()->array_element_basic_type()); - assert(off != Type::OffsetTop, "unexpected offset"); - if (off == Type::OffsetBot || off >= base_off) { - DecoratorSet decorators = access.decorators(); - bool is_write = (decorators & C2_WRITE_ACCESS) != 0; - GraphKit* kit = NULL; - if (access.is_parse_access()) { - C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access); - kit = parse_access.kit(); - } - Node* adr = access.addr().node(); - assert(adr->is_AddP(), "unexpected address shape"); - Node* base = adr->in(AddPNode::Base); - - if (is_write) { - if (kit != NULL) { - base = shenandoah_write_barrier(kit, base); - } else { - assert(access.is_opt_access(), "either parse or opt access"); - assert((access.decorators() & C2_ARRAY_COPY) != 0, "can be skipped for clone"); - } - } else { - if (adr_type->isa_instptr()) { - Compile* C = access.gvn().C; - ciField* field = C->alias_type(adr_type)->field(); - - // Insert read barrier for Shenandoah. 
- if (field != NULL && - ((ShenandoahOptimizeStaticFinals && field->is_static() && field->is_final()) || - (ShenandoahOptimizeInstanceFinals && !field->is_static() && field->is_final()) || - (ShenandoahOptimizeStableFinals && field->is_stable()))) { - // Skip the barrier for special fields - } else { - if (kit != NULL) { - base = shenandoah_read_barrier(kit, base); - } else { - assert(access.is_opt_access(), "either parse or opt access"); - assert((access.decorators() & C2_ARRAY_COPY) != 0, "can be skipped for arraycopy"); - } - } - } else { - if (kit != NULL) { - base = shenandoah_read_barrier(kit, base); - } else { - assert(access.is_opt_access(), "either parse or opt access"); - assert((access.decorators() & C2_ARRAY_COPY) != 0, "can be skipped for arraycopy"); - } - } - } - if (base != adr->in(AddPNode::Base)) { - assert(kit != NULL, "no barrier should have been added"); - - Node* address = adr->in(AddPNode::Address); - - if (address->is_AddP()) { - assert(address->in(AddPNode::Base) == adr->in(AddPNode::Base), "unexpected address shape"); - assert(!address->in(AddPNode::Address)->is_AddP(), "unexpected address shape"); - assert(address->in(AddPNode::Address) == adr->in(AddPNode::Base), "unexpected address shape"); - address = address->clone(); - address->set_req(AddPNode::Base, base); - address->set_req(AddPNode::Address, base); - address = kit->gvn().transform(address); - } else { - assert(address == adr->in(AddPNode::Base), "unexpected address shape"); - address = base; - } - adr = adr->clone(); - adr->set_req(AddPNode::Base, base); - adr->set_req(AddPNode::Address, address); - adr = kit->gvn().transform(adr); - access.addr().set_node(adr); - } - } - } -} - Node* ShenandoahBarrierSetC2::store_at_resolved(C2Access& access, C2AccessValue& val) const { DecoratorSet decorators = access.decorators(); @@ -662,44 +515,8 @@ PhaseGVN& gvn = opt_access.gvn(); MergeMemNode* mm = opt_access.mem(); - if (ShenandoahStoreValReadBarrier) { - RegionNode* region = new RegionNode(3); - const Type* v_t = gvn.type(val.node()); - Node* phi = new PhiNode(region, v_t->isa_oopptr() ? 
v_t->is_oopptr()->cast_to_nonconst() : v_t); - Node* cmp = gvn.transform(new CmpPNode(val.node(), gvn.zerocon(T_OBJECT))); - Node* bol = gvn.transform(new BoolNode(cmp, BoolTest::ne)); - IfNode* iff = new IfNode(opt_access.ctl(), bol, PROB_LIKELY_MAG(3), COUNT_UNKNOWN); - - gvn.transform(iff); - if (gvn.is_IterGVN()) { - gvn.is_IterGVN()->_worklist.push(iff); - } else { - gvn.record_for_igvn(iff); - } - - Node* null_true = gvn.transform(new IfFalseNode(iff)); - Node* null_false = gvn.transform(new IfTrueNode(iff)); - region->init_req(1, null_true); - region->init_req(2, null_false); - phi->init_req(1, gvn.zerocon(T_OBJECT)); - Node* cast = new CastPPNode(val.node(), gvn.type(val.node())->join_speculative(TypePtr::NOTNULL)); - cast->set_req(0, null_false); - cast = gvn.transform(cast); - Node* rb = gvn.transform(new ShenandoahReadBarrierNode(null_false, gvn.C->immutable_memory(), cast, false)); - phi->init_req(2, rb); - opt_access.set_ctl(gvn.transform(region)); - val.set_node(gvn.transform(phi)); - } if (ShenandoahStoreValEnqueueBarrier) { - const TypePtr* adr_type = ShenandoahBarrierNode::brooks_pointer_type(gvn.type(val.node())); - int alias = gvn.C->get_alias_index(adr_type); - Node* wb = new ShenandoahWriteBarrierNode(gvn.C, opt_access.ctl(), mm->memory_at(alias), val.node()); - Node* wb_transformed = gvn.transform(wb); - Node* enqueue = gvn.transform(new ShenandoahEnqueueBarrierNode(wb_transformed)); - if (wb_transformed == wb) { - Node* proj = gvn.transform(new ShenandoahWBMemProjNode(wb)); - mm->set_memory_at(alias, proj); - } + Node* enqueue = gvn.transform(new ShenandoahEnqueueBarrierNode(val.node())); val.set_node(enqueue); } } @@ -724,6 +541,17 @@ Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : top; Node* load = BarrierSetC2::load_at_resolved(access, val_type); + if (access.is_oop()) { + if (ShenandoahLoadRefBarrier) { + load = new ShenandoahLoadReferenceBarrierNode(NULL, load); + if (access.is_parse_access()) { + load = static_cast<C2ParseAccess &>(access).kit()->gvn().transform(load); + } else { + load = static_cast<C2OptAccess &>(access).gvn().transform(load); + } + } + } + // If we are reading the value of the referent field of a Reference // object (either by using Unsafe directly or through reflection) // then, if SATB is enabled, we need to record the referent in an @@ -797,9 +625,10 @@ #ifdef _LP64 if (adr->bottom_type()->is_ptr_to_narrowoop()) { - return kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type())); + load_store = kit->gvn().transform(new DecodeNNode(load_store, load_store->get_ptr_type())); } #endif + load_store = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(NULL, load_store)); return load_store; } return BarrierSetC2::atomic_cmpxchg_val_at_resolved(access, expected_val, new_val, value_type); @@ -867,6 +696,7 @@ } Node* result = BarrierSetC2::atomic_xchg_at_resolved(access, val, value_type); if (access.is_oop()) { + result = kit->gvn().transform(new ShenandoahLoadReferenceBarrierNode(NULL, result)); shenandoah_write_barrier_pre(kit, false /* do_load */, NULL, NULL, max_juint, NULL, NULL, result /* pre_val */, T_OBJECT); @@ -876,19 +706,9 @@ void ShenandoahBarrierSetC2::clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const { assert(!src->is_AddP(), "unexpected input"); - src = shenandoah_read_barrier(kit, src); BarrierSetC2::clone(kit, src, dst, size, is_array); } -Node* ShenandoahBarrierSetC2::resolve(GraphKit* kit, Node* n, DecoratorSet decorators) const { - bool is_write = decorators & 
ACCESS_WRITE; - if (is_write) { - return shenandoah_write_barrier(kit, n); - } else { - return shenandoah_read_barrier(kit, n); - } -} - Node* ShenandoahBarrierSetC2::obj_allocate(PhaseMacroExpand* macro, Node* ctrl, Node* mem, Node* toobig_false, Node* size_in_bytes, Node*& i_o, Node*& needgc_ctrl, Node*& fast_oop_ctrl, Node*& fast_oop_rawmem, @@ -915,6 +735,7 @@ // Support for GC barriers emitted during parsing bool ShenandoahBarrierSetC2::is_gc_barrier_node(Node* node) const { + if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) return true; if (node->Opcode() != Op_CallLeaf && node->Opcode() != Op_CallLeafNoFP) { return false; } @@ -929,26 +750,30 @@ } Node* ShenandoahBarrierSetC2::step_over_gc_barrier(Node* c) const { - return ShenandoahBarrierNode::skip_through_barrier(c); + if (c->Opcode() == Op_ShenandoahLoadReferenceBarrier) { + return c->in(ShenandoahLoadReferenceBarrierNode::ValueIn); + } + if (c->Opcode() == Op_ShenandoahEnqueueBarrier) { + c = c->in(1); + } + return c; } bool ShenandoahBarrierSetC2::expand_barriers(Compile* C, PhaseIterGVN& igvn) const { - return !ShenandoahWriteBarrierNode::expand(C, igvn); + return !ShenandoahBarrierC2Support::expand(C, igvn); } bool ShenandoahBarrierSetC2::optimize_loops(PhaseIdealLoop* phase, LoopOptsMode mode, VectorSet& visited, Node_Stack& nstack, Node_List& worklist) const { if (mode == LoopOptsShenandoahExpand) { assert(UseShenandoahGC, "only for shenandoah"); - ShenandoahWriteBarrierNode::pin_and_expand(phase); + ShenandoahBarrierC2Support::pin_and_expand(phase); return true; } else if (mode == LoopOptsShenandoahPostExpand) { assert(UseShenandoahGC, "only for shenandoah"); visited.Clear(); - ShenandoahWriteBarrierNode::optimize_after_expansion(visited, nstack, worklist, phase); + ShenandoahBarrierC2Support::optimize_after_expansion(visited, nstack, worklist, phase); return true; } - GrowableArray<MemoryGraphFixer*> memory_graph_fixers; - ShenandoahWriteBarrierNode::optimize_before_expansion(phase, memory_graph_fixers, false); return false; } @@ -957,7 +782,6 @@ if (!is_oop) { return false; } - if (tightly_coupled_alloc) { if (phase == Optimization) { return false; @@ -985,7 +809,7 @@ } } else { return true; - } + } } else if (src_type->isa_aryptr()) { BasicType src_elem = src_type->klass()->as_array_klass()->element_type()->basic_type(); if (src_elem == T_OBJECT || src_elem == T_ARRAY) { @@ -1038,14 +862,20 @@ // Support for macro expanded GC barriers void ShenandoahBarrierSetC2::register_potential_barrier_node(Node* node) const { - if (node->Opcode() == Op_ShenandoahWriteBarrier) { - state()->add_shenandoah_barrier((ShenandoahWriteBarrierNode*) node); + if (node->Opcode() == Op_ShenandoahEnqueueBarrier) { + state()->add_enqueue_barrier((ShenandoahEnqueueBarrierNode*) node); + } + if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) { + state()->add_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node); } } void ShenandoahBarrierSetC2::unregister_potential_barrier_node(Node* node) const { - if (node->Opcode() == Op_ShenandoahWriteBarrier) { - state()->remove_shenandoah_barrier((ShenandoahWriteBarrierNode*) node); + if (node->Opcode() == Op_ShenandoahEnqueueBarrier) { + state()->remove_enqueue_barrier((ShenandoahEnqueueBarrierNode*) node); + } + if (node->Opcode() == Op_ShenandoahLoadReferenceBarrier) { + state()->remove_load_reference_barrier((ShenandoahLoadReferenceBarrierNode*) node); } } @@ -1091,19 +921,18 @@ } } } - for (int i = state()->shenandoah_barriers_count()-1; i >= 0; i--) { - 
ShenandoahWriteBarrierNode* n = state()->shenandoah_barrier(i); + for (int i = state()->enqueue_barriers_count() - 1; i >= 0; i--) { + ShenandoahEnqueueBarrierNode* n = state()->enqueue_barrier(i); if (!useful.member(n)) { - state()->remove_shenandoah_barrier(n); + state()->remove_enqueue_barrier(n); } } - -} - -bool ShenandoahBarrierSetC2::has_special_unique_user(const Node* node) const { - assert(node->outcnt() == 1, "match only for unique out"); - Node* n = node->unique_out(); - return node->Opcode() == Op_ShenandoahWriteBarrier && n->Opcode() == Op_ShenandoahWBMemProj; + for (int i = state()->load_reference_barriers_count() - 1; i >= 0; i--) { + ShenandoahLoadReferenceBarrierNode* n = state()->load_reference_barrier(i); + if (!useful.member(n)) { + state()->remove_load_reference_barrier(n); + } + } } void ShenandoahBarrierSetC2::add_users_to_worklist(Unique_Node_List* worklist) const {} @@ -1123,7 +952,7 @@ #ifdef ASSERT void ShenandoahBarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) const { if (ShenandoahVerifyOptoBarriers && phase == BarrierSetC2::BeforeExpand) { - ShenandoahBarrierNode::verify(Compile::current()->root()); + ShenandoahBarrierC2Support::verify(Compile::current()->root()); } else if (phase == BarrierSetC2::BeforeCodeGen) { // Verify G1 pre-barriers const int marking_offset = in_bytes(ShenandoahThreadLocalData::satb_mark_queue_active_offset()); @@ -1229,7 +1058,7 @@ } } else if (can_reshape && n->Opcode() == Op_If && - ShenandoahWriteBarrierNode::is_heap_stable_test(n) && + ShenandoahBarrierC2Support::is_heap_stable_test(n) && n->in(0) != NULL) { Node* dom = n->in(0); Node* prev_dom = n; @@ -1237,7 +1066,7 @@ int dist = 16; // Search up the dominator tree for another heap stable test while (dom->Opcode() != op || // Not same opcode? - !ShenandoahWriteBarrierNode::is_heap_stable_test(dom) || // Not same input 1? + !ShenandoahBarrierC2Support::is_heap_stable_test(dom) || // Not same input 1? prev_dom->in(0) != dom) { // One path of test does not dominate? if (dist < 0) return NULL; @@ -1258,46 +1087,6 @@ return NULL; } -Node* ShenandoahBarrierSetC2::identity_node(PhaseGVN* phase, Node* n) const { - if (n->is_Load()) { - Node *mem = n->in(MemNode::Memory); - Node *value = n->as_Load()->can_see_stored_value(mem, phase); - if (value) { - PhaseIterGVN *igvn = phase->is_IterGVN(); - if (igvn != NULL && - value->is_Phi() && - value->req() > 2 && - value->in(1) != NULL && - value->in(1)->is_ShenandoahBarrier()) { - if (igvn->_worklist.member(value) || - igvn->_worklist.member(value->in(0)) || - (value->in(0)->in(1) != NULL && - value->in(0)->in(1)->is_IfProj() && - (igvn->_worklist.member(value->in(0)->in(1)) || - (value->in(0)->in(1)->in(0) != NULL && - igvn->_worklist.member(value->in(0)->in(1)->in(0)))))) { - igvn->_worklist.push(n); - return n; - } - } - // (This works even when value is a Con, but LoadNode::Value - // usually runs first, producing the singleton type of the Con.) - Node *value_no_barrier = step_over_gc_barrier(value->Opcode() == Op_EncodeP ? 
value->in(1) : value); - if (value->Opcode() == Op_EncodeP) { - if (value_no_barrier != value->in(1)) { - Node *encode = value->clone(); - encode->set_req(1, value_no_barrier); - encode = phase->transform(encode); - return encode; - } - } else { - return value_no_barrier; - } - } - } - return n; -} - bool ShenandoahBarrierSetC2::has_only_shenandoah_wb_pre_uses(Node* n) { for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { Node* u = n->fast_out(i); @@ -1308,20 +1097,6 @@ return n->outcnt() > 0; } -bool ShenandoahBarrierSetC2::flatten_gc_alias_type(const TypePtr*& adr_type) const { - int offset = adr_type->offset(); - if (offset == ShenandoahBrooksPointer::byte_offset()) { - if (adr_type->isa_aryptr()) { - adr_type = TypeAryPtr::make(adr_type->ptr(), adr_type->isa_aryptr()->ary(), adr_type->isa_aryptr()->klass(), false, offset); - } else if (adr_type->isa_instptr()) { - adr_type = TypeInstPtr::make(adr_type->ptr(), ciEnv::current()->Object_klass(), false, NULL, offset); - } - return true; - } else { - return false; - } -} - bool ShenandoahBarrierSetC2::final_graph_reshaping(Compile* compile, Node* n, uint opcode) const { switch (opcode) { case Op_CallLeaf: @@ -1356,9 +1131,7 @@ } #endif return true; - case Op_ShenandoahReadBarrier: - return true; - case Op_ShenandoahWriteBarrier: + case Op_ShenandoahLoadReferenceBarrier: assert(false, "should have been expanded already"); return true; default: @@ -1366,17 +1139,6 @@ } } -#ifdef ASSERT -bool ShenandoahBarrierSetC2::verify_gc_alias_type(const TypePtr* adr_type, int offset) const { - if (offset == ShenandoahBrooksPointer::byte_offset() && - (adr_type->base() == Type::AryPtr || adr_type->base() == Type::OopPtr)) { - return true; - } else { - return false; - } -} -#endif - bool ShenandoahBarrierSetC2::escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const { switch (opcode) { case Op_ShenandoahCompareAndExchangeP: @@ -1412,15 +1174,12 @@ } return false; } - case Op_ShenandoahReadBarrier: - case Op_ShenandoahWriteBarrier: - // Barriers 'pass through' its arguments. I.e. what goes in, comes out. - // It doesn't escape. - conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahBarrierNode::ValueIn), delayed_worklist); - break; case Op_ShenandoahEnqueueBarrier: conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), delayed_worklist); break; + case Op_ShenandoahLoadReferenceBarrier: + conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), delayed_worklist); + return true; default: // Nothing break; @@ -1441,15 +1200,12 @@ case Op_ShenandoahWeakCompareAndSwapP: case Op_ShenandoahWeakCompareAndSwapN: return conn_graph->add_final_edges_unsafe_access(n, opcode); - case Op_ShenandoahReadBarrier: - case Op_ShenandoahWriteBarrier: - // Barriers 'pass through' its arguments. I.e. what goes in, comes out. - // It doesn't escape. 
- conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahBarrierNode::ValueIn), NULL); - return true; case Op_ShenandoahEnqueueBarrier: conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(1), NULL); return true; + case Op_ShenandoahLoadReferenceBarrier: + conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(ShenandoahLoadReferenceBarrierNode::ValueIn), NULL); + return true; default: // Nothing break; @@ -1464,21 +1220,7 @@ } bool ShenandoahBarrierSetC2::escape_is_barrier_node(Node* n) const { - return n->is_ShenandoahBarrier(); -} - -bool ShenandoahBarrierSetC2::matcher_find_shared_visit(Matcher* matcher, Matcher::MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) const { - switch (opcode) { - case Op_ShenandoahReadBarrier: - if (n->in(ShenandoahBarrierNode::ValueIn)->is_DecodeNarrowPtr()) { - matcher->set_shared(n->in(ShenandoahBarrierNode::ValueIn)->in(1)); - } - matcher->set_shared(n); - return true; - default: - break; - } - return false; + return n->Opcode() == Op_ShenandoahLoadReferenceBarrier; } bool ShenandoahBarrierSetC2::matcher_find_shared_post_visit(Matcher* matcher, Node* n, uint opcode) const { @@ -1510,62 +1252,3 @@ xop == Op_ShenandoahCompareAndSwapN || xop == Op_ShenandoahCompareAndSwapP; } - -void ShenandoahBarrierSetC2::igvn_add_users_to_worklist(PhaseIterGVN* igvn, Node* use) const { - if (use->is_ShenandoahBarrier()) { - for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) { - Node* u = use->fast_out(i2); - Node* cmp = use->find_out_with(Op_CmpP); - if (u->Opcode() == Op_CmpP) { - igvn->_worklist.push(cmp); - } - } - } -} - -void ShenandoahBarrierSetC2::ccp_analyze(PhaseCCP* ccp, Unique_Node_List& worklist, Node* use) const { - if (use->is_ShenandoahBarrier()) { - for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) { - Node* p = use->fast_out(i2); - if (p->Opcode() == Op_AddP) { - for (DUIterator_Fast i3max, i3 = p->fast_outs(i3max); i3 < i3max; i3++) { - Node* q = p->fast_out(i3); - if (q->is_Load()) { - if(q->bottom_type() != ccp->type(q)) { - worklist.push(q); - } - } - } - } - } - } -} - -Node* ShenandoahBarrierSetC2::split_if_pre(PhaseIdealLoop* phase, Node* n) const { - if (n->Opcode() == Op_ShenandoahReadBarrier) { - ((ShenandoahReadBarrierNode*)n)->try_move(phase); - } else if (n->Opcode() == Op_ShenandoahWriteBarrier) { - return ((ShenandoahWriteBarrierNode*)n)->try_split_thru_phi(phase); - } - - return NULL; -} - -bool ShenandoahBarrierSetC2::build_loop_late_post(PhaseIdealLoop* phase, Node* n) const { - return ShenandoahBarrierNode::build_loop_late_post(phase, n); -} - -bool ShenandoahBarrierSetC2::sink_node(PhaseIdealLoop* phase, Node* n, Node* x, Node* x_ctrl, Node* n_ctrl) const { - if (n->is_ShenandoahBarrier()) { - return x->as_ShenandoahBarrier()->sink_node(phase, x_ctrl, n_ctrl); - } - if (n->is_MergeMem()) { - // PhaseIdealLoop::split_if_with_blocks_post() would: - // _igvn._worklist.yank(x); - // which sometimes causes chains of MergeMem which some of - // shenandoah specific code doesn't support - phase->register_new_node(x, x_ctrl); - return true; - } - return false; -}
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.hpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahBarrierSetC2.hpp Thu Apr 04 22:07:49 2019 +0200 @@ -30,14 +30,21 @@ class ShenandoahBarrierSetC2State : public ResourceObj { private: - GrowableArray<ShenandoahWriteBarrierNode*>* _shenandoah_barriers; + GrowableArray<ShenandoahEnqueueBarrierNode*>* _enqueue_barriers; + GrowableArray<ShenandoahLoadReferenceBarrierNode*>* _load_reference_barriers; public: ShenandoahBarrierSetC2State(Arena* comp_arena); - int shenandoah_barriers_count() const; - ShenandoahWriteBarrierNode* shenandoah_barrier(int idx) const; - void add_shenandoah_barrier(ShenandoahWriteBarrierNode * n); - void remove_shenandoah_barrier(ShenandoahWriteBarrierNode * n); + + int enqueue_barriers_count() const; + ShenandoahEnqueueBarrierNode* enqueue_barrier(int idx) const; + void add_enqueue_barrier(ShenandoahEnqueueBarrierNode* n); + void remove_enqueue_barrier(ShenandoahEnqueueBarrierNode * n); + + int load_reference_barriers_count() const; + ShenandoahLoadReferenceBarrierNode* load_reference_barrier(int idx) const; + void add_load_reference_barrier(ShenandoahLoadReferenceBarrierNode* n); + void remove_load_reference_barrier(ShenandoahLoadReferenceBarrierNode * n); }; class ShenandoahBarrierSetC2 : public BarrierSetC2 { @@ -66,12 +73,7 @@ BasicType bt) const; Node* shenandoah_enqueue_barrier(GraphKit* kit, Node* val) const; - Node* shenandoah_read_barrier(GraphKit* kit, Node* obj) const; Node* shenandoah_storeval_barrier(GraphKit* kit, Node* obj) const; - Node* shenandoah_write_barrier(GraphKit* kit, Node* obj) const; - Node* shenandoah_read_barrier_impl(GraphKit* kit, Node* obj, bool use_ctrl, bool use_mem, bool allow_fromspace) const; - Node* shenandoah_write_barrier_impl(GraphKit* kit, Node* obj) const; - Node* shenandoah_write_barrier_helper(GraphKit* kit, Node* obj, const TypePtr* adr_type) const; void insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar) const; @@ -79,7 +81,6 @@ static bool clone_needs_postbarrier(ArrayCopyNode *ac, PhaseIterGVN& igvn); protected: - virtual void resolve_address(C2Access& access) const; virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const; virtual Node* store_at_resolved(C2Access& access, C2AccessValue& val) const; virtual Node* atomic_cmpxchg_val_at_resolved(C2AtomicParseAccess& access, Node* expected_val, @@ -102,12 +103,11 @@ static const TypeFunc* write_ref_field_pre_entry_Type(); static const TypeFunc* shenandoah_clone_barrier_Type(); static const TypeFunc* shenandoah_write_barrier_Type(); + virtual bool has_load_barriers() const { return true; } // This is the entry-point for the backend to perform accesses through the Access API. 
virtual void clone(GraphKit* kit, Node* src, Node* dst, Node* size, bool is_array) const; - virtual Node* resolve(GraphKit* kit, Node* n, DecoratorSet decorators) const; - virtual Node* obj_allocate(PhaseMacroExpand* macro, Node* ctrl, Node* mem, Node* toobig_false, Node* size_in_bytes, Node*& i_o, Node*& needgc_ctrl, Node*& fast_oop_ctrl, Node*& fast_oop_rawmem, @@ -144,13 +144,7 @@ virtual void verify_gc_barriers(Compile* compile, CompilePhase phase) const; #endif - virtual bool flatten_gc_alias_type(const TypePtr*& adr_type) const; -#ifdef ASSERT - virtual bool verify_gc_alias_type(const TypePtr* adr_type, int offset) const; -#endif - virtual Node* ideal_node(PhaseGVN* phase, Node* n, bool can_reshape) const; - virtual Node* identity_node(PhaseGVN* phase, Node* n) const; virtual bool final_graph_reshaping(Compile* compile, Node* n, uint opcode) const; virtual bool escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const; @@ -158,17 +152,8 @@ virtual bool escape_has_out_with_unsafe_object(Node* n) const; virtual bool escape_is_barrier_node(Node* n) const; - virtual bool matcher_find_shared_visit(Matcher* matcher, Matcher::MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) const; virtual bool matcher_find_shared_post_visit(Matcher* matcher, Node* n, uint opcode) const; virtual bool matcher_is_store_load_barrier(Node* x, uint xop) const; - - virtual void igvn_add_users_to_worklist(PhaseIterGVN* igvn, Node* use) const; - virtual void ccp_analyze(PhaseCCP* ccp, Unique_Node_List& worklist, Node* use) const; - - virtual bool has_special_unique_user(const Node* node) const; - virtual Node* split_if_pre(PhaseIdealLoop* phase, Node* n) const; - virtual bool build_loop_late_post(PhaseIdealLoop* phase, Node* n) const; - virtual bool sink_node(PhaseIdealLoop* phase, Node* n, Node* x, Node* x_ctrl, Node* n_ctrl) const; }; #endif // SHARE_GC_SHENANDOAH_C2_SHENANDOAHBARRIERSETC2_HPP
--- a/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp Thu Mar 28 22:08:15 2019 +0100 +++ b/src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp Thu Apr 04 22:07:49 2019 +0200 @@ -41,383 +41,28 @@ #include "opto/runtime.hpp" #include "opto/subnode.hpp" -Node* ShenandoahBarrierNode::skip_through_barrier(Node* n) { - if (n == NULL) { - return NULL; - } - if (n->Opcode() == Op_ShenandoahEnqueueBarrier) { - n = n->in(1); - } - - if (n->is_ShenandoahBarrier()) { - return n->in(ValueIn); - } else if (n->is_Phi() && - n->req() == 3 && - n->in(1) != NULL && - n->in(1)->is_ShenandoahBarrier() && - n->in(2) != NULL && - n->in(2)->bottom_type() == TypePtr::NULL_PTR && - n->in(0) != NULL && - n->in(0)->in(1) != NULL && - n->in(0)->in(1)->is_IfProj() && - n->in(0)->in(2) != NULL && - n->in(0)->in(2)->is_IfProj() && - n->in(0)->in(1)->in(0) != NULL && - n->in(0)->in(1)->in(0) == n->in(0)->in(2)->in(0) && - n->in(1)->in(ValueIn)->Opcode() == Op_CastPP) { - Node* iff = n->in(0)->in(1)->in(0); - Node* res = n->in(1)->in(ValueIn)->in(1); - if (iff->is_If() && - iff->in(1) != NULL && - iff->in(1)->is_Bool() && - iff->in(1)->as_Bool()->_test._test == BoolTest::ne && - iff->in(1)->in(1) != NULL && - iff->in(1)->in(1)->Opcode() == Op_CmpP && - iff->in(1)->in(1)->in(1) != NULL && - iff->in(1)->in(1)->in(1) == res && - iff->in(1)->in(1)->in(2) != NULL && - iff->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) { - return res; - } - } - return n; -} - -bool ShenandoahBarrierNode::needs_barrier(PhaseGVN* phase, ShenandoahBarrierNode* orig, Node* n, Node* rb_mem, bool allow_fromspace) { - Unique_Node_List visited; - return needs_barrier_impl(phase, orig, n, rb_mem, allow_fromspace, visited); -} - -bool ShenandoahBarrierNode::needs_barrier_impl(PhaseGVN* phase, ShenandoahBarrierNode* orig, Node* n, Node* rb_mem, bool allow_fromspace, Unique_Node_List &visited) { - if (visited.member(n)) { - return false; // Been there. - } - visited.push(n); - - if (n->is_Allocate()) { - return false; - } - - if (n->is_Call()) { - return true; - } - - const Type* type = phase->type(n); - if (type == Type::TOP) { - return false; - } - if (type->make_ptr()->higher_equal(TypePtr::NULL_PTR)) { - return false; - } - if (type->make_oopptr() && type->make_oopptr()->const_oop() != NULL) { - return false; - } - - if (ShenandoahOptimizeStableFinals) { - const TypeAryPtr* ary = type->isa_aryptr(); - if (ary && ary->is_stable() && allow_fromspace) { - return false; - } - } - - if (n->is_CheckCastPP() || n->is_ConstraintCast() || n->Opcode() == Op_ShenandoahEnqueueBarrier) { - return needs_barrier_impl(phase, orig, n->in(1), rb_mem, allow_fromspace, visited); - } - if (n->is_Parm()) { - return true; - } - if (n->is_Proj()) { - return needs_barrier_impl(phase, orig, n->in(0), rb_mem, allow_fromspace, visited); - } - - if (n->Opcode() == Op_ShenandoahWBMemProj) { - return needs_barrier_impl(phase, orig, n->in(ShenandoahWBMemProjNode::WriteBarrier), rb_mem, allow_fromspace, visited); - } - if (n->is_Phi()) { - bool need_barrier = false; - for (uint i = 1; i < n->req() && ! need_barrier; i++) { - Node* input = n->in(i); - if (input == NULL) { - need_barrier = true; // Phi not complete yet? 
- } else if (needs_barrier_impl(phase, orig, input, rb_mem, allow_fromspace, visited)) { - need_barrier = true; - } - } - return need_barrier; - } - if (n->is_CMove()) { - return needs_barrier_impl(phase, orig, n->in(CMoveNode::IfFalse), rb_mem, allow_fromspace, visited) || - needs_barrier_impl(phase, orig, n->in(CMoveNode::IfTrue ), rb_mem, allow_fromspace, visited); - } - if (n->Opcode() == Op_CreateEx) { - return true; - } - if (n->Opcode() == Op_ShenandoahWriteBarrier) { - return false; - } - if (n->Opcode() == Op_ShenandoahReadBarrier) { - if (rb_mem == n->in(Memory)) { - return false; - } else { - return true; - } - } - - if (n->Opcode() == Op_LoadP || - n->Opcode() == Op_LoadN || - n->Opcode() == Op_GetAndSetP || - n->Opcode() == Op_CompareAndExchangeP || - n->Opcode() == Op_ShenandoahCompareAndExchangeP || - n->Opcode() == Op_GetAndSetN || - n->Opcode() == Op_CompareAndExchangeN || - n->Opcode() == Op_ShenandoahCompareAndExchangeN) { - return true; - } - if (n->Opcode() == Op_DecodeN || - n->Opcode() == Op_EncodeP) { - return needs_barrier_impl(phase, orig, n->in(1), rb_mem, allow_fromspace, visited); - } - -#ifdef ASSERT - tty->print("need barrier on?: "); n->dump(); - ShouldNotReachHere(); -#endif - return true; -} - -bool ShenandoahReadBarrierNode::dominates_memory_rb_impl(PhaseGVN* phase, - Node* b1, - Node* b2, - Node* current, - bool linear) { - ResourceMark rm; - VectorSet visited(Thread::current()->resource_area()); - Node_Stack phis(0); - - for(int i = 0; i < 10; i++) { - if (current == NULL) { - return false; - } else if (visited.test_set(current->_idx) || current->is_top() || current == b1) { - current = NULL; - while (phis.is_nonempty() && current == NULL) { - uint idx = phis.index(); - Node* phi = phis.node(); - if (idx >= phi->req()) { - phis.pop(); - } else { - current = phi->in(idx); - phis.set_index(idx+1); - } - } - if (current == NULL) { - return true; - } - } else if (current == phase->C->immutable_memory()) { - return false; - } else if (current->isa_Phi()) { - if (!linear) { +bool ShenandoahBarrierC2Support::expand(Compile* C, PhaseIterGVN& igvn) { + ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state(); + if ((state->enqueue_barriers_count() + + state->load_reference_barriers_count()) > 0) { + bool attempt_more_loopopts = ShenandoahLoopOptsAfterExpansion; + C->clear_major_progress(); + PhaseIdealLoop ideal_loop(igvn, LoopOptsShenandoahExpand); + if (C->failing()) return false; + PhaseIdealLoop::verify(igvn); + DEBUG_ONLY(verify_raw_mem(C->root());) + if (attempt_more_loopopts) { + C->set_major_progress(); + if (!C->optimize_loops(igvn, LoopOptsShenandoahPostExpand)) { return false; } - phis.push(current, 2); - current = current->in(1); - } else if (current->Opcode() == Op_ShenandoahWriteBarrier) { - const Type* in_type = current->bottom_type(); - const Type* this_type = b2->bottom_type(); - if (is_independent(in_type, this_type)) { - current = current->in(Memory); - } else { - return false; - } - } else if (current->Opcode() == Op_ShenandoahWBMemProj) { - current = current->in(ShenandoahWBMemProjNode::WriteBarrier); - } else if (current->is_Proj()) { - current = current->in(0); - } else if (current->is_Call()) { - return false; // TODO: Maybe improve by looking at the call's memory effects? - } else if (current->is_MemBar()) { - return false; // TODO: Do we need to stop at *any* membar? 
- } else if (current->is_MergeMem()) { - const TypePtr* adr_type = brooks_pointer_type(phase->type(b2)); - uint alias_idx = phase->C->get_alias_index(adr_type); - current = current->as_MergeMem()->memory_at(alias_idx); - } else { -#ifdef ASSERT - current->dump(); -#endif - ShouldNotReachHere(); - return false; - } - } - return false; -} - -bool ShenandoahReadBarrierNode::is_independent(Node* mem) { - if (mem->is_Phi() || mem->is_Proj() || mem->is_MergeMem()) { - return true; - } else if (mem->Opcode() == Op_ShenandoahWBMemProj) { - return true; - } else if (mem->Opcode() == Op_ShenandoahWriteBarrier) { - const Type* mem_type = mem->bottom_type(); - const Type* this_type = bottom_type(); - if (is_independent(mem_type, this_type)) { - return true; - } else { - return false; - } - } else if (mem->is_Call() || mem->is_MemBar()) { - return false; - } -#ifdef ASSERT - mem->dump(); -#endif - ShouldNotReachHere(); - return true; -} - -bool ShenandoahReadBarrierNode::dominates_memory_rb(PhaseGVN* phase, Node* b1, Node* b2, bool linear) { - return dominates_memory_rb_impl(phase, b1->in(Memory), b2, b2->in(Memory), linear); -} - -bool ShenandoahReadBarrierNode::is_independent(const Type* in_type, const Type* this_type) { - assert(in_type->isa_oopptr(), "expect oop ptr"); - assert(this_type->isa_oopptr(), "expect oop ptr"); - - ciKlass* in_kls = in_type->is_oopptr()->klass(); - ciKlass* this_kls = this_type->is_oopptr()->klass(); - if (in_kls != NULL && this_kls != NULL && - in_kls->is_loaded() && this_kls->is_loaded() && - (!in_kls->is_subclass_of(this_kls)) && - (!this_kls->is_subclass_of(in_kls))) { - return true; - } - return false; -} - -Node* ShenandoahReadBarrierNode::Ideal(PhaseGVN *phase, bool can_reshape) { - if (! can_reshape) { - return NULL; - } - - if (in(Memory) == phase->C->immutable_memory()) return NULL; - - // If memory input is a MergeMem, take the appropriate slice out of it. - Node* mem_in = in(Memory); - if (mem_in->isa_MergeMem()) { - const TypePtr* adr_type = brooks_pointer_type(bottom_type()); - uint alias_idx = phase->C->get_alias_index(adr_type); - mem_in = mem_in->as_MergeMem()->memory_at(alias_idx); - set_req(Memory, mem_in); - return this; - } - - Node* input = in(Memory); - if (input->Opcode() == Op_ShenandoahWBMemProj) { - ResourceMark rm; - VectorSet seen(Thread::current()->resource_area()); - Node* n = in(Memory); - while (n->Opcode() == Op_ShenandoahWBMemProj && - n->in(ShenandoahWBMemProjNode::WriteBarrier) != NULL && - n->in(ShenandoahWBMemProjNode::WriteBarrier)->Opcode() == Op_ShenandoahWriteBarrier && - n->in(ShenandoahWBMemProjNode::WriteBarrier)->in(Memory) != NULL) { - if (seen.test_set(n->_idx)) { - return NULL; // loop - } - n = n->in(ShenandoahWBMemProjNode::WriteBarrier)->in(Memory); - } - - Node* wb = input->in(ShenandoahWBMemProjNode::WriteBarrier); - const Type* in_type = phase->type(wb); - // is_top() test not sufficient here: we can come here after CCP - // in a dead branch of the graph that has not yet been removed. - if (in_type == Type::TOP) return NULL; // Dead path. 
- assert(wb->Opcode() == Op_ShenandoahWriteBarrier, "expect write barrier"); - if (is_independent(in_type, _type)) { - phase->igvn_rehash_node_delayed(wb); - set_req(Memory, wb->in(Memory)); - if (can_reshape && input->outcnt() == 0) { - phase->is_IterGVN()->_worklist.push(input); - } - return this; - } - } - return NULL; -} - -ShenandoahWriteBarrierNode::ShenandoahWriteBarrierNode(Compile* C, Node* ctrl, Node* mem, Node* obj) - : ShenandoahBarrierNode(ctrl, mem, obj, false) { - assert(UseShenandoahGC && ShenandoahWriteBarrier, "should be enabled"); - ShenandoahBarrierSetC2::bsc2()->state()->add_shenandoah_barrier(this); -} - -Node* ShenandoahWriteBarrierNode::Identity(PhaseGVN* phase) { - assert(in(0) != NULL, "should have control"); - PhaseIterGVN* igvn = phase->is_IterGVN(); - Node* mem_in = in(Memory); - Node* mem_proj = NULL; - - if (igvn != NULL) { - mem_proj = find_out_with(Op_ShenandoahWBMemProj); - if (mem_in == mem_proj) { - return this; - } - } - - Node* replacement = Identity_impl(phase); - if (igvn != NULL) { - if (replacement != NULL && replacement != this && mem_proj != NULL) { - igvn->replace_node(mem_proj, mem_in); - } - } - return replacement; -} - -Node* ShenandoahWriteBarrierNode::Ideal(PhaseGVN *phase, bool can_reshape) { - assert(in(0) != NULL, "should have control"); - if (!can_reshape) { - return NULL; - } - - Node* mem_in = in(Memory); - - if (mem_in->isa_MergeMem()) { - const TypePtr* adr_type = brooks_pointer_type(bottom_type()); - uint alias_idx = phase->C->get_alias_index(adr_type); - mem_in = mem_in->as_MergeMem()->memory_at(alias_idx); - set_req(Memory, mem_in); - return this; - } - - Node* val = in(ValueIn); - if (val->is_ShenandoahBarrier()) { - set_req(ValueIn, val->in(ValueIn)); - return this; - } - - return NULL; -} - -bool ShenandoahWriteBarrierNode::expand(Compile* C, PhaseIterGVN& igvn) { - if (UseShenandoahGC) { - if (ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count() > 0 || (!ShenandoahWriteBarrier && ShenandoahStoreValEnqueueBarrier)) { - bool attempt_more_loopopts = ShenandoahLoopOptsAfterExpansion; C->clear_major_progress(); - PhaseIdealLoop ideal_loop(igvn, LoopOptsShenandoahExpand); - if (C->failing()) return false; - PhaseIdealLoop::verify(igvn); - DEBUG_ONLY(ShenandoahBarrierNode::verify_raw_mem(C->root());) - if (attempt_more_loopopts) { - C->set_major_progress(); - if (!C->optimize_loops(igvn, LoopOptsShenandoahPostExpand)) { - return false; - } - C->clear_major_progress(); - } } } return true; } -bool ShenandoahWriteBarrierNode::is_heap_state_test(Node* iff, int mask) { +bool ShenandoahBarrierC2Support::is_heap_state_test(Node* iff, int mask) { if (!UseShenandoahGC) { return false; } @@ -450,11 +95,11 @@ return is_gc_state_load(in1); } -bool ShenandoahWriteBarrierNode::is_heap_stable_test(Node* iff) { +bool ShenandoahBarrierC2Support::is_heap_stable_test(Node* iff) { return is_heap_state_test(iff, ShenandoahHeap::HAS_FORWARDED); } -bool ShenandoahWriteBarrierNode::is_gc_state_load(Node *n) { +bool ShenandoahBarrierC2Support::is_gc_state_load(Node *n) { if (!UseShenandoahGC) { return false; } @@ -476,7 +121,7 @@ return true; } -bool ShenandoahWriteBarrierNode::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) { +bool ShenandoahBarrierC2Support::has_safepoint_between(Node* start, Node* stop, PhaseIdealLoop *phase) { assert(phase->is_dominator(stop, start), "bad inputs"); ResourceMark rm; Unique_Node_List wq; @@ -500,7 +145,7 @@ return false; } -bool 
ShenandoahWriteBarrierNode::try_common_gc_state_load(Node *n, PhaseIdealLoop *phase) { +bool ShenandoahBarrierC2Support::try_common_gc_state_load(Node *n, PhaseIdealLoop *phase) { assert(is_gc_state_load(n), "inconsistent"); Node* addp = n->in(MemNode::Address); Node* dominator = NULL; @@ -525,193 +170,8 @@ return true; } -bool ShenandoahBarrierNode::dominates_memory_impl(PhaseGVN* phase, - Node* b1, - Node* b2, - Node* current, - bool linear) { - ResourceMark rm; - VectorSet visited(Thread::current()->resource_area()); - Node_Stack phis(0); - - for(int i = 0; i < 10; i++) { - if (current == NULL) { - return false; - } else if (visited.test_set(current->_idx) || current->is_top() || current == b1) { - current = NULL; - while (phis.is_nonempty() && current == NULL) { - uint idx = phis.index(); - Node* phi = phis.node(); - if (idx >= phi->req()) { - phis.pop(); - } else { - current = phi->in(idx); - phis.set_index(idx+1); - } - } - if (current == NULL) { - return true; - } - } else if (current == b2) { - return false; - } else if (current == phase->C->immutable_memory()) { - return false; - } else if (current->isa_Phi()) { - if (!linear) { - return false; - } - phis.push(current, 2); - current = current->in(1); - } else if (current->Opcode() == Op_ShenandoahWriteBarrier) { - current = current->in(Memory); - } else if (current->Opcode() == Op_ShenandoahWBMemProj) { - current = current->in(ShenandoahWBMemProjNode::WriteBarrier); - } else if (current->is_Proj()) { - current = current->in(0); - } else if (current->is_Call()) { - current = current->in(TypeFunc::Memory); - } else if (current->is_MemBar()) { - current = current->in(TypeFunc::Memory); - } else if (current->is_MergeMem()) { - const TypePtr* adr_type = brooks_pointer_type(phase->type(b2)); - uint alias_idx = phase->C->get_alias_index(adr_type); - current = current->as_MergeMem()->memory_at(alias_idx); - } else { #ifdef ASSERT - current->dump(); -#endif - ShouldNotReachHere(); - return false; - } - } - return false; -} - -/** - * Determines if b1 dominates b2 through memory inputs. It returns true if: - * - b1 can be reached by following each branch in b2's memory input (through phis, etc) - * - or we get back to b2 (i.e. through a loop) without seeing b1 - * In all other cases, (in particular, if we reach immutable_memory without having seen b1) - * we return false. - */ -bool ShenandoahBarrierNode::dominates_memory(PhaseGVN* phase, Node* b1, Node* b2, bool linear) { - return dominates_memory_impl(phase, b1, b2, b2->in(Memory), linear); -} - -Node* ShenandoahBarrierNode::Identity_impl(PhaseGVN* phase) { - Node* n = in(ValueIn); - - Node* rb_mem = Opcode() == Op_ShenandoahReadBarrier ? in(Memory) : NULL; - if (! needs_barrier(phase, this, n, rb_mem, _allow_fromspace)) { - return n; - } - - // Try to find a write barrier sibling with identical inputs that we can fold into. 
- for (DUIterator i = n->outs(); n->has_out(i); i++) { - Node* sibling = n->out(i); - if (sibling == this) { - continue; - } - if (sibling->Opcode() != Op_ShenandoahWriteBarrier) { - continue; - } - - assert(sibling->in(ValueIn) == in(ValueIn), "sanity"); - assert(sibling->Opcode() == Op_ShenandoahWriteBarrier, "sanity"); - - if (dominates_memory(phase, sibling, this, phase->is_IterGVN() == NULL)) { - return sibling; - } - } - return this; -} - -#ifndef PRODUCT -void ShenandoahBarrierNode::dump_spec(outputStream *st) const { - const TypePtr* adr = adr_type(); - if (adr == NULL) { - return; - } - st->print(" @"); - adr->dump_on(st); - st->print(" ("); - Compile::current()->alias_type(adr)->adr_type()->dump_on(st); - st->print(") "); -} -#endif - -Node* ShenandoahReadBarrierNode::Identity(PhaseGVN* phase) { - Node* id = Identity_impl(phase); - - if (id == this && phase->is_IterGVN()) { - Node* n = in(ValueIn); - // No success in super call. Try to combine identical read barriers. - for (DUIterator i = n->outs(); n->has_out(i); i++) { - Node* sibling = n->out(i); - if (sibling == this || sibling->Opcode() != Op_ShenandoahReadBarrier) { - continue; - } - assert(sibling->in(ValueIn) == in(ValueIn), "sanity"); - if (phase->is_IterGVN()->hash_find(sibling) && - sibling->bottom_type() == bottom_type() && - sibling->in(Control) == in(Control) && - dominates_memory_rb(phase, sibling, this, phase->is_IterGVN() == NULL)) { - return sibling; - } - } - } - return id; -} - -const Type* ShenandoahBarrierNode::Value(PhaseGVN* phase) const { - // Either input is TOP ==> the result is TOP - const Type *t1 = phase->type(in(Memory)); - if (t1 == Type::TOP) return Type::TOP; - const Type *t2 = phase->type(in(ValueIn)); - if( t2 == Type::TOP ) return Type::TOP; - - if (t2 == TypePtr::NULL_PTR) { - return _type; - } - - const Type* type = t2->is_oopptr()->cast_to_nonconst(); - return type; -} - -uint ShenandoahBarrierNode::hash() const { - return TypeNode::hash() + _allow_fromspace; -} - -bool ShenandoahBarrierNode::cmp(const Node& n) const { - return _allow_fromspace == ((ShenandoahBarrierNode&) n)._allow_fromspace - && TypeNode::cmp(n); -} - -uint ShenandoahBarrierNode::size_of() const { - return sizeof(*this); -} - -Node* ShenandoahWBMemProjNode::Identity(PhaseGVN* phase) { - Node* wb = in(WriteBarrier); - if (wb->is_top()) return phase->C->top(); // Dead path. - - assert(wb->Opcode() == Op_ShenandoahWriteBarrier, "expect write barrier"); - PhaseIterGVN* igvn = phase->is_IterGVN(); - // We can't do the below unless the graph is fully constructed. - if (igvn == NULL) { - return this; - } - - // If the mem projection has no barrier users, it's not needed anymore. 
- if (wb->outcnt() == 1) { - return wb->in(ShenandoahBarrierNode::Memory); - } - - return this; -} - -#ifdef ASSERT -bool ShenandoahBarrierNode::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) { +bool ShenandoahBarrierC2Support::verify_helper(Node* in, Node_Stack& phis, VectorSet& visited, verify_type t, bool trace, Unique_Node_List& barriers_used) { assert(phis.size() == 0, ""); while (true) { @@ -732,12 +192,24 @@ in = in->in(AddPNode::Address); continue; } else if (in->is_Con()) { - if (trace) {tty->print("Found constant"); in->dump();} - } else if (in->is_ShenandoahBarrier()) { + if (trace) { + tty->print("Found constant"); + in->dump(); + } + } else if (in->Opcode() == Op_Parm) { + if (trace) { + tty->print("Found argument"); + } + } else if (in->Opcode() == Op_CreateEx) { + if (trace) { + tty->print("Found create-exception"); + } + } else if (in->Opcode() == Op_LoadP && in->adr_type() == TypeRawPtr::BOTTOM) { + if (trace) { + tty->print("Found raw LoadP (OSR argument?)"); + } + } else if (in->Opcode() == Op_ShenandoahLoadReferenceBarrier) { if (t == ShenandoahOopStore) { - if (in->Opcode() != Op_ShenandoahWriteBarrier) { - return false; - } uint i = 0; for (; i < phis.size(); i++) { Node* n = phis.node_at(i); @@ -748,8 +220,6 @@ if (i == phis.size()) { return false; } - } else if (t == ShenandoahStore && in->Opcode() != Op_ShenandoahWriteBarrier) { - return false; } barriers_used.push(in); if (trace) {tty->print("Found barrier"); in->dump();} @@ -763,7 +233,14 @@ in = in->in(1); continue; } else if (in->is_Proj() && in->in(0)->is_Allocate()) { - if (trace) {tty->print("Found alloc"); in->in(0)->dump();} + if (trace) { + tty->print("Found alloc"); + in->in(0)->dump(); + } + } else if (in->is_Proj() && (in->in(0)->Opcode() == Op_CallStaticJava || in->in(0)->Opcode() == Op_CallDynamicJava)) { + if (trace) { + tty->print("Found Java call"); + } } else if (in->is_Phi()) { if (!visited.test_set(in->_idx)) { if (trace) {tty->print("Pushed phi:"); in->dump();} @@ -809,7 +286,7 @@ return true; } -void ShenandoahBarrierNode::report_verify_failure(const char *msg, Node *n1, Node *n2) { +void ShenandoahBarrierC2Support::report_verify_failure(const char* msg, Node* n1, Node* n2) { if (n1 != NULL) { n1->dump(+10); } @@ -819,7 +296,7 @@ fatal("%s", msg); } -void ShenandoahBarrierNode::verify(RootNode* root) { +void ShenandoahBarrierC2Support::verify(RootNode* root) { ResourceMark rm; Unique_Node_List wq; GrowableArray<Node*> barriers; @@ -871,7 +348,7 @@ } } - if (verify && !ShenandoahBarrierNode::verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) { + if (verify && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahLoad, trace, barriers_used)) { report_verify_failure("Shenandoah verification: Load should have barriers", n); } } @@ -899,11 +376,11 @@ } } - if (verify && !ShenandoahBarrierNode::verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) { + if (verify && !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? 
ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) { report_verify_failure("Shenandoah verification: Store should have barriers", n); } } - if (!ShenandoahBarrierNode::verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) { + if (!verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) { report_verify_failure("Shenandoah verification: Store (address) should have barriers", n); } } else if (n->Opcode() == Op_CmpP) { @@ -926,26 +403,26 @@ } else { assert(in2->bottom_type()->isa_oopptr(), ""); - if (!ShenandoahBarrierNode::verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) || - !ShenandoahBarrierNode::verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) { + if (!verify_helper(in1, phis, visited, ShenandoahStore, trace, barriers_used) || + !verify_helper(in2, phis, visited, ShenandoahStore, trace, barriers_used)) { report_verify_failure("Shenandoah verification: Cmp should have barriers", n); } } if (verify_no_useless_barrier && mark_inputs && - (!ShenandoahBarrierNode::verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) || - !ShenandoahBarrierNode::verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) { + (!verify_helper(in1, phis, visited, ShenandoahValue, trace, barriers_used) || + !verify_helper(in2, phis, visited, ShenandoahValue, trace, barriers_used))) { phis.clear(); visited.Reset(); } } } else if (n->is_LoadStore()) { if (n->in(MemNode::ValueIn)->bottom_type()->make_ptr() && - !ShenandoahBarrierNode::verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) { + !verify_helper(n->in(MemNode::ValueIn), phis, visited, ShenandoahStoreValEnqueueBarrier ? 
ShenandoahOopStore : ShenandoahValue, trace, barriers_used)) { report_verify_failure("Shenandoah verification: LoadStore (value) should have barriers", n); } - if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !ShenandoahBarrierNode::verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) { + if (n->in(MemNode::Address)->bottom_type()->make_oopptr() && !verify_helper(n->in(MemNode::Address), phis, visited, ShenandoahStore, trace, barriers_used)) { report_verify_failure("Shenandoah verification: LoadStore (address) should have barriers", n); } } else if (n->Opcode() == Op_CallLeafNoFP || n->Opcode() == Op_CallLeaf) { @@ -1041,13 +518,13 @@ } } } - if (!ShenandoahBarrierNode::verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) || - !ShenandoahBarrierNode::verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) { + if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahLoad, trace, barriers_used) || + !verify_helper(dest, phis, visited, ShenandoahStore, trace, barriers_used)) { report_verify_failure("Shenandoah verification: ArrayCopy should have barriers", n); } } else if (strlen(call->_name) > 5 && !strcmp(call->_name + strlen(call->_name) - 5, "_fill")) { - if (!ShenandoahBarrierNode::verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) { + if (!verify_helper(n->in(TypeFunc::Parms), phis, visited, ShenandoahStore, trace, barriers_used)) { report_verify_failure("Shenandoah verification: _fill should have barriers", n); } } else if (!strcmp(call->_name, "shenandoah_wb_pre")) { @@ -1067,7 +544,7 @@ if (pos == -1) { break; } - if (!ShenandoahBarrierNode::verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) { + if (!verify_helper(call->in(pos), phis, visited, calls[i].args[j].t, trace, barriers_used)) { report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n); } } @@ -1090,15 +567,8 @@ } } } - } else if (n->is_ShenandoahBarrier()) { - assert(!barriers.contains(n), ""); - assert(n->Opcode() != Op_ShenandoahWriteBarrier || n->find_out_with(Op_ShenandoahWBMemProj) != NULL, "bad shenandoah write barrier"); - assert(n->Opcode() != Op_ShenandoahWriteBarrier || n->outcnt() > 1, "bad shenandoah write barrier"); - barriers.push(n); - } else if (n->Opcode() == Op_ShenandoahEnqueueBarrier) { + } else if (n->Opcode() == Op_ShenandoahEnqueueBarrier || n->Opcode() == Op_ShenandoahLoadReferenceBarrier) { // skip - } else if (n->Opcode() == Op_ShenandoahWBMemProj) { - assert(n->in(0) == NULL && n->in(ShenandoahWBMemProjNode::WriteBarrier)->Opcode() == Op_ShenandoahWriteBarrier, "strange ShenandoahWBMemProj"); } else if (n->is_AddP() || n->is_Phi() || n->is_ConstraintCast() @@ -1165,7 +635,7 @@ if (pos == -1) { break; } - if (!ShenandoahBarrierNode::verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) { + if (!verify_helper(n->in(pos), phis, visited, others[i].inputs[j].t, trace, barriers_used)) { report_verify_failure("Shenandoah verification: intrinsic calls should have barriers", n); } } @@ -1193,7 +663,7 @@ SafePointNode* sfpt = n->as_SafePoint(); if (verify_no_useless_barrier && sfpt->jvms() != NULL) { for (uint i = sfpt->jvms()->scloff(); i < sfpt->jvms()->endoff(); i++) { - if (!ShenandoahBarrierNode::verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, trace, barriers_used)) { + if (!verify_helper(sfpt->in(i), phis, visited, ShenandoahLoad, 
trace, barriers_used)) { phis.clear(); visited.Reset(); } @@ -1227,9 +697,8 @@ n->Opcode() == Op_SCMemProj || n->Opcode() == Op_EncodeP || n->Opcode() == Op_DecodeN || - n->Opcode() == Op_ShenandoahWriteBarrier || - n->Opcode() == Op_ShenandoahWBMemProj || - n->Opcode() == Op_ShenandoahEnqueueBarrier)) { + n->Opcode() == Op_ShenandoahEnqueueBarrier || + n->Opcode() == Op_ShenandoahLoadReferenceBarrier)) { if (m->bottom_type()->make_oopptr() && m->bottom_type()->make_oopptr()->meet(TypePtr::NULL_PTR) == m->bottom_type()) { report_verify_failure("Shenandoah verification: null input", n, m); } @@ -1251,7 +720,7 @@ } #endif -bool ShenandoahBarrierNode::is_dominator_same_ctrl(Node*c, Node* d, Node* n, PhaseIdealLoop* phase) { +bool ShenandoahBarrierC2Support::is_dominator_same_ctrl(Node* c, Node* d, Node* n, PhaseIdealLoop* phase) { // That both nodes have the same control is not sufficient to prove // domination, verify that there's no path from d to n ResourceMark rm; @@ -1275,7 +744,7 @@ return true; } -bool ShenandoahBarrierNode::is_dominator(Node *d_c, Node *n_c, Node* d, Node* n, PhaseIdealLoop* phase) { +bool ShenandoahBarrierC2Support::is_dominator(Node* d_c, Node* n_c, Node* d, Node* n, PhaseIdealLoop* phase) { if (d_c != n_c) { return phase->is_dominator(d_c, n_c); } @@ -1290,15 +759,11 @@ res = mem->in(TypeFunc::Memory); } else if (mem->is_Phi()) { res = mem->in(1); - } else if (mem->is_ShenandoahBarrier()) { - res = mem->in(ShenandoahBarrierNode::Memory); } else if (mem->is_MergeMem()) { res = mem->as_MergeMem()->memory_at(alias); } else if (mem->is_Store() || mem->is_LoadStore() || mem->is_ClearArray()) { assert(alias = Compile::AliasIdxRaw, "following raw memory can't lead to a barrier"); res = mem->in(MemNode::Memory); - } else if (mem->Opcode() == Op_ShenandoahWBMemProj) { - res = mem->in(ShenandoahWBMemProjNode::WriteBarrier); } else { #ifdef ASSERT mem->dump(); @@ -1308,7 +773,7 @@ return res; } -Node* ShenandoahBarrierNode::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) { +Node* ShenandoahBarrierC2Support::no_branches(Node* c, Node* dom, bool allow_one_proj, PhaseIdealLoop* phase) { Node* iffproj = NULL; while (c != dom) { Node* next = phase->idom(c); @@ -1373,270 +838,7 @@ return iffproj; } -bool ShenandoahBarrierNode::build_loop_late_post(PhaseIdealLoop* phase, Node* n) { - if (n->Opcode() == Op_ShenandoahReadBarrier || - n->Opcode() == Op_ShenandoahWriteBarrier || - n->Opcode() == Op_ShenandoahWBMemProj) { - - phase->build_loop_late_post_work(n, false); - - if (n->Opcode() == Op_ShenandoahWriteBarrier) { - // The write barrier and its memory proj must have the same - // control otherwise some loop opts could put nodes (Phis) between - // them - Node* proj = n->find_out_with(Op_ShenandoahWBMemProj); - if (proj != NULL) { - phase->set_ctrl_and_loop(proj, phase->get_ctrl(n)); - } - } - return true; - } - return false; -} - -bool ShenandoahBarrierNode::sink_node(PhaseIdealLoop* phase, Node* ctrl, Node* n_ctrl) { - ctrl = phase->find_non_split_ctrl(ctrl); - assert(phase->dom_depth(n_ctrl) <= phase->dom_depth(ctrl), "n is later than its clone"); - set_req(0, ctrl); - phase->register_new_node(this, ctrl); - return true; -} - -#ifdef ASSERT -void ShenandoahWriteBarrierNode::memory_dominates_all_paths_helper(Node* c, Node* rep_ctrl, Unique_Node_List& controls, PhaseIdealLoop* phase) { - const bool trace = false; - if (trace) { tty->print("X control is"); c->dump(); } - - uint start = controls.size(); - controls.push(c); - for (uint i = start; i < 
controls.size(); i++) { - Node *n = controls.at(i); - - if (trace) { tty->print("X from"); n->dump(); } - - if (n == rep_ctrl) { - continue; - } - - if (n->is_Proj()) { - Node* n_dom = n->in(0); - IdealLoopTree* n_dom_loop = phase->get_loop(n_dom); - if (n->is_IfProj() && n_dom->outcnt() == 2) { - n_dom_loop = phase->get_loop(n_dom->as_If()->proj_out(n->as_Proj()->_con == 0 ? 1 : 0)); - } - if (n_dom_loop != phase->ltree_root()) { - Node* tail = n_dom_loop->tail(); - if (tail->is_Region()) { - for (uint j = 1; j < tail->req(); j++) { - if (phase->is_dominator(n_dom, tail->in(j)) && !phase->is_dominator(n, tail->in(j))) { - assert(phase->is_dominator(rep_ctrl, tail->in(j)), "why are we here?"); - // entering loop from below, mark backedge - if (trace) { tty->print("X pushing backedge"); tail->in(j)->dump(); } - controls.push(tail->in(j)); - //assert(n->in(0) == n_dom, "strange flow control"); - } - } - } else if (phase->get_loop(n) != n_dom_loop && phase->is_dominator(n_dom, tail)) { - // entering loop from below, mark backedge - if (trace) { tty->print("X pushing backedge"); tail->dump(); } - controls.push(tail); - //assert(n->in(0) == n_dom, "strange flow control"); - } - } - } - - if (n->is_Loop()) { - Node* c = n->in(LoopNode::EntryControl); - if (trace) { tty->print("X pushing"); c->dump(); } - controls.push(c); - } else if (n->is_Region()) { - for (uint i = 1; i < n->req(); i++) { - Node* c = n->in(i); - if (trace) { tty->print("X pushing"); c->dump(); } - controls.push(c); - } - } else { - Node* c = n->in(0); - if (trace) { tty->print("X pushing"); c->dump(); } - controls.push(c); - } - } -} - -bool ShenandoahWriteBarrierNode::memory_dominates_all_paths(Node* mem, Node* rep_ctrl, int alias, PhaseIdealLoop* phase) { - const bool trace = false; - if (trace) { - tty->print("XXX mem is"); mem->dump(); - tty->print("XXX rep ctrl is"); rep_ctrl->dump(); - tty->print_cr("XXX alias is %d", alias); - } - ResourceMark rm; - Unique_Node_List wq; - Unique_Node_List controls; - wq.push(mem); - for (uint next = 0; next < wq.size(); next++) { - Node *nn = wq.at(next); - if (trace) { tty->print("XX from mem"); nn->dump(); } - assert(nn->bottom_type() == Type::MEMORY, "memory only"); - - if (nn->is_Phi()) { - Node* r = nn->in(0); - for (DUIterator_Fast jmax, j = r->fast_outs(jmax); j < jmax; j++) { - Node* u = r->fast_out(j); - if (u->is_Phi() && u->bottom_type() == Type::MEMORY && u != nn && - (u->adr_type() == TypePtr::BOTTOM || phase->C->get_alias_index(u->adr_type()) == alias)) { - if (trace) { tty->print("XX Next mem (other phi)"); u->dump(); } - wq.push(u); - } - } - } - - for (DUIterator_Fast imax, i = nn->fast_outs(imax); i < imax; i++) { - Node* use = nn->fast_out(i); - - if (trace) { tty->print("XX use %p", use->adr_type()); use->dump(); } - if (use->is_CFG() && use->in(TypeFunc::Memory) == nn) { - Node* c = use->in(0); - if (phase->is_dominator(rep_ctrl, c)) { - memory_dominates_all_paths_helper(c, rep_ctrl, controls, phase); - } else if (use->is_CallStaticJava() && use->as_CallStaticJava()->uncommon_trap_request() != 0 && c->is_Region()) { - Node* region = c; - if (trace) { tty->print("XX unc region"); region->dump(); } - for (uint j = 1; j < region->req(); j++) { - if (phase->is_dominator(rep_ctrl, region->in(j))) { - if (trace) { tty->print("XX unc follows"); region->in(j)->dump(); } - memory_dominates_all_paths_helper(region->in(j), rep_ctrl, controls, phase); - } - } - } - //continue; - } else if (use->is_Phi()) { - assert(use->bottom_type() == Type::MEMORY, "bad phi"); - if 
((use->adr_type() == TypePtr::BOTTOM) || - phase->C->get_alias_index(use->adr_type()) == alias) { - for (uint j = 1; j < use->req(); j++) { - if (use->in(j) == nn) { - Node* c = use->in(0)->in(j); - if (phase->is_dominator(rep_ctrl, c)) { - memory_dominates_all_paths_helper(c, rep_ctrl, controls, phase); - } - } - } - } - // continue; - } - - if (use->is_MergeMem()) { - if (use->as_MergeMem()->memory_at(alias) == nn) { - if (trace) { tty->print("XX Next mem"); use->dump(); } - // follow the memory edges - wq.push(use); - } - } else if (use->is_Phi()) { - assert(use->bottom_type() == Type::MEMORY, "bad phi"); - if ((use->adr_type() == TypePtr::BOTTOM) || - phase->C->get_alias_index(use->adr_type()) == alias) { - if (trace) { tty->print("XX Next mem"); use->dump(); } - // follow the memory edges - wq.push(use); - } - } else if (use->bottom_type() == Type::MEMORY && - (use->adr_type() == TypePtr::BOTTOM || phase->C->get_alias_index(use->adr_type()) == alias)) { - if (trace) { tty->print("XX Next mem"); use->dump(); } - // follow the memory edges - wq.push(use); - } else if ((use->is_SafePoint() || use->is_MemBar()) && - (use->adr_type() == TypePtr::BOTTOM || phase->C->get_alias_index(use->adr_type()) == alias)) { - for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) { - Node* u = use->fast_out(j); - if (u->bottom_type() == Type::MEMORY) { - if (trace) { tty->print("XX Next mem"); u->dump(); } - // follow the memory edges - wq.push(u); - } - } - } else if (use->Opcode() == Op_ShenandoahWriteBarrier && phase->C->get_alias_index(use->adr_type()) == alias) { - Node* m = use->find_out_with(Op_ShenandoahWBMemProj); - if (m != NULL) { - if (trace) { tty->print("XX Next mem"); m->dump(); } - // follow the memory edges - wq.push(m); - } - } - } - } - - if (controls.size() == 0) { - return false; - } - - for (uint i = 0; i < controls.size(); i++) { - Node *n = controls.at(i); - - if (trace) { tty->print("X checking"); n->dump(); } - - if (n->unique_ctrl_out() != NULL) { - continue; - } - - if (n->Opcode() == Op_NeverBranch) { - Node* taken = n->as_Multi()->proj_out(0); - if (!controls.member(taken)) { - if (trace) { tty->print("X not seen"); taken->dump(); } - return false; - } - continue; - } - - for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) { - Node* u = n->fast_out(j); - - if (u->is_CFG()) { - if (!controls.member(u)) { - if (u->is_Proj() && u->as_Proj()->is_uncommon_trap_proj(Deoptimization::Reason_none)) { - if (trace) { tty->print("X not seen but unc"); u->dump(); } - } else { - Node* c = u; - do { - c = c->unique_ctrl_out(); - } while (c != NULL && c->is_Region()); - if (c != NULL && c->Opcode() == Op_Halt) { - if (trace) { tty->print("X not seen but halt"); c->dump(); } - } else { - if (trace) { tty->print("X not seen"); u->dump(); } - return false; - } - } - } else { - if (trace) { tty->print("X seen"); u->dump(); } - } - } - } - } - return true; -} -#endif - -Node* ShenandoahBarrierNode::dom_mem(Node* mem, Node*& mem_ctrl, Node* n, Node* rep_ctrl, int alias, PhaseIdealLoop* phase) { - ResourceMark rm; - VectorSet wq(Thread::current()->resource_area()); - wq.set(mem->_idx); - mem_ctrl = phase->get_ctrl(mem); - while (!is_dominator(mem_ctrl, rep_ctrl, mem, n, phase)) { - mem = next_mem(mem, alias); - if (wq.test_set(mem->_idx)) { - return NULL; // hit an unexpected loop - } - mem_ctrl = phase->ctrl_or_self(mem); - } - if (mem->is_MergeMem()) { - mem = mem->as_MergeMem()->memory_at(alias); - mem_ctrl = phase->ctrl_or_self(mem); - } - return mem; -} - -Node* 
ShenandoahBarrierNode::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) { +Node* ShenandoahBarrierC2Support::dom_mem(Node* mem, Node* ctrl, int alias, Node*& mem_ctrl, PhaseIdealLoop* phase) { ResourceMark rm; VectorSet wq(Thread::current()->resource_area()); wq.set(mem->_idx); @@ -1655,650 +857,7 @@ return mem; } -static void disconnect_barrier_mem(Node* wb, PhaseIterGVN& igvn) { - Node* mem_in = wb->in(ShenandoahBarrierNode::Memory); - Node* proj = wb->find_out_with(Op_ShenandoahWBMemProj); - - for (DUIterator_Last imin, i = proj->last_outs(imin); i >= imin; ) { - Node* u = proj->last_out(i); - igvn.rehash_node_delayed(u); - int nb = u->replace_edge(proj, mem_in); - assert(nb > 0, "no replacement?"); - i -= nb; - } -} - -Node* ShenandoahWriteBarrierNode::move_above_predicates(LoopNode* cl, Node* val_ctrl, PhaseIdealLoop* phase) { - Node* entry = cl->skip_strip_mined(-1)->in(LoopNode::EntryControl); - Node* above_pred = phase->skip_all_loop_predicates(entry); - Node* ctrl = entry; - while (ctrl != above_pred) { - Node* next = ctrl->in(0); - if (!phase->is_dominator(val_ctrl, next)) { - break; - } - ctrl = next; - } - return ctrl; -} - -static MemoryGraphFixer* find_fixer(GrowableArray<MemoryGraphFixer*>& memory_graph_fixers, int alias) { - for (int i = 0; i < memory_graph_fixers.length(); i++) { - if (memory_graph_fixers.at(i)->alias() == alias) { - return memory_graph_fixers.at(i); - } - } - return NULL; -} - -static MemoryGraphFixer* create_fixer(GrowableArray<MemoryGraphFixer*>& memory_graph_fixers, int alias, PhaseIdealLoop* phase, bool include_lsm) { - assert(find_fixer(memory_graph_fixers, alias) == NULL, "none should exist yet"); - MemoryGraphFixer* fixer = new MemoryGraphFixer(alias, include_lsm, phase); - memory_graph_fixers.push(fixer); - return fixer; -} - -void ShenandoahWriteBarrierNode::try_move_before_loop_helper(LoopNode* cl, Node* val_ctrl, GrowableArray<MemoryGraphFixer*>& memory_graph_fixers, PhaseIdealLoop* phase, bool include_lsm, Unique_Node_List& uses) { - assert(cl->is_Loop(), "bad control"); - Node* ctrl = move_above_predicates(cl, val_ctrl, phase); - Node* mem_ctrl = NULL; - int alias = phase->C->get_alias_index(adr_type()); - - MemoryGraphFixer* fixer = find_fixer(memory_graph_fixers, alias); - if (fixer == NULL) { - fixer = create_fixer(memory_graph_fixers, alias, phase, include_lsm); - } - - Node* proj = find_out_with(Op_ShenandoahWBMemProj); - - fixer->remove(proj); - Node* mem = fixer->find_mem(ctrl, NULL); - - assert(!ShenandoahVerifyOptoBarriers || memory_dominates_all_paths(mem, ctrl, alias, phase), "can't fix the memory graph"); - - phase->set_ctrl_and_loop(this, ctrl); - phase->igvn().replace_input_of(this, Control, ctrl); - - disconnect_barrier_mem(this, phase->igvn()); - - phase->igvn().replace_input_of(this, Memory, mem); - phase->set_ctrl_and_loop(proj, ctrl); - - fixer->fix_mem(ctrl, ctrl, mem, mem, proj, uses); - assert(proj->outcnt() > 0, "disconnected write barrier"); -} - -LoopNode* ShenandoahWriteBarrierNode::try_move_before_pre_loop(Node* c, Node* val_ctrl, PhaseIdealLoop* phase) { - // A write barrier between a pre and main loop can get in the way of - // vectorization. 
Move it above the pre loop if possible - CountedLoopNode* cl = NULL; - if (c->is_IfFalse() && - c->in(0)->is_CountedLoopEnd()) { - cl = c->in(0)->as_CountedLoopEnd()->loopnode(); - } else if (c->is_IfProj() && - c->in(0)->is_If() && - c->in(0)->in(0)->is_IfFalse() && - c->in(0)->in(0)->in(0)->is_CountedLoopEnd()) { - cl = c->in(0)->in(0)->in(0)->as_CountedLoopEnd()->loopnode(); - } - if (cl != NULL && - cl->is_pre_loop() && - val_ctrl != cl && - phase->is_dominator(val_ctrl, cl)) { - return cl; - } - return NULL; -} - -void ShenandoahWriteBarrierNode::try_move_before_loop(GrowableArray<MemoryGraphFixer*>& memory_graph_fixers, PhaseIdealLoop* phase, bool include_lsm, Unique_Node_List& uses) { - Node *n_ctrl = phase->get_ctrl(this); - IdealLoopTree *n_loop = phase->get_loop(n_ctrl); - Node* val = in(ValueIn); - Node* val_ctrl = phase->get_ctrl(val); - if (n_loop != phase->ltree_root() && !n_loop->_irreducible) { - IdealLoopTree *val_loop = phase->get_loop(val_ctrl); - Node* mem = in(Memory); - IdealLoopTree *mem_loop = phase->get_loop(phase->get_ctrl(mem)); - if (!n_loop->is_member(val_loop) && - n_loop->is_member(mem_loop)) { - Node* n_loop_head = n_loop->_head; - - if (n_loop_head->is_Loop()) { - LoopNode* loop = n_loop_head->as_Loop(); - if (n_loop_head->is_CountedLoop() && n_loop_head->as_CountedLoop()->is_main_loop()) { - LoopNode* res = try_move_before_pre_loop(n_loop_head->in(LoopNode::EntryControl), val_ctrl, phase); - if (res != NULL) { - loop = res; - } - } - - try_move_before_loop_helper(loop, val_ctrl, memory_graph_fixers, phase, include_lsm, uses); - } - } - } - LoopNode* ctrl = try_move_before_pre_loop(in(0), val_ctrl, phase); - if (ctrl != NULL) { - try_move_before_loop_helper(ctrl, val_ctrl, memory_graph_fixers, phase, include_lsm, uses); - } -} - -Node* ShenandoahWriteBarrierNode::would_subsume(ShenandoahBarrierNode* other, PhaseIdealLoop* phase) { - Node* val = in(ValueIn); - Node* val_ctrl = phase->get_ctrl(val); - Node* other_mem = other->in(Memory); - Node* other_ctrl = phase->get_ctrl(other); - Node* this_ctrl = phase->get_ctrl(this); - IdealLoopTree* this_loop = phase->get_loop(this_ctrl); - IdealLoopTree* other_loop = phase->get_loop(other_ctrl); - - Node* ctrl = phase->dom_lca(other_ctrl, this_ctrl); - - if (ctrl->is_Proj() && - ctrl->in(0)->is_Call() && - ctrl->unique_ctrl_out() != NULL && - ctrl->unique_ctrl_out()->Opcode() == Op_Catch && - !phase->is_dominator(val_ctrl, ctrl->in(0)->in(0))) { - return NULL; - } - - IdealLoopTree* loop = phase->get_loop(ctrl); - - // We don't want to move a write barrier in a loop - // If the LCA is in a inner loop, try a control out of loop if possible - while (!loop->is_member(this_loop) && (other->Opcode() != Op_ShenandoahWriteBarrier || !loop->is_member(other_loop))) { - ctrl = phase->idom(ctrl); - if (ctrl->is_MultiBranch()) { - ctrl = ctrl->in(0); - } - if (ctrl != val_ctrl && phase->is_dominator(ctrl, val_ctrl)) { - return NULL; - } - loop = phase->get_loop(ctrl); - } - - if (ShenandoahDontIncreaseWBFreq) { - Node* this_iffproj = no_branches(this_ctrl, ctrl, true, phase); - if (other->Opcode() == Op_ShenandoahWriteBarrier) { - Node* other_iffproj = no_branches(other_ctrl, ctrl, true, phase); - if (other_iffproj == NULL || this_iffproj == NULL) { - return ctrl; - } else if (other_iffproj != NodeSentinel && this_iffproj != NodeSentinel && - other_iffproj->in(0) == this_iffproj->in(0)) { - return ctrl; - } - } else if (this_iffproj == NULL) { - return ctrl; - } - return NULL; - } - - return ctrl; -} - -void 
ShenandoahWriteBarrierNode::optimize_before_expansion(PhaseIdealLoop* phase, GrowableArray<MemoryGraphFixer*> memory_graph_fixers, bool include_lsm) { - bool progress = false; - Unique_Node_List uses; - do { - progress = false; - for (int i = 0; i < ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count(); i++) { - ShenandoahWriteBarrierNode* wb = ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barrier(i); - - wb->try_move_before_loop(memory_graph_fixers, phase, include_lsm, uses); - - Node* val = wb->in(ValueIn); - - for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) { - Node* u = val->fast_out(j); - if (u != wb && u->is_ShenandoahBarrier()) { - Node* rep_ctrl = wb->would_subsume(u->as_ShenandoahBarrier(), phase); - - if (rep_ctrl != NULL) { - Node* other = u; - Node* val_ctrl = phase->get_ctrl(val); - if (rep_ctrl->is_Proj() && - rep_ctrl->in(0)->is_Call() && - rep_ctrl->unique_ctrl_out() != NULL && - rep_ctrl->unique_ctrl_out()->Opcode() == Op_Catch) { - rep_ctrl = rep_ctrl->in(0)->in(0); - - assert(phase->is_dominator(val_ctrl, rep_ctrl), "bad control"); - } else { - LoopNode* c = ShenandoahWriteBarrierNode::try_move_before_pre_loop(rep_ctrl, val_ctrl, phase); - if (c != NULL) { - rep_ctrl = ShenandoahWriteBarrierNode::move_above_predicates(c, val_ctrl, phase); - } else { - while (rep_ctrl->is_IfProj()) { - CallStaticJavaNode* unc = rep_ctrl->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none); - if (unc != NULL) { - int req = unc->uncommon_trap_request(); - Deoptimization::DeoptReason trap_reason = Deoptimization::trap_request_reason(req); - if ((trap_reason == Deoptimization::Reason_loop_limit_check || - trap_reason == Deoptimization::Reason_predicate || - trap_reason == Deoptimization::Reason_profile_predicate) && - phase->is_dominator(val_ctrl, rep_ctrl->in(0)->in(0))) { - rep_ctrl = rep_ctrl->in(0)->in(0); - continue; - } - } - break; - } - } - } - - Node* wb_ctrl = phase->get_ctrl(wb); - Node* other_ctrl = phase->get_ctrl(other); - int alias = phase->C->get_alias_index(wb->adr_type()); - MemoryGraphFixer* fixer = find_fixer(memory_graph_fixers, alias);; - if (!is_dominator(wb_ctrl, other_ctrl, wb, other, phase)) { - if (fixer == NULL) { - fixer = create_fixer(memory_graph_fixers, alias, phase, include_lsm); - } - Node* mem = fixer->find_mem(rep_ctrl, phase->get_ctrl(other) == rep_ctrl ? 
other : NULL); - - if (mem->has_out_with(Op_Lock) || mem->has_out_with(Op_Unlock)) { - continue; - } - - Node* wb_proj = wb->find_out_with(Op_ShenandoahWBMemProj); - fixer->remove(wb_proj); - Node* mem_for_ctrl = fixer->find_mem(rep_ctrl, NULL); - - if (wb->in(Memory) != mem) { - disconnect_barrier_mem(wb, phase->igvn()); - phase->igvn().replace_input_of(wb, Memory, mem); - } - if (rep_ctrl != wb_ctrl) { - phase->set_ctrl_and_loop(wb, rep_ctrl); - phase->igvn().replace_input_of(wb, Control, rep_ctrl); - phase->set_ctrl_and_loop(wb_proj, rep_ctrl); - progress = true; - } - - fixer->fix_mem(rep_ctrl, rep_ctrl, mem, mem_for_ctrl, wb_proj, uses); - - assert(!ShenandoahVerifyOptoBarriers || ShenandoahWriteBarrierNode::memory_dominates_all_paths(mem, rep_ctrl, alias, phase), "can't fix the memory graph"); - } - - if (other->Opcode() == Op_ShenandoahWriteBarrier) { - Node* other_proj = other->find_out_with(Op_ShenandoahWBMemProj); - if (fixer != NULL) { - fixer->remove(other_proj); - } - phase->igvn().replace_node(other_proj, other->in(Memory)); - } - phase->igvn().replace_node(other, wb); - --j; --jmax; - } - } - } - } - } while(progress); -} - -// Some code duplication with PhaseIdealLoop::split_if_with_blocks_pre() -Node* ShenandoahWriteBarrierNode::try_split_thru_phi(PhaseIdealLoop* phase) { - Node *ctrl = phase->get_ctrl(this); - if (ctrl == NULL) { - return this; - } - Node *blk = phase->has_local_phi_input(this); - if (blk == NULL) { - return this; - } - - if (in(0) != blk) { - return this; - } - - int policy = blk->req() >> 2; - - if (blk->is_CountedLoop()) { - IdealLoopTree *lp = phase->get_loop(blk); - if (lp && lp->_rce_candidate) { - return this; - } - } - - if (phase->C->live_nodes() > 35000) { - return this; - } - - uint unique = phase->C->unique(); - Node *phi = phase->split_thru_phi(this, blk, policy); - if (phi == NULL) { - return this; - } - - Node* mem_phi = new PhiNode(blk, Type::MEMORY, phase->C->alias_type(adr_type())->adr_type()); - for (uint i = 1; i < blk->req(); i++) { - Node* n = phi->in(i); - if (n->Opcode() == Op_ShenandoahWriteBarrier && - n->_idx >= unique) { - Node* proj = new ShenandoahWBMemProjNode(n); - phase->register_new_node(proj, phase->get_ctrl(n)); - mem_phi->init_req(i, proj); - } else { - Node* mem = in(ShenandoahBarrierNode::Memory); - if (mem->is_Phi() && mem->in(0) == blk) { - mem = mem->in(i); - } - mem_phi->init_req(i, mem); - } - } - phase->register_new_node(mem_phi, blk); - - - Node* proj = find_out_with(Op_ShenandoahWBMemProj); - phase->igvn().replace_node(proj, mem_phi); - phase->igvn().replace_node(this, phi); - - return phi; -} - -void ShenandoahReadBarrierNode::try_move(PhaseIdealLoop* phase) { - Node *n_ctrl = phase->get_ctrl(this); - if (n_ctrl == NULL) { - return; - } - Node* mem = in(MemNode::Memory); - int alias = phase->C->get_alias_index(adr_type()); - const bool trace = false; - -#ifdef ASSERT - if (trace) { tty->print("Trying to move mem of"); dump(); } -#endif - - Node* new_mem = mem; - - ResourceMark rm; - VectorSet seen(Thread::current()->resource_area()); - Node_List phis; - - for (;;) { -#ifdef ASSERT - if (trace) { tty->print("Looking for dominator from"); mem->dump(); } -#endif - if (mem->is_Proj() && mem->in(0)->is_Start()) { - if (new_mem != in(MemNode::Memory)) { -#ifdef ASSERT - if (trace) { tty->print("XXX Setting mem to"); new_mem->dump(); tty->print(" for "); dump(); } -#endif - phase->igvn().replace_input_of(this, MemNode::Memory, new_mem); - } - return; - } - - Node* candidate = mem; - do { - if (!is_independent(mem)) 
{ - if (trace) { tty->print_cr("Not independent"); } - if (new_mem != in(MemNode::Memory)) { -#ifdef ASSERT - if (trace) { tty->print("XXX Setting mem to"); new_mem->dump(); tty->print(" for "); dump(); } -#endif - phase->igvn().replace_input_of(this, MemNode::Memory, new_mem); - } - return; - } - if (seen.test_set(mem->_idx)) { - if (trace) { tty->print_cr("Already seen"); } - ShouldNotReachHere(); - // Strange graph - if (new_mem != in(MemNode::Memory)) { -#ifdef ASSERT - if (trace) { tty->print("XXX Setting mem to"); new_mem->dump(); tty->print(" for "); dump(); } -#endif - phase->igvn().replace_input_of(this, MemNode::Memory, new_mem); - } - return; - } - if (mem->is_Phi()) { - phis.push(mem); - } - mem = next_mem(mem, alias); - if (mem->bottom_type() == Type::MEMORY) { - candidate = mem; - } - assert(is_dominator(phase->ctrl_or_self(mem), n_ctrl, mem, this, phase) == phase->is_dominator(phase->ctrl_or_self(mem), n_ctrl), "strange dominator"); -#ifdef ASSERT - if (trace) { tty->print("Next mem is"); mem->dump(); } -#endif - } while (mem->bottom_type() != Type::MEMORY || !phase->is_dominator(phase->ctrl_or_self(mem), n_ctrl)); - - assert(mem->bottom_type() == Type::MEMORY, "bad mem"); - - bool not_dom = false; - for (uint i = 0; i < phis.size() && !not_dom; i++) { - Node* nn = phis.at(i); - -#ifdef ASSERT - if (trace) { tty->print("Looking from phi"); nn->dump(); } -#endif - assert(nn->is_Phi(), "phis only"); - for (uint j = 2; j < nn->req() && !not_dom; j++) { - Node* m = nn->in(j); -#ifdef ASSERT - if (trace) { tty->print("Input %d is", j); m->dump(); } -#endif - while (m != mem && !seen.test_set(m->_idx)) { - if (is_dominator(phase->ctrl_or_self(m), phase->ctrl_or_self(mem), m, mem, phase)) { - not_dom = true; - // Scheduling anomaly -#ifdef ASSERT - if (trace) { tty->print("Giving up"); m->dump(); } -#endif - break; - } - if (!is_independent(m)) { - if (trace) { tty->print_cr("Not independent"); } - if (new_mem != in(MemNode::Memory)) { -#ifdef ASSERT - if (trace) { tty->print("XXX Setting mem to"); new_mem->dump(); tty->print(" for "); dump(); } -#endif - phase->igvn().replace_input_of(this, MemNode::Memory, new_mem); - } - return; - } - if (m->is_Phi()) { - phis.push(m); - } - m = next_mem(m, alias); -#ifdef ASSERT - if (trace) { tty->print("Next mem is"); m->dump(); } -#endif - } - } - } - if (!not_dom) { - new_mem = mem; - phis.clear(); - } else { - seen.Clear(); - } - } -} - -CallStaticJavaNode* ShenandoahWriteBarrierNode::pin_and_expand_null_check(PhaseIterGVN& igvn) { - Node* val = in(ValueIn); - - const Type* val_t = igvn.type(val); - - if (val_t->meet(TypePtr::NULL_PTR) != val_t && - val->Opcode() == Op_CastPP && - val->in(0) != NULL && - val->in(0)->Opcode() == Op_IfTrue && - val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) && - val->in(0)->in(0)->is_If() && - val->in(0)->in(0)->in(1)->Opcode() == Op_Bool && - val->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne && - val->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP && - val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1) && - val->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) { - assert(val->in(0)->in(0)->in(1)->in(1)->in(1) == val->in(1), ""); - CallStaticJavaNode* unc = val->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none); - return unc; - } - return NULL; -} - -void ShenandoahWriteBarrierNode::pin_and_expand_move_barrier(PhaseIdealLoop* phase, GrowableArray<MemoryGraphFixer*>& memory_graph_fixers, Unique_Node_List& uses) { 
- Node* unc = pin_and_expand_null_check(phase->igvn()); - Node* val = in(ValueIn); - - if (unc != NULL) { - Node* ctrl = phase->get_ctrl(this); - Node* unc_ctrl = val->in(0); - - // Don't move write barrier in a loop - IdealLoopTree* loop = phase->get_loop(ctrl); - IdealLoopTree* unc_loop = phase->get_loop(unc_ctrl); - - if (!unc_loop->is_member(loop)) { - return; - } - - Node* branch = no_branches(ctrl, unc_ctrl, false, phase); - assert(branch == NULL || branch == NodeSentinel, "was not looking for a branch"); - if (branch == NodeSentinel) { - return; - } - - RegionNode* r = new RegionNode(3); - IfNode* iff = unc_ctrl->in(0)->as_If(); - - Node* ctrl_use = unc_ctrl->unique_ctrl_out(); - Node* unc_ctrl_clone = unc_ctrl->clone(); - phase->register_control(unc_ctrl_clone, loop, iff); - Node* c = unc_ctrl_clone; - Node* new_cast = clone_null_check(c, val, unc_ctrl_clone, phase); - r->init_req(1, new_cast->in(0)->in(0)->as_If()->proj_out(0)); - - phase->igvn().replace_input_of(unc_ctrl, 0, c->in(0)); - phase->set_idom(unc_ctrl, c->in(0), phase->dom_depth(unc_ctrl)); - phase->lazy_replace(c, unc_ctrl); - c = NULL;; - phase->igvn().replace_input_of(val, 0, unc_ctrl_clone); - phase->set_ctrl(val, unc_ctrl_clone); - - IfNode* new_iff = new_cast->in(0)->in(0)->as_If(); - fix_null_check(unc, unc_ctrl_clone, r, uses, phase); - Node* iff_proj = iff->proj_out(0); - r->init_req(2, iff_proj); - phase->register_control(r, phase->ltree_root(), iff); - - Node* new_bol = new_iff->in(1)->clone(); - Node* new_cmp = new_bol->in(1)->clone(); - assert(new_cmp->Opcode() == Op_CmpP, "broken"); - assert(new_cmp->in(1) == val->in(1), "broken"); - new_bol->set_req(1, new_cmp); - new_cmp->set_req(1, this); - phase->register_new_node(new_bol, new_iff->in(0)); - phase->register_new_node(new_cmp, new_iff->in(0)); - phase->igvn().replace_input_of(new_iff, 1, new_bol); - phase->igvn().replace_input_of(new_cast, 1, this); - - for (DUIterator_Fast imax, i = this->fast_outs(imax); i < imax; i++) { - Node* u = this->fast_out(i); - if (u == new_cast || u->Opcode() == Op_ShenandoahWBMemProj || u == new_cmp) { - continue; - } - phase->igvn().rehash_node_delayed(u); - int nb = u->replace_edge(this, new_cast); - assert(nb > 0, "no update?"); - --i; imax -= nb; - } - - for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) { - Node* u = val->fast_out(i); - if (u == this) { - continue; - } - phase->igvn().rehash_node_delayed(u); - int nb = u->replace_edge(val, new_cast); - assert(nb > 0, "no update?"); - --i; imax -= nb; - } - - Node* new_ctrl = unc_ctrl_clone; - - int alias = phase->C->get_alias_index(adr_type()); - MemoryGraphFixer* fixer = find_fixer(memory_graph_fixers, alias); - if (fixer == NULL) { - fixer = create_fixer(memory_graph_fixers, alias, phase, true); - } - - Node* proj = find_out_with(Op_ShenandoahWBMemProj); - fixer->remove(proj); - Node* mem = fixer->find_mem(new_ctrl, NULL); - - if (in(Memory) != mem) { - disconnect_barrier_mem(this, phase->igvn()); - phase->igvn().replace_input_of(this, Memory, mem); - } - - phase->set_ctrl_and_loop(this, new_ctrl); - phase->igvn().replace_input_of(this, Control, new_ctrl); - phase->set_ctrl_and_loop(proj, new_ctrl); - - fixer->fix_mem(new_ctrl, new_ctrl, mem, mem, proj, uses); - } -} - -void ShenandoahWriteBarrierNode::pin_and_expand_helper(PhaseIdealLoop* phase) { - Node* val = in(ValueIn); - CallStaticJavaNode* unc = pin_and_expand_null_check(phase->igvn()); - Node* rep = this; - Node* ctrl = phase->get_ctrl(this); - if (unc != NULL && val->in(0) == ctrl) { - Node* 
unc_ctrl = val->in(0); - IfNode* other_iff = unc_ctrl->unique_ctrl_out()->as_If(); - ProjNode* other_unc_ctrl = other_iff->proj_out(1); - Node* cast = NULL; - for (DUIterator_Fast imax, i = other_unc_ctrl->fast_outs(imax); i < imax && cast == NULL; i++) { - Node* u = other_unc_ctrl->fast_out(i); - if (u->Opcode() == Op_CastPP && u->in(1) == this) { - cast = u; - } - } - assert(other_unc_ctrl->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) == unc, "broken"); - rep = cast; - } - - // Replace all uses of barrier's input that are dominated by ctrl - // with the value returned by the barrier: no need to keep both - // live. - for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) { - Node* u = val->fast_out(i); - if (u != this) { - if (u->is_Phi()) { - int nb = 0; - for (uint j = 1; j < u->req(); j++) { - if (u->in(j) == val) { - Node* c = u->in(0)->in(j); - if (phase->is_dominator(ctrl, c)) { - phase->igvn().replace_input_of(u, j, rep); - nb++; - } - } - } - if (nb > 0) { - imax -= nb; - --i; - } - } else { - Node* c = phase->ctrl_or_self(u); - if (is_dominator(ctrl, c, this, u, phase)) { - phase->igvn().rehash_node_delayed(u); - int nb = u->replace_edge(val, rep); - assert(nb > 0, "no update?"); - --i, imax -= nb; - } - } - } - } -} - -Node* ShenandoahWriteBarrierNode::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) { +Node* ShenandoahBarrierC2Support::find_bottom_mem(Node* ctrl, PhaseIdealLoop* phase) { Node* mem = NULL; Node* c = ctrl; do { @@ -2355,7 +914,7 @@ return mem; } -void ShenandoahWriteBarrierNode::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) { +void ShenandoahBarrierC2Support::follow_barrier_uses(Node* n, Node* ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) { for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { Node* u = n->fast_out(i); if (!u->is_CFG() && phase->get_ctrl(u) == ctrl && (!u->is_Phi() || !u->in(0)->is_Loop() || u->in(LoopNode::LoopBackControl) != n)) { @@ -2375,7 +934,7 @@ inner->clear_strip_mined(); } -void ShenandoahWriteBarrierNode::test_heap_stable(Node*& ctrl, Node* raw_mem, Node*& heap_stable_ctrl, +void ShenandoahBarrierC2Support::test_heap_stable(Node*& ctrl, Node* raw_mem, Node*& heap_stable_ctrl, PhaseIdealLoop* phase) { IdealLoopTree* loop = phase->get_loop(ctrl); Node* thread = new ThreadLocalNode(); @@ -2407,7 +966,7 @@ assert(is_heap_stable_test(heap_stable_iff), "Should match the shape"); } -void ShenandoahWriteBarrierNode::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) { +void ShenandoahBarrierC2Support::test_null(Node*& ctrl, Node* val, Node*& null_ctrl, PhaseIdealLoop* phase) { const Type* val_t = phase->igvn().type(val); if (val_t->meet(TypePtr::NULL_PTR) == val_t) { IdealLoopTree* loop = phase->get_loop(ctrl); @@ -2424,7 +983,7 @@ } } -Node* ShenandoahWriteBarrierNode::clone_null_check(Node*& c, Node* val, Node* unc_ctrl, PhaseIdealLoop* phase) { +Node* ShenandoahBarrierC2Support::clone_null_check(Node*& c, Node* val, Node* unc_ctrl, PhaseIdealLoop* phase) { IdealLoopTree *loop = phase->get_loop(c); Node* iff = unc_ctrl->in(0); assert(iff->is_If(), "broken"); @@ -2445,7 +1004,7 @@ return val; } -void ShenandoahWriteBarrierNode::fix_null_check(Node* unc, Node* unc_ctrl, Node* new_unc_ctrl, +void ShenandoahBarrierC2Support::fix_null_check(Node* unc, Node* unc_ctrl, Node* new_unc_ctrl, Unique_Node_List& uses, PhaseIdealLoop* phase) { IfNode* iff = unc_ctrl->in(0)->as_If(); Node* proj = iff->proj_out(0); @@ -2494,7 +1053,7 @@ 
assert(nb == 1, "only use expected"); } -void ShenandoahWriteBarrierNode::in_cset_fast_test(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) { +void ShenandoahBarrierC2Support::in_cset_fast_test(Node*& ctrl, Node*& not_cset_ctrl, Node* val, Node* raw_mem, PhaseIdealLoop* phase) { IdealLoopTree *loop = phase->get_loop(ctrl); Node* raw_rbtrue = new CastP2XNode(ctrl, val); phase->register_new_node(raw_rbtrue, ctrl); @@ -2523,23 +1082,18 @@ phase->register_control(ctrl, loop, in_cset_fast_test_iff); } -void ShenandoahWriteBarrierNode::call_wb_stub(Node*& ctrl, Node*& val, Node*& result_mem, - Node* raw_mem, Node* wb_mem, - int alias, - PhaseIdealLoop* phase) { +void ShenandoahBarrierC2Support::call_lrb_stub(Node*& ctrl, Node*& val, Node*& result_mem, Node* raw_mem, PhaseIdealLoop* phase) { IdealLoopTree*loop = phase->get_loop(ctrl); const TypePtr* obj_type = phase->igvn().type(val)->is_oopptr()->cast_to_nonconst(); // The slow path stub consumes and produces raw memory in addition // to the existing memory edges Node* base = find_bottom_mem(ctrl, phase); - MergeMemNode* mm = MergeMemNode::make(base); - mm->set_memory_at(alias, wb_mem); mm->set_memory_at(Compile::AliasIdxRaw, raw_mem); phase->register_new_node(mm, ctrl); - Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_write_barrier_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::write_barrier_JRT), "shenandoah_write_barrier", TypeRawPtr::BOTTOM); + Node* call = new CallLeafNode(ShenandoahBarrierSetC2::shenandoah_write_barrier_Type(), CAST_FROM_FN_PTR(address, ShenandoahRuntime::load_reference_barrier_JRT), "shenandoah_write_barrier", TypeRawPtr::BOTTOM); call->init_req(TypeFunc::Control, ctrl); call->init_req(TypeFunc::I_O, phase->C->top()); call->init_req(TypeFunc::Memory, mm); @@ -2557,7 +1111,7 @@ phase->register_new_node(val, ctrl); } -void ShenandoahWriteBarrierNode::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) { +void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const MemoryGraphFixer& fixer, Unique_Node_List& uses, Unique_Node_List& uses_to_ignore, uint last, PhaseIdealLoop* phase) { Node* ctrl = phase->get_ctrl(barrier); Node* init_raw_mem = fixer.find_mem(ctrl, barrier); @@ -2610,26 +1164,32 @@ } } -void ShenandoahWriteBarrierNode::pin_and_expand(PhaseIdealLoop* phase) { - Node_List enqueue_barriers; - if (ShenandoahStoreValEnqueueBarrier) { - Unique_Node_List wq; - wq.push(phase->C->root()); - for (uint i = 0; i < wq.size(); i++) { - Node* n = wq.at(i); - if (n->Opcode() == Op_ShenandoahEnqueueBarrier) { - enqueue_barriers.push(n); - } - for (uint i = 0; i < n->req(); i++) { - Node* in = n->in(i); - if (in != NULL) { - wq.push(in); - } - } +static Node* create_phis_on_call_return(Node* ctrl, Node* c, Node* n, Node* n_clone, const CallProjections& projs, PhaseIdealLoop* phase) { + Node* region = NULL; + while (c != ctrl) { + if (c->is_Region()) { + region = c; + } + c = phase->idom(c); + } + assert(region != NULL, ""); + Node* phi = new PhiNode(region, n->bottom_type()); + for (uint j = 1; j < region->req(); j++) { + Node* in = region->in(j); + if (phase->is_dominator(projs.fallthrough_catchproj, in)) { + phi->init_req(j, n); + } else if (phase->is_dominator(projs.catchall_catchproj, in)) { + phi->init_req(j, n_clone); + } else { + phi->init_req(j, create_phis_on_call_return(ctrl, in, n, n_clone, projs, phase)); } } + 
phase->register_new_node(phi, region); + return phi; +} - const bool trace = false; +void ShenandoahBarrierC2Support::pin_and_expand(PhaseIdealLoop* phase) { + ShenandoahBarrierSetC2State* state = ShenandoahBarrierSetC2::bsc2()->state(); // Collect raw memory state at CFG points in the entire graph and // record it in memory_nodes. Optimize the raw memory graph in the @@ -2637,34 +1197,9 @@ // simpler. GrowableArray<MemoryGraphFixer*> memory_graph_fixers; - // Let's try to common write barriers again - optimize_before_expansion(phase, memory_graph_fixers, true); - Unique_Node_List uses; - for (int i = 0; i < ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count(); i++) { - ShenandoahWriteBarrierNode* wb = ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barrier(i); - Node* ctrl = phase->get_ctrl(wb); - - Node* val = wb->in(ValueIn); - if (ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) { - assert(is_dominator(phase->get_ctrl(val), ctrl->in(0)->in(0), val, ctrl->in(0), phase), "can't move"); - phase->set_ctrl(wb, ctrl->in(0)->in(0)); - } else if (ctrl->is_CallRuntime()) { - assert(is_dominator(phase->get_ctrl(val), ctrl->in(0), val, ctrl, phase), "can't move"); - phase->set_ctrl(wb, ctrl->in(0)); - } - - assert(wb->Opcode() == Op_ShenandoahWriteBarrier, "only for write barriers"); - // Look for a null check that dominates this barrier and move the - // barrier right after the null check to enable implicit null - // checks - wb->pin_and_expand_move_barrier(phase, memory_graph_fixers, uses); - - wb->pin_and_expand_helper(phase); - } - - for (uint i = 0; i < enqueue_barriers.size(); i++) { - Node* barrier = enqueue_barriers.at(i); + for (int i = 0; i < state->enqueue_barriers_count(); i++) { + Node* barrier = state->enqueue_barrier(i); Node* ctrl = phase->get_ctrl(barrier); IdealLoopTree* loop = phase->get_loop(ctrl); if (loop->_head->is_OuterStripMinedLoop()) { @@ -2676,24 +1211,386 @@ } } - for (int i = ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count(); i > 0; i--) { - int cnt = ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count(); - ShenandoahWriteBarrierNode* wb = ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barrier(i-1); - Node* ctrl = phase->get_ctrl(wb); - IdealLoopTree* loop = phase->get_loop(ctrl); - if (loop->_head->is_OuterStripMinedLoop()) { - // Expanding a barrier here will break loop strip mining - // verification. Transform the loop so the loop nest doesn't - // appear as strip mined. 
- OuterStripMinedLoopNode* outer = loop->_head->as_OuterStripMinedLoop(); - hide_strip_mined_loop(outer, outer->unique_ctrl_out()->as_CountedLoop(), phase); + Node_Stack stack(0); + Node_List clones; + for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) { + ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i); + if (lrb->get_barrier_strength() == ShenandoahLoadReferenceBarrierNode::NONE) { + continue; + } + + Node* ctrl = phase->get_ctrl(lrb); + Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn); + + CallStaticJavaNode* unc = NULL; + Node* unc_ctrl = NULL; + Node* uncasted_val = val; + + for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) { + Node* u = lrb->fast_out(i); + if (u->Opcode() == Op_CastPP && + u->in(0) != NULL && + phase->is_dominator(u->in(0), ctrl)) { + const Type* u_t = phase->igvn().type(u); + + if (u_t->meet(TypePtr::NULL_PTR) != u_t && + u->in(0)->Opcode() == Op_IfTrue && + u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none) && + u->in(0)->in(0)->is_If() && + u->in(0)->in(0)->in(1)->Opcode() == Op_Bool && + u->in(0)->in(0)->in(1)->as_Bool()->_test._test == BoolTest::ne && + u->in(0)->in(0)->in(1)->in(1)->Opcode() == Op_CmpP && + u->in(0)->in(0)->in(1)->in(1)->in(1) == val && + u->in(0)->in(0)->in(1)->in(1)->in(2)->bottom_type() == TypePtr::NULL_PTR) { + IdealLoopTree* loop = phase->get_loop(ctrl); + IdealLoopTree* unc_loop = phase->get_loop(u->in(0)); + + if (!unc_loop->is_member(loop)) { + continue; + } + + Node* branch = no_branches(ctrl, u->in(0), false, phase); + assert(branch == NULL || branch == NodeSentinel, "was not looking for a branch"); + if (branch == NodeSentinel) { + continue; + } + + phase->igvn().replace_input_of(u, 1, val); + phase->igvn().replace_input_of(lrb, ShenandoahLoadReferenceBarrierNode::ValueIn, u); + phase->set_ctrl(u, u->in(0)); + phase->set_ctrl(lrb, u->in(0)); + unc = u->in(0)->as_Proj()->is_uncommon_trap_if_pattern(Deoptimization::Reason_none); + unc_ctrl = u->in(0); + val = u; + + for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) { + Node* u = val->fast_out(j); + if (u == lrb) continue; + phase->igvn().rehash_node_delayed(u); + int nb = u->replace_edge(val, lrb); + --j; jmax -= nb; + } + + RegionNode* r = new RegionNode(3); + IfNode* iff = unc_ctrl->in(0)->as_If(); + + Node* ctrl_use = unc_ctrl->unique_ctrl_out(); + Node* unc_ctrl_clone = unc_ctrl->clone(); + phase->register_control(unc_ctrl_clone, loop, iff); + Node* c = unc_ctrl_clone; + Node* new_cast = clone_null_check(c, val, unc_ctrl_clone, phase); + r->init_req(1, new_cast->in(0)->in(0)->as_If()->proj_out(0)); + + phase->igvn().replace_input_of(unc_ctrl, 0, c->in(0)); + phase->set_idom(unc_ctrl, c->in(0), phase->dom_depth(unc_ctrl)); + phase->lazy_replace(c, unc_ctrl); + c = NULL;; + phase->igvn().replace_input_of(val, 0, unc_ctrl_clone); + phase->set_ctrl(val, unc_ctrl_clone); + + IfNode* new_iff = new_cast->in(0)->in(0)->as_If(); + fix_null_check(unc, unc_ctrl_clone, r, uses, phase); + Node* iff_proj = iff->proj_out(0); + r->init_req(2, iff_proj); + phase->register_control(r, phase->ltree_root(), iff); + + Node* new_bol = new_iff->in(1)->clone(); + Node* new_cmp = new_bol->in(1)->clone(); + assert(new_cmp->Opcode() == Op_CmpP, "broken"); + assert(new_cmp->in(1) == val->in(1), "broken"); + new_bol->set_req(1, new_cmp); + new_cmp->set_req(1, lrb); + phase->register_new_node(new_bol, new_iff->in(0)); + phase->register_new_node(new_cmp, new_iff->in(0)); + 
phase->igvn().replace_input_of(new_iff, 1, new_bol); + phase->igvn().replace_input_of(new_cast, 1, lrb); + + for (DUIterator_Fast imax, i = lrb->fast_outs(imax); i < imax; i++) { + Node* u = lrb->fast_out(i); + if (u == new_cast || u == new_cmp) { + continue; + } + phase->igvn().rehash_node_delayed(u); + int nb = u->replace_edge(lrb, new_cast); + assert(nb > 0, "no update?"); + --i; imax -= nb; + } + + for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) { + Node* u = val->fast_out(i); + if (u == lrb) { + continue; + } + phase->igvn().rehash_node_delayed(u); + int nb = u->replace_edge(val, new_cast); + assert(nb > 0, "no update?"); + --i; imax -= nb; + } + + ctrl = unc_ctrl_clone; + phase->set_ctrl_and_loop(lrb, ctrl); + break; + } + } + } + if (ctrl->is_Proj() && ctrl->in(0)->is_CallJava()) { + CallNode* call = ctrl->in(0)->as_CallJava(); + CallProjections projs; + call->extract_projections(&projs, false, false); + + Node* lrb_clone = lrb->clone(); + phase->register_new_node(lrb_clone, projs.catchall_catchproj); + phase->set_ctrl(lrb, projs.fallthrough_catchproj); + + stack.push(lrb, 0); + clones.push(lrb_clone); + + do { + assert(stack.size() == clones.size(), ""); + Node* n = stack.node(); +#ifdef ASSERT + if (n->is_Load()) { + Node* mem = n->in(MemNode::Memory); + for (DUIterator_Fast jmax, j = mem->fast_outs(jmax); j < jmax; j++) { + Node* u = mem->fast_out(j); + assert(!u->is_Store() || !u->is_LoadStore() || phase->get_ctrl(u) != ctrl, "anti dependent store?"); + } + } +#endif + uint idx = stack.index(); + Node* n_clone = clones.at(clones.size()-1); + if (idx < n->outcnt()) { + Node* u = n->raw_out(idx); + Node* c = phase->ctrl_or_self(u); + if (c == ctrl) { + stack.set_index(idx+1); + assert(!u->is_CFG(), ""); + stack.push(u, 0); + Node* u_clone = u->clone(); + int nb = u_clone->replace_edge(n, n_clone); + assert(nb > 0, "should have replaced some uses"); + phase->register_new_node(u_clone, projs.catchall_catchproj); + clones.push(u_clone); + phase->set_ctrl(u, projs.fallthrough_catchproj); + } else { + bool replaced = false; + if (u->is_Phi()) { + for (uint k = 1; k < u->req(); k++) { + if (u->in(k) == n) { + if (phase->is_dominator(projs.catchall_catchproj, u->in(0)->in(k))) { + phase->igvn().replace_input_of(u, k, n_clone); + replaced = true; + } else if (!phase->is_dominator(projs.fallthrough_catchproj, u->in(0)->in(k))) { + phase->igvn().replace_input_of(u, k, create_phis_on_call_return(ctrl, u->in(0)->in(k), n, n_clone, projs, phase)); + replaced = true; + } + } + } + } else { + if (phase->is_dominator(projs.catchall_catchproj, c)) { + phase->igvn().rehash_node_delayed(u); + int nb = u->replace_edge(n, n_clone); + assert(nb > 0, "should have replaced some uses"); + replaced = true; + } else if (!phase->is_dominator(projs.fallthrough_catchproj, c)) { + phase->igvn().rehash_node_delayed(u); + int nb = u->replace_edge(n, create_phis_on_call_return(ctrl, c, n, n_clone, projs, phase)); + assert(nb > 0, "should have replaced some uses"); + replaced = true; + } + } + if (!replaced) { + stack.set_index(idx+1); + } + } + } else { + // assert(n_clone->outcnt() > 0, ""); + // assert(n->outcnt() > 0, ""); + stack.pop(); + clones.pop(); + } + } while (stack.size() > 0); + assert(stack.size() == 0 && clones.size() == 0, ""); + ctrl = projs.fallthrough_catchproj; } } + // Expand load-reference-barriers MemoryGraphFixer fixer(Compile::AliasIdxRaw, true, phase); Unique_Node_List uses_to_ignore; - for (uint i = 0; i < enqueue_barriers.size(); i++) { - Node* barrier = 
enqueue_barriers.at(i); + for (int i = state->load_reference_barriers_count() - 1; i >= 0; i--) { + ShenandoahLoadReferenceBarrierNode* lrb = state->load_reference_barrier(i); + if (lrb->get_barrier_strength() == ShenandoahLoadReferenceBarrierNode::NONE) { + phase->igvn().replace_node(lrb, lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn)); + continue; + } + uint last = phase->C->unique(); + Node* ctrl = phase->get_ctrl(lrb); + Node* val = lrb->in(ShenandoahLoadReferenceBarrierNode::ValueIn); + + + Node* orig_ctrl = ctrl; + + Node* raw_mem = fixer.find_mem(ctrl, lrb); + Node* init_raw_mem = raw_mem; + Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL); + // int alias = phase->C->get_alias_index(lrb->adr_type()); + + IdealLoopTree *loop = phase->get_loop(ctrl); + CallStaticJavaNode* unc = lrb->pin_and_expand_null_check(phase->igvn()); + Node* unc_ctrl = NULL; + if (unc != NULL) { + if (val->in(ShenandoahLoadReferenceBarrierNode::Control) != ctrl) { + unc = NULL; + } else { + unc_ctrl = val->in(ShenandoahLoadReferenceBarrierNode::Control); + } + } + + Node* uncasted_val = val; + if (unc != NULL) { + uncasted_val = val->in(1); + } + + Node* heap_stable_ctrl = NULL; + Node* null_ctrl = NULL; + + assert(val->bottom_type()->make_oopptr(), "need oop"); + assert(val->bottom_type()->make_oopptr()->const_oop() == NULL, "expect non-constant"); + + enum { _heap_stable = 1, _not_cset, _not_equal, _evac_path, _null_path, PATH_LIMIT }; + Node* region = new RegionNode(PATH_LIMIT); + Node* val_phi = new PhiNode(region, uncasted_val->bottom_type()->is_oopptr()); + Node* raw_mem_phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM); + + // Stable path. + test_heap_stable(ctrl, raw_mem, heap_stable_ctrl, phase); + IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If(); + + // Heap stable case + region->init_req(_heap_stable, heap_stable_ctrl); + val_phi->init_req(_heap_stable, uncasted_val); + raw_mem_phi->init_req(_heap_stable, raw_mem); + + Node* reg2_ctrl = NULL; + // Null case + test_null(ctrl, val, null_ctrl, phase); + if (null_ctrl != NULL) { + reg2_ctrl = null_ctrl->in(0); + region->init_req(_null_path, null_ctrl); + val_phi->init_req(_null_path, uncasted_val); + raw_mem_phi->init_req(_null_path, raw_mem); + } else { + region->del_req(_null_path); + val_phi->del_req(_null_path); + raw_mem_phi->del_req(_null_path); + } + + // Test for in-cset. + // Wires !in_cset(obj) to slot 2 of region and phis + Node* not_cset_ctrl = NULL; + in_cset_fast_test(ctrl, not_cset_ctrl, uncasted_val, raw_mem, phase); + if (not_cset_ctrl != NULL) { + if (reg2_ctrl == NULL) reg2_ctrl = not_cset_ctrl->in(0); + region->init_req(_not_cset, not_cset_ctrl); + val_phi->init_req(_not_cset, uncasted_val); + raw_mem_phi->init_req(_not_cset, raw_mem); + } + + // Resolve object when orig-value is in cset. + // Make the unconditional resolve for fwdptr. 
+ Node* new_val = uncasted_val; + if (unc_ctrl != NULL) { + // Clone the null check in this branch to allow implicit null check + new_val = clone_null_check(ctrl, val, unc_ctrl, phase); + fix_null_check(unc, unc_ctrl, ctrl->in(0)->as_If()->proj_out(0), uses, phase); + + IfNode* iff = unc_ctrl->in(0)->as_If(); + phase->igvn().replace_input_of(iff, 1, phase->igvn().intcon(1)); + } + Node* addr = new AddPNode(new_val, uncasted_val, phase->igvn().MakeConX(ShenandoahBrooksPointer::byte_offset())); + phase->register_new_node(addr, ctrl); + assert(val->bottom_type()->isa_oopptr(), "what else?"); + const TypePtr* obj_type = val->bottom_type()->is_oopptr(); + const TypePtr* adr_type = TypeRawPtr::BOTTOM; + Node* fwd = new LoadPNode(ctrl, raw_mem, addr, adr_type, obj_type, MemNode::unordered); + phase->register_new_node(fwd, ctrl); + + // Only branch to LRB stub if object is not forwarded; otherwise reply with fwd ptr + Node* cmp = new CmpPNode(fwd, new_val); + phase->register_new_node(cmp, ctrl); + Node* bol = new BoolNode(cmp, BoolTest::eq); + phase->register_new_node(bol, ctrl); + + IfNode* iff = new IfNode(ctrl, bol, PROB_UNLIKELY(0.999), COUNT_UNKNOWN); + if (reg2_ctrl == NULL) reg2_ctrl = iff; + phase->register_control(iff, loop, ctrl); + Node* if_not_eq = new IfFalseNode(iff); + phase->register_control(if_not_eq, loop, iff); + Node* if_eq = new IfTrueNode(iff); + phase->register_control(if_eq, loop, iff); + + // Wire up not-equal-path in slots 3. + region->init_req(_not_equal, if_not_eq); + val_phi->init_req(_not_equal, fwd); + raw_mem_phi->init_req(_not_equal, raw_mem); + + // Call wb-stub and wire up that path in slots 4 + Node* result_mem = NULL; + ctrl = if_eq; + call_lrb_stub(ctrl, fwd, result_mem, raw_mem, phase); + region->init_req(_evac_path, ctrl); + val_phi->init_req(_evac_path, fwd); + raw_mem_phi->init_req(_evac_path, result_mem); + + phase->register_control(region, loop, heap_stable_iff); + Node* out_val = val_phi; + phase->register_new_node(val_phi, region); + phase->register_new_node(raw_mem_phi, region); + + fix_ctrl(lrb, region, fixer, uses, uses_to_ignore, last, phase); + + ctrl = orig_ctrl; + + if (unc != NULL) { + for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) { + Node* u = val->fast_out(i); + Node* c = phase->ctrl_or_self(u); + if (u != lrb && (c != ctrl || is_dominator_same_ctrl(c, lrb, u, phase))) { + phase->igvn().rehash_node_delayed(u); + int nb = u->replace_edge(val, out_val); + --i, imax -= nb; + } + } + if (val->outcnt() == 0) { + phase->igvn()._worklist.push(val); + } + } + phase->igvn().replace_node(lrb, out_val); + + follow_barrier_uses(out_val, ctrl, uses, phase); + + for(uint next = 0; next < uses.size(); next++ ) { + Node *n = uses.at(next); + assert(phase->get_ctrl(n) == ctrl, "bad control"); + assert(n != init_raw_mem, "should leave input raw mem above the barrier"); + phase->set_ctrl(n, region); + follow_barrier_uses(n, ctrl, uses, phase); + } + + // The slow path call produces memory: hook the raw memory phi + // from the expanded load reference barrier with the rest of the graph + // which may require adding memory phis at every post dominated + // region and at enclosing loop heads. Use the memory state + // collected in memory_nodes to fix the memory graph. Update that + // memory state as we go. + fixer.fix_mem(ctrl, region, init_raw_mem, raw_mem_for_ctrl, raw_mem_phi, uses); + } + // Done expanding load-reference-barriers. 
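For orientation: the hunk above replaces the old two-level region of the write-barrier expansion with a single region whose slots are _heap_stable, _not_cset, _not_equal, _evac_path and _null_path. A minimal sketch of the decision ladder that pin_and_expand() now builds for each ShenandoahLoadReferenceBarrierNode is given below. It is illustrative only, not part of the changeset: heap_is_stable() and in_collection_set() are assumed stand-ins for the graph shapes produced by test_heap_stable() and in_cset_fast_test(), brooks_pointer_byte_offset() stands in for ShenandoahBrooksPointer::byte_offset(), and load_reference_barrier_JRT stands in for the ShenandoahRuntime entry point named in the diff.

    #include <cstddef>
    #include <cstdint>

    typedef void* oop_t;                                  // placeholder for HotSpot's oop
    extern bool     heap_is_stable();                     // assumed: gc_state & HAS_FORWARDED == 0
    extern bool     in_collection_set(oop_t obj);         // assumed: cset fast test
    extern intptr_t brooks_pointer_byte_offset();         // stands in for ShenandoahBrooksPointer::byte_offset()
    extern oop_t    load_reference_barrier_JRT(oop_t obj);// stands in for the runtime stub

    // Sketch of the control flow encoded by the expanded load reference barrier.
    static oop_t load_reference_barrier_shape(oop_t obj) {
      if (heap_is_stable())        return obj;            // _heap_stable path
      if (obj == NULL)             return obj;            // _null_path (only added when the value may be null)
      if (!in_collection_set(obj)) return obj;            // _not_cset path
      // Unconditional read of the Brooks forwarding pointer.
      oop_t fwd = *(oop_t*)((char*)obj + brooks_pointer_byte_offset());
      if (fwd != obj)              return fwd;            // _not_equal: object already forwarded
      return load_reference_barrier_JRT(fwd);             // _evac_path: slow-path stub (fwd == obj here)
    }

The sketch also shows why the old per-barrier memory phi disappears: every fast path returns the original or forwarded oop without touching Java-visible memory, so only the raw-memory phi fed by the stub call needs to be merged back by the MemoryGraphFixer.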
+ assert(ShenandoahBarrierSetC2::bsc2()->state()->load_reference_barriers_count() == 0, "all load reference barrier nodes should have been replaced"); + + for (int i = state->enqueue_barriers_count() - 1; i >= 0; i--) { + Node* barrier = state->enqueue_barrier(i); Node* pre_val = barrier->in(1); if (phase->igvn().type(pre_val)->higher_equal(TypePtr::NULL_PTR)) { @@ -2840,212 +1737,11 @@ phase->igvn().replace_node(barrier, pre_val); } + assert(state->enqueue_barriers_count() == 0, "all enqueue barrier nodes should have been replaced"); - for (int i = ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count(); i > 0; i--) { - int cnt = ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count(); - ShenandoahWriteBarrierNode* wb = ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barrier(i-1); - - uint last = phase->C->unique(); - Node* ctrl = phase->get_ctrl(wb); - Node* orig_ctrl = ctrl; - - Node* raw_mem = fixer.find_mem(ctrl, wb); - Node* init_raw_mem = raw_mem; - Node* raw_mem_for_ctrl = fixer.find_mem(ctrl, NULL); - int alias = phase->C->get_alias_index(wb->adr_type()); - Node* wb_mem = wb->in(Memory); - Node* init_wb_mem = wb_mem; - - Node* val = wb->in(ValueIn); - Node* wbproj = wb->find_out_with(Op_ShenandoahWBMemProj); - IdealLoopTree *loop = phase->get_loop(ctrl); - - assert(val->Opcode() != Op_ShenandoahWriteBarrier, "No chain of write barriers"); - - CallStaticJavaNode* unc = wb->pin_and_expand_null_check(phase->igvn()); - Node* unc_ctrl = NULL; - if (unc != NULL) { - if (val->in(0) != ctrl) { - unc = NULL; - } else { - unc_ctrl = val->in(0); - } - } - - Node* uncasted_val = val; - if (unc != NULL) { - uncasted_val = val->in(1); - } - - Node* heap_stable_ctrl = NULL; - Node* null_ctrl = NULL; - - assert(val->bottom_type()->make_oopptr(), "need oop"); - assert(val->bottom_type()->make_oopptr()->const_oop() == NULL, "expect non-constant"); - - enum { _heap_stable = 1, _heap_unstable, PATH_LIMIT }; - Node* region = new RegionNode(PATH_LIMIT); - Node* val_phi = new PhiNode(region, uncasted_val->bottom_type()->is_oopptr()); - Node* mem_phi = PhiNode::make(region, wb_mem, Type::MEMORY, phase->C->alias_type(wb->adr_type())->adr_type()); - Node* raw_mem_phi = PhiNode::make(region, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM); - - enum { _not_cset = 1, _not_equal, _evac_path, _null_path, PATH_LIMIT2 }; - Node* region2 = new RegionNode(PATH_LIMIT2); - Node* val_phi2 = new PhiNode(region2, uncasted_val->bottom_type()->is_oopptr()); - Node* mem_phi2 = PhiNode::make(region2, wb_mem, Type::MEMORY, phase->C->alias_type(wb->adr_type())->adr_type()); - Node* raw_mem_phi2 = PhiNode::make(region2, raw_mem, Type::MEMORY, TypeRawPtr::BOTTOM); - - // Stable path. 
- test_heap_stable(ctrl, raw_mem, heap_stable_ctrl, phase); - IfNode* heap_stable_iff = heap_stable_ctrl->in(0)->as_If(); - - // Heap stable case - region->init_req(_heap_stable, heap_stable_ctrl); - val_phi->init_req(_heap_stable, uncasted_val); - mem_phi->init_req(_heap_stable, wb_mem); - raw_mem_phi->init_req(_heap_stable, raw_mem); - - Node* reg2_ctrl = NULL; - // Null case - test_null(ctrl, val, null_ctrl, phase); - if (null_ctrl != NULL) { - reg2_ctrl = null_ctrl->in(0); - region2->init_req(_null_path, null_ctrl); - val_phi2->init_req(_null_path, uncasted_val); - mem_phi2->init_req(_null_path, wb_mem); - raw_mem_phi2->init_req(_null_path, raw_mem); - } else { - region2->del_req(_null_path); - val_phi2->del_req(_null_path); - mem_phi2->del_req(_null_path); - raw_mem_phi2->del_req(_null_path); - } - - // Test for in-cset. - // Wires !in_cset(obj) to slot 2 of region and phis - Node* not_cset_ctrl = NULL; - in_cset_fast_test(ctrl, not_cset_ctrl, uncasted_val, raw_mem, phase); - if (not_cset_ctrl != NULL) { - if (reg2_ctrl == NULL) reg2_ctrl = not_cset_ctrl->in(0); - region2->init_req(_not_cset, not_cset_ctrl); - val_phi2->init_req(_not_cset, uncasted_val); - mem_phi2->init_req(_not_cset, wb_mem); - raw_mem_phi2->init_req(_not_cset, raw_mem); - } - - // Resolve object when orig-value is in cset. - // Make the unconditional resolve for fwdptr, not the read barrier. - Node* new_val = uncasted_val; - if (unc_ctrl != NULL) { - // Clone the null check in this branch to allow implicit null check - new_val = clone_null_check(ctrl, val, unc_ctrl, phase); - fix_null_check(unc, unc_ctrl, ctrl->in(0)->as_If()->proj_out(0), uses, phase); - - IfNode* iff = unc_ctrl->in(0)->as_If(); - phase->igvn().replace_input_of(iff, 1, phase->igvn().intcon(1)); - } - Node* addr = new AddPNode(new_val, uncasted_val, phase->igvn().MakeConX(ShenandoahBrooksPointer::byte_offset())); - phase->register_new_node(addr, ctrl); - assert(val->bottom_type()->isa_oopptr(), "what else?"); - const TypePtr* obj_type = val->bottom_type()->is_oopptr(); - const TypePtr* adr_type = ShenandoahBarrierNode::brooks_pointer_type(obj_type); - Node* fwd = new LoadPNode(ctrl, wb_mem, addr, adr_type, obj_type, MemNode::unordered); - phase->register_new_node(fwd, ctrl); - - // Only branch to WB stub if object is not forwarded; otherwise reply with fwd ptr - Node* cmp = new CmpPNode(fwd, new_val); - phase->register_new_node(cmp, ctrl); - Node* bol = new BoolNode(cmp, BoolTest::eq); - phase->register_new_node(bol, ctrl); - - IfNode* iff = new IfNode(ctrl, bol, PROB_UNLIKELY(0.999), COUNT_UNKNOWN); - if (reg2_ctrl == NULL) reg2_ctrl = iff; - phase->register_control(iff, loop, ctrl); - Node* if_not_eq = new IfFalseNode(iff); - phase->register_control(if_not_eq, loop, iff); - Node* if_eq = new IfTrueNode(iff); - phase->register_control(if_eq, loop, iff); - - // Wire up not-equal-path in slots 3. 
- region2->init_req(_not_equal, if_not_eq); - val_phi2->init_req(_not_equal, fwd); - mem_phi2->init_req(_not_equal, wb_mem); - raw_mem_phi2->init_req(_not_equal, raw_mem); - - // Call wb-stub and wire up that path in slots 4 - Node* result_mem = NULL; - ctrl = if_eq; - call_wb_stub(ctrl, new_val, result_mem, - raw_mem, wb_mem, - alias, phase); - region2->init_req(_evac_path, ctrl); - val_phi2->init_req(_evac_path, new_val); - mem_phi2->init_req(_evac_path, result_mem); - raw_mem_phi2->init_req(_evac_path, result_mem); - - phase->register_control(region2, loop, reg2_ctrl); - phase->register_new_node(val_phi2, region2); - phase->register_new_node(mem_phi2, region2); - phase->register_new_node(raw_mem_phi2, region2); - - region->init_req(_heap_unstable, region2); - val_phi->init_req(_heap_unstable, val_phi2); - mem_phi->init_req(_heap_unstable, mem_phi2); - raw_mem_phi->init_req(_heap_unstable, raw_mem_phi2); - - phase->register_control(region, loop, heap_stable_iff); - Node* out_val = val_phi; - phase->register_new_node(val_phi, region); - phase->register_new_node(mem_phi, region); - phase->register_new_node(raw_mem_phi, region); - - fix_ctrl(wb, region, fixer, uses, uses_to_ignore, last, phase); - - ctrl = orig_ctrl; - - phase->igvn().replace_input_of(wbproj, ShenandoahWBMemProjNode::WriteBarrier, phase->C->top()); - phase->igvn().replace_node(wbproj, mem_phi); - if (unc != NULL) { - for (DUIterator_Fast imax, i = val->fast_outs(imax); i < imax; i++) { - Node* u = val->fast_out(i); - Node* c = phase->ctrl_or_self(u); - if (u != wb && (c != ctrl || is_dominator_same_ctrl(c, wb, u, phase))) { - phase->igvn().rehash_node_delayed(u); - int nb = u->replace_edge(val, out_val); - --i, imax -= nb; - } - } - if (val->outcnt() == 0) { - phase->igvn()._worklist.push(val); - } - } - phase->igvn().replace_node(wb, out_val); - - follow_barrier_uses(mem_phi, ctrl, uses, phase); - follow_barrier_uses(out_val, ctrl, uses, phase); - - for(uint next = 0; next < uses.size(); next++ ) { - Node *n = uses.at(next); - assert(phase->get_ctrl(n) == ctrl, "bad control"); - assert(n != init_raw_mem, "should leave input raw mem above the barrier"); - phase->set_ctrl(n, region); - follow_barrier_uses(n, ctrl, uses, phase); - } - - // The slow path call produces memory: hook the raw memory phi - // from the expanded write barrier with the rest of the graph - // which may require adding memory phis at every post dominated - // region and at enclosing loop heads. Use the memory state - // collected in memory_nodes to fix the memory graph. Update that - // memory state as we go. 
-    fixer.fix_mem(ctrl, region, init_raw_mem, raw_mem_for_ctrl, raw_mem_phi, uses);
-    assert(ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count() == cnt - 1, "not replaced");
-  }
-
-  assert(ShenandoahBarrierSetC2::bsc2()->state()->shenandoah_barriers_count() == 0, "all write barrier nodes should have been replaced");
 }
 
-void ShenandoahWriteBarrierNode::move_heap_stable_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) {
+void ShenandoahBarrierC2Support::move_heap_stable_test_out_of_loop(IfNode* iff, PhaseIdealLoop* phase) {
   IdealLoopTree *loop = phase->get_loop(iff);
   Node* loop_head = loop->_head;
   Node* entry_c = loop_head->in(LoopNode::EntryControl);
@@ -3078,7 +1774,7 @@
   }
 }
 
-bool ShenandoahWriteBarrierNode::identical_backtoback_ifs(Node *n, PhaseIdealLoop* phase) {
+bool ShenandoahBarrierC2Support::identical_backtoback_ifs(Node* n, PhaseIdealLoop* phase) {
   if (!n->is_If() || n->is_CountedLoopEnd()) {
     return false;
   }
@@ -3113,7 +1809,7 @@
   return true;
 }
 
-void ShenandoahWriteBarrierNode::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) {
+void ShenandoahBarrierC2Support::merge_back_to_back_tests(Node* n, PhaseIdealLoop* phase) {
   assert(is_heap_stable_test(n), "no other tests");
   if (identical_backtoback_ifs(n, phase)) {
     Node* n_ctrl = n->in(0);
@@ -3149,7 +1845,7 @@
   }
 }
 
-IfNode* ShenandoahWriteBarrierNode::find_unswitching_candidate(const IdealLoopTree *loop, PhaseIdealLoop* phase) {
+IfNode* ShenandoahBarrierC2Support::find_unswitching_candidate(const IdealLoopTree* loop, PhaseIdealLoop* phase) {
   // Find first invariant test that doesn't exit the loop
   LoopNode *head = loop->_head->as_Loop();
   IfNode* unswitch_iff = NULL;
@@ -3194,10 +1890,9 @@
 }
 
-void ShenandoahWriteBarrierNode::optimize_after_expansion(VectorSet &visited, Node_Stack &stack, Node_List &old_new, PhaseIdealLoop* phase) {
+void ShenandoahBarrierC2Support::optimize_after_expansion(VectorSet &visited, Node_Stack &stack, Node_List &old_new, PhaseIdealLoop* phase) {
   Node_List heap_stable_tests;
   Node_List gc_state_loads;
-
   stack.push(phase->C->start(), 0);
   do {
     Node* n = stack.node();
@@ -3274,7 +1969,7 @@
 }
 
 #ifdef ASSERT
-void ShenandoahBarrierNode::verify_raw_mem(RootNode* root) {
+void ShenandoahBarrierC2Support::verify_raw_mem(RootNode* root) {
   const bool trace = false;
   ResourceMark rm;
   Unique_Node_List nodes;
@@ -3372,6 +2067,10 @@
 }
 #endif
 
+ShenandoahEnqueueBarrierNode::ShenandoahEnqueueBarrierNode(Node* val) : Node(NULL, val) {
+  ShenandoahBarrierSetC2::bsc2()->state()->add_enqueue_barrier(this);
+}
+
 const Type* ShenandoahEnqueueBarrierNode::bottom_type() const {
   if (in(1) == NULL || in(1)->is_top()) {
     return Type::TOP;
   }
@@ -3400,6 +2099,7 @@
 int ShenandoahEnqueueBarrierNode::needed(Node* n) {
   if (n == NULL ||
       n->is_Allocate() ||
+      n->Opcode() == Op_ShenandoahEnqueueBarrier ||
       n->bottom_type() == TypePtr::NULL_PTR ||
       (n->bottom_type()->make_oopptr() != NULL && n->bottom_type()->make_oopptr()->const_oop() != NULL)) {
     return NotNeeded;
   }
@@ -3530,6 +2230,26 @@
           Node* call = in->in(0)->in(0);
           assert(call->is_Call(), "");
           mem = call->in(TypeFunc::Memory);
+        } else if (in->Opcode() == Op_NeverBranch) {
+          ResourceMark rm;
+          Unique_Node_List wq;
+          wq.push(in);
+          wq.push(in->as_Multi()->proj_out(0));
+          for (uint j = 1; j < wq.size(); j++) {
+            Node* c = wq.at(j);
+            assert(!c->is_Root(), "shouldn't leave loop");
+            if (c->is_SafePoint()) {
+              assert(mem == NULL, "only one safepoint");
+              mem = c->in(TypeFunc::Memory);
+            }
+            for (DUIterator_Fast kmax, k = c->fast_outs(kmax); k < kmax; k++) {
+              Node* u = c->fast_out(k);
+              if (u->is_CFG()) {
+                wq.push(u);
+              }
+            }
+          }
+          assert(mem != NULL, "should have found safepoint");
         }
       }
     } else {
@@ -3568,12 +2288,6 @@
       assert(_alias == Compile::AliasIdxRaw, "");
       stack.push(mem, mem->req());
      mem = mem->in(MemNode::Memory);
-    } else if (mem->Opcode() == Op_ShenandoahWriteBarrier) {
-      assert(_alias != Compile::AliasIdxRaw, "");
-      mem = mem->in(ShenandoahBarrierNode::Memory);
-    } else if (mem->Opcode() == Op_ShenandoahWBMemProj) {
-      stack.push(mem, mem->req());
-      mem = mem->in(ShenandoahWBMemProjNode::WriteBarrier);
     } else {
 #ifdef ASSERT
       mem->dump();
@@ -3627,7 +2341,7 @@
   while (progress) {
     progress = false;
     iteration++;
-    assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop(), "");
+    assert(iteration <= 2+max_depth || _phase->C->has_irreducible_loop() || has_never_branch(_phase->C->root()), "");
     if (trace) { tty->print_cr("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"); }
     IdealLoopTree* last_updated_ilt = NULL;
     for (int i = rpo_list.size() - 1; i >= 0; i--) {
@@ -3795,7 +2509,7 @@
       mem = _memory_nodes[c->_idx];
     }
     if (n != NULL && mem_is_valid(mem, c)) {
-      while (!ShenandoahWriteBarrierNode::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
+      while (!ShenandoahBarrierC2Support::is_dominator_same_ctrl(c, mem, n, _phase) && _phase->ctrl_or_self(mem) == ctrl) {
        mem = next_mem(mem, _alias);
      }
      if (mem->is_MergeMem()) {
@@ -3841,12 +2555,6 @@
     } else if (old->Opcode() == Op_SCMemProj) {
       assert(_alias == Compile::AliasIdxRaw, "");
       old = old->in(0);
-    } else if (old->Opcode() == Op_ShenandoahWBMemProj) {
-      assert(_alias != Compile::AliasIdxRaw, "");
-      old = old->in(ShenandoahWBMemProjNode::WriteBarrier);
-    } else if (old->Opcode() == Op_ShenandoahWriteBarrier) {
-      assert(_alias != Compile::AliasIdxRaw, "");
-      old = old->in(ShenandoahBarrierNode::Memory);
     } else {
       ShouldNotReachHere();
     }
@@ -3856,7 +2564,7 @@
       _memory_nodes.map(ctrl->_idx, mem);
       _memory_nodes.map(new_ctrl->_idx, mem_for_ctrl);
     }
-    uint input = prev->Opcode() == Op_ShenandoahWriteBarrier ? (uint)ShenandoahBarrierNode::Memory : (uint)MemNode::Memory;
+    uint input = (uint)MemNode::Memory;
     _phase->igvn().replace_input_of(prev, input, new_mem);
   } else {
     uses.clear();
@@ -3924,19 +2632,14 @@
       } else {
         DEBUG_ONLY(if (trace) { tty->print("ZZZ NOT setting mem"); m->dump(); });
         for (;;) {
-          assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj() || m->Opcode() == Op_ShenandoahWriteBarrier || m->Opcode() == Op_ShenandoahWBMemProj, "");
+          assert(m->is_Mem() || m->is_LoadStore() || m->is_Proj(), "");
           Node* next = NULL;
           if (m->is_Proj()) {
             next = m->in(0);
-          } else if (m->Opcode() == Op_ShenandoahWBMemProj) {
-            next = m->in(ShenandoahWBMemProjNode::WriteBarrier);
-          } else if (m->is_Mem() || m->is_LoadStore()) {
+          } else {
+            assert(m->is_Mem() || m->is_LoadStore(), "");
             assert(_alias == Compile::AliasIdxRaw, "");
             next = m->in(MemNode::Memory);
-          } else {
-            assert(_alias != Compile::AliasIdxRaw, "");
-            assert (m->Opcode() == Op_ShenandoahWriteBarrier, "");
-            next = m->in(ShenandoahBarrierNode::Memory);
           }
           if (_phase->get_ctrl(next) != u) {
             break;
@@ -3953,8 +2656,8 @@
         }
 
         DEBUG_ONLY(if (trace) { tty->print("ZZZ setting to phi"); m->dump(); });
-        assert(m->is_Mem() || m->is_LoadStore() || m->Opcode() == Op_ShenandoahWriteBarrier, "");
-        uint input = (m->is_Mem() || m->is_LoadStore()) ? (uint)MemNode::Memory : (uint)ShenandoahBarrierNode::Memory;
+        assert(m->is_Mem() || m->is_LoadStore(), "");
+        uint input = (uint)MemNode::Memory;
         _phase->igvn().replace_input_of(m, input, phi);
         push = false;
       }
@@ -4180,20 +2883,7 @@
   for (DUIterator i = mem->outs(); mem->has_out(i); i++) {
     Node* u = mem->out(i);
     if (u != replacement && u->_idx < last) {
-      if (u->is_ShenandoahBarrier() && _alias != Compile::AliasIdxRaw) {
-        if (_phase->C->get_alias_index(u->adr_type()) == _alias && ShenandoahWriteBarrierNode::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
-          _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
-          assert(u->find_edge(mem) == -1, "only one edge");
-          --i;
-        }
-      } else if (u->is_Mem()) {
-        if (_phase->C->get_alias_index(u->adr_type()) == _alias && ShenandoahWriteBarrierNode::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
-          assert(_alias == Compile::AliasIdxRaw , "only raw memory can lead to a memory operation");
-          _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
-          assert(u->find_edge(mem) == -1, "only one edge");
-          --i;
-        }
-      } else if (u->is_MergeMem()) {
+      if (u->is_MergeMem()) {
         MergeMemNode* u_mm = u->as_MergeMem();
         if (u_mm->memory_at(_alias) == mem) {
           MergeMemNode* newmm = NULL;
@@ -4221,7 +2911,7 @@
             }
           }
         } else {
-          if (rep_ctrl != uu && ShenandoahWriteBarrierNode::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
+          if (rep_ctrl != uu && ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(uu), replacement, uu, _phase)) {
             if (newmm == NULL) {
              newmm = clone_merge_mem(u, mem, rep_proj, rep_ctrl, i);
            }
@@ -4262,10 +2952,11 @@
                u->Opcode() == Op_Rethrow ||
                u->Opcode() == Op_Return ||
                u->Opcode() == Op_SafePoint ||
+               u->Opcode() == Op_StoreLConditional ||
                (u->is_CallStaticJava() && u->as_CallStaticJava()->uncommon_trap_request() != 0) ||
                (u->is_CallStaticJava() && u->as_CallStaticJava()->_entry_point == OptoRuntime::rethrow_stub()) ||
                u->Opcode() == Op_CallLeaf, "");
-        if (ShenandoahWriteBarrierNode::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
+        if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
          if (mm == NULL) {
            mm = allocate_merge_mem(mem, rep_proj, rep_ctrl);
          }
@@ -4273,7 +2964,7 @@
          --i;
        }
      } else if (_phase->C->get_alias_index(u->adr_type()) == _alias) {
-        if (ShenandoahWriteBarrierNode::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
+        if (ShenandoahBarrierC2Support::is_dominator(rep_ctrl, _phase->ctrl_or_self(u), replacement, u, _phase)) {
          _phase->igvn().replace_input_of(u, u->find_edge(mem), rep_proj);
          --i;
        }
@@ -4282,11 +2973,322 @@
   }
 }
 
-void MemoryGraphFixer::remove(Node* n) {
-  assert(n->Opcode() == Op_ShenandoahWBMemProj, "");
-  Node* c = _phase->get_ctrl(n);
-  Node* mem = find_mem(c, NULL);
-  if (mem == n) {
-    _memory_nodes.map(c->_idx, mem->in(ShenandoahWBMemProjNode::WriteBarrier)->in(ShenandoahBarrierNode::Memory));
+ShenandoahLoadReferenceBarrierNode::ShenandoahLoadReferenceBarrierNode(Node* ctrl, Node* obj)
+: Node(ctrl, obj) {
+  ShenandoahBarrierSetC2::bsc2()->state()->add_load_reference_barrier(this);
+}
+
+const Type* ShenandoahLoadReferenceBarrierNode::bottom_type() const {
+  if (in(ValueIn) == NULL || in(ValueIn)->is_top()) {
+    return Type::TOP;
   }
+  const Type* t = in(ValueIn)->bottom_type();
+  if (t == TypePtr::NULL_PTR) {
+    return t;
+  }
+  return t->is_oopptr();
 }
+
+const Type* ShenandoahLoadReferenceBarrierNode::Value(PhaseGVN* phase) const {
+  // Either input is TOP ==> the result is TOP
+  const Type *t2 = phase->type(in(ValueIn));
+  if( t2 == Type::TOP ) return Type::TOP;
+
+  if (t2 == TypePtr::NULL_PTR) {
+    return t2;
+  }
+
+  const Type* type = t2->is_oopptr(