OpenJDK / amber / amber

changeset 52140:7afd61192cd4 (branch: switch)

Automatic merge with default

| author | mcimadamore |
|---|---|
| date | Thu, 16 Aug 2018 22:05:57 +0200 |
| parents | ca29d5cf9e8f 083e731da31a |
| children | f76a81258355 |
| files | src/hotspot/share/gc/g1/g1SATBMarkQueueFilter.cpp src/hotspot/share/gc/g1/g1SATBMarkQueueFilter.hpp src/hotspot/share/runtime/simpleThresholdPolicy.cpp src/hotspot/share/runtime/simpleThresholdPolicy.hpp src/hotspot/share/runtime/simpleThresholdPolicy.inline.hpp src/jdk.compiler/share/classes/com/sun/tools/javac/comp/LambdaToMethod.java test/hotspot/jtreg/gc/g1/TestStringSymbolTableStats.java test/jdk/com/sun/jdi/ArrayLengthDumpTest.sh test/jdk/com/sun/jdi/BreakpointWithFullGC.sh |
| diffstat | 502 files changed, 9416 insertions(+), 4772 deletions(-) |
--- a/make/gensrc/Gensrc-jdk.internal.vm.compiler.gmk	Thu Aug 09 22:06:11 2018 +0200
+++ b/make/gensrc/Gensrc-jdk.internal.vm.compiler.gmk	Thu Aug 16 22:05:57 2018 +0200
@@ -124,7 +124,7 @@
 	($(CD) $(GENSRC_DIR)/META-INF/providers && \
 	    p=""; \
 	    impl=""; \
-	    for i in $$($(LS) | $(SORT)); do \
+	    for i in $$($(GREP) '^' * | $(SORT) -t ':' -k 2 | $(SED) 's/:.*//'); do \
 	      c=$$($(CAT) $$i | $(TR) -d '\n\r'); \
 	      if test x$$p != x$$c; then \
 	        if test x$$p != x; then \
--- a/make/lib/Awt2dLibraries.gmk	Thu Aug 09 22:06:11 2018 +0200
+++ b/make/lib/Awt2dLibraries.gmk	Thu Aug 16 22:05:57 2018 +0200
@@ -796,6 +796,12 @@
 
   LIBSPLASHSCREEN_CFLAGS += -DSPLASHSCREEN -DPNG_NO_MMX_CODE -DPNG_ARM_NEON_OPT=0
 
+  ifeq ($(OPENJDK_TARGET_OS), linux)
+    ifeq ($(OPENJDK_TARGET_CPU_ARCH), ppc)
+      LIBSPLASHSCREEN_CFLAGS += -DPNG_POWERPC_VSX_OPT=0
+    endif
+  endif
+
   ifeq ($(OPENJDK_TARGET_OS), macosx)
     LIBSPLASHSCREEN_CFLAGS += -DWITH_MACOSX
--- a/src/hotspot/.mx.jvmci/suite.py	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/.mx.jvmci/suite.py	Thu Aug 16 22:05:57 2018 +0200
@@ -43,7 +43,8 @@
     "jdk.vm.ci.services" : {
       "subDir" : "../jdk.internal.vm.ci/share/classes",
       "sourceDirs" : ["src"],
-      "javaCompliance" : "9",
+      "javaCompliance" : "9+",
+      "checkstyleVersion" : "8.8",
       "workingSets" : "API,JVMCI",
     },
 
@@ -53,7 +54,7 @@
       "subDir" : "../jdk.internal.vm.ci/share/classes",
       "sourceDirs" : ["src"],
       "checkstyle" : "jdk.vm.ci.services",
-      "javaCompliance" : "9",
+      "javaCompliance" : "9+",
       "workingSets" : "API,JVMCI",
     },
 
@@ -61,7 +62,7 @@
       "subDir" : "../jdk.internal.vm.ci/share/classes",
       "sourceDirs" : ["src"],
       "checkstyle" : "jdk.vm.ci.services",
-      "javaCompliance" : "9",
+      "javaCompliance" : "9+",
       "workingSets" : "API,JVMCI",
     },
 
@@ -70,7 +71,7 @@
       "sourceDirs" : ["src"],
       "dependencies" : ["jdk.vm.ci.meta"],
       "checkstyle" : "jdk.vm.ci.services",
-      "javaCompliance" : "9",
+      "javaCompliance" : "9+",
       "workingSets" : "API,JVMCI",
     },
 
@@ -85,7 +86,7 @@
         "jdk.vm.ci.hotspot",
       ],
       "checkstyle" : "jdk.vm.ci.services",
-      "javaCompliance" : "9",
+      "javaCompliance" : "9+",
       "workingSets" : "API,JVMCI",
     },
 
@@ -97,7 +98,7 @@
         "jdk.vm.ci.services",
       ],
       "checkstyle" : "jdk.vm.ci.services",
-      "javaCompliance" : "9",
+      "javaCompliance" : "9+",
       "workingSets" : "API,JVMCI",
     },
 
@@ -110,7 +111,7 @@
         "jdk.vm.ci.runtime",
       ],
      "checkstyle" : "jdk.vm.ci.services",
-      "javaCompliance" : "9",
+      "javaCompliance" : "9+",
       "workingSets" : "API,JVMCI",
     },
 
@@ -121,7 +122,7 @@
       "sourceDirs" : ["src"],
       "dependencies" : ["jdk.vm.ci.code"],
       "checkstyle" : "jdk.vm.ci.services",
-      "javaCompliance" : "9",
+      "javaCompliance" : "9+",
       "workingSets" : "JVMCI,AArch64",
     },
 
@@ -130,7 +131,7 @@
       "sourceDirs" : ["src"],
       "dependencies" : ["jdk.vm.ci.code"],
       "checkstyle" : "jdk.vm.ci.services",
-      "javaCompliance" : "9",
+      "javaCompliance" : "9+",
       "workingSets" : "JVMCI,AMD64",
     },
 
@@ -139,7 +140,7 @@
       "sourceDirs" : ["src"],
       "dependencies" : ["jdk.vm.ci.code"],
       "checkstyle" : "jdk.vm.ci.services",
-      "javaCompliance" : "9",
+      "javaCompliance" : "9+",
       "workingSets" : "JVMCI,SPARC",
     },
 
@@ -156,7 +157,7 @@
         "jdk.internal.org.objectweb.asm",
       ],
       "checkstyle" : "jdk.vm.ci.services",
-      "javaCompliance" : "9",
+      "javaCompliance" : "9+",
       "workingSets" : "JVMCI",
     },
 
@@ -168,7 +169,7 @@
         "jdk.vm.ci.hotspot",
       ],
       "checkstyle" : "jdk.vm.ci.services",
-      "javaCompliance" : "9",
+      "javaCompliance" : "9+",
       "workingSets" : "API,JVMCI",
     },
 
@@ -180,7 +181,7 @@
         "jdk.vm.ci.hotspot",
       ],
       "checkstyle" : "jdk.vm.ci.services",
-      "javaCompliance" : "9",
+      "javaCompliance" : "9+",
       "workingSets" : "JVMCI,HotSpot,AArch64",
     },
 
@@ -192,7 +193,7 @@
         "jdk.vm.ci.hotspot",
       ],
       "checkstyle" : "jdk.vm.ci.services",
-      "javaCompliance" : "9",
+      "javaCompliance" : "9+",
       "workingSets" : "JVMCI,HotSpot,AMD64",
     },
 
@@ -204,7 +205,7 @@
         "jdk.vm.ci.hotspot",
       ],
       "checkstyle" : "jdk.vm.ci.services",
-      "javaCompliance" : "9",
+      "javaCompliance" : "9+",
       "workingSets" : "JVMCI,HotSpot,SPARC",
     },
--- a/src/hotspot/cpu/aarch64/aarch64.ad	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/cpu/aarch64/aarch64.ad	Thu Aug 16 22:05:57 2018 +0200
@@ -14471,7 +14471,7 @@
   format %{ "cmp   $op1, $op2\t# overflow check long" %}
   ins_cost(INSN_COST);
   ins_encode %{
-    __ cmp($op1$$Register, $op2$$constant);
+    __ subs(zr, $op1$$Register, $op2$$constant);
   %}
 
   ins_pipe(icmp_reg_imm);
--- a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp	Thu Aug 16 22:05:57 2018 +0200
@@ -295,7 +295,7 @@
   int _offset;
   Register _r;
 public:
-  PrePost(Register reg, int o) : _r(reg), _offset(o) { }
+  PrePost(Register reg, int o) : _offset(o), _r(reg) { }
   int offset() { return _offset; }
   Register reg() { return _r; }
 };
@@ -353,7 +353,7 @@
   ext::operation _op;
 public:
   extend() { }
-  extend(int s, int o, ext::operation op) : _shift(s), _option(o), _op(op) { }
+  extend(int s, int o, ext::operation op) : _option(o), _shift(s), _op(op) { }
   int option() const{ return _option; }
   int shift() const { return _shift; }
   ext::operation op() const { return _op; }
@@ -398,26 +398,25 @@
   Address()
     : _mode(no_mode) { }
   Address(Register r)
-    : _mode(base_plus_offset), _base(r), _offset(0), _index(noreg), _target(0) { }
+    : _base(r), _index(noreg), _offset(0), _mode(base_plus_offset), _target(0) { }
   Address(Register r, int o)
-    : _mode(base_plus_offset), _base(r), _offset(o), _index(noreg), _target(0) { }
+    : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { }
   Address(Register r, long o)
-    : _mode(base_plus_offset), _base(r), _offset(o), _index(noreg), _target(0) { }
+    : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { }
   Address(Register r, unsigned long o)
-    : _mode(base_plus_offset), _base(r), _offset(o), _index(noreg), _target(0) { }
+    : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { }
 #ifdef ASSERT
   Address(Register r, ByteSize disp)
-    : _mode(base_plus_offset), _base(r), _offset(in_bytes(disp)),
-      _index(noreg), _target(0) { }
+    : _base(r), _index(noreg), _offset(in_bytes(disp)), _mode(base_plus_offset), _target(0) { }
 #endif
   Address(Register r, Register r1, extend ext = lsl())
-    : _mode(base_plus_offset_reg), _base(r), _index(r1),
-      _ext(ext), _offset(0), _target(0) { }
+    : _base(r), _index(r1), _offset(0), _mode(base_plus_offset_reg),
+      _ext(ext), _target(0) { }
   Address(Pre p)
-    : _mode(pre), _base(p.reg()), _offset(p.offset()) { }
+    : _base(p.reg()), _offset(p.offset()), _mode(pre) { }
   Address(Post p)
-    : _mode(p.idx_reg() == NULL ? post : post_reg), _base(p.reg()),
-      _offset(p.offset()), _target(0), _index(p.idx_reg()) { }
+    : _base(p.reg()), _index(p.idx_reg()), _offset(p.offset()),
+      _mode(p.idx_reg() == NULL ? post : post_reg), _target(0) { }
   Address(address target, RelocationHolder const& rspec)
     : _mode(literal),
      _rspec(rspec),
@@ -426,7 +425,7 @@
   Address(address target, relocInfo::relocType rtype = relocInfo::external_word_type);
   Address(Register base, RegisterOrConstant index, extend ext = lsl())
     : _base (base),
-      _ext(ext), _offset(0), _target(0) {
+      _offset(0), _ext(ext), _target(0) {
     if (index.is_register()) {
       _mode = base_plus_offset_reg;
       _index = index.as_register();
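A note on the pattern: this change, and the matching constructor edits below in the c1_CodeStubs_*.cpp, sharedRuntime_aarch64.cpp, frame_ppc, and frame_s390 files, all reorder member-initializer lists to match the fields' declaration order. C++ always initializes members in declaration order regardless of the list order, so a mismatched list is misleading and trips GCC's -Wreorder. A minimal sketch of the issue, using a hypothetical struct rather than the real HotSpot types:

```cpp
// Sketch of the -Wreorder pattern behind these constructor edits.
struct PrePostLike {
  int _offset;   // declared first, so initialized first
  int _r;        // declared second, so initialized second

  // Before: ": _r(r), _offset(o)" -- the list order is misleading, because
  // initialization still happens in declaration order; gcc -Wall warns.
  PrePostLike(int r, int o) : _offset(o), _r(r) {}  // list matches declaration
};

int main() {
  PrePostLike p(1, 2);
  return (p._offset == 2 && p._r == 1) ? 0 : 1;
}
```

The reorder is purely cosmetic when the initializers are independent, but it matters whenever one member's initializer reads another, so keeping the list in declaration order is the safe default.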
--- a/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -50,13 +50,13 @@
 }
 
 RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
-  : _throw_index_out_of_bounds_exception(false), _index(index), _array(array) {
+  : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
   assert(info != NULL, "must have info");
   _info = new CodeEmitInfo(info);
 }
 
 RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
-  : _throw_index_out_of_bounds_exception(true), _index(index), _array(NULL) {
+  : _index(index), _array(NULL), _throw_index_out_of_bounds_exception(true) {
   assert(info != NULL, "must have info");
   _info = new CodeEmitInfo(info);
 }
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -1922,7 +1922,7 @@
       if (is_32bit)
         __ cmpw(reg1, imm);
       else
-        __ cmp(reg1, imm);
+        __ subs(zr, reg1, imm);
       return;
     } else {
       __ mov(rscratch1, imm);
@@ -2705,7 +2705,7 @@
     if (TypeEntries::is_type_none(current_klass)) {
       __ cbz(rscratch2, none);
-      __ cmp(rscratch2, TypeEntries::null_seen);
+      __ cmp(rscratch2, (u1)TypeEntries::null_seen);
       __ br(Assembler::EQ, none);
       // There is a chance that the checks above (re-reading profiling
       // data from memory) fail if another thread has just set the
@@ -2750,7 +2750,7 @@
       Label ok;
       __ ldr(rscratch1, mdo_addr);
       __ cbz(rscratch1, ok);
-      __ cmp(rscratch1, TypeEntries::null_seen);
+      __ cmp(rscratch1, (u1)TypeEntries::null_seen);
       __ br(Assembler::EQ, ok);
       // may have been set by another thread
       __ dmb(Assembler::ISHLD);
--- a/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/cpu/aarch64/c1_Runtime1_aarch64.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -712,7 +712,7 @@
         {
           Label ok, not_ok;
           __ ldrw(obj_size, Address(klass, Klass::layout_helper_offset()));
-          __ cmp(obj_size, 0u);
+          __ cmp(obj_size, (u1)0);
           __ br(Assembler::LE, not_ok);  // make sure it's an instance (LH > 0)
           __ tstw(obj_size, Klass::_lh_instance_slow_path_bit);
           __ br(Assembler::EQ, ok);
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -1636,7 +1636,7 @@
 
       ldr(rscratch1, mdo_addr);
       cbz(rscratch1, none);
-      cmp(rscratch1, TypeEntries::null_seen);
+      cmp(rscratch1, (u1)TypeEntries::null_seen);
       br(Assembler::EQ, none);
       // There is a chance that the checks above (re-reading profiling
       // data from memory) fail if another thread has just set the
@@ -1670,7 +1670,7 @@
     int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
     ldrb(rscratch1, Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start));
-    cmp(rscratch1, is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
+    cmp(rscratch1, u1(is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag));
     br(Assembler::NE, profile_continue);
 
     if (MethodData::profile_arguments()) {
@@ -1682,7 +1682,7 @@
           // If return value type is profiled we may have no argument to profile
           ldr(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())));
           sub(tmp, tmp, i*TypeStackSlotEntries::per_arg_count());
-          cmp(tmp, TypeStackSlotEntries::per_arg_count());
+          cmp(tmp, (u1)TypeStackSlotEntries::per_arg_count());
           add(rscratch1, mdp, off_to_args);
           br(Assembler::LT, done);
         }
@@ -1752,13 +1752,13 @@
     // length
     Label do_profile;
     ldrb(rscratch1, Address(rbcp, 0));
-    cmp(rscratch1, Bytecodes::_invokedynamic);
+    cmp(rscratch1, (u1)Bytecodes::_invokedynamic);
     br(Assembler::EQ, do_profile);
-    cmp(rscratch1, Bytecodes::_invokehandle);
+    cmp(rscratch1, (u1)Bytecodes::_invokehandle);
     br(Assembler::EQ, do_profile);
     get_method(tmp);
     ldrh(rscratch1, Address(tmp, Method::intrinsic_id_offset_in_bytes()));
-    cmp(rscratch1, vmIntrinsics::_compiledLambdaForm);
+    subs(zr, rscratch1, vmIntrinsics::_compiledLambdaForm);
     br(Assembler::NE, profile_continue);
 
     bind(do_profile);
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -494,7 +494,7 @@
     ldr(swap_reg, mark_addr);
   }
   andr(tmp_reg, swap_reg, markOopDesc::biased_lock_mask_in_place);
-  cmp(tmp_reg, markOopDesc::biased_lock_pattern);
+  cmp(tmp_reg, (u1)markOopDesc::biased_lock_pattern);
   br(Assembler::NE, cas_label);
   // The bias pattern is present in the object's header. Need to check
   // whether the bias owner and the epoch are both still current.
@@ -633,7 +633,7 @@
     // the bias bit would be clear.
     ldr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
     andr(temp_reg, temp_reg, markOopDesc::biased_lock_mask_in_place);
-    cmp(temp_reg, markOopDesc::biased_lock_pattern);
+    cmp(temp_reg, (u1)markOopDesc::biased_lock_pattern);
     br(Assembler::EQ, done);
   }
@@ -1137,7 +1137,7 @@
 
   if (super_check_offset.is_register()) {
     br(Assembler::EQ, *L_success);
-    cmp(super_check_offset.as_register(), sc_offset);
+    subs(zr, super_check_offset.as_register(), sc_offset);
     if (L_failure == &L_fallthrough) {
       br(Assembler::EQ, *L_slow_path);
     } else {
@@ -3312,7 +3312,7 @@
     add(table3, table0, 3*256*sizeof(juint));
 
   if (UseNeon) {
-      cmp(len, 64);
+      cmp(len, (u1)64);
       br(Assembler::LT, L_by16);
       eor(v16, T16B, v16, v16);
@@ -4371,10 +4371,10 @@
 
   if (icnt1 == -1) {
     sub(result_tmp, cnt2, cnt1);
-    cmp(cnt1, 8); // Use Linear Scan if cnt1 < 8 || cnt1 >= 256
+    cmp(cnt1, (u1)8); // Use Linear Scan if cnt1 < 8 || cnt1 >= 256
     br(LT, LINEARSEARCH);
     dup(v0, T16B, cnt1); // done in separate FPU pipeline. Almost no penalty
-    cmp(cnt1, 256);
+    subs(zr, cnt1, 256);
     lsr(tmp1, cnt2, 2);
     ccmp(cnt1, tmp1, 0b0000, LT); // Source must be 4 * pattern for BM
     br(GE, LINEARSTUB);
@@ -4480,7 +4480,7 @@
     BIND(BCLOOP);
       (this->*str1_load_1chr)(ch1, Address(post(tmp3, str1_chr_size)));
       if (!str1_isL) {
-        cmp(ch1, ASIZE);
+        subs(zr, ch1, ASIZE);
         br(HS, BCSKIP);
       }
       strb(ch2, Address(sp, ch1));
@@ -4544,7 +4544,7 @@
       } else {
         mov(result_tmp, 1);
       }
-      cmp(skipch, ASIZE);
+      subs(zr, skipch, ASIZE);
       br(HS, BMADV);
     }
     ldrb(result_tmp, Address(sp, skipch)); // load skip distance
@@ -4565,7 +4565,7 @@
     b(DONE);
 
     BIND(LINEARSTUB);
-    cmp(cnt1, 16); // small patterns still should be handled by simple algorithm
+    cmp(cnt1, (u1)16); // small patterns still should be handled by simple algorithm
     br(LT, LINEAR_MEDIUM);
     mov(result, zr);
     RuntimeAddress stub = NULL;
@@ -4594,7 +4594,7 @@
   {
     Label DOSHORT, FIRST_LOOP, STR2_NEXT, STR1_LOOP, STR1_NEXT;
 
-    cmp(cnt1, str1_isL == str2_isL ? 4 : 2);
+    cmp(cnt1, u1(str1_isL == str2_isL ? 4 : 2));
     br(LT, DOSHORT);
   BIND(LINEAR_MEDIUM);
     (this->*str1_load_1chr)(first, Address(str1));
@@ -4629,7 +4629,7 @@
 
   BIND(DOSHORT);
   if (str1_isL == str2_isL) {
-    cmp(cnt1, 2);
+    cmp(cnt1, (u1)2);
     br(LT, DO1);
     br(GT, DO3);
   }
@@ -4704,7 +4704,7 @@
 
   BIND(DO1);
     (this->*str1_load_1chr)(ch1, str1);
-    cmp(cnt2, 8);
+    cmp(cnt2, (u1)8);
     br(LT, DO1_SHORT);
 
     sub(result_tmp, cnt2, 8/str2_chr_size);
@@ -4727,7 +4727,7 @@
       adds(cnt2_neg, cnt2_neg, 8);
       br(LT, CH1_LOOP);
 
-      cmp(cnt2_neg, 8);
+      cmp(cnt2_neg, (u1)8);
       mov(cnt2_neg, 0);
       br(LT, CH1_LOOP);
       b(NOMATCH);
@@ -4770,7 +4770,7 @@
   Register ch1 = rscratch1;
   Register result_tmp = rscratch2;
 
-  cmp(cnt1, 4);
+  cmp(cnt1, (u1)4);
   br(LT, DO1_SHORT);
 
   orr(ch, ch, ch, LSL, 16);
@@ -4793,7 +4793,7 @@
     adds(cnt1_neg, cnt1_neg, 8);
     br(LT, CH1_LOOP);
 
-    cmp(cnt1_neg, 8);
+    cmp(cnt1_neg, (u1)8);
     mov(cnt1_neg, 0);
     br(LT, CH1_LOOP);
     b(NOMATCH);
@@ -4830,7 +4830,7 @@
       DIFFERENCE, NEXT_WORD, SHORT_LOOP_TAIL, SHORT_LAST2, SHORT_LAST_INIT,
       SHORT_LOOP_START, TAIL_CHECK;
 
-  const int STUB_THRESHOLD = 64 + 8;
+  const u1 STUB_THRESHOLD = 64 + 8;
   bool isLL = ae == StrIntrinsicNode::LL;
   bool isLU = ae == StrIntrinsicNode::LU;
   bool isUL = ae == StrIntrinsicNode::UL;
@@ -5225,10 +5225,10 @@
   ldrw(cnt2, Address(a2, length_offset));
   // on most CPUs a2 is still "locked"(surprisingly) in ldrw and it's
   // faster to perform another branch before comparing a1 and a2
-  cmp(cnt1, elem_per_word);
+  cmp(cnt1, (u1)elem_per_word);
   br(LE, SHORT); // short or same
   ldr(tmp3, Address(pre(a1, base_offset)));
-  cmp(cnt1, stubBytesThreshold);
+  subs(zr, cnt1, stubBytesThreshold);
   br(GE, STUB);
   ldr(tmp4, Address(pre(a2, base_offset)));
   sub(tmp5, zr, cnt1, LSL, 3 + log_elem_size);
@@ -5245,7 +5245,7 @@
     cbnz(tmp4, DONE);
     ldr(tmp3, Address(pre(a1, wordSize)));
     ldr(tmp4, Address(pre(a2, wordSize)));
-    cmp(cnt1, elem_per_word);
+    cmp(cnt1, (u1)elem_per_word);
     br(LE, TAIL2);
     cmp(tmp1, tmp2);
   }
   br(EQ, NEXT_DWORD);
@@ -5418,7 +5418,7 @@
   assert(ptr == r10 &&
          cnt == r11, "mismatch in register usage");
 
   BLOCK_COMMENT("zero_words {");
-  cmp(cnt, zero_words_block_size);
+  cmp(cnt, (u1)zero_words_block_size);
   Label around, done, done16;
   br(LO, around);
   {
@@ -5599,15 +5599,15 @@
     mov(result, len); // Save initial len
 
 #ifndef BUILTIN_SIM
-    cmp(len, 8); // handle shortest strings first
+    cmp(len, (u1)8); // handle shortest strings first
     br(LT, LOOP_1);
-    cmp(len, 32);
+    cmp(len, (u1)32);
     br(LT, NEXT_8);
     // The following code uses the SIMD 'uzp1' and 'uzp2' instructions
     // to convert chars to bytes
     if (SoftwarePrefetchHintDistance >= 0) {
       ld1(Vtmp1, Vtmp2, Vtmp3, Vtmp4, T8H, src);
-      cmp(len, SoftwarePrefetchHintDistance/2 + 16);
+      subs(tmp2, len, SoftwarePrefetchHintDistance/2 + 16);
       br(LE, NEXT_32_START);
       b(NEXT_32_PRFM_START);
       BIND(NEXT_32_PRFM);
@@ -5627,9 +5627,9 @@
           sub(len, len, 32);
           add(dst, dst, 32);
           add(src, src, 64);
-          cmp(len, SoftwarePrefetchHintDistance/2 + 16);
+          subs(tmp2, len, SoftwarePrefetchHintDistance/2 + 16);
           br(GE, NEXT_32_PRFM);
-          cmp(len, 32);
+          cmp(len, (u1)32);
           br(LT, LOOP_8);
       BIND(NEXT_32);
         ld1(Vtmp1, Vtmp2, Vtmp3, Vtmp4, T8H, src);
@@ -5652,12 +5652,12 @@
         sub(len, len, 32);
         add(dst, dst, 32);
         add(src, src, 64);
-        cmp(len, 32);
+        cmp(len, (u1)32);
         br(GE, NEXT_32);
         cbz(len, DONE);
 
     BIND(LOOP_8);
-      cmp(len, 8);
+      cmp(len, (u1)8);
       br(LT, LOOP_1);
     BIND(NEXT_8);
       ld1(Vtmp1, T8H, src);
@@ -5670,7 +5670,7 @@
       sub(len, len, 8);
       add(dst, dst, 8);
       add(src, src, 16);
-      cmp(len, 8);
+      cmp(len, (u1)8);
       br(GE, NEXT_8);
 
     BIND(LOOP_1);
@@ -5747,7 +5747,7 @@
   const int large_loop_threshold = (64 + 16)/8;
   ldrd(vtmp2, post(src, 8));
   andw(len, len, 7);
-  cmp(tmp4, large_loop_threshold);
+  cmp(tmp4, (u1)large_loop_threshold);
   br(GE, to_stub);
   b(loop_start);
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp	Thu Aug 16 22:05:57 2018 +0200
@@ -180,8 +180,9 @@
 
   template<class T>
   inline void cmpw(Register Rd, T imm)  { subsw(zr, Rd, imm); }
-  // imm is limited to 12 bits.
-  inline void cmp(Register Rd, unsigned imm)  { subs(zr, Rd, imm); }
+
+  inline void cmp(Register Rd, unsigned char imm8)  { subs(zr, Rd, imm8); }
+  inline void cmp(Register Rd, unsigned imm) __attribute__ ((deprecated));
 
   inline void cmnw(Register Rd, unsigned imm) { addsw(zr, Rd, imm); }
   inline void cmn(Register Rd, unsigned imm) { adds(zr, Rd, imm); }
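This header change is the root of the many (u1) casts in the surrounding AArch64 files: the wide-immediate cmp is deprecated, an 8-bit overload is added, and comparisons against wider constants are now spelled out as subs(zr, ...). A rough sketch of the overload discipline, using a hypothetical Assembler with simplified signatures rather than the real HotSpot API:

```cpp
#include <cstdio>

struct Register { int id; };
static const Register zr{31};  // zero register stand-in

struct Assembler {
  // subs takes whatever immediate the caller spells out explicitly.
  void subs(Register rd, Register rn, unsigned imm) {
    std::printf("subs x%d, x%d, #%u\n", rd.id, rn.id, imm);
  }
  // New: 8-bit immediate form; call sites cast with (u1) to select it.
  void cmp(Register rn, unsigned char imm8) { subs(zr, rn, imm8); }
  // Old: wide-immediate form, kept but deprecated so stray call sites warn.
  [[deprecated("use cmp(Rd, u1) or an explicit subs(zr, ...)")]]
  void cmp(Register rn, unsigned imm) { subs(zr, rn, imm); }
};

int main() {
  Assembler a;
  a.cmp(Register{0}, (unsigned char)64);  // the (u1) cast pattern seen above
  a.subs(zr, Register{0}, 0x8000);        // constant too wide for the u1 form
  // a.cmp(Register{0}, 64);  // ambiguous between the two overloads:
  //                          // this is why every call site casts explicitly
}
```

With both overloads live, a bare integer literal no longer resolves silently, which is exactly what forces each call site in this changeset to state whether the immediate fits in 8 bits or needs the full subs encoding.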
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64_log.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64_log.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -292,7 +292,7 @@
     // vtmp1 = AS_DOUBLE_BITS(0x77F0 << 48 | mantissa(X)) == mx
     fmovd(vtmp1, tmp4);
     subw(tmp2, tmp2, 16);
-    cmp(tmp2, 0x8000);
+    subs(zr, tmp2, 0x8000);
     br(GE, SMALL_VALUE);
   bind(MAIN);
     fmovs(tmp3, vtmp5);                     // int intB0 = AS_INT_BITS(B);
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64_trig.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64_trig.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -297,7 +297,7 @@
     fmsubd(v3, v2, v6, v31);            // v3 = r = t - fn * pio2_1
     fmuld(v26, v2, v7);                 // v26 = w = fn * pio2_1t
     fsubd(v4, v3, v26);                 // y[0] = r - w. Calculated before branch
-    cmp(n, 32);
+    cmp(n, (u1)32);
     br(GT, LARGE_ELSE);
     subw(tmp5, n, 1);                   // tmp5 = n - 1
     ldrw(jv, Address(ih, tmp5, Address::lsl(2)));
@@ -312,7 +312,7 @@
         sub(tmp3, tmp5, jx, LSR, 32 + 20 + 1); // r7 = j-(((*(i0+(int*)&y[0]))>>20)&0x7ff);
 
         block_comment("if(i>16)"); {
-          cmp(tmp3, 16);
+          cmp(tmp3, (u1)16);
           br(LE, X_IS_MEDIUM_BRANCH_DONE);
           // i > 16. 2nd iteration needed
           ldpd(v6, v7, Address(ih, -32));
@@ -328,7 +328,7 @@
             sub(tmp3, tmp5, jx, LSR, 32 + 20 + 1); // r7 = j-(((*(i0+(int*)&y[0]))>>20)&0x7ff);
 
             block_comment("if(i>49)"); {
-              cmp(tmp3, 49);
+              cmp(tmp3, (u1)49);
               br(LE, X_IS_MEDIUM_BRANCH_DONE);
               // 3rd iteration need, 151 bits acc
               ldpd(v6, v7, Address(ih, -16));
--- a/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/cpu/aarch64/methodHandles_aarch64.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -196,7 +196,7 @@
     Label L;
     BLOCK_COMMENT("verify_intrinsic_id {");
     __ ldrh(rscratch1, Address(rmethod, Method::intrinsic_id_offset_in_bytes()));
-    __ cmp(rscratch1, (int) iid);
+    __ subs(zr, rscratch1, (int) iid);
     __ br(Assembler::EQ, L);
     if (iid == vmIntrinsics::_linkToVirtual ||
         iid == vmIntrinsics::_linkToSpecial) {
--- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -1152,12 +1152,12 @@
    public:
     MoveOperation(int src_index, VMRegPair src, int dst_index, VMRegPair dst):
       _src(src)
+      , _dst(dst)
       , _src_index(src_index)
-      , _dst(dst)
       , _dst_index(dst_index)
+      , _processed(false)
       , _next(NULL)
-      , _prev(NULL)
-      , _processed(false) { Unimplemented(); }
+      , _prev(NULL) { Unimplemented(); }
 
     VMRegPair src() const              { Unimplemented(); return _src; }
     int src_id() const                 { Unimplemented(); return 0; }
--- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -265,7 +265,7 @@
     {
       Label L;
       __ ldr(rscratch1, Address(rthread, in_bytes(Thread::pending_exception_offset())));
-      __ cmp(rscratch1, (unsigned)NULL_WORD);
+      __ cmp(rscratch1, (u1)NULL_WORD);
      __ br(Assembler::EQ, L);
       __ stop("StubRoutines::call_stub: entered with pending exception");
       __ BIND(L);
@@ -322,13 +322,13 @@
     __ ldr(j_rarg2, result);
     Label is_long, is_float, is_double, exit;
     __ ldr(j_rarg1, result_type);
-    __ cmp(j_rarg1, T_OBJECT);
+    __ cmp(j_rarg1, (u1)T_OBJECT);
     __ br(Assembler::EQ, is_long);
-    __ cmp(j_rarg1, T_LONG);
+    __ cmp(j_rarg1, (u1)T_LONG);
     __ br(Assembler::EQ, is_long);
-    __ cmp(j_rarg1, T_FLOAT);
+    __ cmp(j_rarg1, (u1)T_FLOAT);
     __ br(Assembler::EQ, is_float);
-    __ cmp(j_rarg1, T_DOUBLE);
+    __ cmp(j_rarg1, (u1)T_DOUBLE);
     __ br(Assembler::EQ, is_double);
 
     // handle T_INT case
@@ -743,7 +743,7 @@
     // Make sure we are never given < 8 words
     {
       Label L;
-      __ cmp(count, 8);
+      __ cmp(count, (u1)8);
       __ br(Assembler::GE, L);
       __ stop("genrate_copy_longs called with < 8 words");
       __ bind(L);
@@ -1103,19 +1103,19 @@
     if (PrefetchCopyIntervalInBytes > 0)
       __ prfm(Address(s, 0), PLDL1KEEP);
-    __ cmp(count, (UseSIMDForMemoryOps ? 96:80)/granularity);
+    __ cmp(count, u1((UseSIMDForMemoryOps ? 96:80)/granularity));
     __ br(Assembler::HI, copy_big);
 
     __ lea(send, Address(s, count, Address::lsl(exact_log2(granularity))));
     __ lea(dend, Address(d, count, Address::lsl(exact_log2(granularity))));
 
-    __ cmp(count, 16/granularity);
+    __ cmp(count, u1(16/granularity));
     __ br(Assembler::LS, copy16);
 
-    __ cmp(count, 64/granularity);
+    __ cmp(count, u1(64/granularity));
     __ br(Assembler::HI, copy80);
 
-    __ cmp(count, 32/granularity);
+    __ cmp(count, u1(32/granularity));
     __ br(Assembler::LS, copy32);
 
     // 33..64 bytes
@@ -1170,7 +1170,7 @@
 
     // 0..16 bytes
     __ bind(copy16);
-    __ cmp(count, 8/granularity);
+    __ cmp(count, u1(8/granularity));
     __ br(Assembler::LO, copy8);
 
     // 8..16 bytes
@@ -3270,7 +3270,7 @@
 
     // The pipelined loop needs at least 16 elements for 1 iteration
     // It does check this, but it is more effective to skip to the cleanup loop
-    __ cmp(len, 16);
+    __ cmp(len, (u1)16);
     __ br(Assembler::HS, L_nmax);
     __ cbz(len, L_combine);
@@ -3654,7 +3654,7 @@
   address generate_has_negatives(address &has_negatives_long) {
     StubCodeMark mark(this, "StubRoutines", "has_negatives");
-    const int large_loop_size = 64;
+    const u1 large_loop_size = 64;
     const uint64_t UPPER_BIT_MASK=0x8080808080808080;
     int dcache_line = VM_Version::dcache_line_size();
@@ -3668,7 +3668,7 @@
   Label RET_TRUE, RET_TRUE_NO_POP, RET_FALSE, ALIGNED, LOOP16, CHECK_16, DONE,
         LARGE_LOOP, POST_LOOP16, LEN_OVER_15, LEN_OVER_8, POST_LOOP16_LOAD_TAIL;
 
-  __ cmp(len, 15);
+  __ cmp(len, (u1)15);
   __ br(Assembler::GT, LEN_OVER_15);
   // The only case when execution falls into this code is when pointer is near
   // the end of memory page and we have to avoid reading next page
@@ -3764,7 +3764,7 @@
     __ br(Assembler::GE, LARGE_LOOP);
 
   __ bind(CHECK_16); // small 16-byte load pre-loop
-  __ cmp(len, 16);
+  __ cmp(len, (u1)16);
   __ br(Assembler::LT, POST_LOOP16);
 
   __ bind(LOOP16); // small 16-byte load loop
@@ -3773,11 +3773,11 @@
     __ orr(tmp2, tmp2, tmp3);
     __ tst(tmp2, UPPER_BIT_MASK);
     __ br(Assembler::NE, RET_TRUE);
-    __ cmp(len, 16);
+    __ cmp(len, (u1)16);
     __ br(Assembler::GE, LOOP16); // 16-byte load loop end
 
   __ bind(POST_LOOP16); // 16-byte aligned, so we can read unconditionally
-  __ cmp(len, 8);
+  __ cmp(len, (u1)8);
   __ br(Assembler::LE, POST_LOOP16_LOAD_TAIL);
   __ ldr(tmp3, Address(__ post(ary1, 8)));
   __ sub(len, len, 8);
@@ -3942,7 +3942,7 @@
       __ br(__ LE, NO_PREFETCH_LARGE_LOOP);
       generate_large_array_equals_loop_simd(prefetchLoopThreshold,
           /* prfm = */ true, NOT_EQUAL);
-      __ cmp(cnt1, nonPrefetchLoopThreshold);
+      __ subs(zr, cnt1, nonPrefetchLoopThreshold);
      __ br(__ LT, TAIL);
     }
     __ bind(NO_PREFETCH_LARGE_LOOP);
@@ -3955,7 +3955,7 @@
       __ br(__ LE, NO_PREFETCH_LARGE_LOOP);
       generate_large_array_equals_loop_nonsimd(prefetchLoopThreshold,
          /* prfm = */ true, NOT_EQUAL);
-      __ cmp(cnt1, nonPrefetchLoopThreshold);
+      __ subs(zr, cnt1, nonPrefetchLoopThreshold);
       __ br(__ LT, TAIL);
     }
     __ bind(NO_PREFETCH_LARGE_LOOP);
@@ -4106,7 +4106,7 @@
     __ ldr(tmp3, Address(__ post(cnt1, 8)));
 
     if (SoftwarePrefetchHintDistance >= 0) {
-      __ cmp(cnt2, prefetchLoopExitCondition);
+      __ subs(rscratch2, cnt2, prefetchLoopExitCondition);
      __ br(__ LT, SMALL_LOOP);
       __ bind(LARGE_LOOP_PREFETCH);
         __ prfm(Address(tmp2, SoftwarePrefetchHintDistance));
@@ -4123,7 +4123,7 @@
           __ subs(tmp4, tmp4, 1);
           __ br(__ GT, LARGE_LOOP_PREFETCH_REPEAT2);
           __ sub(cnt2, cnt2, 64);
-          __ cmp(cnt2, prefetchLoopExitCondition);
+          __ subs(rscratch2, cnt2, prefetchLoopExitCondition);
           __ br(__ GE, LARGE_LOOP_PREFETCH);
     }
     __ cbz(cnt2, LOAD_LAST); // no characters left except last load
@@ -4137,7 +4137,7 @@
     __ br(__ GE, SMALL_LOOP);
     __ cbz(cnt2, LOAD_LAST);
     __ bind(TAIL); // 1..15 characters left
-    __ cmp(cnt2, -8);
+    __ subs(zr, cnt2, -8);
     __ br(__ GT, TAIL_LOAD_16);
     __ ldrd(vtmp, Address(tmp2));
     __ zip1(vtmp3, __ T8B, vtmp, vtmpZ);
@@ -4240,7 +4240,7 @@
         compare_string_16_bytes_same(DIFF, DIFF2);
         __ sub(cnt2, cnt2, isLL ? 64 : 32);
         compare_string_16_bytes_same(DIFF, DIFF2);
-        __ cmp(cnt2, largeLoopExitCondition);
+        __ subs(rscratch2, cnt2, largeLoopExitCondition);
         compare_string_16_bytes_same(DIFF, DIFF2);
         __ br(__ GT, LARGE_LOOP_PREFETCH);
         __ cbz(cnt2, LAST_CHECK_AND_LENGTH_DIFF); // no more chars left?
@@ -4416,7 +4416,7 @@
       __ add(result, result, wordSize/str2_chr_size);
       __ br(__ GE, L_LOOP);
     __ BIND(L_POST_LOOP);
-    __ cmp(cnt2, -wordSize/str2_chr_size); // no extra characters to check
+    __ subs(zr, cnt2, -wordSize/str2_chr_size); // no extra characters to check
     __ br(__ LE, NOMATCH);
     __ ldr(ch2, Address(str2));
     __ sub(cnt2, zr, cnt2, __ LSL, LogBitsPerByte + str2_chr_shift);
@@ -4446,7 +4446,7 @@
     __ br(__ EQ, NOMATCH);
   __ BIND(L_SMALL_HAS_ZERO_LOOP);
     __ clz(tmp4, tmp2); // potentially long. Up to 4 cycles on some cpu's
-    __ cmp(cnt1, wordSize/str2_chr_size);
+    __ cmp(cnt1, u1(wordSize/str2_chr_size));
     __ br(__ LE, L_SMALL_CMP_LOOP_LAST_CMP2);
     if (str2_isL) { // LL
       __ add(str2, str2, tmp4, __ LSR, LogBitsPerByte); // address of "index"
@@ -4659,7 +4659,7 @@
       __ zip1(v2, __ T16B, v2, v0);
       __ st1(v1, v2, __ T16B, __ post(dst, 32));
       __ ld1(v3, v4, v5, v6, __ T16B, Address(__ post(src, 64)));
-      __ cmp(octetCounter, large_loop_threshold);
+      __ subs(rscratch1, octetCounter, large_loop_threshold);
      __ br(__ LE, LOOP_START);
       __ b(LOOP_PRFM_START);
       __ bind(LOOP_PRFM);
@@ -4667,17 +4667,17 @@
       __ bind(LOOP_PRFM_START);
         __ prfm(Address(src, SoftwarePrefetchHintDistance));
         __ sub(octetCounter, octetCounter, 8);
-        __ cmp(octetCounter, large_loop_threshold);
+        __ subs(rscratch1, octetCounter, large_loop_threshold);
         inflate_and_store_2_fp_registers(true, v3, v4);
         inflate_and_store_2_fp_registers(true, v5, v6);
         __ br(__ GT, LOOP_PRFM);
-      __ cmp(octetCounter, 8);
+      __ cmp(octetCounter, (u1)8);
       __ br(__ LT, DONE);
       __ bind(LOOP);
         __ ld1(v3, v4, v5, v6, __ T16B, Address(__ post(src, 64)));
         __ bind(LOOP_START);
           __ sub(octetCounter, octetCounter, 8);
-          __ cmp(octetCounter, 8);
+          __ cmp(octetCounter, (u1)8);
           inflate_and_store_2_fp_registers(false, v3, v4);
           inflate_and_store_2_fp_registers(false, v5, v6);
           __ br(__ GE, LOOP);
@@ -5308,7 +5308,7 @@
     {
       ldr(Rn, Address(Pn_base, 0));
       mul(Rlo_mn, Rn, inv);
-      cmp(Rlo_mn, -1);
+      subs(zr, Rlo_mn, -1);
       Label ok;
       br(EQ, ok); {
         stop("broken inverse in Montgomery multiply");
--- a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -1360,7 +1360,7 @@
   {
     Label L;
     __ ldrw(t, Address(rthread, JavaThread::thread_state_offset()));
-    __ cmp(t, _thread_in_Java);
+    __ cmp(t, (u1)_thread_in_Java);
     __ br(Assembler::EQ, L);
     __ stop("Wrong thread state in native stub");
     __ bind(L);
@@ -1467,7 +1467,7 @@
   Label no_reguard;
   __ lea(rscratch1, Address(rthread, in_bytes(JavaThread::stack_guard_state_offset())));
   __ ldrw(rscratch1, Address(rscratch1));
-  __ cmp(rscratch1, JavaThread::stack_guard_yellow_reserved_disabled);
+  __ cmp(rscratch1, (u1)JavaThread::stack_guard_yellow_reserved_disabled);
   __ br(Assembler::NE, no_reguard);
 
   __ pusha(); // XXX only save smashed registers
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -331,16 +331,16 @@
   __ ldarb(r3, r3);
 
   // unresolved class - get the resolved class
-  __ cmp(r3, JVM_CONSTANT_UnresolvedClass);
+  __ cmp(r3, (u1)JVM_CONSTANT_UnresolvedClass);
   __ br(Assembler::EQ, call_ldc);
 
   // unresolved class in error state - call into runtime to throw the error
   // from the first resolution attempt
-  __ cmp(r3, JVM_CONSTANT_UnresolvedClassInError);
+  __ cmp(r3, (u1)JVM_CONSTANT_UnresolvedClassInError);
   __ br(Assembler::EQ, call_ldc);
 
   // resolved class - need to call vm to get java mirror of the class
-  __ cmp(r3, JVM_CONSTANT_Class);
+  __ cmp(r3, (u1)JVM_CONSTANT_Class);
   __ br(Assembler::NE, notClass);
 
   __ bind(call_ldc);
@@ -351,7 +351,7 @@
   __ b(Done);
 
   __ bind(notClass);
-  __ cmp(r3, JVM_CONSTANT_Float);
+  __ cmp(r3, (u1)JVM_CONSTANT_Float);
   __ br(Assembler::NE, notFloat);
 
   // ftos
   __ adds(r1, r2, r1, Assembler::LSL, 3);
@@ -361,7 +361,7 @@
 
   __ bind(notFloat);
-  __ cmp(r3, JVM_CONSTANT_Integer);
+  __ cmp(r3, (u1)JVM_CONSTANT_Integer);
   __ br(Assembler::NE, notInt);
 
   // itos
@@ -2333,7 +2333,7 @@
   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
   __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
-  __ cmp(temp, (int) code);  // have we resolved this bytecode?
+  __ subs(zr, temp, (int) code);  // have we resolved this bytecode?
   __ br(Assembler::EQ, resolved);
 
   // resolve first time through
@@ -2515,7 +2515,7 @@
   __ b(Done);
 
   __ bind(notByte);
-  __ cmp(flags, ztos);
+  __ cmp(flags, (u1)ztos);
   __ br(Assembler::NE, notBool);
 
   // ztos (same code as btos)
@@ -2529,7 +2529,7 @@
   __ b(Done);
 
   __ bind(notBool);
-  __ cmp(flags, atos);
+  __ cmp(flags, (u1)atos);
   __ br(Assembler::NE, notObj);
   // atos
   do_oop_load(_masm, field, r0, IN_HEAP);
@@ -2540,7 +2540,7 @@
   __ b(Done);
 
   __ bind(notObj);
-  __ cmp(flags, itos);
+  __ cmp(flags, (u1)itos);
   __ br(Assembler::NE, notInt);
   // itos
   __ access_load_at(T_INT, IN_HEAP, r0, field, noreg, noreg);
@@ -2552,7 +2552,7 @@
   __ b(Done);
 
   __ bind(notInt);
-  __ cmp(flags, ctos);
+  __ cmp(flags, (u1)ctos);
   __ br(Assembler::NE, notChar);
   // ctos
   __ access_load_at(T_CHAR, IN_HEAP, r0, field, noreg, noreg);
@@ -2564,7 +2564,7 @@
   __ b(Done);
 
   __ bind(notChar);
-  __ cmp(flags, stos);
+  __ cmp(flags, (u1)stos);
   __ br(Assembler::NE, notShort);
   // stos
   __ access_load_at(T_SHORT, IN_HEAP, r0, field, noreg, noreg);
@@ -2576,7 +2576,7 @@
   __ b(Done);
 
   __ bind(notShort);
-  __ cmp(flags, ltos);
+  __ cmp(flags, (u1)ltos);
   __ br(Assembler::NE, notLong);
   // ltos
   __ access_load_at(T_LONG, IN_HEAP, r0, field, noreg, noreg);
@@ -2588,7 +2588,7 @@
   __ b(Done);
 
   __ bind(notLong);
-  __ cmp(flags, ftos);
+  __ cmp(flags, (u1)ftos);
   __ br(Assembler::NE, notFloat);
   // ftos
   __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
@@ -2601,7 +2601,7 @@
 
   __ bind(notFloat);
 #ifdef ASSERT
-  __ cmp(flags, dtos);
+  __ cmp(flags, (u1)dtos);
   __ br(Assembler::NE, notDouble);
 #endif
   // dtos
@@ -2751,7 +2751,7 @@
   }
 
   __ bind(notByte);
-  __ cmp(flags, ztos);
+  __ cmp(flags, (u1)ztos);
   __ br(Assembler::NE, notBool);
 
   // ztos
@@ -2766,7 +2766,7 @@
   }
 
   __ bind(notBool);
-  __ cmp(flags, atos);
+  __ cmp(flags, (u1)atos);
   __ br(Assembler::NE, notObj);
 
   // atos
@@ -2782,7 +2782,7 @@
   }
 
   __ bind(notObj);
-  __ cmp(flags, itos);
+  __ cmp(flags, (u1)itos);
   __ br(Assembler::NE, notInt);
 
   // itos
@@ -2797,7 +2797,7 @@
   }
 
   __ bind(notInt);
-  __ cmp(flags, ctos);
+  __ cmp(flags, (u1)ctos);
   __ br(Assembler::NE, notChar);
 
   // ctos
@@ -2812,7 +2812,7 @@
   }
 
   __ bind(notChar);
-  __ cmp(flags, stos);
+  __ cmp(flags, (u1)stos);
   __ br(Assembler::NE, notShort);
 
   // stos
@@ -2827,7 +2827,7 @@
   }
 
   __ bind(notShort);
-  __ cmp(flags, ltos);
+  __ cmp(flags, (u1)ltos);
   __ br(Assembler::NE, notLong);
 
   // ltos
@@ -2842,7 +2842,7 @@
   }
 
   __ bind(notLong);
-  __ cmp(flags, ftos);
+  __ cmp(flags, (u1)ftos);
   __ br(Assembler::NE, notFloat);
 
   // ftos
@@ -2858,7 +2858,7 @@
 
   __ bind(notFloat);
 #ifdef ASSERT
-  __ cmp(flags, dtos);
+  __ cmp(flags, (u1)dtos);
   __ br(Assembler::NE, notDouble);
 #endif
@@ -3534,7 +3534,7 @@
   __ lea(rscratch1, Address(r0, r3, Address::lsl(0)));
   __ lea(rscratch1, Address(rscratch1, tags_offset));
   __ ldarb(rscratch1, rscratch1);
-  __ cmp(rscratch1, JVM_CONSTANT_Class);
+  __ cmp(rscratch1, (u1)JVM_CONSTANT_Class);
   __ br(Assembler::NE, slow_case);
 
   // get InstanceKlass
@@ -3543,7 +3543,7 @@
   // make sure klass is initialized & doesn't have finalizer
   // make sure klass is fully initialized
   __ ldrb(rscratch1, Address(r4, InstanceKlass::init_state_offset()));
-  __ cmp(rscratch1, InstanceKlass::fully_initialized);
+  __ cmp(rscratch1, (u1)InstanceKlass::fully_initialized);
   __ br(Assembler::NE, slow_case);
 
   // get instance_size in InstanceKlass (scaled to a count of bytes)
@@ -3683,7 +3683,7 @@
   __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
   __ lea(r1, Address(rscratch1, r19));
   __ ldarb(r1, r1);
-  __ cmp(r1, JVM_CONSTANT_Class);
+  __ cmp(r1, (u1)JVM_CONSTANT_Class);
   __ br(Assembler::EQ, quicked);
 
   __ push(atos); // save receiver for result, and for GC
@@ -3737,7 +3737,7 @@
   __ add(rscratch1, r3, Array<u1>::base_offset_in_bytes());
   __ lea(r1, Address(rscratch1, r19));
   __ ldarb(r1, r1);
-  __ cmp(r1, JVM_CONSTANT_Class);
+  __ cmp(r1, (u1)JVM_CONSTANT_Class);
   __ br(Assembler::EQ, quicked);
 
   __ push(atos); // save receiver for result, and for GC
--- a/src/hotspot/cpu/arm/assembler_arm_32.hpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/cpu/arm/assembler_arm_32.hpp	Thu Aug 16 22:05:57 2018 +0200
@@ -1083,6 +1083,7 @@
       break;
     default:
       ShouldNotReachHere();
+      return;
     }
     emit_int32(0xf << 28 | 0x1 << 25 | 0x1 << 23 | 0x1 << 4 |
                (imm8 >> 7) << 24 | ((imm8 & 0x70) >> 4) << 16 | (imm8 & 0xf) |
@@ -1113,6 +1114,7 @@
       break;
     default:
       ShouldNotReachHere();
+      return;
     }
     emit_int32(cond << 28 | 0x1D /* 0b11101 */ << 23 | 0xB /* 0b1011 */ << 8 | 0x1 << 4 |
               quad << 21 | b << 22 | e << 5 | Rs->encoding() << 12 |
@@ -1143,6 +1145,7 @@
       break;
     default:
       ShouldNotReachHere();
+      return;
     }
     emit_int32(0xF /* 0b1111 */ << 28 | 0x3B /* 0b00111011 */ << 20 | 0x6 /* 0b110 */ << 9 |
                quad << 6 | imm4 << 16 |
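The added return; statements after ShouldNotReachHere() in these switches, and the acond = al initialization in c1_LIRAssembler_arm.cpp below, address the same compiler-warning family: when the unreachable-code macro is not provably noreturn, the compiler assumes the default branch can fall through with a value left uninitialized. A reduced illustration, using a simplified stand-in macro rather than HotSpot's:

```cpp
#include <cstdio>

// Stand-in for HotSpot's macro; the underlying report function is not
// necessarily marked noreturn, so the compiler cannot prove control stops.
void report_should_not_reach_here(const char* file, int line) {
  std::fprintf(stderr, "ShouldNotReachHere: %s:%d\n", file, line);
}
#define ShouldNotReachHere() report_should_not_reach_here(__FILE__, __LINE__)

unsigned encode_size(int size) {
  unsigned bits;
  switch (size) {
    case 8:  bits = 1; break;
    case 16: bits = 2; break;
    default:
      ShouldNotReachHere();
      return 0;  // without this, gcc -Wall can warn that 'bits' may be
                 // used uninitialized after the default path falls through
  }
  return bits;
}

int main() { return encode_size(8) == 1 ? 0 : 1; }
```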
--- a/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -52,13 +52,13 @@
 
 RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
-  : _throw_index_out_of_bounds_exception(false), _index(index), _array(array) {
+  : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
   assert(info != NULL, "must have info");
   _info = new CodeEmitInfo(info);
 }
 
 RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
-  : _throw_index_out_of_bounds_exception(true), _index(index), _array(NULL) {
+  : _index(index), _array(NULL), _throw_index_out_of_bounds_exception(true) {
   assert(info != NULL, "must have info");
   _info = new CodeEmitInfo(info);
 }
--- a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -3086,7 +3086,7 @@
 
   Label ok;
   if (op->condition() != lir_cond_always) {
-    AsmCondition acond;
+    AsmCondition acond = al;
     switch (op->condition()) {
       case lir_cond_equal:        acond = eq; break;
       case lir_cond_notEqual:     acond = ne; break;
--- a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -733,6 +733,7 @@
 
     default:
       ShouldNotReachHere();
+      return;
   }
 #else
   switch (x->op()) {
@@ -757,6 +758,7 @@
       break;
     default:
       ShouldNotReachHere();
+      return;
   }
   LIR_Opr result = call_runtime(x->y(), x->x(), entry, x->type(), NULL);
   set_result(x, result);
@@ -824,7 +826,7 @@
   if (x->op() == Bytecodes::_irem) {
     out_reg = FrameMap::R0_opr;
     __ irem(left_arg->result(), right_arg->result(), out_reg, tmp, info);
-  } else if (x->op() == Bytecodes::_idiv) {
+  } else { // (x->op() == Bytecodes::_idiv)
     out_reg = FrameMap::R1_opr;
     __ idiv(left_arg->result(), right_arg->result(), out_reg, tmp, info);
   }
--- a/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -39,13 +39,13 @@
 
 RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
-  : _throw_index_out_of_bounds_exception(false), _index(index), _array(array) {
+  : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
   assert(info != NULL, "must have info");
   _info = new CodeEmitInfo(info);
 }
 
 RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
-  : _throw_index_out_of_bounds_exception(true), _index(index), _array(NULL) {
+  : _index(index), _array(NULL), _throw_index_out_of_bounds_exception(true) {
   assert(info != NULL, "must have info");
   _info = new CodeEmitInfo(info);
 }
--- a/src/hotspot/cpu/ppc/frame_ppc.hpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/cpu/ppc/frame_ppc.hpp	Thu Aug 16 22:05:57 2018 +0200
@@ -359,13 +359,13 @@
   //  ...
   //
 
-  // frame pointer for this frame
-  intptr_t* _fp;
-
   // The frame's stack pointer before it has been extended by a c2i adapter;
   // needed by deoptimization
   intptr_t* _unextended_sp;
 
+  // frame pointer for this frame
+  intptr_t* _fp;
+
  public:
 
   // Accessors for fields
--- a/src/hotspot/cpu/ppc/frame_ppc.inline.hpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/cpu/ppc/frame_ppc.inline.hpp	Thu Aug 16 22:05:57 2018 +0200
@@ -55,7 +55,7 @@
 // Constructors
 
 // Initialize all fields, _unextended_sp will be adjusted in find_codeblob_and_set_pc_and_deopt_state.
-inline frame::frame() : _sp(NULL), _unextended_sp(NULL), _fp(NULL), _cb(NULL), _pc(NULL), _deopt_state(unknown) {}
+inline frame::frame() : _sp(NULL), _pc(NULL), _cb(NULL), _deopt_state(unknown), _unextended_sp(NULL), _fp(NULL) {}
 
 inline frame::frame(intptr_t* sp) : _sp(sp), _unextended_sp(sp) {
   find_codeblob_and_set_pc_and_deopt_state((address)own_abi()->lr); // also sets _fp and adjusts _unextended_sp
--- a/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -29,6 +29,7 @@
 #include "gc/g1/g1BarrierSetAssembler.hpp"
 #include "gc/g1/g1BarrierSetRuntime.hpp"
 #include "gc/g1/g1CardTable.hpp"
+#include "gc/g1/g1SATBMarkQueueSet.hpp"
 #include "gc/g1/g1ThreadLocalData.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "interpreter/interp_masm.hpp"
@@ -458,7 +459,7 @@
   __ mflr(R0);
   __ std(R0, _abi(lr), R1_SP);
   __ push_frame_reg_args(nbytes_save, R0); // dummy frame for C call
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SATBMarkQueueSet::handle_zero_index_for_thread), R16_thread);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1SATBMarkQueueSet::handle_zero_index_for_thread), R16_thread);
   __ pop_frame();
   __ ld(R0, _abi(lr), R1_SP);
   __ mtlr(R0);
--- a/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -41,13 +41,13 @@
 #define CHECK_BAILOUT() { if (ce->compilation()->bailed_out()) return; }
 
 RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index, LIR_Opr array)
-  : _throw_index_out_of_bounds_exception(false), _index(index), _array(array) {
+  : _index(index), _array(array), _throw_index_out_of_bounds_exception(false) {
   assert(info != NULL, "must have info");
   _info = new CodeEmitInfo(info);
 }
 
 RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index)
-  : _throw_index_out_of_bounds_exception(true), _index(index), _array(NULL) {
+  : _index(index), _array(NULL), _throw_index_out_of_bounds_exception(true) {
   assert(info != NULL, "must have info");
   _info = new CodeEmitInfo(info);
 }
--- a/src/hotspot/cpu/s390/frame_s390.hpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/cpu/s390/frame_s390.hpp	Thu Aug 16 22:05:57 2018 +0200
@@ -444,12 +444,12 @@
 
   // NOTE: Stack pointer is now held in the base class, so remove it from here.
 
+  // Needed by deoptimization.
+  intptr_t* _unextended_sp;
+
   // Frame pointer for this frame.
   intptr_t* _fp;
 
-  // Needed by deoptimization.
-  intptr_t* _unextended_sp;
-
  public:
 
   // Interface for all frames:
--- a/src/hotspot/cpu/s390/frame_s390.inline.hpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/cpu/s390/frame_s390.inline.hpp	Thu Aug 16 22:05:57 2018 +0200
@@ -54,7 +54,7 @@
 // Constructors
 
 // Initialize all fields, _unextended_sp will be adjusted in find_codeblob_and_set_pc_and_deopt_state.
-inline frame::frame() : _sp(NULL), _unextended_sp(NULL), _fp(NULL), _cb(NULL), _pc(NULL), _deopt_state(unknown) {}
+inline frame::frame() : _sp(NULL), _pc(NULL), _cb(NULL), _deopt_state(unknown), _unextended_sp(NULL), _fp(NULL) {}
 
 inline frame::frame(intptr_t* sp) : _sp(sp), _unextended_sp(sp) {
   find_codeblob_and_set_pc_and_deopt_state((address)own_abi()->return_pc);
@@ -71,7 +71,7 @@
 // Generic constructor. Used by pns() in debug.cpp only
 #ifndef PRODUCT
 inline frame::frame(void* sp, void* pc, void* unextended_sp) :
-  _sp((intptr_t*)sp), _unextended_sp((intptr_t*)unextended_sp), _cb(NULL), _pc(NULL) {
+  _sp((intptr_t*)sp), _pc(NULL), _cb(NULL), _unextended_sp((intptr_t*)unextended_sp) {
   find_codeblob_and_set_pc_and_deopt_state((address)pc); // Also sets _fp and adjusts _unextended_sp.
 }
 #endif
--- a/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -30,6 +30,7 @@
 #include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1BarrierSetAssembler.hpp"
 #include "gc/g1/g1BarrierSetRuntime.hpp"
+#include "gc/g1/g1SATBMarkQueueSet.hpp"
 #include "gc/g1/g1ThreadLocalData.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "interpreter/interp_masm.hpp"
@@ -520,7 +521,7 @@
   __ bind(refill);
   save_volatile_registers(sasm);
   __ z_lgr(tmp, pre_val); // save pre_val
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SATBMarkQueueSet::handle_zero_index_for_thread),
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1SATBMarkQueueSet::handle_zero_index_for_thread),
                   Z_thread);
   __ z_lgr(pre_val, tmp); // restore pre_val
   restore_volatile_registers(sasm);
--- a/src/hotspot/cpu/sparc/gc/g1/g1BarrierSetAssembler_sparc.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/cpu/sparc/gc/g1/g1BarrierSetAssembler_sparc.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -28,6 +28,7 @@
 #include "gc/g1/g1BarrierSetAssembler.hpp"
 #include "gc/g1/g1BarrierSetRuntime.hpp"
 #include "gc/g1/g1CardTable.hpp"
+#include "gc/g1/g1SATBMarkQueueSet.hpp"
 #include "gc/g1/g1ThreadLocalData.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "interpreter/interp_masm.hpp"
@@ -160,7 +161,7 @@
 
   address handle_zero =
     CAST_FROM_FN_PTR(address,
-                     &SATBMarkQueueSet::handle_zero_index_for_thread);
+                     &G1SATBMarkQueueSet::handle_zero_index_for_thread);
   // This should be rare enough that we can afford to save all the
   // scratch registers that the calling context might be using.
   __ mov(G1_scratch, L0);
@@ -606,8 +607,8 @@
 
   __ call_VM_leaf(L7_thread_cache,
                   CAST_FROM_FN_PTR(address,
-                                   SATBMarkQueueSet::handle_zero_index_for_thread),
-                  G2_thread);
+                                   G1SATBMarkQueueSet::handle_zero_index_for_thread),
+                  G2_thread);
 
   __ restore_live_registers(true);
@@ -694,7 +695,7 @@
 
   __ call_VM_leaf(L7_thread_cache,
                   CAST_FROM_FN_PTR(address,
                                    DirtyCardQueueSet::handle_zero_index_for_thread),
-                  G2_thread);
+                  G2_thread);
 
   __ restore_live_registers(true);
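The ppc, s390, and sparc barrier edits above all track one refactor, also visible in this changeset's file list (g1SATBMarkQueueFilter.cpp/.hpp): the buffer-refill entry point moves from the generic SATB queue set to a G1-specific subclass, so every CAST_FROM_FN_PTR call site must now name G1SATBMarkQueueSet. A schematic sketch of the shape of that move, with hypothetical stubs rather than the real classes:

```cpp
#include <cstdio>

// Generic SATB queue set: no longer owns the thread-refill entry point.
struct SATBMarkQueueSet {
  // shared queue-set machinery would live here
};

// G1-specific subclass: the static handler the assembler stubs now call.
struct G1SATBMarkQueueSet : public SATBMarkQueueSet {
  static void handle_zero_index_for_thread(void* thread) {
    std::printf("refill SATB buffer for thread %p\n", thread);
  }
};

int main() {
  int thread_stand_in = 0;
  // The stubs take the address of the subclass's static member function:
  void (*entry)(void*) = &G1SATBMarkQueueSet::handle_zero_index_for_thread;
  entry(&thread_stand_in);
}
```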
--- a/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/cpu/x86/macroAssembler_x86.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -10567,7 +10567,7 @@
                                       XMMRegister tmp1Reg, XMMRegister tmp2Reg,
                                       XMMRegister tmp3Reg, XMMRegister tmp4Reg,
                                       Register tmp5, Register result) {
-  Label copy_chars_loop, return_length, return_zero, done, below_threshold;
+  Label copy_chars_loop, return_length, return_zero, done;
 
   // rsi: src
   // rdi: dst
@@ -10590,13 +10590,12 @@
 
     set_vector_masking();  // opening of the stub context for programming mask registers
 
-    Label copy_32_loop, copy_loop_tail, restore_k1_return_zero;
-
-    // alignement
-    Label post_alignement;
-
-    // if length of the string is less than 16, handle it in an old fashioned
-    // way
+    Label copy_32_loop, copy_loop_tail, restore_k1_return_zero, below_threshold;
+
+    // alignment
+    Label post_alignment;
+
+    // if length of the string is less than 16, handle it in an old fashioned way
     testl(len, -32);
     jcc(Assembler::zero, below_threshold);
@@ -10609,7 +10608,7 @@
     kmovql(k3, k1);
 
     testl(len, -64);
-    jcc(Assembler::zero, post_alignement);
+    jcc(Assembler::zero, post_alignment);
 
     movl(tmp5, dst);
     andl(tmp5, (32 - 1));
@@ -10618,7 +10617,7 @@
 
     // bail out when there is nothing to be done
     testl(tmp5, 0xFFFFFFFF);
-    jcc(Assembler::zero, post_alignement);
+    jcc(Assembler::zero, post_alignment);
 
     // ~(~0 << len), where len is the # of remaining elements to process
     movl(result, 0xFFFFFFFF);
@@ -10638,8 +10637,8 @@
     addptr(dst, tmp5);
     subl(len, tmp5);
 
-    bind(post_alignement);
-    // end of alignement
+    bind(post_alignment);
+    // end of alignment
 
     movl(tmp5, len);
     andl(tmp5, (32 - 1));    // tail count (in chars)
@@ -10694,12 +10693,13 @@
     jmp(return_zero);
 
     clear_vector_masking();   // closing of the stub context for programming mask registers
-  }
+
+    bind(below_threshold);
+  }
+
   if (UseSSE42Intrinsics) {
     Label copy_32_loop, copy_16, copy_tail;
 
-    bind(below_threshold);
-
     movl(result, len);
 
     movl(tmp5, 0xff00ff00);   // create mask to test for Unicode chars in vectors
@@ -10812,8 +10812,7 @@
     Label copy_32_loop, copy_tail;
     Register tmp3_aliased = len;
 
-    // if length of the string is less than 16, handle it in an old fashioned
-    // way
+    // if length of the string is less than 16, handle it in an old fashioned way
     testl(len, -16);
     jcc(Assembler::zero, below_threshold);
@@ -10927,7 +10926,10 @@
     addptr(dst, 8);
 
     bind(copy_bytes);
-  }
+  } else {
+    bind(below_threshold);
+  }
+
   testl(len, len);
   jccb(Assembler::zero, done);
   lea(src, Address(src, len, Address::times_1));
--- a/src/hotspot/os/aix/os_aix.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/os/aix/os_aix.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -2705,10 +2705,6 @@
   return (ret == 0) ? OS_OK : OS_ERR;
 }
 
-// Hint to the underlying OS that a task switch would not be good.
-// Void return because it's a hint and can fail.
-void os::hint_no_preempt() {}
-
 ////////////////////////////////////////////////////////////////////////////////
 // suspend/resume support
--- a/src/hotspot/os/bsd/os_bsd.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/os/bsd/os_bsd.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -2465,10 +2465,6 @@
   return (*priority_ptr != -1 || errno == 0 ? OS_OK : OS_ERR);
 }
 
-// Hint to the underlying OS that a task switch would not be good.
-// Void return because it's a hint and can fail.
-void os::hint_no_preempt() {}
-
 ////////////////////////////////////////////////////////////////////////////////
 // suspend/resume support
--- a/src/hotspot/os/linux/os_linux.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/os/linux/os_linux.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -4192,10 +4192,6 @@
   return (*priority_ptr != -1 || errno == 0 ? OS_OK : OS_ERR);
 }
 
-// Hint to the underlying OS that a task switch would not be good.
-// Void return because it's a hint and can fail.
-void os::hint_no_preempt() {}
-
 ////////////////////////////////////////////////////////////////////////////////
 // suspend/resume support
@@ -5797,11 +5793,21 @@
     core_pattern[ret] = '\0';
   }
 
+  // Replace the %p in the core pattern with the process id. NOTE: we do this
+  // only if the pattern doesn't start with "|", and we support only one %p in
+  // the pattern.
   char *pid_pos = strstr(core_pattern, "%p");
+  const char* tail = (pid_pos != NULL) ? (pid_pos + 2) : "";  // skip over the "%p"
   int written;
 
   if (core_pattern[0] == '/') {
-    written = jio_snprintf(buffer, bufferSize, "%s", core_pattern);
+    if (pid_pos != NULL) {
+      *pid_pos = '\0';
+      written = jio_snprintf(buffer, bufferSize, "%s%d%s", core_pattern,
+                             current_process_id(), tail);
+    } else {
+      written = jio_snprintf(buffer, bufferSize, "%s", core_pattern);
+    }
   } else {
     char cwd[PATH_MAX];
@@ -5814,6 +5820,10 @@
     written = jio_snprintf(buffer, bufferSize,
                            "\"%s\" (or dumping to %s/core.%d)",
                            &core_pattern[1], p, current_process_id());
+  } else if (pid_pos != NULL) {
+    *pid_pos = '\0';
+    written = jio_snprintf(buffer, bufferSize, "%s/%s%d%s", p, core_pattern,
+                           current_process_id(), tail);
   } else {
     written = jio_snprintf(buffer, bufferSize, "%s/%s", p, core_pattern);
   }
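The new branches splice the current process id into a core_pattern containing %p, so crash reports point at the actual core file name. A standalone re-implementation sketch of just that substitution (simplified: a single %p, pipe patterns left untouched, plain snprintf instead of HotSpot's jio_snprintf):

```cpp
#include <cstdio>
#include <cstring>

// Expand one "%p" in a core pattern to the given pid. Patterns starting
// with '|' are piped to a program and are copied through unchanged, as in
// the HotSpot change above.
int expand_core_pattern(const char* pattern, int pid, char* out, size_t n) {
  const char* pid_pos = std::strstr(pattern, "%p");
  if (pattern[0] == '|' || pid_pos == nullptr) {
    return std::snprintf(out, n, "%s", pattern);
  }
  // Print the prefix before "%p", then the pid, then the tail after it.
  return std::snprintf(out, n, "%.*s%d%s",
                       (int)(pid_pos - pattern), pattern, pid, pid_pos + 2);
}

int main() {
  char buf[128];
  expand_core_pattern("/var/cores/core.%p", 4242, buf, sizeof buf);
  std::puts(buf);  // prints: /var/cores/core.4242
}
```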
--- a/src/hotspot/os/solaris/os_solaris.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/os/solaris/os_solaris.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -78,7 +78,6 @@
 # include <link.h>
 # include <poll.h>
 # include <pthread.h>
-# include <schedctl.h>
 # include <setjmp.h>
 # include <signal.h>
 # include <stdio.h>
@@ -742,7 +741,6 @@
   OSThread* osthr = thread->osthread();
 
   osthr->set_lwp_id(_lwp_self());  // Store lwp in case we are bound
-  thread->_schedctl = (void *) schedctl_init();
 
   log_info(os, thread)("Thread is alive (tid: " UINTX_FORMAT ").",
     os::current_thread_id());
@@ -812,7 +810,6 @@
   // Store info on the Solaris thread into the OSThread
   osthread->set_thread_id(thread_id);
   osthread->set_lwp_id(_lwp_self());
-  thread->_schedctl = (void *) schedctl_init();
 
   if (UseNUMA) {
     int lgrp_id = os::numa_get_group_id();
@@ -3407,13 +3404,6 @@
   return OS_OK;
 }
 
-
-// Hint to the underlying OS that a task switch would not be good.
-// Void return because it's a hint and can fail.
-void os::hint_no_preempt() {
-  schedctl_start(schedctl_init());
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 // suspend/resume support
--- a/src/hotspot/os/windows/os_windows.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/os/windows/os_windows.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -3609,11 +3609,6 @@
   return OS_OK;
 }
 
-
-// Hint to the underlying OS that a task switch would not be good.
-// Void return because it's a hint and can fail.
-void os::hint_no_preempt() {}
-
 void os::interrupt(Thread* thread) {
   debug_only(Thread::check_for_dangling_thread_pointer(thread);)
--- a/src/hotspot/share/c1/c1_LIRGenerator.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/share/c1/c1_LIRGenerator.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -1285,9 +1285,10 @@
   // FIXME T_ADDRESS should actually be T_METADATA but it can't because the
   // meaning of these two is mixed up (see JDK-8026837).
   __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), temp, info);
-  __ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_ADDRESS), result);
+  __ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_ADDRESS), temp);
   // mirror = ((OopHandle)mirror)->resolve();
-  __ move_wide(new LIR_Address(result, T_OBJECT), result);
+  access_load(IN_NATIVE, T_OBJECT,
+              LIR_OprFact::address(new LIR_Address(temp, T_OBJECT)), result);
 }
 
 // java.lang.Class::isPrimitive()
@@ -1623,6 +1624,18 @@
   }
 }
 
+void LIRGenerator::access_load(DecoratorSet decorators, BasicType type,
+                               LIR_Opr addr, LIR_Opr result) {
+  decorators |= C1_READ_ACCESS;
+  LIRAccess access(this, decorators, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, type);
+  access.set_resolved_addr(addr);
+  if (access.is_raw()) {
+    _barrier_set->BarrierSetC1::load(access, result);
+  } else {
+    _barrier_set->load(access, result);
+  }
+}
+
 void LIRGenerator::access_store_at(DecoratorSet decorators, BasicType type,
                                    LIRItem& base, LIR_Opr offset, LIR_Opr value,
                                    CodeEmitInfo* patch_info, CodeEmitInfo* store_emit_info) {
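The interesting part of the new access_load is its dispatch: raw accesses bypass the GC barrier set via a qualified (non-virtual) call, while decorated accesses go through the virtual load so the active barrier set can wrap the load. A schematic sketch of that dispatch shape, with simplified stand-in types rather than the real LIRAccess machinery:

```cpp
#include <cstdio>

// Base "barrier set": its load is a plain, barrier-free memory access.
struct BarrierSetC1 {
  virtual ~BarrierSetC1() {}
  virtual void load(int* addr, int& result) { result = *addr; }
};

// A GC-specific barrier set overrides load to add barrier logic.
struct G1LikeBarrierSetC1 : BarrierSetC1 {
  void load(int* addr, int& result) override {
    BarrierSetC1::load(addr, result);
    std::puts("GC read barrier would be emitted here");
  }
};

void access_load(BarrierSetC1* bs, bool is_raw, int* addr, int& result) {
  if (is_raw) {
    bs->BarrierSetC1::load(addr, result);  // qualified call: skip the barrier
  } else {
    bs->load(addr, result);                // virtual call: barrier set decides
  }
}

int main() {
  G1LikeBarrierSetC1 bs;
  int value = 42, out = 0;
  access_load(&bs, /*is_raw=*/false, &value, out);
  return out == 42 ? 0 : 1;
}
```

This is also why the Class.getClassLoader0 intrinsic above switches from two plain move_wide operations to access_load with IN_NATIVE: resolving an OopHandle is a native-memory oop load, and it must be visible to the barrier set.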
--- a/src/hotspot/share/c1/c1_LIRGenerator.hpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/share/c1/c1_LIRGenerator.hpp	Thu Aug 16 22:05:57 2018 +0200
@@ -288,6 +288,9 @@
                       LIRItem& base, LIR_Opr offset, LIR_Opr result,
                       CodeEmitInfo* patch_info = NULL, CodeEmitInfo* load_emit_info = NULL);
 
+  void access_load(DecoratorSet decorators, BasicType type,
+                   LIR_Opr addr, LIR_Opr result);
+
   LIR_Opr access_atomic_cmpxchg_at(DecoratorSet decorators, BasicType type,
                                    LIRItem& base, LIRItem& offset, LIRItem& cmp_value, LIRItem& new_value);
--- a/src/hotspot/share/ci/ciInstanceKlass.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/share/ci/ciInstanceKlass.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -596,6 +596,7 @@
   // Go into the VM to fetch the implementor.
   {
     VM_ENTRY_MARK;
+    MutexLocker ml(Compile_lock);
     Klass* k = get_instanceKlass()->implementor();
     if (k != NULL) {
       if (k == get_instanceKlass()) {
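The one-line fix takes Compile_lock around the implementor() query using MutexLocker, HotSpot's scoped-locking idiom, so the class hierarchy cannot change under the compiler interface mid-read. A generic sketch of the same idiom with standard C++ stand-ins rather than the HotSpot Mutex types:

```cpp
#include <mutex>

std::mutex compile_lock_stand_in;  // stand-in for Compile_lock
int implementor_data = 0;          // stand-in for the hierarchy state

int query_implementor() {
  // Scoped lock: acquired here, released automatically on scope exit,
  // mirroring MutexLocker ml(Compile_lock) in the fix above.
  std::lock_guard<std::mutex> ml(compile_lock_stand_in);
  return implementor_data;
}

int main() { return query_implementor(); }
```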
--- a/src/hotspot/share/classfile/classLoaderData.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/share/classfile/classLoaderData.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -1418,27 +1418,6 @@
   }
 
   if (seen_dead_loader) {
-    data = _head;
-    while (data != NULL) {
-      // Remove entries in the dictionary of live class loader that have
-      // initiated loading classes in a dead class loader.
-      if (data->dictionary() != NULL) {
-        data->dictionary()->do_unloading();
-      }
-      // Walk a ModuleEntry's reads, and a PackageEntry's exports
-      // lists to determine if there are modules on those lists that are now
-      // dead and should be removed.  A module's life cycle is equivalent
-      // to its defining class loader's life cycle.  Since a module is
-      // considered dead if its class loader is dead, these walks must
-      // occur after each class loader's aliveness is determined.
-      if (data->packages() != NULL) {
-        data->packages()->purge_all_package_exports();
-      }
-      if (data->modules_defined()) {
-        data->modules()->purge_all_module_reads();
-      }
-      data = data->next();
-    }
     JFR_ONLY(post_class_unload_events();)
   }
 
@@ -1447,6 +1426,32 @@
   return seen_dead_loader;
 }
 
+// There's at least one dead class loader.  Purge refererences of healthy module
+// reads lists and package export lists to modules belonging to dead loaders.
+void ClassLoaderDataGraph::clean_module_and_package_info() {
+  ClassLoaderData* data = _head;
+  while (data != NULL) {
+    // Remove entries in the dictionary of live class loader that have
+    // initiated loading classes in a dead class loader.
+    if (data->dictionary() != NULL) {
+      data->dictionary()->do_unloading();
+    }
+    // Walk a ModuleEntry's reads, and a PackageEntry's exports
+    // lists to determine if there are modules on those lists that are now
+    // dead and should be removed.  A module's life cycle is equivalent
+    // to its defining class loader's life cycle.  Since a module is
+    // considered dead if its class loader is dead, these walks must
+    // occur after each class loader's aliveness is determined.
+    if (data->packages() != NULL) {
+      data->packages()->purge_all_package_exports();
+    }
+    if (data->modules_defined()) {
+      data->modules()->purge_all_module_reads();
+    }
+    data = data->next();
+  }
+}
+
 void ClassLoaderDataGraph::purge() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
   ClassLoaderData* list = _unloading;
--- a/src/hotspot/share/classfile/classLoaderData.hpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/share/classfile/classLoaderData.hpp	Thu Aug 16 22:05:57 2018 +0200
@@ -97,6 +97,7 @@
  public:
   static ClassLoaderData* find_or_create(Handle class_loader);
+  static void clean_module_and_package_info();
   static void purge();
   static void clear_claimed_marks();
   // oops do
--- a/src/hotspot/share/classfile/classLoaderData.inline.hpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/share/classfile/classLoaderData.inline.hpp	Thu Aug 16 22:05:57 2018 +0200
@@ -94,9 +94,15 @@
 }
 
 bool ClassLoaderDataGraph::should_clean_metaspaces_and_reset() {
+  // Only clean metaspaces after full GC.
   bool do_cleaning = _safepoint_cleanup_needed;
+#if INCLUDE_JVMTI
+  do_cleaning = do_cleaning && (_should_clean_deallocate_lists || InstanceKlass::has_previous_versions());
+#else
+  do_cleaning = do_cleaning && _should_clean_deallocate_lists;
+#endif
   _safepoint_cleanup_needed = false; // reset
-  return (do_cleaning && _should_clean_deallocate_lists) || InstanceKlass::has_previous_versions();
+  return do_cleaning;
}
 
 #endif // SHARE_VM_CLASSFILE_CLASSLOADERDATA_INLINE_HPP
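Editor's aside: the semantic change in this hunk is easy to miss. The old expression could return true even when no safepoint cleanup was requested, because the has_previous_versions() term sat outside the conjunction; the new form gates everything on _safepoint_cleanup_needed. A truth-table check of the two rules, with plain bools standing in for the HotSpot flags (illustrative only):

    #include <cassert>

    bool old_rule(bool needed, bool clean_lists, bool prev_versions) {
      return (needed && clean_lists) || prev_versions;   // fires even when !needed
    }
    bool new_rule(bool needed, bool clean_lists, bool prev_versions) {
      return needed && (clean_lists || prev_versions);   // always gated on `needed`
    }

    int main() {
      // The case the patch changes: cleanup not requested, but previous
      // class versions exist. The old rule returned true anyway.
      assert(old_rule(false, false, true) == true);
      assert(new_rule(false, false, true) == false);
    }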
--- a/src/hotspot/share/classfile/compactHashtable.hpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/share/classfile/compactHashtable.hpp	Thu Aug 16 22:05:57 2018 +0200
@@ -231,6 +231,10 @@
 
   // For reading from/writing to the CDS archive
   void serialize(SerializeClosure* soc);
+
+  inline bool empty() {
+    return (_entry_count == 0);
+  }
 };
 
 template <class T, class N> class CompactHashtable : public SimpleCompactHashtable {
--- a/src/hotspot/share/classfile/javaClasses.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/share/classfile/javaClasses.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -209,7 +209,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_String::serialize(SerializeClosure* f) {
+void java_lang_String::serialize_offsets(SerializeClosure* f) {
   STRING_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
   f->do_u4((u4*)&initialized);
 }
@@ -1534,7 +1534,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_Class::serialize(SerializeClosure* f) {
+void java_lang_Class::serialize_offsets(SerializeClosure* f) {
   f->do_u4((u4*)&offsets_computed);
   f->do_u4((u4*)&_init_lock_offset);
@@ -1608,7 +1608,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_Thread::serialize(SerializeClosure* f) {
+void java_lang_Thread::serialize_offsets(SerializeClosure* f) {
   THREAD_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -1860,7 +1860,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_ThreadGroup::serialize(SerializeClosure* f) {
+void java_lang_ThreadGroup::serialize_offsets(SerializeClosure* f) {
   THREADGROUP_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -1878,7 +1878,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_Throwable::serialize(SerializeClosure* f) {
+void java_lang_Throwable::serialize_offsets(SerializeClosure* f) {
   THROWABLE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -2654,7 +2654,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_StackFrameInfo::serialize(SerializeClosure* f) {
+void java_lang_StackFrameInfo::serialize_offsets(SerializeClosure* f) {
   STACKFRAMEINFO_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
   STACKFRAMEINFO_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET);
 }
@@ -2672,7 +2672,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_LiveStackFrameInfo::serialize(SerializeClosure* f) {
+void java_lang_LiveStackFrameInfo::serialize_offsets(SerializeClosure* f) {
   LIVESTACKFRAMEINFO_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -2686,7 +2686,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_reflect_AccessibleObject::serialize(SerializeClosure* f) {
+void java_lang_reflect_AccessibleObject::serialize_offsets(SerializeClosure* f) {
   ACCESSIBLEOBJECT_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -2727,7 +2727,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_reflect_Method::serialize(SerializeClosure* f) {
+void java_lang_reflect_Method::serialize_offsets(SerializeClosure* f) {
   METHOD_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -2914,7 +2914,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_reflect_Constructor::serialize(SerializeClosure* f) {
+void java_lang_reflect_Constructor::serialize_offsets(SerializeClosure* f) {
   CONSTRUCTOR_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -3063,7 +3063,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_reflect_Field::serialize(SerializeClosure* f) {
+void java_lang_reflect_Field::serialize_offsets(SerializeClosure* f) {
   FIELD_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -3186,7 +3186,7 @@
 }
 
 #if INCLUDE_CDS
-void reflect_ConstantPool::serialize(SerializeClosure* f) {
+void reflect_ConstantPool::serialize_offsets(SerializeClosure* f) {
   CONSTANTPOOL_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -3203,7 +3203,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_reflect_Parameter::serialize(SerializeClosure* f) {
+void java_lang_reflect_Parameter::serialize_offsets(SerializeClosure* f) {
   PARAMETER_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -3281,7 +3281,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_Module::serialize(SerializeClosure* f) {
+void java_lang_Module::serialize_offsets(SerializeClosure* f) {
   MODULE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
   MODULE_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET);
 }
@@ -3371,7 +3371,7 @@
 }
 
 #if INCLUDE_CDS
-void reflect_UnsafeStaticFieldAccessorImpl::serialize(SerializeClosure* f) {
+void reflect_UnsafeStaticFieldAccessorImpl::serialize_offsets(SerializeClosure* f) {
   UNSAFESTATICFIELDACCESSORIMPL_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -3543,7 +3543,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_ref_SoftReference::serialize(SerializeClosure* f) {
+void java_lang_ref_SoftReference::serialize_offsets(SerializeClosure* f) {
   SOFTREFERENCE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -3584,7 +3584,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_invoke_DirectMethodHandle::serialize(SerializeClosure* f) {
+void java_lang_invoke_DirectMethodHandle::serialize_offsets(SerializeClosure* f) {
   DIRECTMETHODHANDLE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -3616,7 +3616,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_invoke_MethodHandle::serialize(SerializeClosure* f) {
+void java_lang_invoke_MethodHandle::serialize_offsets(SerializeClosure* f) {
   METHODHANDLE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -3635,7 +3635,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_invoke_MemberName::serialize(SerializeClosure* f) {
+void java_lang_invoke_MemberName::serialize_offsets(SerializeClosure* f) {
   MEMBERNAME_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
   MEMBERNAME_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET);
 }
@@ -3648,7 +3648,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_invoke_ResolvedMethodName::serialize(SerializeClosure* f) {
+void java_lang_invoke_ResolvedMethodName::serialize_offsets(SerializeClosure* f) {
   RESOLVEDMETHOD_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -3663,7 +3663,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_invoke_LambdaForm::serialize(SerializeClosure* f) {
+void java_lang_invoke_LambdaForm::serialize_offsets(SerializeClosure* f) {
   LAMBDAFORM_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -3815,7 +3815,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_invoke_MethodType::serialize(SerializeClosure* f) {
+void java_lang_invoke_MethodType::serialize_offsets(SerializeClosure* f) {
   METHODTYPE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -3909,7 +3909,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_invoke_CallSite::serialize(SerializeClosure* f) {
+void java_lang_invoke_CallSite::serialize_offsets(SerializeClosure* f) {
   CALLSITE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -3931,7 +3931,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_invoke_MethodHandleNatives_CallSiteContext::serialize(SerializeClosure* f) {
+void java_lang_invoke_MethodHandleNatives_CallSiteContext::serialize_offsets(SerializeClosure* f) {
   CALLSITECONTEXT_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -3963,7 +3963,7 @@
 }
 
 #if INCLUDE_CDS
-void java_security_AccessControlContext::serialize(SerializeClosure* f) {
+void java_security_AccessControlContext::serialize_offsets(SerializeClosure* f) {
   ACCESSCONTROLCONTEXT_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -4029,7 +4029,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_ClassLoader::serialize(SerializeClosure* f) {
+void java_lang_ClassLoader::serialize_offsets(SerializeClosure* f) {
   CLASSLOADER_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
   CLASSLOADER_INJECTED_FIELDS(INJECTED_FIELD_SERIALIZE_OFFSET);
 }
@@ -4143,7 +4143,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_System::serialize(SerializeClosure* f) {
+void java_lang_System::serialize_offsets(SerializeClosure* f) {
   SYSTEM_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -4250,6 +4250,12 @@
 int jdk_internal_module_ArchivedModuleGraph::_archivedSystemModules_offset;
 int jdk_internal_module_ArchivedModuleGraph::_archivedModuleFinder_offset;
 int jdk_internal_module_ArchivedModuleGraph::_archivedMainModule_offset;
+int jdk_internal_module_ArchivedModuleGraph::_archivedConfiguration_offset;
+int java_lang_Integer_IntegerCache::_archivedCache_offset;
+int java_lang_module_Configuration::_EMPTY_CONFIGURATION_offset;
+int java_util_ImmutableCollections_ListN::_EMPTY_LIST_offset;
+int java_util_ImmutableCollections_SetN::_EMPTY_SET_offset;
+int java_util_ImmutableCollections_MapN::_EMPTY_MAP_offset;
 
 #define STACKTRACEELEMENT_FIELDS_DO(macro) \
   macro(declaringClassObject_offset, k, "declaringClassObject", class_signature, false); \
@@ -4268,7 +4274,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_StackTraceElement::serialize(SerializeClosure* f) {
+void java_lang_StackTraceElement::serialize_offsets(SerializeClosure* f) {
   STACKTRACEELEMENT_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -4343,7 +4349,7 @@
 }
 
 #if INCLUDE_CDS
-void java_lang_AssertionStatusDirectives::serialize(SerializeClosure* f) {
+void java_lang_AssertionStatusDirectives::serialize_offsets(SerializeClosure* f) {
   ASSERTIONSTATUSDIRECTIVES_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -4384,7 +4390,7 @@
 }
 
 #if INCLUDE_CDS
-void java_nio_Buffer::serialize(SerializeClosure* f) {
+void java_nio_Buffer::serialize_offsets(SerializeClosure* f) {
   BUFFER_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -4403,7 +4409,7 @@
 }
 
 #if INCLUDE_CDS
-void java_util_concurrent_locks_AbstractOwnableSynchronizer::serialize(SerializeClosure* f) {
+void java_util_concurrent_locks_AbstractOwnableSynchronizer::serialize_offsets(SerializeClosure* f) {
   AOS_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -4412,20 +4418,96 @@
   return (hardcoded_offset * heapOopSize) + instanceOopDesc::base_offset_in_bytes();
 }
 
-#define MODULEBOOTSTRAP_FIELDS_DO(macro) \
+#define INTEGERCACHE_FIELDS_DO(macro) \
+  macro(_archivedCache_offset, k, "archivedCache", java_lang_Integer_array_signature, true)
+
+void java_lang_Integer_IntegerCache::compute_offsets() {
+  InstanceKlass* k = SystemDictionary::Integer_IntegerCache_klass();
+  assert(k != NULL, "must be loaded");
+  INTEGERCACHE_FIELDS_DO(FIELD_COMPUTE_OFFSET);
+}
+
+#if INCLUDE_CDS
+void java_lang_Integer_IntegerCache::serialize_offsets(SerializeClosure* f) {
+  INTEGERCACHE_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+}
+#endif
+
+#define ARCHIVEDMODULEGRAPH_FIELDS_DO(macro) \
   macro(_archivedSystemModules_offset, k, "archivedSystemModules", systemModules_signature, true); \
   macro(_archivedModuleFinder_offset, k, "archivedModuleFinder", moduleFinder_signature, true); \
-  macro(_archivedMainModule_offset, k, "archivedMainModule", string_signature, true)
+  macro(_archivedMainModule_offset, k, "archivedMainModule", string_signature, true); \
+  macro(_archivedConfiguration_offset, k, "archivedConfiguration", configuration_signature, true)
 
 void jdk_internal_module_ArchivedModuleGraph::compute_offsets() {
   InstanceKlass* k = SystemDictionary::ArchivedModuleGraph_klass();
   assert(k != NULL, "must be loaded");
-  MODULEBOOTSTRAP_FIELDS_DO(FIELD_COMPUTE_OFFSET);
+  ARCHIVEDMODULEGRAPH_FIELDS_DO(FIELD_COMPUTE_OFFSET);
 }
 
 #if INCLUDE_CDS
-void jdk_internal_module_ArchivedModuleGraph::serialize(SerializeClosure* f) {
-  MODULEBOOTSTRAP_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+void jdk_internal_module_ArchivedModuleGraph::serialize_offsets(SerializeClosure* f) {
+  ARCHIVEDMODULEGRAPH_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+}
+#endif
+
+#define CONFIGURATION_FIELDS_DO(macro) \
+  macro(_EMPTY_CONFIGURATION_offset, k, "EMPTY_CONFIGURATION", configuration_signature, true)
+
+void java_lang_module_Configuration::compute_offsets() {
+  InstanceKlass* k = SystemDictionary::Configuration_klass();
+  assert(k != NULL, "must be loaded");
+  CONFIGURATION_FIELDS_DO(FIELD_COMPUTE_OFFSET);
+}
+
+#if INCLUDE_CDS
+void java_lang_module_Configuration::serialize_offsets(SerializeClosure* f) {
+  CONFIGURATION_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+}
+#endif
+
+#define LISTN_FIELDS_DO(macro) \
+  macro(_EMPTY_LIST_offset, k, "EMPTY_LIST", list_signature, true)
+
+void java_util_ImmutableCollections_ListN::compute_offsets() {
+  InstanceKlass* k = SystemDictionary::ImmutableCollections_ListN_klass();
+  assert(k != NULL, "must be loaded");
+  LISTN_FIELDS_DO(FIELD_COMPUTE_OFFSET);
+}
+
+#if INCLUDE_CDS
+void java_util_ImmutableCollections_ListN::serialize_offsets(SerializeClosure* f) {
+  LISTN_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+}
+#endif
+
+#define SETN_FIELDS_DO(macro) \
+  macro(_EMPTY_SET_offset, k, "EMPTY_SET", set_signature, true)
+
+void java_util_ImmutableCollections_SetN::compute_offsets() {
+  InstanceKlass* k = SystemDictionary::ImmutableCollections_SetN_klass();
+  assert(k != NULL, "must be loaded");
+  SETN_FIELDS_DO(FIELD_COMPUTE_OFFSET);
+}
+
+#if INCLUDE_CDS
+void java_util_ImmutableCollections_SetN::serialize_offsets(SerializeClosure* f) {
+  SETN_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
+}
+#endif
+
+#define MAPN_FIELDS_DO(macro) \
+  macro(_EMPTY_MAP_offset, k, "EMPTY_MAP", map_signature, true)
+
+void java_util_ImmutableCollections_MapN::compute_offsets() {
+  InstanceKlass* k = SystemDictionary::ImmutableCollections_MapN_klass();
+  assert(k != NULL, "must be loaded");
+  MAPN_FIELDS_DO(FIELD_COMPUTE_OFFSET);
+}
+
+#if INCLUDE_CDS
+void java_util_ImmutableCollections_MapN::serialize_offsets(SerializeClosure* f) {
+  MAPN_FIELDS_DO(FIELD_SERIALIZE_OFFSET);
 }
 #endif
@@ -4445,6 +4527,7 @@
   java_lang_ref_Reference::discovered_offset = member_offset(java_lang_ref_Reference::hc_discovered_offset);
 }
 
+#define DO_COMPUTE_OFFSETS(k) k::compute_offsets();
 
 // Compute non-hard-coded field offsets of all the classes in this file
 void JavaClasses::compute_offsets() {
@@ -4452,47 +4535,24 @@
     return; // field offsets are loaded from archive
   }
 
-  // java_lang_Class::compute_offsets was called earlier in bootstrap
-  java_lang_System::compute_offsets();
-  java_lang_ClassLoader::compute_offsets();
-  java_lang_Throwable::compute_offsets();
-  java_lang_Thread::compute_offsets();
-  java_lang_ThreadGroup::compute_offsets();
-  java_lang_AssertionStatusDirectives::compute_offsets();
-  java_lang_ref_SoftReference::compute_offsets();
-  java_lang_invoke_MethodHandle::compute_offsets();
-  java_lang_invoke_DirectMethodHandle::compute_offsets();
-  java_lang_invoke_MemberName::compute_offsets();
-  java_lang_invoke_ResolvedMethodName::compute_offsets();
-  java_lang_invoke_LambdaForm::compute_offsets();
-  java_lang_invoke_MethodType::compute_offsets();
-  java_lang_invoke_CallSite::compute_offsets();
-  java_lang_invoke_MethodHandleNatives_CallSiteContext::compute_offsets();
-  java_security_AccessControlContext::compute_offsets();
-  // Initialize reflection classes. The layouts of these classes
-  // changed with the new reflection implementation in JDK 1.4, and
-  // since the Universe doesn't know what JDK version it is until this
-  // point we defer computation of these offsets until now.
-  java_lang_reflect_AccessibleObject::compute_offsets();
-  java_lang_reflect_Method::compute_offsets();
-  java_lang_reflect_Constructor::compute_offsets();
-  java_lang_reflect_Field::compute_offsets();
-  java_nio_Buffer::compute_offsets();
-  reflect_ConstantPool::compute_offsets();
-  reflect_UnsafeStaticFieldAccessorImpl::compute_offsets();
-  java_lang_reflect_Parameter::compute_offsets();
-  java_lang_Module::compute_offsets();
-  java_lang_StackTraceElement::compute_offsets();
-  java_lang_StackFrameInfo::compute_offsets();
-  java_lang_LiveStackFrameInfo::compute_offsets();
-  java_util_concurrent_locks_AbstractOwnableSynchronizer::compute_offsets();
-
-  jdk_internal_module_ArchivedModuleGraph::compute_offsets();
+  // We have already called the compute_offsets() of the
+  // BASIC_JAVA_CLASSES_DO_PART1 classes (java_lang_String and java_lang_Class)
+  // earlier inside SystemDictionary::resolve_preloaded_classes()
+  BASIC_JAVA_CLASSES_DO_PART2(DO_COMPUTE_OFFSETS);
 
   // generated interpreter code wants to know about the offsets we just computed:
   AbstractAssembler::update_delayed_values();
 }
 
+#if INCLUDE_CDS
+#define DO_SERIALIZE_OFFSETS(k) k::serialize_offsets(soc);
+
+void JavaClasses::serialize_offsets(SerializeClosure* soc) {
+  BASIC_JAVA_CLASSES_DO(DO_SERIALIZE_OFFSETS);
+}
+#endif
+
+
 #ifndef PRODUCT
 
 // These functions exist to assert the validity of hard-coded field offsets to guard
--- a/src/hotspot/share/classfile/javaClasses.hpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/share/classfile/javaClasses.hpp	Thu Aug 16 22:05:57 2018 +0200
@@ -47,6 +47,52 @@
 // correspondingly. The names in the enums must be identical to the actual field
 // names in order for the verification code to work.
 
+#define BASIC_JAVA_CLASSES_DO_PART1(f) \
+  f(java_lang_Class) \
+  f(java_lang_String) \
+  //end
+
+#define BASIC_JAVA_CLASSES_DO_PART2(f) \
+  f(java_lang_System) \
+  f(java_lang_ClassLoader) \
+  f(java_lang_Throwable) \
+  f(java_lang_Thread) \
+  f(java_lang_ThreadGroup) \
+  f(java_lang_AssertionStatusDirectives) \
+  f(java_lang_ref_SoftReference) \
+  f(java_lang_invoke_MethodHandle) \
+  f(java_lang_invoke_DirectMethodHandle) \
+  f(java_lang_invoke_MemberName) \
+  f(java_lang_invoke_ResolvedMethodName) \
+  f(java_lang_invoke_LambdaForm) \
+  f(java_lang_invoke_MethodType) \
+  f(java_lang_invoke_CallSite) \
+  f(java_lang_invoke_MethodHandleNatives_CallSiteContext) \
+  f(java_security_AccessControlContext) \
+  f(java_lang_reflect_AccessibleObject) \
+  f(java_lang_reflect_Method) \
+  f(java_lang_reflect_Constructor) \
+  f(java_lang_reflect_Field) \
+  f(java_nio_Buffer) \
+  f(reflect_ConstantPool) \
+  f(reflect_UnsafeStaticFieldAccessorImpl) \
+  f(java_lang_reflect_Parameter) \
+  f(java_lang_Module) \
+  f(java_lang_StackTraceElement) \
+  f(java_lang_StackFrameInfo) \
+  f(java_lang_LiveStackFrameInfo) \
+  f(java_util_concurrent_locks_AbstractOwnableSynchronizer) \
+  f(jdk_internal_module_ArchivedModuleGraph) \
+  f(java_lang_Integer_IntegerCache) \
+  f(java_lang_module_Configuration) \
+  f(java_util_ImmutableCollections_ListN) \
+  f(java_util_ImmutableCollections_MapN) \
+  f(java_util_ImmutableCollections_SetN) \
+  //end
+
+#define BASIC_JAVA_CLASSES_DO(f) \
+  BASIC_JAVA_CLASSES_DO_PART1(f) \
+  BASIC_JAVA_CLASSES_DO_PART2(f)
 
 // Interface to java.lang.String objects
@@ -71,7 +117,7 @@
   };
 
   static void compute_offsets();
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Instance creation
   static Handle create_from_unicode(jchar* unicode, int len, TRAPS);
@@ -224,7 +270,7 @@
   static oop create_basic_type_mirror(const char* basic_type_name, BasicType type, TRAPS);
 
   // Archiving
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
   static void archive_basic_type_mirrors(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
   static oop archive_mirror(Klass* k, TRAPS) NOT_CDS_JAVA_HEAP_RETURN_(NULL);
   static oop process_archived_mirror(Klass* k, oop mirror, oop archived_mirror, Thread *THREAD)
@@ -317,7 +363,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Instance creation
   static oop create();
@@ -419,7 +465,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // parent ThreadGroup
   static oop parent(oop java_thread_group);
@@ -500,7 +546,7 @@
   static void print_stack_usage(Handle stream);
 
   static void compute_offsets();
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Allocate space for backtrace (created but stack trace not filled in)
   static void allocate_backtrace(Handle throwable, TRAPS);
@@ -531,7 +577,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Accessors
   static jboolean override(oop reflect);
@@ -564,7 +610,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Allocation
   static Handle create(TRAPS);
@@ -635,7 +681,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Allocation
   static Handle create(TRAPS);
@@ -695,7 +741,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Allocation
   static Handle create(TRAPS);
@@ -752,7 +798,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Allocation
   static Handle create(TRAPS);
@@ -784,7 +830,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Allocation
   static Handle create(Handle loader, Handle module_name, TRAPS);
@@ -815,7 +861,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Allocation
   static Handle create(TRAPS);
@@ -839,7 +885,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   static int base_offset() {
     return _base_offset;
@@ -944,7 +990,7 @@
   static void set_clock(jlong value);
 
   static void compute_offsets();
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 };
 
 // Interface to java.lang.invoke.MethodHandle objects
@@ -961,7 +1007,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Accessors
   static oop type(oop mh);
@@ -992,7 +1038,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Accessors
   static oop member(oop mh);
@@ -1019,7 +1065,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Accessors
   static oop vmentry(oop lform);
@@ -1052,7 +1098,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   static int vmtarget_offset_in_bytes() { return _vmtarget_offset; }
@@ -1091,7 +1137,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
   // Accessors
   static oop clazz(oop mname);
   static void set_clazz(oop mname, oop clazz);
@@ -1156,7 +1202,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
   // Accessors
   static oop rtype(oop mt);
   static objArrayOop ptypes(oop mt);
@@ -1192,7 +1238,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
   // Accessors
   static oop target(         oop site);
   static void set_target(    oop site, oop target);
@@ -1226,7 +1272,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Accessors
   static DependencyContext vmdependencies(oop context);
@@ -1250,7 +1296,7 @@
   static void compute_offsets();
 
  public:
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   static oop create(objArrayHandle context, bool isPrivileged, Handle privileged_context, TRAPS);
 
   static bool is_authorized(Handle context);
@@ -1277,7 +1323,7 @@
 
  public:
   static void compute_offsets();
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   static ClassLoaderData* loader_data(oop loader);
   static ClassLoaderData* cmpxchg_loader_data(ClassLoaderData* new_data, oop loader, ClassLoaderData* expected_data);
@@ -1330,7 +1376,7 @@
   static bool has_security_manager();
 
   static void compute_offsets();
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Debugging
   friend class JavaClasses;
@@ -1368,7 +1414,7 @@
                                 int version, int bci, Symbol* name, TRAPS);
 
   static void compute_offsets();
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Debugging
   friend class JavaClasses;
@@ -1412,7 +1458,7 @@
   static void set_version(oop info, short value);
 
   static void compute_offsets();
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   static void to_stack_trace_element(Handle stackFrame, Handle stack_trace_element, TRAPS);
 
@@ -1434,7 +1480,7 @@
   static void set_mode(oop info, int value);
 
   static void compute_offsets();
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Debugging
   friend class JavaClasses;
@@ -1459,7 +1505,7 @@
   static void set_deflt(oop obj, bool val);
 
   static void compute_offsets();
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 
   // Debugging
   friend class JavaClasses;
@@ -1473,7 +1519,7 @@
  public:
   static int limit_offset();
   static void compute_offsets();
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 };
 
 class java_util_concurrent_locks_AbstractOwnableSynchronizer : AllStatic {
@@ -1482,7 +1528,16 @@
  public:
   static void compute_offsets();
   static oop get_owner_threadObj(oop obj);
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
+};
+
+class java_lang_Integer_IntegerCache: AllStatic {
+ private:
+  static int _archivedCache_offset;
+ public:
+  static int archivedCache_offset() { return _archivedCache_offset; }
+  static void compute_offsets();
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 };
 
 class jdk_internal_module_ArchivedModuleGraph: AllStatic {
@@ -1490,12 +1545,50 @@
   static int _archivedSystemModules_offset;
   static int _archivedModuleFinder_offset;
   static int _archivedMainModule_offset;
+  static int _archivedConfiguration_offset;
  public:
   static int archivedSystemModules_offset() { return _archivedSystemModules_offset; }
   static int archivedModuleFinder_offset() { return _archivedModuleFinder_offset; }
   static int archivedMainModule_offset() { return _archivedMainModule_offset; }
+  static int archivedConfiguration_offset() { return _archivedConfiguration_offset; }
   static void compute_offsets();
-  static void serialize(SerializeClosure* f) NOT_CDS_RETURN;
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
+};
+
+class java_lang_module_Configuration: AllStatic {
+ private:
+  static int _EMPTY_CONFIGURATION_offset;
+ public:
+  static int EMPTY_CONFIGURATION_offset() { return _EMPTY_CONFIGURATION_offset; }
+  static void compute_offsets();
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
+};
+
+class java_util_ImmutableCollections_ListN : AllStatic {
+ private:
+  static int _EMPTY_LIST_offset;
+ public:
+  static int EMPTY_LIST_offset() { return _EMPTY_LIST_offset; }
+  static void compute_offsets();
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
+};
+
+class java_util_ImmutableCollections_SetN : AllStatic {
+ private:
+  static int _EMPTY_SET_offset;
+ public:
+  static int EMPTY_SET_offset() { return _EMPTY_SET_offset; }
+  static void compute_offsets();
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
+};
+
+class java_util_ImmutableCollections_MapN : AllStatic {
+ private:
+  static int _EMPTY_MAP_offset;
+ public:
+  static int EMPTY_MAP_offset() { return _EMPTY_MAP_offset; }
+  static void compute_offsets();
+  static void serialize_offsets(SerializeClosure* f) NOT_CDS_RETURN;
 };
 
 // Use to declare fields that need to be injected into Java classes
@@ -1558,7 +1651,7 @@
   static void compute_hard_coded_offsets();
   static void compute_offsets();
   static void check_offsets() PRODUCT_RETURN;
-
+  static void serialize_offsets(SerializeClosure* soc) NOT_CDS_RETURN;
   static InjectedField* get_injected(Symbol* class_name, int* field_count);
 };
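Editor's aside: the BASIC_JAVA_CLASSES_DO lists above are a classic X-macro, one authoritative class list expanded with different per-class operations (DO_COMPUTE_OFFSETS and DO_SERIALIZE_OFFSETS in the .cpp hunk). A minimal self-contained sketch of the same technique, using toy class names rather than the HotSpot ones:

    #include <cstdio>

    // One authoritative list; `f` is applied to every entry.
    #define TOY_CLASSES_DO(f) \
      f(Alpha)                \
      f(Beta)                 \
      f(Gamma)

    // Expansion 1: define a compute_offsets()-style stub per class.
    #define DECLARE(k) void k##_compute_offsets() { std::puts(#k ": compute"); }
    TOY_CLASSES_DO(DECLARE)

    // Expansion 2: call every stub from one driver, mirroring
    // BASIC_JAVA_CLASSES_DO_PART2(DO_COMPUTE_OFFSETS) in the patch.
    #define CALL(k) k##_compute_offsets();
    void compute_all_offsets() {
      TOY_CLASSES_DO(CALL)
    }

    int main() { compute_all_offsets(); }

The payoff is the one in this changeset: adding a class (say, java_lang_Integer_IntegerCache) to the list automatically threads it through every expansion, so compute and serialize can no longer drift out of sync.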
--- a/src/hotspot/share/classfile/loaderConstraints.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/share/classfile/loaderConstraints.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -65,7 +65,7 @@
 
 LoaderConstraintEntry** LoaderConstraintTable::find_loader_constraint(
                                     Symbol* name, Handle loader) {
-
+  assert_lock_strong(SystemDictionary_lock);
   unsigned int hash = compute_hash(name);
   int index = hash_to_index(hash);
   LoaderConstraintEntry** pp = bucket_addr(index);
@@ -89,7 +89,7 @@
 
 
 void LoaderConstraintTable::purge_loader_constraints() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+  assert_locked_or_safepoint(SystemDictionary_lock);
   LogTarget(Info, class, loader, constraints) lt;
   // Remove unloaded entries from constraint table
   for (int index = 0; index < table_size(); index++) {
--- a/src/hotspot/share/classfile/moduleEntry.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/share/classfile/moduleEntry.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -436,7 +436,7 @@
 // Remove dead modules from all other alive modules' reads list.
 // This should only occur at class unloading.
 void ModuleEntryTable::purge_all_module_reads() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+  assert_locked_or_safepoint(Module_lock);
   for (int i = 0; i < table_size(); i++) {
     for (ModuleEntry* entry = bucket(i);
                       entry != NULL;
--- a/src/hotspot/share/classfile/packageEntry.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/share/classfile/packageEntry.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -302,7 +302,7 @@
 
 // Remove dead entries from all packages' exported list
 void PackageEntryTable::purge_all_package_exports() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+  assert_locked_or_safepoint(Module_lock);
   for (int i = 0; i < table_size(); i++) {
     for (PackageEntry* entry = bucket(i);
                        entry != NULL;
--- a/src/hotspot/share/classfile/resolutionErrors.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/share/classfile/resolutionErrors.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -100,7 +100,7 @@
 // RedefineClasses support - remove matching entry of a
 // constant pool that is going away
 void ResolutionErrorTable::delete_entry(ConstantPool* c) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+  assert_locked_or_safepoint(SystemDictionary_lock);
   for (int i = 0; i < table_size(); i++) {
     for (ResolutionErrorEntry** p = bucket_addr(i); *p != NULL; ) {
       ResolutionErrorEntry* entry = *p;
@@ -118,7 +118,7 @@
 
 // Remove unloaded entries from the table
 void ResolutionErrorTable::purge_resolution_errors() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+  assert_locked_or_safepoint(SystemDictionary_lock);
   for (int i = 0; i < table_size(); i++) {
     for (ResolutionErrorEntry** p = bucket_addr(i); *p != NULL; ) {
       ResolutionErrorEntry* entry = *p;
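Editor's aside: the recurring change in the last four hunks relaxes "must be at a safepoint" to "must hold the owning lock or be at a safepoint", so the purge work can also run concurrently under a lock instead of only inside a stop-the-world pause. Either condition gives the walker exclusive access. A toy sketch of such an assertion helper (illustrative only, not HotSpot's actual implementation):

    #include <cassert>

    // Stand-ins for the two conditions the weakened assertion accepts.
    static bool s_at_safepoint = false;  // all mutator threads stopped?
    static bool s_owns_lock    = true;   // caller holds the protecting lock?

    // Exclusive access comes from the safepoint or from the lock;
    // either one makes the table walk safe.
    void assert_locked_or_safepoint_sketch() {
      assert(s_at_safepoint || s_owns_lock);
    }

    int main() {
      assert_locked_or_safepoint_sketch();  // passes: lock held, no safepoint needed
    }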
--- a/src/hotspot/share/classfile/stringTable.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/share/classfile/stringTable.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -64,9 +64,9 @@
 
 // --------------------------------------------------------------------------
 StringTable* StringTable::_the_table = NULL;
-bool StringTable::_shared_string_mapped = false;
 CompactHashtable<oop, char> StringTable::_shared_table;
-bool StringTable::_alt_hash = false;
+volatile bool StringTable::_shared_string_mapped = false;
+volatile bool StringTable::_alt_hash = false;
 
 static juint murmur_seed = 0;
 
@@ -176,18 +176,18 @@
   }
 };
 
-static size_t ceil_pow_2(uintx val) {
+static size_t ceil_log2(size_t val) {
   size_t ret;
   for (ret = 1; ((size_t)1 << ret) < val; ++ret);
   return ret;
 }
 
 StringTable::StringTable() : _local_table(NULL), _current_size(0), _has_work(0),
-  _needs_rehashing(false), _weak_handles(NULL), _items(0), _uncleaned_items(0) {
+  _needs_rehashing(false), _weak_handles(NULL), _items_count(0), _uncleaned_items_count(0) {
   _weak_handles = new OopStorage("StringTable weak",
                                  StringTableWeakAlloc_lock,
                                  StringTableWeakActive_lock);
-  size_t start_size_log_2 = ceil_pow_2(StringTableSize);
+  size_t start_size_log_2 = ceil_log2(StringTableSize);
   _current_size = ((size_t)1) << start_size_log_2;
   log_trace(stringtable)("Start size: " SIZE_FORMAT " (" SIZE_FORMAT ")",
                          _current_size, start_size_log_2);
@@ -195,32 +195,31 @@
 }
 
 size_t StringTable::item_added() {
-  return Atomic::add((size_t)1, &(the_table()->_items));
+  return Atomic::add((size_t)1, &(the_table()->_items_count));
 }
 
-size_t StringTable::add_items_to_clean(size_t ndead) {
-  size_t total = Atomic::add((size_t)ndead, &(the_table()->_uncleaned_items));
+size_t StringTable::add_items_count_to_clean(size_t ndead) {
+  size_t total = Atomic::add((size_t)ndead, &(the_table()->_uncleaned_items_count));
   log_trace(stringtable)(
      "Uncleaned items:" SIZE_FORMAT " added: " SIZE_FORMAT " total:" SIZE_FORMAT,
-     the_table()->_uncleaned_items, ndead, total);
+     the_table()->_uncleaned_items_count, ndead, total);
   return total;
 }
 
 void StringTable::item_removed() {
-  Atomic::add((size_t)-1, &(the_table()->_items));
+  Atomic::add((size_t)-1, &(the_table()->_items_count));
 }
 
 double StringTable::get_load_factor() {
-  return (_items*1.0)/_current_size;
+  return (double)_items_count/_current_size;
 }
 
 double StringTable::get_dead_factor() {
-  return (_uncleaned_items*1.0)/_current_size;
+  return (double)_uncleaned_items_count/_current_size;
 }
 
-size_t StringTable::table_size(Thread* thread) {
-  return ((size_t)(1)) << _local_table->get_size_log2(thread != NULL ? thread
-                                                      : Thread::current());
+size_t StringTable::table_size() {
+  return ((size_t)1) << _local_table->get_size_log2(Thread::current());
 }
 
 void StringTable::trigger_concurrent_work() {
@@ -406,7 +405,7 @@
 
   // This is the serial case without ParState.
   // Just set the correct number and check for a cleaning phase.
-  the_table()->_uncleaned_items = stiac._count;
+  the_table()->_uncleaned_items_count = stiac._count;
   StringTable::the_table()->check_concurrent_work();
 
   if (processed != NULL) {
@@ -433,7 +432,7 @@
   _par_state_string->weak_oops_do(&stiac, &dnc);
 
   // Accumulate the dead strings.
-  the_table()->add_items_to_clean(stiac._count);
+  the_table()->add_items_count_to_clean(stiac._count);
 
   *processed = (int) stiac._count_total;
   *removed = (int) stiac._count;
@@ -465,7 +464,7 @@
     }
   }
   gt.done(jt);
-  _current_size = table_size(jt);
+  _current_size = table_size();
   log_debug(stringtable)("Grown to size:" SIZE_FORMAT, _current_size);
 }
 
@@ -843,7 +842,7 @@
   assert(MetaspaceShared::is_heap_object_archiving_allowed(), "must be");
 
   _shared_table.reset();
-  int num_buckets = the_table()->_items / SharedSymbolTableBucketSize;
+  int num_buckets = the_table()->_items_count / SharedSymbolTableBucketSize;
   // calculation of num_buckets can result in zero buckets, we need at least one
   CompactStringTableWriter writer(num_buckets > 1 ? num_buckets : 1,
                                   &MetaspaceShared::stats()->string);
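Editor's aside: the rename from ceil_pow_2 to ceil_log2 above is correct, since the helper really computes a ceiling log2 (the smallest `ret` with `1 << ret >= val`), not a power of two. A quick self-contained check of the behavior (same loop as the patched helper):

    #include <cassert>
    #include <cstddef>

    static size_t ceil_log2(size_t val) {
      size_t ret;
      for (ret = 1; ((size_t)1 << ret) < val; ++ret);
      return ret;
    }

    int main() {
      assert(ceil_log2(2)    == 1);
      assert(ceil_log2(1000) == 10);  // 2^10 = 1024 >= 1000
      assert(ceil_log2(1024) == 10);
      assert(ceil_log2(1025) == 11);
      // A start size of 1 << ceil_log2(StringTableSize) therefore rounds the
      // requested table size up to the next power of two.
    }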
--- a/src/hotspot/share/classfile/stringTable.hpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/share/classfile/stringTable.hpp	Thu Aug 16 22:05:57 2018 +0200
@@ -58,21 +58,22 @@
   static StringTable* _the_table;
   // Shared string table
   static CompactHashtable<oop, char> _shared_table;
-  static bool _shared_string_mapped;
-  static bool _alt_hash;
+  static volatile bool _shared_string_mapped;
+  static volatile bool _alt_hash;
+
 private:
 
-  // Set if one bucket is out of balance due to hash algorithm deficiency
   StringTableHash* _local_table;
   size_t _current_size;
   volatile bool _has_work;
+  // Set if one bucket is out of balance due to hash algorithm deficiency
   volatile bool _needs_rehashing;
 
   OopStorage* _weak_handles;
 
-  volatile size_t _items;
+  volatile size_t _items_count;
   DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
-  volatile size_t _uncleaned_items;
+  volatile size_t _uncleaned_items_count;
   DEFINE_PAD_MINUS_SIZE(2, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
 
   double get_load_factor();
@@ -83,7 +84,7 @@
 
   static size_t item_added();
   static void item_removed();
-  size_t add_items_to_clean(size_t ndead);
+  size_t add_items_count_to_clean(size_t ndead);
 
   StringTable();
 
@@ -100,7 +101,7 @@
  public:
   // The string table
   static StringTable* the_table() { return _the_table; }
-  size_t table_size(Thread* thread = NULL);
+  size_t table_size();
 
   static OopStorage* weak_storage() { return the_table()->_weak_handles; }
 
@@ -116,7 +117,7 @@
   // Must be called before a parallel walk where strings might die.
   static void reset_dead_counter() {
-    the_table()->_uncleaned_items = 0;
+    the_table()->_uncleaned_items_count = 0;
   }
   // After the parallel walk this method must be called to trigger
   // cleaning. Note it might trigger a resize instead.
@@ -127,7 +128,7 @@
   // If GC uses ParState directly it should add the number of cleared
   // strings to this method.
   static void inc_dead_counter(size_t ndead) {
-    the_table()->add_items_to_clean(ndead);
+    the_table()->add_items_count_to_clean(ndead);
   }
 
   // Delete pointers to otherwise-unreachable objects.
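Editor's aside: the DEFINE_PAD_MINUS_SIZE lines after each counter above are false-sharing padding. _items_count and _uncleaned_items_count are updated atomically by different threads, so each is kept on its own cache line. A portable C++ sketch of the same idea, using alignas instead of HotSpot's padding macro (the 64-byte line size is an assumption; HotSpot uses DEFAULT_CACHE_LINE_SIZE):

    #include <atomic>
    #include <cstddef>

    struct Counters {
      // 64-byte alignment keeps each hot counter on a separate cache line,
      // so concurrent updates to one do not invalidate the other's line.
      alignas(64) std::atomic<size_t> items_count{0};
      alignas(64) std::atomic<size_t> uncleaned_items_count{0};
    };

    int main() {
      Counters c;
      c.items_count.fetch_add(1);            // writer thread A
      c.uncleaned_items_count.fetch_add(1);  // writer thread B, no false sharing
    }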
--- a/src/hotspot/share/classfile/symbolTable.cpp	Thu Aug 09 22:06:11 2018 +0200
+++ b/src/hotspot/share/classfile/symbolTable.cpp	Thu Aug 16 22:05:57 2018 +0200
@@ -27,46 +27,178 @@
 #include "classfile/compactHashtable.inline.hpp"
 #include "classfile/javaClasses.hpp"
 #include "classfile/symbolTable.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "gc/shared/collectedHeap.inline.hpp"
 #include "memory/allocation.inline.hpp"
-#include "memory/filemap.hpp"
 #include "memory/metaspaceClosure.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/safepointVerifiers.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
+#include "runtime/timerTrace.hpp"
 #include "services/diagnosticCommand.hpp"
-#include "utilities/hashtable.inline.hpp"
+#include "utilities/concurrentHashTable.inline.hpp"
+#include "utilities/concurrentHashTableTasks.inline.hpp"
+
+// We used to not resize at all, so let's be conservative
+// and not set it too short before we decide to resize,
+// to match previous startup behavior
+#define PREF_AVG_LIST_LEN 8
+// 2^17 (131,072) is max size, which is about 6.5 times as large
+// as the previous table size (used to be 20,011),
+// which never resized
+#define END_SIZE 17
+// If a chain gets to 100 something might be wrong
+#define REHASH_LEN 100
+// We only get a chance to check whether we need
+// to clean infrequently (on class unloading),
+// so if we have even one dead entry then mark table for cleaning
+#define CLEAN_DEAD_HIGH_WATER_MARK 0.0
+
+#define ON_STACK_BUFFER_LENGTH 128
 
 // --------------------------------------------------------------------------
-// the number of buckets a thread claims
-const int ClaimChunkSize = 32;
-
 SymbolTable* SymbolTable::_the_table = NULL;
+CompactHashtable<Symbol*, char> SymbolTable::_shared_table;
+volatile bool SymbolTable::_alt_hash = false;
+volatile bool SymbolTable::_lookup_shared_first = false;
 // Static arena for symbols that are not deallocated
 Arena* SymbolTable::_arena = NULL;
-bool SymbolTable::_needs_rehashing = false;
-bool SymbolTable::_lookup_shared_first = false;
 
-CompactHashtable<Symbol*, char> SymbolTable::_shared_table;
+static juint murmur_seed = 0;
 
-Symbol* SymbolTable::allocate_symbol(const u1* name, int len, bool c_heap, TRAPS) {
+static inline void log_trace_symboltable_helper(Symbol* sym, const char* msg) {
+#ifndef PRODUCT
+  ResourceMark rm;
+  log_trace(symboltable)("%s [%s]", msg, sym->as_quoted_ascii());
+#endif // PRODUCT
+}
+
+// Pick hashing algorithm.
+static uintx hash_symbol(const char* s, int len, bool useAlt) {
+  return useAlt ?
+  AltHashing::murmur3_32(murmur_seed, (const jbyte*)s, len) :
+  java_lang_String::hash_code((const jbyte*)s, len);
+}
+
+static uintx hash_shared_symbol(const char* s, int len) {
+  return java_lang_String::hash_code((const jbyte*)s, len);
+}
+
+class SymbolTableConfig : public SymbolTableHash::BaseConfig {
+private:
+public:
+  static uintx get_hash(Symbol* const& value, bool* is_dead) {
+    *is_dead = (value->refcount() == 0);
+    if (*is_dead) {
+      return 0;
+    } else {
+      return hash_symbol((const char*)value->bytes(), value->utf8_length(), SymbolTable::_alt_hash);
+    }
+  }
+  // We use default allocation/deallocation but counted
+  static void* allocate_node(size_t size, Symbol* const& value) {
+    SymbolTable::item_added();
+    return SymbolTableHash::BaseConfig::allocate_node(size, value);
+  }
+  static void free_node(void* memory, Symbol* const& value) {
+    // We get here either because #1 some threads lost a race
+    // to insert a newly created Symbol, or #2 we are freeing
+    // a symbol during normal cleanup deletion.
+    // If #1, then the symbol can be a permanent (refcount==PERM_REFCOUNT),
+    // or regular newly created one but with refcount==0 (see SymbolTableCreateEntry)
+    // If #2, then the symbol must have refcount==0
+    assert((value->refcount() == PERM_REFCOUNT) || (value->refcount() == 0),
+           "refcount %d", value->refcount());
+    SymbolTable::delete_symbol(value);
+    SymbolTableHash::BaseConfig::free_node(memory, value);
+    SymbolTable::item_removed();
+  }
+};
+
+static size_t ceil_log2(size_t value) {
+  size_t ret;
+  for (ret = 1; ((size_t)1 << ret) < value; ++ret);
+  return ret;
+}
+
+SymbolTable::SymbolTable() :
+  _symbols_removed(0), _symbols_counted(0), _local_table(NULL),
+  _current_size(0), _has_work(0), _needs_rehashing(false),
+  _items_count(0), _uncleaned_items_count(0) {
+
+  size_t start_size_log_2 = ceil_log2(SymbolTableSize);
+  _current_size = ((size_t)1) << start_size_log_2;
+  log_trace(symboltable)("Start size: " SIZE_FORMAT " (" SIZE_FORMAT ")",
+                         _current_size, start_size_log_2);
+  _local_table = new SymbolTableHash(start_size_log_2, END_SIZE, REHASH_LEN);
+}
+
+void SymbolTable::delete_symbol(Symbol* sym) {
+  if (sym->refcount() == PERM_REFCOUNT) {
+    MutexLocker ml(SymbolTable_lock); // Protect arena
+    // Deleting permanent symbol should not occur very often (insert race condition),
+    // so log it.
+    log_trace_symboltable_helper(sym, "Freeing permanent symbol");
+    if (!arena()->Afree(sym, sym->size())) {
+      log_trace_symboltable_helper(sym, "Leaked permanent symbol");
+    }
+  } else {
+    delete sym;
+  }
+}
+
+void SymbolTable::item_added() {
+  Atomic::inc(&(SymbolTable::the_table()->_items_count));
+}
+
+void SymbolTable::set_item_clean_count(size_t ncl) {
+  Atomic::store(ncl, &(SymbolTable::the_table()->_uncleaned_items_count));
+  log_trace(symboltable)("Set uncleaned items:" SIZE_FORMAT, SymbolTable::the_table()->_uncleaned_items_count);
+}
+
+void SymbolTable::mark_item_clean_count() {
+  if (Atomic::cmpxchg((size_t)1, &(SymbolTable::the_table()->_uncleaned_items_count), (size_t)0) == 0) { // only mark if unset
+    log_trace(symboltable)("Marked uncleaned items:" SIZE_FORMAT, SymbolTable::the_table()->_uncleaned_items_count);
+  }
+}
+
+void SymbolTable::item_removed() {
+  Atomic::inc(&(SymbolTable::the_table()->_symbols_removed));
+  Atomic::dec(&(SymbolTable::the_table()->_items_count));
+}
+
+double SymbolTable::get_load_factor() {
+  return (double)_items_count/_current_size;
+}
+
+double SymbolTable::get_dead_factor() {
+  return (double)_uncleaned_items_count/_current_size;
+}
+
+size_t SymbolTable::table_size() {
+  return ((size_t)1) << _local_table->get_size_log2(Thread::current());
+}
+
+void SymbolTable::trigger_concurrent_work() {
+  MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
+  SymbolTable::the_table()->_has_work = true;
+  Service_lock->notify_all();
+}
+
+Symbol* SymbolTable::allocate_symbol(const char* name, int len, bool c_heap, TRAPS) {
   assert (len <= Symbol::max_length(), "should be checked by caller");
 
   Symbol* sym;
-
   if (DumpSharedSpaces) {
     c_heap = false;
   }
   if (c_heap) {
     // refcount starts as 1
-    sym = new (len, THREAD) Symbol(name, len, 1);
+    sym = new (len, THREAD) Symbol((const u1*)name, len, 1);
     assert(sym != NULL, "new should call vm_exit_out_of_memory if C_HEAP is exhausted");
   } else {
     // Allocate to global arena
-    sym = new (len, arena(), THREAD) Symbol(name, len, PERM_REFCOUNT);
+    MutexLocker ml(SymbolTable_lock); // Protect arena
+    sym = new (len, arena(), THREAD) Symbol((const u1*)name, len, PERM_REFCOUNT);
   }
   return sym;
 }
@@ -80,314 +212,176 @@
   }
 }
 
+class SymbolsDo : StackObj {
+  SymbolClosure *_cl;
+public:
+  SymbolsDo(SymbolClosure *cl) : _cl(cl) {}
+  bool operator()(Symbol** value) {
+    assert(value != NULL, "expected valid value");
+    assert(*value != NULL, "value should point to a symbol");
+    _cl->do_symbol(value);
+    return true;
+  };
+};
+
 // Call function for all symbols in the symbol table.
 void SymbolTable::symbols_do(SymbolClosure *cl) {
   // all symbols from shared table
   _shared_table.symbols_do(cl);
 
   // all symbols from the dynamic table
-  const int n = the_table()->table_size();
-  for (int i = 0; i < n; i++) {
-    for (HashtableEntry<Symbol*, mtSymbol>* p = the_table()->bucket(i);
-         p != NULL;
-         p = p->next()) {
-      cl->do_symbol(p->literal_addr());
-    }
+  SymbolsDo sd(cl);
+  if (!SymbolTable::the_table()->_local_table->try_scan(Thread::current(), sd)) {
+    log_info(stringtable)("symbols_do unavailable at this moment");
   }
 }
 
+class MetaspacePointersDo : StackObj {
+  MetaspaceClosure *_it;
+public:
+  MetaspacePointersDo(MetaspaceClosure *it) : _it(it) {}
+  bool operator()(Symbol** value) {
+    assert(value != NULL, "expected valid value");
+    assert(*value != NULL, "value should point to a symbol");
+    _it->push(value);
+    return true;
+  };
+};
+
 void SymbolTable::metaspace_pointers_do(MetaspaceClosure* it) {
   assert(DumpSharedSpaces, "called only during dump time");
-  const int n = the_table()->table_size();
-  for (int i = 0; i < n; i++) {
-    for (HashtableEntry<Symbol*, mtSymbol>* p = the_table()->bucket(i);
-         p != NULL;
-         p = p->next()) {
-      it->push(p->literal_addr());
-    }
-  }
+  MetaspacePointersDo mpd(it);
+  SymbolTable::the_table()->_local_table->do_scan(Thread::current(), mpd);
 }
 
-int SymbolTable::_symbols_removed = 0;
-int SymbolTable::_symbols_counted = 0;
-volatile int SymbolTable::_parallel_claimed_idx = 0;
-
-void SymbolTable::buckets_unlink(int start_idx, int end_idx, BucketUnlinkContext* context) {
-  for (int i = start_idx; i < end_idx; ++i) {
-    HashtableEntry<Symbol*, mtSymbol>** p = the_table()->bucket_addr(i);
-    HashtableEntry<Symbol*, mtSymbol>* entry = the_table()->bucket(i);
-    while (entry != NULL) {
-      // Shared entries are normally at the end of the bucket and if we run into
-      // a shared entry, then there is nothing more to remove. However, if we
-      // have rehashed the table, then the shared entries are no longer at the
-      // end of the bucket.
-      if (entry->is_shared() && !use_alternate_hashcode()) {
-        break;
-      }
-      Symbol* s = entry->literal();
-      context->_num_processed++;
-      assert(s != NULL, "just checking");
-      // If reference count is zero, remove.
-      if (s->refcount() == 0) {
-        assert(!entry->is_shared(), "shared entries should be kept live");
-        delete s;
-        *p = entry->next();
-        context->free_entry(entry);
-      } else {
-        p = entry->next_addr();
-      }
-      // get next entry
-      entry = (HashtableEntry<Symbol*, mtSymbol>*)HashtableEntry<Symbol*, mtSymbol>::make_ptr(*p);
-    }
-  }
-}
-
-// Remove unreferenced symbols from the symbol table
-// This is done late during GC.
-void SymbolTable::unlink(int* processed, int* removed) {
-  BucketUnlinkContext context;
-  buckets_unlink(0, the_table()->table_size(), &context);
-  _the_table->bulk_free_entries(&context);
-  *processed = context._num_processed;
-  *removed = context._num_removed;
-
-  _symbols_removed = context._num_removed;
-  _symbols_counted = context._num_processed;
-}
-
-void SymbolTable::possibly_parallel_unlink(int* processed, int* removed) {
-  const int limit = the_table()->table_size();
-
-  BucketUnlinkContext context;
-  for (;;) {
-    // Grab next set of buckets to scan
-    int start_idx = Atomic::add(ClaimChunkSize, &_parallel_claimed_idx) - ClaimChunkSize;
-    if (start_idx >= limit) {
-      // End of table
-      break;
-    }
-
-    int end_idx = MIN2(limit, start_idx + ClaimChunkSize);
-    buckets_unlink(start_idx, end_idx, &context);
-  }
-
-  _the_table->bulk_free_entries(&context);
-  *processed = context._num_processed;
-  *removed = context._num_removed;
-
-  Atomic::add(context._num_processed, &_symbols_counted);
-  Atomic::add(context._num_removed, &_symbols_removed);
-}
-
-// Create a new table and using alternate hash code, populate the new table
-// with the existing strings.  Set flag to use the alternate hash code afterwards.
-void SymbolTable::rehash_table() {
-  if (DumpSharedSpaces) {
-    tty->print_cr("Warning: rehash_table should not be called while dumping archive");
-    return;
-  }
-
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-  // This should never happen with -Xshare:dump but it might in testing mode.
-  if (DumpSharedSpaces) return;
-
-  // Create a new symbol table
-  SymbolTable* new_table = new SymbolTable();
-
-  the_table()->move_to(new_table);
-
-  // Delete the table and buckets (entries are reused in new table).
-  delete _the_table;
-  // Don't check if we need rehashing until the table gets unbalanced again.
-  // Then rehash with a new global seed.
-  _needs_rehashing = false;
-  _the_table = new_table;
-}
-
-// Lookup a symbol in a bucket.
-
-Symbol* SymbolTable::lookup_dynamic(int index, const char* name,
+Symbol* SymbolTable::lookup_dynamic(const char* name,
                                     int len, unsigned int hash) {
-  int count = 0;
-  for (HashtableEntry<Symbol*, mtSymbol>* e = bucket(index); e != NULL; e = e->next()) {
-    count++;  // count all entries in this bucket, not just ones with same hash
-    if (e->hash() == hash) {
-      Symbol* sym = e->literal();
-      // Skip checking already dead symbols in the bucket.
-      if (sym->refcount() == 0) {
-        count--;   // Don't count this symbol towards rehashing.
-      } else if (sym->equals(name, len)) {
-        if (sym->try_increment_refcount()) {
-          // something is referencing this symbol now.
-          return sym;
-        } else {
-          count--;   // don't count this symbol.
-        }
-      }
-    }
-  }
-  // If the bucket size is too deep check if this hash code is insufficient.
-  if (count >= rehash_count && !needs_rehashing()) {
-    _needs_rehashing = check_rehash_table(count);
-  }
-  return NULL;
+  Symbol* sym = SymbolTable::the_table()->do_lookup(name, len, hash);
+  assert((sym == NULL) || sym->refcount() != 0, "refcount must not be zero");
+  return sym;
 }
 
 Symbol* SymbolTable::lookup_shared(const char* name,
                                    int len, unsigned int hash) {
-  if (use_alternate_hashcode()) {
-    // hash_code parameter may use alternate hashing algorithm but the shared table
-    // always uses the same original hash code.
-    hash = hash_shared_symbol(name, len);
+  if (!_shared_table.empty()) {
+    if (SymbolTable::_alt_hash) {
+      // hash_code parameter may use alternate hashing algorithm but the shared table
+      // always uses the same original hash code.
+      hash = hash_shared_symbol(name, len);
+    }
+    return _shared_table.lookup(name, hash, len);
+  } else {
+    return NULL;
   }
-  return _shared_table.lookup(name, hash, len);
 }
 
-Symbol* SymbolTable::lookup(int index, const char* name,
+Symbol* SymbolTable::lookup_common(const char* name,
                             int len, unsigned int hash) {
   Symbol* sym;
   if (_lookup_shared_first) {
     sym = lookup_shared(name, len, hash);
-    if (sym != NULL) {
-      return sym;
+    if (sym == NULL) {
+      _lookup_shared_first = false;
+      sym = lookup_dynamic(name, len, hash);
     }
-    _lookup_shared_first = false;
-    return lookup_dynamic(index, name, len, hash);
   } else {
-    sym = lookup_dynamic(index, name, len, hash);
-    if (sym != NULL) {
-      return sym;
+    sym = lookup_dynamic(name, len, hash);
+    if (sym == NULL) {
+      sym = lookup_shared(name, len, hash);
+      if (sym != NULL) {
+        _lookup_shared_first = true;
+      }
     }
-    sym = lookup_shared(name, len, hash);
-    if (sym != NULL) {
-      _lookup_shared_first = true;
-    }
-    return sym;
   }
-}
-
-u4 SymbolTable::encode_shared(Symbol* sym) {
-  assert(DumpSharedSpaces, "called only during dump time");
-  uintx base_address = uintx(MetaspaceShared::shared_rs()->base());
-  uintx offset = uintx(sym) - base_address;
-  assert(offset < 0x7fffffff, "sanity");
-  return u4(offset);
-}
-
-Symbol* SymbolTable::decode_shared(u4 offset) {
-  assert(!DumpSharedSpaces, "called only during runtime");
-  uintx base_address = _shared_table.base_address();
-  Symbol* sym = (Symbol*)(base_address + offset);
-
-#ifndef PRODUCT
-  const char* s = (const char*)sym->bytes();
-  int len = sym->utf8_length();
-  unsigned int hash = hash_symbol(s, len);
-  assert(sym == lookup_shared(s, len, hash), "must be shared symbol");
-#endif
-  return sym;
+  return sym;
 }
 
-// Pick hashing algorithm.
-unsigned int SymbolTable::hash_symbol(const char* s, int len) {
-  return use_alternate_hashcode() ?
-           AltHashing::murmur3_32(seed(), (const jbyte*)s, len) :
-           java_lang_String::hash_code((const jbyte*)s, len);
-}
-
-unsigned int SymbolTable::hash_shared_symbol(const char* s, int len) {
-  return java_lang_String::hash_code((const jbyte*)s, len);
-}
-
-
-// We take care not to be blocking while holding the
-// SymbolTable_lock. Otherwise, the system might deadlock, since the
-// symboltable is used during compilation (VM_thread) The lock free
-// synchronization is simplified by the fact that we do not delete
-// entries in the symbol table during normal execution (only during
-// safepoints).
-
 Symbol* SymbolTable::lookup(const char* name, int len, TRAPS) {
-  unsigned int hashValue = hash_symbol(name, len);
-  int index = the_table()->hash_to_index(hashValue);
-
-  Symbol* s = the_table()->lookup(index, name, len, hashValue);
-
-  // Found
-  if (s != NULL) return s;
-
-  // Grab SymbolTable_lock first.
-  MutexLocker ml(SymbolTable_lock, THREAD);
-
-  // Otherwise, add to symbol to table
-  return the_table()->basic_add(index, (u1*)name, len, hashValue, true, THREAD);
+  unsigned int hash = hash_symbol(name, len, SymbolTable::_alt_hash);
+  Symbol* sym = SymbolTable::the_table()->lookup_common(name, len, hash);
+  if (sym == NULL) {
+    sym = SymbolTable::the_table()->do_add_if_needed(name, len, hash, true, CHECK_NULL);
+  }
+  assert(sym->refcount() != 0, "lookup should have incremented the count");
+  assert(sym->equals(name, len), "symbol must be properly initialized");
+  return sym;
 }
 
 Symbol* SymbolTable::lookup(const Symbol* sym, int begin, int end, TRAPS) {
-  char* buffer;
-  int index, len;
-  unsigned int hashValue;
-  char* name;
-  {
-    debug_only(NoSafepointVerifier nsv;)
-
-    name = (char*)sym->base() + begin;
-    len = end - begin;
-    hashValue = hash_symbol(name, len);
-    index = the_table()->hash_to_index(hashValue);
-    Symbol* s = the_table()->lookup(index, name, len, hashValue);
-
-    // Found
-    if (s != NULL) return s;
+  assert(sym->refcount() != 0, "require a valid symbol");
+  const char* name = (const char*)sym->base() + begin;
+  int len = end - begin;
+  unsigned int hash = hash_symbol(name, len, SymbolTable::_alt_hash);
+  Symbol* found = SymbolTable::the_table()->lookup_common(name, len, hash);
+  if (found == NULL) {
+    found = SymbolTable::the_table()->do_add_if_needed(name, len, hash, true, THREAD);
   }
-
-  // Otherwise, add to symbol to table. Copy to a C string first.
-  char stack_buf[128];
-  ResourceMark rm(THREAD);
-  if (len <= 128) {
-    buffer = stack_buf;
-  } else {
-    buffer = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, char, len);
-  }
-  for (int i=0; i<len; i++) {
-    buffer[i] = name[i];
-  }
-  // Make sure there is no safepoint in the code above since name can't move.
-  // We can't include the code in NoSafepointVerifier because of the
-  // ResourceMark.
-
-  // Grab SymbolTable_lock first.
-  MutexLocker ml(SymbolTable_lock, THREAD);
-
-  return the_table()->basic_add(index, (u1*)buffer, len, hashValue, true, THREAD);
+  return found;
 }
 
-Symbol* SymbolTable::lookup_only(const char* name, int len,
-                                 unsigned int& hash) {
-  hash = hash_symbol(name, len);
-  int index = the_table()->hash_to_index(hash);
+class SymbolTableLookup : StackObj {
+private:
+  Thread* _thread;
+  uintx _hash;
+  int _len;
+  const char* _str;
+public:
+  SymbolTableLookup(Thread* thread, const char* key, int len, uintx hash)
+  : _thread(thread), _hash(hash), _len(len), _str(key) {}
+  uintx get_hash() const {
+    return _hash;
+  }
+  bool equals(Symbol** value, bool* is_dead) {
+    assert(value != NULL, "expected valid value");
+    assert(*value != NULL, "value should point to a symbol");
+    Symbol *sym = *value;
+    if (sym->equals(_str, _len)) {
+      if (sym->try_increment_refcount()) {
+        // something is referencing this symbol now.
+        return true;
+      } else {
+        assert(sym->refcount() == 0, "expected dead symbol");
+        *is_dead = true;
+        return false;
+      }
+    } else {
+      *is_dead = (sym->refcount() == 0);
+      return false;
+    }
+  }
+};
 
-  Symbol* s = the_table()->lookup(index, name, len, hash);
-  return s;
+class SymbolTableGet : public StackObj {
+  Symbol* _return;
+public:
+  SymbolTableGet() : _return(NULL) {}
+  void operator()(Symbol** value) {
+    assert(value != NULL, "expected valid value");
+    assert(*value != NULL, "value should point to a symbol");
+    _return = *value;
+  }
+  Symbol* get_res_sym() {
+    return _return;
+  }
+};
+
+Symbol* SymbolTable::do_lookup(const char* name, int len, uintx hash) {
+  Thread* thread = Thread::current();
+  SymbolTableLookup lookup(thread, name, len, hash);
+  SymbolTableGet stg;
+  bool rehash_warning = false;
+  _local_table->get(thread, lookup, stg, &rehash_warning);
+  if (rehash_warning) {
+    _needs_rehashing = true;
+  }
+  Symbol* sym = stg.get_res_sym();
+  assert((sym == NULL) || sym->refcount() != 0, "found dead symbol");
+  return sym;
 }
 
-// Look up the address of the literal in the SymbolTable for this Symbol*
-// Do not create any new symbols
-// Do not increment the reference count to keep this alive
-Symbol** SymbolTable::lookup_symbol_addr(Symbol* sym){
-  unsigned int hash = hash_symbol((char*)sym->bytes(), sym->utf8_length());
-  int index = the_table()->hash_to_index(hash);
-
-  for (HashtableEntry<Symbol*, mtSymbol>* e = the_table()->bucket(index); e != NULL; e = e->next()) {
-    if (e->hash() == hash) {
-      Symbol* literal_sym = e->literal();
-      if (sym == literal_sym) {
-        return e->literal_addr();
-      }
-    }
-  }
-  return NULL;
+Symbol* SymbolTable::lookup_only(const char* name, int len, unsigned int& hash) {
+  hash = hash_symbol(name, len, SymbolTable::_alt_hash);
+  return SymbolTable::the_table()->lookup_common(name, len, hash);
 }
 
 // Suggestion: Push unicode-based lookup all the way into the hashing
@@ -395,14 +389,14 @@
 // an actual new Symbol* is created.
 Symbol* SymbolTable::lookup_unicode(const jchar* name, int utf16_length, TRAPS) {
   int utf8_length = UNICODE::utf8_length((jchar*) name, utf16_length);
-  char stack_buf[128];
+  char stack_buf[ON_STACK_BUFFER_LENGTH];
   if (utf8_length < (int) sizeof(stack_buf)) {
     char* chars = stack_buf;
     UNICODE::convert_to_utf8(name, utf16_length, chars);
     return lookup(chars, utf8_length, THREAD);
   } else {
     ResourceMark rm(THREAD);
-    char* chars = NEW_RESOURCE_ARRAY(char, utf8_length + 1);;
+    char* chars = NEW_RESOURCE_ARRAY(char, utf8_length + 1);
    UNICODE::convert_to_utf8(name, utf16_length, chars);
     return lookup(chars, utf8_length, THREAD);
   }
@@ -411,214 +405,243 @@
 Symbol* SymbolTable::lookup_only_unicode(const jchar* name, int utf16_length,
                                          unsigned int& hash) {
   int utf8_length = UNICODE::utf8_length((jchar*) name, utf16_length);
-  char stack_buf[128];
+  char stack_buf[ON_STACK_BUFFER_LENGTH];
   if (utf8_length < (int) sizeof(stack_buf)) {
     char* chars = stack_buf;
     UNICODE::convert_to_utf8(name, utf16_length, chars);
     return lookup_only(chars, utf8_length, hash);
   } else {
     ResourceMark rm;
-    char* chars = NEW_RESOURCE_ARRAY(char, utf8_length + 1);;
+    char* chars = NEW_RESOURCE_ARRAY(char, utf8_length + 1);
     UNICODE::convert_to_utf8(name, utf16_length, chars);
     return lookup_only(chars, utf8_length, hash);
   }
 }
 
 void SymbolTable::add(ClassLoaderData* loader_data, const constantPoolHandle& cp,
-                      int names_count,
-                      const char** names, int* lengths, int* cp_indices,
-                      unsigned int* hashValues, TRAPS) {
-  // Grab SymbolTable_lock first.
-  MutexLocker ml(SymbolTable_lock, THREAD);
+                      int names_count, const char** names, int* lengths,
+                      int* cp_indices, unsigned int* hashValues, TRAPS) {
+  bool c_heap = !loader_data->is_the_null_class_loader_data();
+  for (int i = 0; i < names_count; i++) {
+    const char *name = names[i];
+    int len = lengths[i];
+    unsigned int hash = hashValues[i];
+    Symbol* sym = SymbolTable::the_table()->lookup_common(name, len, hash);
+    if (sym == NULL) {
+      sym = SymbolTable::the_table()->do_add_if_needed(name, len, hash, c_heap, CHECK);
+    }
+    assert(sym->refcount() != 0, "lookup should have incremented the count");
+    cp->symbol_at_put(cp_indices[i], sym);
+  }
+}
 
-  SymbolTable* table = the_table();
-  bool added = table->basic_add(loader_data, cp, names_count, names, lengths,
-                                cp_indices, hashValues, CHECK);
-  if (!added) {
-    // do it the hard way
-    for (int i=0; i<names_count; i++) {
-      int index = table->hash_to_index(hashValues[i]);
-      bool c_heap = !loader_data->is_the_null_class_loader_data();
-      Symbol* sym = table->basic_add(index, (u1*)names[i], lengths[i], hashValues[i], c_heap, CHECK);
-      cp->symbol_at_put(cp_indices[i], sym);
+class SymbolTableCreateEntry : public StackObj {
+private:
+  Thread*     _thread;
+  const char* _name;
+  int         _len;
+  bool        _heap;
+  Symbol*     _return;
+  Symbol*     _created;
+
+  void assert_for_name(Symbol* sym, const char* where) const {
+#ifdef ASSERT
+    assert(sym->utf8_length() == _len, "%s [%d,%d]", where, sym->utf8_length(), _len);
+    for (int i = 0; i < _len; i++) {
+      assert(sym->byte_at(i) == _name[i],
+             "%s [%d,%d,%d]", where, i, sym->byte_at(i), _name[i]);
+    }
+#endif
+  }
+
+public:
+  SymbolTableCreateEntry(Thread* thread, const char* name, int len, bool heap)
+  : _thread(thread), _name(name) , _len(len), _heap(heap), _return(NULL) , _created(NULL) {
+    assert(_name != NULL, "expected valid name");
+  }
+  Symbol* operator()() {
+    _created = SymbolTable::the_table()->allocate_symbol(_name, _len, _heap, _thread);
+    assert(_created != NULL, "expected created symbol");
+    assert_for_name(_created, "operator()()");
+    assert(_created->equals(_name, _len),
+           "symbol must be properly initialized [%p,%d,%d]", _name, _len, (int)_heap);
+    return _created;
+  }
+  void operator()(bool inserted, Symbol** value) {
+    assert(value != NULL, "expected valid value");
+    assert(*value != NULL, "value should point to a symbol");
+    if (!inserted && (_created != NULL)) {
+      // We created our symbol, but someone else inserted
+      // theirs first, so ours will be destroyed.
+      // Since symbols are created with refcount of 1,
+      // we must decrement it here to 0 to delete,
+      // unless it's a permanent one.
+ if (_created->refcount() != PERM_REFCOUNT) { + assert(_created->refcount() == 1, "expected newly created symbol"); + _created->decrement_refcount(); + assert(_created->refcount() == 0, "expected dead symbol"); + } + } + _return = *value; + assert_for_name(_return, "operator()"); + } + Symbol* get_new_sym() const { + assert_for_name(_return, "get_new_sym"); + return _return; + } +}; + +Symbol* SymbolTable::do_add_if_needed(const char* name, int len, uintx hash, bool heap, TRAPS) { + SymbolTableLookup lookup(THREAD, name, len, hash); + SymbolTableCreateEntry stce(THREAD, name, len, heap); + bool rehash_warning = false; + bool clean_hint = false; + _local_table->get_insert_lazy(THREAD, lookup, stce, stce, &rehash_warning, &clean_hint); + if (rehash_warning) { + _needs_rehashing = true; + } + if (clean_hint) { + // we just found out that there is a dead item, + // which we were unable to clean right now, + // but we have no way of telling whether it's + // been previously counted or not, so mark + // it only if no other items were found yet + mark_item_clean_count(); + check_concurrent_work(); + } + Symbol* sym = stce.get_new_sym(); + assert(sym->refcount() != 0, "zero is invalid"); + return sym; +} + +Symbol* SymbolTable::new_permanent_symbol(const char* name, TRAPS) { + unsigned int hash = 0; + int len = (int)strlen(name); + Symbol* sym = SymbolTable::lookup_only(name, len, hash); + if (sym == NULL) { + sym = SymbolTable::the_table()->do_add_if_needed(name, len, hash, false, CHECK_NULL); + } + if (sym->refcount() != PERM_REFCOUNT) { + sym->increment_refcount(); + log_trace_symboltable_helper(sym, "Asked for a permanent symbol, but got a regular one"); + } + return sym; +} + +struct SizeFunc : StackObj { + size_t operator()(Symbol** value) { + assert(value != NULL, "expected valid value"); + assert(*value != NULL, "value should point to a symbol"); + return (*value)->size() * HeapWordSize; + }; +}; + +void SymbolTable::print_table_statistics(outputStream* st, + const char* table_name) { + SizeFunc sz; + _local_table->statistics_to(Thread::current(), sz, st, table_name); +} + +// Verification +class VerifySymbols : StackObj { +public: + bool operator()(Symbol** value) { + guarantee(value != NULL, "expected valid value"); + guarantee(*value != NULL, "value should point to a symbol"); + Symbol* sym = *value; + guarantee(sym->equals((const char*)sym->bytes(), sym->utf8_length()), + "symbol must be internally consistent"); + return true; + }; +}; + +void SymbolTable::verify() { + Thread* thr = Thread::current(); + VerifySymbols vs; + if (!SymbolTable::the_table()->_local_table->try_scan(thr, vs)) { + log_info(stringtable)("verify unavailable at this moment"); + } +} + +// Dumping +class DumpSymbol : StackObj { + Thread* _thr; + outputStream* _st; +public: + DumpSymbol(Thread* thr, outputStream* st) : _thr(thr), _st(st) {} + bool operator()(Symbol** value) { + assert(value != NULL, "expected valid value"); + assert(*value != NULL, "value should point to a symbol"); + Symbol* sym = *value; + const char* utf8_string = (const char*)sym->bytes(); + int utf8_length = sym->utf8_length(); + _st->print("%d %d: ", utf8_length, sym->refcount()); + HashtableTextDump::put_utf8(_st, utf8_string, utf8_length); + _st->cr(); + return true; + }; +}; + +void SymbolTable::dump(outputStream* st, bool verbose) { + if (!verbose) { + SymbolTable::the_table()->print_table_statistics(st, "SymbolTable"); + } else { + Thread* thr = Thread::current(); + ResourceMark rm(thr); + st->print_cr("VERSION: 1.1"); + DumpSymbol 
ds(thr, st); + if (!SymbolTable::the_table()->_local_table->try_scan(thr, ds)) { + log_info(symboltable)("dump unavailable at this moment"); } } } -Symbol* SymbolTable::new_permanent_symbol(const char* name, TRAPS) { - unsigned int hash; - Symbol* result = SymbolTable::lookup_only((char*)name, (int)strlen(name), hash); - if (result != NULL) { - return result; +#if INCLUDE_CDS +struct CopyToArchive : StackObj { + CompactSymbolTableWriter* _writer; + CopyToArchive(CompactSymbolTableWriter* writer) : _writer(writer) {} + bool operator()(Symbol** value) { + assert(value != NULL, "expected valid value"); + assert(*value != NULL, "value should point to a symbol"); + Symbol* sym = *value; + unsigned int fixed_hash = hash_shared_symbol((const char*)sym->bytes(), sym->utf8_length()); + if (fixed_hash == 0) { + return true; + } + assert(fixed_hash == hash_symbol((const char*)sym->bytes(), sym->utf8_length(), false), + "must not rehash during dumping"); + + // add to the compact table + _writer->add(fixed_hash, sym); + + return true; } - // Grab SymbolTable_lock first. - MutexLocker ml(SymbolTable_lock, THREAD); +}; - SymbolTable* table = the_table(); - int index = table->hash_to_index(hash); - return table->basic_add(index, (u1*)name, (int)strlen(name), hash, false, THREAD); -} - -Symbol* SymbolTable::basic_add(int index_arg, u1 *name, int len, - unsigned int hashValue_arg, bool c_heap, TRAPS) { - assert(!Universe::heap()->is_in_reserved(name), - "proposed name of symbol must be stable"); - - // Don't allow symbols to be created which cannot fit in a Symbol*. - if (len > Symbol::max_length()) { - THROW_MSG_0(vmSymbols::java_lang_InternalError(), - "name is too long to represent"); - } - - // Cannot hit a safepoint in this function because the "this" pointer can move. - NoSafepointVerifier nsv; - - // Check if the symbol table has been rehashed, if so, need to recalculate - // the hash value and index. - unsigned int hashValue; - int index; - if (use_alternate_hashcode()) { - hashValue = hash_symbol((const char*)name, len); - index = hash_to_index(hashValue); - } else { - hashValue = hashValue_arg; - index = index_arg; - } - - // Since look-up was done lock-free, we need to check if another - // thread beat us in the race to insert the symbol. - Symbol* test = lookup(index, (char*)name, len, hashValue); - if (test != NULL) { - // A race occurred and another thread introduced the symbol. - assert(test->refcount() != 0, "lookup should have incremented the count"); - return test; - } - - // Create a new symbol. - Symbol* sym = allocate_symbol(name, len, c_heap, CHECK_NULL); - assert(sym->equals((char*)name, len), "symbol must be properly initialized"); - - HashtableEntry<Symbol*, mtSymbol>* entry = new_entry(hashValue, sym); - add_entry(index, entry); - return sym; -} - -// This version of basic_add adds symbols in batch from the constant pool -// parsing. -bool SymbolTable::basic_add(ClassLoaderData* loader_data, const constantPoolHandle& cp, - int names_count, - const char** names, int* lengths, - int* cp_indices, unsigned int* hashValues, - TRAPS) { - - // Check symbol names are not too long. If any are too long, don't add any. - for (int i = 0; i< names_count; i++) { - if (lengths[i] > Symbol::max_length()) { - THROW_MSG_0(vmSymbols::java_lang_InternalError(), - "name is too long to represent"); - } - } - - // Cannot hit a safepoint in this function because the "this" pointer can move. 
- NoSafepointVerifier nsv; - - for (int i=0; i<names_count; i++) { - // Check if the symbol table has been rehashed, if so, need to recalculate - // the hash value. - unsigned int hashValue; - if (use_alternate_hashcode()) { - hashValue = hash_symbol(names[i], lengths[i]); - } else { - hashValue = hashValues[i]; - } - // Since look-up was done lock-free, we need to check if another - // thread beat us in the race to insert the symbol. - int index = hash_to_index(hashValue); - Symbol* test = lookup(index, names[i], lengths[i], hashValue); - if (test != NULL) { - // A race occurred and another thread introduced the symbol, this one - // will be dropped and collected. Use test instead. - cp->symbol_at_put(cp_indices[i], test); - assert(test->refcount() != 0, "lookup should have incremented the count"); - } else { - // Create a new symbol. The null class loader is never unloaded so these - // are allocated specially in a permanent arena. - bool c_heap = !loader_data->is_the_null_class_loader_data(); - Symbol* sym = allocate_symbol((const u1*)names[i], lengths[i], c_heap, CHECK_(false)); - assert(sym->equals(names[i], lengths[i]), "symbol must be properly initialized"); // why wouldn't it be??? - HashtableEntry<Symbol*, mtSymbol>* entry = new_entry(hashValue, sym); - add_entry(index, entry); - cp->symbol_at_put(cp_indices[i], sym); - } - } - return true; -} - - -void SymbolTable::verify() { - for (int i = 0; i < the_table()->table_size(); ++i) { - HashtableEntry<Symbol*, mtSymbol>* p = the_table()->bucket(i); - for ( ; p != NULL; p = p->next()) { - Symbol* s = (Symbol*)(p->literal()); - guarantee(s != NULL, "symbol is NULL"); - unsigned int h = hash_symbol((char*)s->bytes(), s->utf8_length()); - guarantee(p->hash() == h, "broken hash in symbol table entry"); - guarantee(the_table()->hash_to_index(h) == i, - "wrong index in symbol table"); - } - } -} - -void SymbolTable::dump(outputStream* st, bool verbose) { - if (!verbose) { - the_table()->print_table_statistics(st, "SymbolTable"); - } else { - st->print_cr("VERSION: 1.0"); - for (int i = 0; i < the_table()->table_size(); ++i) { - HashtableEntry<Symbol*, mtSymbol>* p = the_table()->bucket(i); - for ( ; p != NULL; p = p->next()) { - Symbol* s = (Symbol*)(p->literal()); - const char* utf8_string = (const char*)s->bytes(); - int utf8_length = s->utf8_length(); - st->print("%d %d: ", utf8_length, s->refcount()); - HashtableTextDump::put_utf8(st, utf8_string, utf8_length); - st->cr(); - } - } - } +void SymbolTable::copy_shared_symbol_table(CompactSymbolTableWriter* writer) { + CopyToArchive copy(writer); + SymbolTable::the_table()->_local_table->do_scan(Thread::current(), copy); } void SymbolTable::write_to_archive() { -#if INCLUDE_CDS - _shared_table.reset(); + _shared_table.reset(); - int num_buckets = the_table()->number_of_entries() / - SharedSymbolTableBucketSize; - CompactSymbolTableWriter writer(num_buckets, - &MetaspaceShared::stats()->symbol); - for (int i = 0; i < the_table()->table_size(); ++i) { - HashtableEntry<Symbol*, mtSymbol>* p = the_table()->bucket(i); - for ( ; p != NULL; p = p->next()) { - Symbol* s = (Symbol*)(p->literal()); - unsigned int fixed_hash = hash_shared_symbol((char*)s->bytes(), s->utf8_length()); - assert(fixed_hash == p->hash(), "must not rehash during dumping"); - writer.add(fixed_hash, s); - } - } + int num_buckets = (int)(SymbolTable::the_table()->_items_count / SharedSymbolTableBucketSize); + // calculation of num_buckets can result in zero buckets, we need at least one + CompactSymbolTableWriter 
writer(num_buckets > 1 ? num_buckets : 1, + &MetaspaceShared::stats()->symbol); + copy_shared_symbol_table(&writer); + writer.dump(&_shared_table); - writer.dump(&_shared_table); - - // Verify table is correct - Symbol* sym = vmSymbols::java_lang_Object(); - const char* name = (const char*)sym->bytes(); - int len = sym->utf8_length(); - unsigned int hash = hash_symbol(name, len); - assert(sym == _shared_table.lookup(name, hash, len), "sanity"); -#endif + // Verify table is correct + Symbol* sym = vmSymbols::java_lang_Object(); + const char* name = (const char*)sym->bytes(); + int len = sym->utf8_length(); + unsigned int hash = hash_symbol(name, len, SymbolTable::_alt_hash); + assert(sym == _shared_table.lookup(name, hash, len), "sanity"); } void SymbolTable::serialize(SerializeClosure* soc) { -#if INCLUDE_CDS _shared_table.set_type(CompactHashtable<Symbol*, char>::_symbol_table); _shared_table.serialize(soc); @@ -626,7 +649,201 @@ // Sanity. Make sure we don't use the shared table at dump time _shared_table.reset(); } -#endif +} +#endif //INCLUDE_CDS + +// Concurrent work +void SymbolTable::grow(JavaThread* jt) { + SymbolTableHash::GrowTask gt(_local_table); + if (!gt.prepare(jt)) { + return; + } + log_trace(symboltable)("Started to grow"); + { + TraceTime timer("Grow", TRACETIME_LOG(Debug, symboltable, perf)); + while (gt.do_task(jt)) { + gt.pause(jt); + { + ThreadBlockInVM tbivm(jt); + } + gt.cont(jt); + } + } + gt.done(jt); + _current_size = table_size(); + log_debug(symboltable)("Grown to size:" SIZE_FORMAT, _current_size); +} + +struct SymbolTableDoDelete : StackObj { + int _deleted; + SymbolTableDoDelete() : _deleted(0) {} + void operator()(Symbol** value) { + assert(value != NULL, "expected valid value"); + assert(*value != NULL, "value should point to a symbol"); + Symbol *sym = *value; + assert(sym->refcount() == 0, "refcount"); + _deleted++; + } +}; + +struct SymbolTableDeleteCheck : StackObj { + int _processed; + SymbolTableDeleteCheck() : _processed(0) {} + bool operator()(Symbol** value) { + assert(value != NULL, "expected valid value"); + assert(*value != NULL, "value should point to a symbol"); + _processed++; + Symbol *sym = *value; + return (sym->refcount() == 0); + } +}; + +void SymbolTable::clean_dead_entries(JavaThread* jt) { + SymbolTableHash::BulkDeleteTask bdt(_local_table); + if (!bdt.prepare(jt)) { + return; + } + + SymbolTableDeleteCheck stdc; + SymbolTableDoDelete stdd; + { + TraceTime timer("Clean", TRACETIME_LOG(Debug, symboltable, perf)); + while (bdt.do_task(jt, stdc, stdd)) { + bdt.pause(jt); + { + ThreadBlockInVM tbivm(jt); + } + bdt.cont(jt); + } + SymbolTable::the_table()->set_item_clean_count(0); + bdt.done(jt); + } + + Atomic::add((size_t)stdc._processed, &_symbols_counted); + + log_debug(symboltable)("Cleaned " INT32_FORMAT " of " INT32_FORMAT, + stdd._deleted, stdc._processed); +} + +void SymbolTable::check_concurrent_work() { + if (_has_work) { + return; + } + double load_factor = SymbolTable::get_load_factor(); + double dead_factor = SymbolTable::get_dead_factor(); + // We should clean/resize if we have more dead than alive, + // more items than preferred load factor or + // more dead items than water mark. 
+ if ((dead_factor > load_factor) || + (load_factor > PREF_AVG_LIST_LEN) || + (dead_factor > CLEAN_DEAD_HIGH_WATER_MARK)) { + log_debug(symboltable)("Concurrent work triggered, live factor:%f dead factor:%f", + load_factor, dead_factor); + trigger_concurrent_work(); + } +} + +void SymbolTable::concurrent_work(JavaThread* jt) { + double load_factor = get_load_factor(); + log_debug(symboltable, perf)("Concurrent work, live factor: %g", load_factor); + // We prefer growing, since that also removes dead items + if (load_factor > PREF_AVG_LIST_LEN && !_local_table->is_max_size_reached()) { + grow(jt); + } else { + clean_dead_entries(jt); + } + _has_work = false; +} + +class CountDead : StackObj { + int _count; +public: + CountDead() : _count(0) {} + bool operator()(Symbol** value) { + assert(value != NULL, "expected valid value"); + assert(*value != NULL, "value should point to a symbol"); + Symbol* sym = *value; + if (sym->refcount() == 0) { + _count++; + } + return true; + }; + int get_dead_count() { + return _count; + } +}; + +void SymbolTable::do_check_concurrent_work() { + CountDead counter; + if (!SymbolTable::the_table()->_local_table->try_scan(Thread::current(), counter)) { + log_info(symboltable)("count dead unavailable at this moment"); + } else { + SymbolTable::the_table()->set_item_clean_count(counter.get_dead_count()); + SymbolTable::the_table()->check_concurrent_work(); + } +} + +void SymbolTable::do_concurrent_work(JavaThread* jt) { + SymbolTable::the_table()->concurrent_work(jt); +} + +// Rehash +bool SymbolTable::do_rehash() { + if (!_local_table->is_safepoint_safe()) { + return false; + } + + // We use max size + SymbolTableHash* new_table = new SymbolTableHash(END_SIZE, END_SIZE, REHASH_LEN); + // Use alt hash from now on + _alt_hash = true; + if (!_local_table->try_move_nodes_to(Thread::current(), new_table)) { + _alt_hash = false; + delete new_table; + return false; + } + + // free old table + delete _local_table; + _local_table = new_table; + + return true; +} + +void SymbolTable::try_rehash_table() { + static bool rehashed = false; + log_debug(symboltable)("Table imbalanced, rehashing called."); + + // Grow instead of rehash. + if (get_load_factor() > PREF_AVG_LIST_LEN && + !_local_table->is_max_size_reached()) { + log_debug(symboltable)("Choosing growing over rehashing."); + trigger_concurrent_work(); + _needs_rehashing = false; + return; + } + + // Already rehashed. 
+ if (rehashed) { + log_warning(symboltable)("Rehashing already done, still long lists."); + trigger_concurrent_work(); + _needs_rehashing = false; + return; + } + + murmur_seed = AltHashing::compute_seed(); + + if (do_rehash()) { + rehashed = true; + } else { + log_info(symboltable)("Resizes in progress rehashing skipped."); + } + + _needs_rehashing = false; +} + +void SymbolTable::rehash_table() { + SymbolTable::the_table()->try_rehash_table(); } //--------------------------------------------------------------------------- @@ -634,89 +851,80 @@ #ifndef PRODUCT -void SymbolTable::print_histogram() { - MutexLocker ml(SymbolTable_lock); - const int results_length = 100; - int counts[results_length]; - int sizes[results_length]; - int i,j; - - // initialize results to zero - for (j = 0; j < results_length; j++) { - counts[j] = 0; - sizes[j] = 0; - } - - int total_size = 0; - int total_count = 0; - int total_length = 0; - int max_length = 0; - int out_of_range_count = 0; - int out_of_range_size = 0; - for (i = 0; i < the_table()->table_size(); i++) { - HashtableEntry<Symbol*, mtSymbol>* p = the_table()->bucket(i); - for ( ; p != NULL; p = p->next()) { - int size = p->literal()->size(); - int len = p->literal()->utf8_length(); - if (len < results_length) { - counts[len]++; - sizes[len] += size; - } else { - out_of_range_count++; - out_of_range_size += size; - } - total_count++; - total_size += size; - total_length += len; - max_length = MAX2(max_length, len); +class HistogramIterator : StackObj { +public: + static const size_t results_length = 100; + size_t counts[results_length]; + size_t sizes[results_length]; + size_t total_size; + size_t total_count; + size_t total_length; + size_t max_length; + size_t out_of_range_count; + size_t out_of_range_size; + HistogramIterator() : total_size(0), total_count(0), total_length(0), + max_length(0), out_of_range_count(0), out_of_range_size(0) { + // initialize results to zero + for (size_t i = 0; i < results_length; i++) { + counts[i] = 0; + sizes[i] = 0; } } + bool operator()(Symbol** value) { + assert(value != NULL, "expected valid value"); + assert(*value != NULL, "value should point to a symbol"); + Symbol* sym = *value; + size_t size = sym->size(); + size_t len = sym->utf8_length(); + if (len < results_length) { + counts[len]++; + sizes[len] += size; + } else { + out_of_range_count++; + out_of_range_size += size; + } + total_count++; + total_size += size; + total_length += len; + max_length = MAX2(max_length, len); + + return true; + }; +}; + +void SymbolTable::print_histogram() { + SymbolTable* st = SymbolTable::the_table(); + HistogramIterator hi; + st->_local_table->do_scan(Thread::current(), hi); tty->print_cr("Symbol Table Histogram:"); - tty->print_cr(" Total number of symbols %7d", total_count); - tty->print_cr(" Total size in memory %7dK", - (total_size*wordSize)/1024); - tty->print_cr(" Total counted %7d", _symbols_counted); - tty->print_cr(" Total removed %7d", _symbols_removed); - if (_symbols_counted > 0) { + tty->print_cr(" Total number of symbols " SIZE_FORMAT_W(7), hi.total_count); + tty->print_cr(" Total size in memory " SIZE_FORMAT_W(7) "K", + (hi.total_size * wordSize) / 1024); + tty->print_cr(" Total counted " SIZE_FORMAT_W(7), st->_symbols_counted); + tty->print_cr(" Total removed " SIZE_FORMAT_W(7), st->_symbols_removed); + if (SymbolTable::the_table()->_symbols_counted > 0) { tty->print_cr(" Percent removed %3.2f", - ((float)_symbols_removed/(float)_symbols_counted)* 100); + ((float)st->_symbols_removed / 
st->_symbols_counted) * 100); } - tty->print_cr(" Reference counts %7d", Symbol::_total_count); - tty->print_cr(" Symbol arena used " SIZE_FORMAT_W(7) "K", arena()->used()/1024); - tty->print_cr(" Symbol arena size " SIZE_FORMAT_W(7) "K", arena()->size_in_bytes()/1024); - tty->print_cr(" Total symbol length %7d", total_length); - tty->print_cr(" Maximum symbol length %7d", max_length); - tty->print_cr(" Average symbol length %7.2f", ((float) total_length / (float) total_count)); + tty->print_cr(" Reference counts " SIZE_FORMAT_W(7), Symbol::_total_count); + tty->print_cr(" Symbol arena used " SIZE_FORMAT_W(7) "K", arena()->used() / 1024); + tty->print_cr(" Symbol arena size " SIZE_FORMAT_W(7) "K", arena()->size_in_bytes() / 1024); + tty->print_cr(" Total symbol length " SIZE_FORMAT_W(7), hi.total_length); + tty->print_cr(" Maximum symbol length " SIZE_FORMAT_W(7), hi.max_length); + tty->print_cr(" Average symbol length %7.2f", ((float)hi.total_length / hi.total_count)); tty->print_cr(" Symbol length histogram:"); tty->print_cr(" %6s %10s %10s", "Length", "#Symbols", "Size"); - for (i = 0; i < results_length; i++) { - if (counts[i] > 0) { - tty->print_cr(" %6d %10d %10dK", i, counts[i], (sizes[i]*wordSize)/1024); + for (size_t i = 0; i < hi.results_length; i++) { + if (hi.counts[i] > 0) { + tty->print_cr(" " SIZE_FORMAT_W(6) " " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) "K", + i, hi.counts[i], (hi.sizes[i] * wordSize) / 1024); } } - tty->print_cr(" >=%6d %10d %10dK\n", results_length, - out_of_range_count, (out_of_range_size*wordSize)/1024); -} - -void SymbolTable::print() { - for (int i = 0; i < the_table()->table_size(); ++i) { - HashtableEntry<Symbol*, mtSymbol>** p = the_table()->bucket_addr(i); - HashtableEntry<Symbol*, mtSymbol>* entry = the_table()->bucket(i); - if (entry != NULL) { - while (entry != NULL) { - tty->print(PTR_FORMAT " ", p2i(entry->literal())); - entry->literal()->print(); - tty->print(" %d", entry->literal()->refcount()); - p = entry->next_addr(); - entry = (HashtableEntry<Symbol*, mtSymbol>*)HashtableEntry<Symbol*, mtSymbol>::make_ptr(*p); - } - tty->cr(); - } - } + tty->print_cr(" >=" SIZE_FORMAT_W(6) " " SIZE_FORMAT_W(10) " " SIZE_FORMAT_W(10) "K\n", + hi.results_length, hi.out_of_range_count, (hi.out_of_range_size*wordSize) / 1024); } #endif // PRODUCT - // Utility for dumping symbols SymboltableDCmd::SymboltableDCmd(outputStream* output, bool heap) : DCmdWithParser(output, heap),
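The rewritten symbolTable.cpp above drives everything through a ConcurrentHashTable and small callback objects: SymbolTableLookup decides equality and liveness, SymbolTableGet captures a found value, and SymbolTableCreateEntry builds a Symbol only when the slot is empty and releases its copy if another thread wins the insert race. A minimal standalone sketch of that callback contract (toy types, and a mutex standing in for the lock-free internals — this is not the HotSpot API):

#include <mutex>
#include <string>
#include <unordered_map>

template <typename V>
class TinyTable {
  std::unordered_map<std::string, V> _map;
  std::mutex _lock;  // the real table is lock-free; serialized here for brevity
public:
  // Mirrors the shape of get_insert_lazy(): CREATE runs only when the key
  // is absent; FOUND always runs, with 'inserted' telling the caller who won.
  template <typename CREATE, typename FOUND>
  void get_insert_lazy(const std::string& key, CREATE create, FOUND found) {
    std::lock_guard<std::mutex> g(_lock);
    auto it = _map.find(key);
    bool inserted = false;
    if (it == _map.end()) {
      it = _map.emplace(key, create()).first;  // value built lazily, on demand
      inserted = true;
    }
    found(inserted, &it->second);
  }
};

The key point is the (inserted, value) callback: the loser of an insert race sees inserted == false and must drop the value it speculatively created, which is exactly what SymbolTableCreateEntry::operator()(bool, Symbol**) does in the hunk above.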
--- a/src/hotspot/share/classfile/symbolTable.hpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/classfile/symbolTable.hpp Thu Aug 16 22:05:57 2018 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -26,23 +26,11 @@ #define SHARE_VM_CLASSFILE_SYMBOLTABLE_HPP #include "memory/allocation.hpp" +#include "memory/padded.hpp" #include "oops/symbol.hpp" +#include "utilities/concurrentHashTable.hpp" #include "utilities/hashtable.hpp" -// The symbol table holds all Symbol*s and corresponding interned strings. -// Symbol*s and literal strings should be canonicalized. -// -// The interned strings are created lazily. -// -// It is implemented as an open hash table with a fixed number of buckets. -// -// %note: -// - symbolTableEntrys are allocated in blocks to reduce the space overhead. - -class BoolObjectClosure; -class outputStream; -class SerializeClosure; - // TempNewSymbol acts as a handle class in a handle/body idiom and is // responsible for proper resource management of the body (which is a Symbol*). // The body is resource managed by a reference counting scheme. @@ -59,7 +47,7 @@ class TempNewSymbol : public StackObj { Symbol* _temp; - public: +public: TempNewSymbol() : _temp(NULL) {} // Conversion from a Symbol* to a TempNewSymbol. @@ -97,35 +85,69 @@ }; template <class T, class N> class CompactHashtable; +class CompactSymbolTableWriter; +class SerializeClosure; -class SymbolTable : public RehashableHashtable<Symbol*, mtSymbol> { +class SymbolTableConfig; +typedef ConcurrentHashTable<Symbol*, + SymbolTableConfig, mtSymbol> SymbolTableHash; + +class SymbolTableCreateEntry; + +class SymbolTable : public CHeapObj<mtSymbol> { friend class VMStructs; + friend class Symbol; friend class ClassFileParser; + friend class SymbolTableConfig; + friend class SymbolTableCreateEntry; private: + static void delete_symbol(Symbol* sym); + void grow(JavaThread* jt); + void clean_dead_entries(JavaThread* jt); + // The symbol table static SymbolTable* _the_table; - - // Set if one bucket is out of balance due to hash algorithm deficiency - static bool _needs_rehashing; - static bool _lookup_shared_first; + // Shared symbol table. + static CompactHashtable<Symbol*, char> _shared_table; + static volatile bool _lookup_shared_first; + static volatile bool _alt_hash; // For statistics - static int _symbols_removed; - static int _symbols_counted; + volatile size_t _symbols_removed; + volatile size_t _symbols_counted; - // shared symbol table. 
- static CompactHashtable<Symbol*, char> _shared_table; + SymbolTableHash* _local_table; + size_t _current_size; + volatile bool _has_work; + // Set if one bucket is out of balance due to hash algorithm deficiency + volatile bool _needs_rehashing; - Symbol* allocate_symbol(const u1* name, int len, bool c_heap, TRAPS); // Assumes no characters larger than 0x7F + volatile size_t _items_count; + volatile size_t _uncleaned_items_count; + + double get_load_factor(); + double get_dead_factor(); + + void check_concurrent_work(); + void trigger_concurrent_work(); + + static void item_added(); + static void item_removed(); + static void set_item_clean_count(size_t ncl); + static void mark_item_clean_count(); + + SymbolTable(); + + Symbol* allocate_symbol(const char* name, int len, bool c_heap, TRAPS); // Assumes no characters larger than 0x7F + Symbol* do_lookup(const char* name, int len, uintx hash); + Symbol* do_add_if_needed(const char* name, int len, uintx hash, bool heap, TRAPS); // Adding elements - Symbol* basic_add(int index, u1* name, int len, unsigned int hashValue, - bool c_heap, TRAPS); - bool basic_add(ClassLoaderData* loader_data, - const constantPoolHandle& cp, int names_count, - const char** names, int* lengths, int* cp_indices, - unsigned int* hashValues, TRAPS); + static void add(ClassLoaderData* loader_data, + const constantPoolHandle& cp, int names_count, + const char** names, int* lengths, int* cp_indices, + unsigned int* hashValues, TRAPS); static void new_symbols(ClassLoaderData* loader_data, const constantPoolHandle& cp, int names_count, @@ -136,15 +158,8 @@ } static Symbol* lookup_shared(const char* name, int len, unsigned int hash); - Symbol* lookup_dynamic(int index, const char* name, int len, unsigned int hash); - Symbol* lookup(int index, const char* name, int len, unsigned int hash); - - SymbolTable() - : RehashableHashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>)) {} - - SymbolTable(HashtableBucket<mtSymbol>* t, int number_of_entries) - : RehashableHashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>), t, - number_of_entries) {} + Symbol* lookup_dynamic(const char* name, int len, unsigned int hash); + Symbol* lookup_common(const char* name, int len, unsigned int hash); // Arena for permanent symbols (null class loader) that are never unloaded static Arena* _arena; @@ -152,88 +167,45 @@ static void initialize_symbols(int arena_alloc_size = 0); - static volatile int _parallel_claimed_idx; + void concurrent_work(JavaThread* jt); + void print_table_statistics(outputStream* st, const char* table_name); - typedef SymbolTable::BucketUnlinkContext BucketUnlinkContext; - // Release any dead symbols. Unlinked bucket entries are collected in the given - // context to be freed later. - // This allows multiple threads to work on the table at once. - static void buckets_unlink(int start_idx, int end_idx, BucketUnlinkContext* context); + void try_rehash_table(); + bool do_rehash(); + public: + // The symbol table + static SymbolTable* the_table() { return _the_table; } + size_t table_size(); + enum { symbol_alloc_batch_size = 8, // Pick initial size based on java -version size measurements - symbol_alloc_arena_size = 360*K + symbol_alloc_arena_size = 360*K // TODO (revisit) }; - // The symbol table - static SymbolTable* the_table() { return _the_table; } - - // Size of one bucket in the string table. Used when checking for rollover. 
- static uint bucket_size() { return sizeof(HashtableBucket<mtSymbol>); } - static void create_table() { assert(_the_table == NULL, "One symbol table allowed."); _the_table = new SymbolTable(); initialize_symbols(symbol_alloc_arena_size); } - static unsigned int hash_symbol(const char* s, int len); - static unsigned int hash_shared_symbol(const char* s, int len); + static void unlink() { + do_check_concurrent_work(); + } + static void do_check_concurrent_work(); + static void do_concurrent_work(JavaThread* jt); + static bool has_work() { return the_table()->_has_work; } + // Probing static Symbol* lookup(const char* name, int len, TRAPS); // lookup only, won't add. Also calculate hash. static Symbol* lookup_only(const char* name, int len, unsigned int& hash); - // Only copy to C string to be added if lookup failed. + // adds new symbol if not found static Symbol* lookup(const Symbol* sym, int begin, int end, TRAPS); - - static void release(Symbol* sym); - - // Look up the address of the literal in the SymbolTable for this Symbol* - static Symbol** lookup_symbol_addr(Symbol* sym); - // jchar (UTF16) version of lookups static Symbol* lookup_unicode(const jchar* name, int len, TRAPS); static Symbol* lookup_only_unicode(const jchar* name, int len, unsigned int& hash); - - static void add(ClassLoaderData* loader_data, - const constantPoolHandle& cp, int names_count, - const char** names, int* lengths, int* cp_indices, - unsigned int* hashValues, TRAPS); - - // Release any dead symbols - static void unlink() { - int processed = 0; - int removed = 0; - unlink(&processed, &removed); - } - static void unlink(int* processed, int* removed); - // Release any dead symbols, possibly parallel version - static void possibly_parallel_unlink(int* processed, int* removed); - - // iterate over symbols - static void symbols_do(SymbolClosure *cl); - static void metaspace_pointers_do(MetaspaceClosure* it); - - // Symbol creation - static Symbol* new_symbol(const char* utf8_buffer, int length, TRAPS) { - assert(utf8_buffer != NULL, "just checking"); - return lookup(utf8_buffer, length, THREAD); - } - static Symbol* new_symbol(const char* name, TRAPS) { - return new_symbol(name, (int)strlen(name), THREAD); - } - static Symbol* new_symbol(const Symbol* sym, int begin, int end, TRAPS) { - assert(begin <= end && end <= sym->utf8_length(), "just checking"); - return lookup(sym, begin, end, THREAD); - } - - // Create a symbol in the arena for symbols that are not deleted - static Symbol* new_permanent_symbol(const char* name, TRAPS); - - // Symbol lookup - static Symbol* lookup(int index, const char* name, int len, TRAPS); - // Needed for preloading classes in signatures when compiling. // Returns the symbol is already present in symbol table, otherwise // NULL. NO ALLOCATION IS GUARANTEED! 
@@ -246,27 +218,45 @@ return lookup_only_unicode(name, len, ignore_hash); } - // Histogram - static void print_histogram() PRODUCT_RETURN; - static void print() PRODUCT_RETURN; + // Symbol creation + static Symbol* new_symbol(const char* utf8_buffer, int length, TRAPS) { + assert(utf8_buffer != NULL, "just checking"); + return lookup(utf8_buffer, length, THREAD); + } + static Symbol* new_symbol(const char* name, TRAPS) { + return new_symbol(name, (int)strlen(name), THREAD); + } + static Symbol* new_symbol(const Symbol* sym, int begin, int end, TRAPS) { + assert(begin <= end && end <= sym->utf8_length(), "just checking"); + return lookup(sym, begin, end, THREAD); + } + // Create a symbol in the arena for symbols that are not deleted + static Symbol* new_permanent_symbol(const char* name, TRAPS); + // Rehash the string table if it gets out of balance + static void rehash_table(); + static bool needs_rehashing() + { return SymbolTable::the_table()->_needs_rehashing; } + + // Heap dumper and CDS + static void symbols_do(SymbolClosure *cl); + + // Sharing +private: + static void copy_shared_symbol_table(CompactSymbolTableWriter* ch_table); +public: + static void write_to_archive() NOT_CDS_RETURN; + static void serialize(SerializeClosure* soc) NOT_CDS_RETURN; + static void metaspace_pointers_do(MetaspaceClosure* it); + + // Jcmd + static void dump(outputStream* st, bool verbose=false); // Debugging static void verify(); - static void dump(outputStream* st, bool verbose=false); static void read(const char* filename, TRAPS); - // Sharing - static void write_to_archive(); - static void serialize(SerializeClosure* soc); - static u4 encode_shared(Symbol* sym); - static Symbol* decode_shared(u4 offset); - - // Rehash the symbol table if it gets out of balance - static void rehash_table(); - static bool needs_rehashing() { return _needs_rehashing; } - // Parallel chunked scanning - static void clear_parallel_claimed_index() { _parallel_claimed_idx = 0; } - static int parallel_claimed_index() { return _parallel_claimed_idx; } + // Histogram + static void print_histogram() PRODUCT_RETURN; }; #endif // SHARE_VM_CLASSFILE_SYMBOLTABLE_HPP
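The header keeps the TempNewSymbol handle/body idiom: every lookup returns a Symbol* whose refcount has already been incremented, and the handle balances that increment when it leaves scope. A condensed sketch of that discipline (simplified standalone types, heap-allocated bodies assumed; the real class also re-counts on copy and exempts PERM_REFCOUNT symbols):

#include <atomic>

struct ToySymbol {
  std::atomic<int> refcount{1};                  // a fresh lookup result starts at 1
  void increment_refcount() { refcount.fetch_add(1); }
  void decrement_refcount() {
    if (refcount.fetch_sub(1) == 1) delete this; // last owner frees the body (heap-allocated)
  }
};

class ToyTempSymbol {                              // models TempNewSymbol
  ToySymbol* _temp;
public:
  explicit ToyTempSymbol(ToySymbol* s) : _temp(s) {}  // adopts the +1 from lookup
  ToyTempSymbol(const ToyTempSymbol&) = delete;       // the real class re-counts on copy
  ~ToyTempSymbol() { if (_temp != nullptr) _temp->decrement_refcount(); }
  ToySymbol* operator->() const { return _temp; }
};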
--- a/src/hotspot/share/classfile/systemDictionary.cpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/classfile/systemDictionary.cpp Thu Aug 16 22:05:57 2018 +0200 @@ -1853,12 +1853,24 @@ // First, mark for unload all ClassLoaderData referencing a dead class loader. unloading_occurred = ClassLoaderDataGraph::do_unloading(do_cleaning); + if (unloading_occurred) { + ClassLoaderDataGraph::clean_module_and_package_info(); + } } + // TODO: just return if !unloading_occurred. if (unloading_occurred) { - GCTraceTime(Debug, gc, phases) t("Dictionary", gc_timer); - constraints()->purge_loader_constraints(); - resolution_errors()->purge_resolution_errors(); + { + GCTraceTime(Debug, gc, phases) t("SymbolTable", gc_timer); + // Check if there's work to do in the SymbolTable + SymbolTable::do_check_concurrent_work(); + } + + { + GCTraceTime(Debug, gc, phases) t("Dictionary", gc_timer); + constraints()->purge_loader_constraints(); + resolution_errors()->purge_resolution_errors(); + } } {
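The unloading path above now times each cleanup step in its own block. The GCTraceTime objects are RAII timers: constructing one starts the clock for the named phase and its destructor logs on scope exit, which is why the SymbolTable and Dictionary work each sit in their own braces. A rough sketch of the idiom only (illustrative names, not the HotSpot implementation):

#include <chrono>
#include <cstdio>

class ScopedPhaseTimer {
  const char* _name;
  std::chrono::steady_clock::time_point _start;
public:
  explicit ScopedPhaseTimer(const char* name)
    : _name(name), _start(std::chrono::steady_clock::now()) {}
  ~ScopedPhaseTimer() {                        // logs when the phase scope ends
    long long us = std::chrono::duration_cast<std::chrono::microseconds>(
        std::chrono::steady_clock::now() - _start).count();
    std::printf("%s: %lld us\n", _name, us);
  }
};

void unloading_cleanup() {
  { ScopedPhaseTimer t("SymbolTable"); /* SymbolTable::do_check_concurrent_work() */ }
  { ScopedPhaseTimer t("Dictionary");  /* purge constraints, resolution errors    */ }
}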
--- a/src/hotspot/share/classfile/systemDictionary.hpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/classfile/systemDictionary.hpp Thu Aug 16 22:05:57 2018 +0200 @@ -187,6 +187,10 @@ do_klass(jdk_internal_loader_ClassLoaders_AppClassLoader_klass, jdk_internal_loader_ClassLoaders_AppClassLoader, Pre ) \ do_klass(jdk_internal_loader_ClassLoaders_PlatformClassLoader_klass, jdk_internal_loader_ClassLoaders_PlatformClassLoader, Pre ) \ do_klass(CodeSource_klass, java_security_CodeSource, Pre ) \ + do_klass(Configuration_klass, java_lang_module_Configuration, Pre ) \ + do_klass(ImmutableCollections_ListN_klass, java_util_ImmutableCollections_ListN, Pre ) \ + do_klass(ImmutableCollections_MapN_klass, java_util_ImmutableCollections_MapN, Pre ) \ + do_klass(ImmutableCollections_SetN_klass, java_util_ImmutableCollections_SetN, Pre ) \ do_klass(ArchivedModuleGraph_klass, jdk_internal_module_ArchivedModuleGraph, Pre ) \ \ do_klass(StackTraceElement_klass, java_lang_StackTraceElement, Opt ) \ @@ -211,6 +215,7 @@ do_klass(Byte_klass, java_lang_Byte, Pre ) \ do_klass(Short_klass, java_lang_Short, Pre ) \ do_klass(Integer_klass, java_lang_Integer, Pre ) \ + do_klass(Integer_IntegerCache_klass, java_lang_Integer_IntegerCache, Pre ) \ do_klass(Long_klass, java_lang_Long, Pre ) \ \ /* JVMCI classes. These are loaded on-demand. */ \
--- a/src/hotspot/share/classfile/vmSymbols.hpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/classfile/vmSymbols.hpp Thu Aug 16 22:05:57 2018 +0200 @@ -438,6 +438,7 @@ template(fileToEncodedURL_signature, "(Ljava/io/File;)Ljava/net/URL;") \ template(getProtectionDomain_name, "getProtectionDomain") \ template(getProtectionDomain_signature, "(Ljava/security/CodeSource;)Ljava/security/ProtectionDomain;") \ + template(java_lang_Integer_array_signature, "[Ljava/lang/Integer;") \ template(url_code_signer_array_void_signature, "(Ljava/net/URL;[Ljava/security/CodeSigner;)V") \ template(module_entry_name, "module_entry") \ template(resolved_references_name, "<resolved_references>") \ @@ -648,13 +649,20 @@ JFR_TEMPLATES(template) \ \ /* cds */ \ + template(configuration_signature, "Ljava/lang/module/Configuration;") \ + template(java_lang_module_Configuration, "java/lang/module/Configuration") \ + template(java_util_ImmutableCollections_ListN, "java/util/ImmutableCollections$ListN") \ + template(java_util_ImmutableCollections_MapN, "java/util/ImmutableCollections$MapN") \ + template(java_util_ImmutableCollections_SetN, "java/util/ImmutableCollections$SetN") \ template(jdk_internal_loader_ClassLoaders, "jdk/internal/loader/ClassLoaders") \ - template(jdk_vm_cds_SharedClassInfo, "jdk/vm/cds/SharedClassInfo") \ - template(url_void_signature, "(Ljava/net/URL;)V") \ + template(list_signature, "Ljava/util/List;") \ + template(map_signature, "Ljava/util/Map;") \ + template(moduleFinder_signature, "Ljava/lang/module/ModuleFinder;") \ + template(set_signature, "Ljava/util/Set;") \ + template(systemModules_signature, "Ljdk/internal/module/SystemModules;") \ template(toFileURL_name, "toFileURL") \ template(toFileURL_signature, "(Ljava/lang/String;)Ljava/net/URL;") \ - template(moduleFinder_signature, "Ljava/lang/module/ModuleFinder;") \ - template(systemModules_signature, "Ljdk/internal/module/SystemModules;") \ + template(url_void_signature, "(Ljava/net/URL;)V") \ \ /*end*/
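Both the do_klass list in the systemDictionary.hpp hunk above and these template() entries are X-macro tables: a single list of (name, string) pairs is expanded several times to generate enums, accessors, and the literal strings, so adding an entry such as java_util_ImmutableCollections_SetN in one place wires it up everywhere. A minimal sketch of the pattern (hypothetical two-entry list, far simpler than the real macros):

// one list of well-known entries...
#define TOY_KLASSES_DO(do_klass) \
  do_klass(Integer_klass)        \
  do_klass(Integer_IntegerCache_klass)

// ...expanded once into an enum of ids...
enum ToyWKID {
#define TOY_MAKE_ID(name) name##_id,
  TOY_KLASSES_DO(TOY_MAKE_ID)
#undef TOY_MAKE_ID
  toy_wk_count
};

// ...and again into the matching name strings
static const char* toy_wk_names[] = {
#define TOY_MAKE_NAME(name) #name,
  TOY_KLASSES_DO(TOY_MAKE_NAME)
#undef TOY_MAKE_NAME
};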
--- a/src/hotspot/share/compiler/compileBroker.cpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/compiler/compileBroker.cpp Thu Aug 16 22:05:57 2018 +0200 @@ -1777,12 +1777,6 @@ possibly_add_compiler_threads(); } - // Give compiler threads an extra quanta. They tend to be bursty and - // this helps the compiler to finish up the job. - if (CompilerThreadHintNoPreempt) { - os::hint_no_preempt(); - } - // Assign the task to the current thread. Mark this compilation // thread as active for the profiler. CompileTaskWrapper ctw(task);
--- a/src/hotspot/share/gc/g1/g1BarrierSet.cpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/gc/g1/g1BarrierSet.cpp Thu Aug 16 22:05:57 2018 +0200 @@ -48,27 +48,30 @@ class G1BarrierSetC1; class G1BarrierSetC2; -SATBMarkQueueSet G1BarrierSet::_satb_mark_queue_set; -DirtyCardQueueSet G1BarrierSet::_dirty_card_queue_set; - G1BarrierSet::G1BarrierSet(G1CardTable* card_table) : CardTableBarrierSet(make_barrier_set_assembler<G1BarrierSetAssembler>(), make_barrier_set_c1<G1BarrierSetC1>(), make_barrier_set_c2<G1BarrierSetC2>(), card_table, - BarrierSet::FakeRtti(BarrierSet::G1BarrierSet)) {} + BarrierSet::FakeRtti(BarrierSet::G1BarrierSet)), + _satb_mark_queue_set(), + _dirty_card_queue_set() +{} void G1BarrierSet::enqueue(oop pre_val) { // Nulls should have been already filtered. assert(oopDesc::is_oop(pre_val, true), "Error"); - if (!_satb_mark_queue_set.is_active()) return; + G1SATBMarkQueueSet& queue_set = satb_mark_queue_set(); + if (!queue_set.is_active()) { + return; + } Thread* thr = Thread::current(); if (thr->is_Java_thread()) { G1ThreadLocalData::satb_mark_queue(thr).enqueue(pre_val); } else { MutexLockerEx x(Shared_SATB_Q_lock, Mutex::_no_safepoint_check_flag); - _satb_mark_queue_set.shared_satb_queue()->enqueue(pre_val); + queue_set.shared_satb_queue()->enqueue(pre_val); } }
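enqueue() above is the slow path of G1's SATB pre-write barrier: while concurrent marking is active, the value about to be overwritten is recorded so the snapshot-at-the-beginning of the object graph stays complete (nulls are filtered before ever reaching a buffer, per the assert). A conceptual standalone sketch, not HotSpot code:

#include <vector>

static std::vector<const void*> g_satb_queue;   // stand-in for a per-thread SATB queue
static bool g_marking_active = false;

void satb_pre_write(const void** field, const void* new_value) {
  const void* pre_val = *field;                 // value about to be overwritten
  if (g_marking_active && pre_val != nullptr) { // nulls never enter a buffer
    g_satb_queue.push_back(pre_val);            // models G1BarrierSet::enqueue(pre_val)
  }
  *field = new_value;                           // the store itself follows the barrier
}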
--- a/src/hotspot/share/gc/g1/g1BarrierSet.hpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/gc/g1/g1BarrierSet.hpp Thu Aug 16 22:05:57 2018 +0200 @@ -26,7 +26,7 @@ #define SHARE_VM_GC_G1_G1BARRIERSET_HPP #include "gc/g1/dirtyCardQueue.hpp" -#include "gc/g1/satbMarkQueue.hpp" +#include "gc/g1/g1SATBMarkQueueSet.hpp" #include "gc/shared/cardTableBarrierSet.hpp" class DirtyCardQueueSet; @@ -39,8 +39,12 @@ class G1BarrierSet: public CardTableBarrierSet { friend class VMStructs; private: - static SATBMarkQueueSet _satb_mark_queue_set; - static DirtyCardQueueSet _dirty_card_queue_set; + G1SATBMarkQueueSet _satb_mark_queue_set; + DirtyCardQueueSet _dirty_card_queue_set; + + static G1BarrierSet* g1_barrier_set() { + return barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set()); + } public: G1BarrierSet(G1CardTable* table); @@ -75,12 +79,12 @@ virtual void on_thread_attach(JavaThread* thread); virtual void on_thread_detach(JavaThread* thread); - static SATBMarkQueueSet& satb_mark_queue_set() { - return _satb_mark_queue_set; + static G1SATBMarkQueueSet& satb_mark_queue_set() { + return g1_barrier_set()->_satb_mark_queue_set; } static DirtyCardQueueSet& dirty_card_queue_set() { - return _dirty_card_queue_set; + return g1_barrier_set()->_dirty_card_queue_set; } // Callbacks for runtime accesses.
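The queue sets change from static members to instance fields here, and the static accessors stay source-compatible by downcasting the global barrier-set singleton, as the new g1_barrier_set() helper shows. A toy model of that accessor pattern (simplified class shapes, not the real hierarchy):

struct ToyQueueSet { /* buffers, locks, ... */ };

struct ToyBarrierSet {
  virtual ~ToyBarrierSet() {}
  static ToyBarrierSet* _the_bs;                         // process-wide singleton
  static ToyBarrierSet* barrier_set() { return _the_bs; }
};
ToyBarrierSet* ToyBarrierSet::_the_bs = nullptr;

struct ToyG1BarrierSet : ToyBarrierSet {
  ToyQueueSet _satb_mark_queue_set;                      // instance field now, not static
  static ToyQueueSet& satb_mark_queue_set() {
    // models barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set())
    return static_cast<ToyG1BarrierSet*>(barrier_set())->_satb_mark_queue_set;
  }
};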
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp Thu Aug 16 22:05:57 2018 +0200 @@ -25,7 +25,6 @@ #include "precompiled.hpp" #include "classfile/metadataOnStackMark.hpp" #include "classfile/stringTable.hpp" -#include "classfile/symbolTable.hpp" #include "code/codeCache.hpp" #include "code/icBuffer.hpp" #include "gc/g1/g1Allocator.inline.hpp" @@ -52,7 +51,7 @@ #include "gc/g1/g1RemSet.hpp" #include "gc/g1/g1RootClosures.hpp" #include "gc/g1/g1RootProcessor.hpp" -#include "gc/g1/g1SATBMarkQueueFilter.hpp" +#include "gc/g1/g1SATBMarkQueueSet.hpp" #include "gc/g1/g1StringDedup.hpp" #include "gc/g1/g1ThreadLocalData.hpp" #include "gc/g1/g1YCTypes.hpp" @@ -1687,11 +1686,11 @@ // Perform any initialization actions delegated to the policy. g1_policy()->init(this, &_collection_set); - G1SATBMarkQueueFilter* satb_filter = new G1SATBMarkQueueFilter(this); - G1BarrierSet::satb_mark_queue_set().initialize(satb_filter, + G1BarrierSet::satb_mark_queue_set().initialize(this, SATB_Q_CBL_mon, SATB_Q_FL_lock, G1SATBProcessCompletedThreshold, + G1SATBBufferEnqueueingThresholdPercent, Shared_SATB_Q_lock); jint ecode = initialize_concurrent_refinement(); @@ -3256,56 +3255,40 @@ undo_waste * HeapWordSize / K); } -class G1StringAndSymbolCleaningTask : public AbstractGangTask { +class G1StringCleaningTask : public AbstractGangTask { private: BoolObjectClosure* _is_alive; G1StringDedupUnlinkOrOopsDoClosure _dedup_closure; OopStorage::ParState<false /* concurrent */, false /* const */> _par_state_string; int _initial_string_table_size; - int _initial_symbol_table_size; bool _process_strings; int _strings_processed; int _strings_removed; - bool _process_symbols; - int _symbols_processed; - int _symbols_removed; - bool _process_string_dedup; public: - G1StringAndSymbolCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_symbols, bool process_string_dedup) : - AbstractGangTask("String/Symbol Unlinking"), + G1StringCleaningTask(BoolObjectClosure* is_alive, bool process_strings, bool process_string_dedup) : + AbstractGangTask("String Unlinking"), _is_alive(is_alive), _dedup_closure(is_alive, NULL, false), _par_state_string(StringTable::weak_storage()), _process_strings(process_strings), _strings_processed(0), _strings_removed(0), - _process_symbols(process_symbols), _symbols_processed(0), _symbols_removed(0), _process_string_dedup(process_string_dedup) { _initial_string_table_size = (int) StringTable::the_table()->table_size(); - _initial_symbol_table_size = SymbolTable::the_table()->table_size(); - if (process_symbols) { - SymbolTable::clear_parallel_claimed_index(); - } if (process_strings) { StringTable::reset_dead_counter(); } } - ~G1StringAndSymbolCleaningTask() { - guarantee(!_process_symbols || SymbolTable::parallel_claimed_index() >= _initial_symbol_table_size, - "claim value %d after unlink less than initial symbol table size %d", - SymbolTable::parallel_claimed_index(), _initial_symbol_table_size); - + ~G1StringCleaningTask() { log_info(gc, stringtable)( - "Cleaned string and symbol table, " - "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed, " - "symbols: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed", - strings_processed(), strings_removed(), - symbols_processed(), symbols_removed()); + "Cleaned string table, " + "strings: " SIZE_FORMAT " processed, " SIZE_FORMAT " removed", + strings_processed(), strings_removed()); if (_process_strings) { StringTable::finish_dead_counter(); } @@ 
-3314,18 +3297,11 @@ void work(uint worker_id) { int strings_processed = 0; int strings_removed = 0; - int symbols_processed = 0; - int symbols_removed = 0; if (_process_strings) { StringTable::possibly_parallel_unlink(&_par_state_string, _is_alive, &strings_processed, &strings_removed); Atomic::add(strings_processed, &_strings_processed); Atomic::add(strings_removed, &_strings_removed); } - if (_process_symbols) { - SymbolTable::possibly_parallel_unlink(&symbols_processed, &symbols_removed); - Atomic::add(symbols_processed, &_symbols_processed); - Atomic::add(symbols_removed, &_symbols_removed); - } if (_process_string_dedup) { G1StringDedup::parallel_unlink(&_dedup_closure, worker_id); } @@ -3333,9 +3309,6 @@ size_t strings_processed() const { return (size_t)_strings_processed; } size_t strings_removed() const { return (size_t)_strings_removed; } - - size_t symbols_processed() const { return (size_t)_symbols_processed; } - size_t symbols_removed() const { return (size_t)_symbols_removed; } }; class G1CodeCacheUnloadingTask { @@ -3585,7 +3558,7 @@ class G1ParallelCleaningTask : public AbstractGangTask { private: bool _unloading_occurred; - G1StringAndSymbolCleaningTask _string_symbol_task; + G1StringCleaningTask _string_task; G1CodeCacheUnloadingTask _code_cache_task; G1KlassCleaningTask _klass_cleaning_task; G1ResolvedMethodCleaningTask _resolved_method_cleaning_task; @@ -3595,7 +3568,7 @@ G1ParallelCleaningTask(BoolObjectClosure* is_alive, uint num_workers, bool unloading_occurred) : AbstractGangTask("Parallel Cleaning"), _unloading_occurred(unloading_occurred), - _string_symbol_task(is_alive, true, true, G1StringDedup::is_enabled()), + _string_task(is_alive, true, G1StringDedup::is_enabled()), _code_cache_task(num_workers, is_alive, unloading_occurred), _klass_cleaning_task(), _resolved_method_cleaning_task() { @@ -3609,8 +3582,8 @@ // Let the threads mark that the first pass is done. _code_cache_task.barrier_mark(worker_id); - // Clean the Strings and Symbols. - _string_symbol_task.work(worker_id); + // Clean the Strings. + _string_task.work(worker_id); // Clean unreferenced things in the ResolvedMethodTable _resolved_method_cleaning_task.work(); @@ -3642,16 +3615,14 @@ void G1CollectedHeap::partial_cleaning(BoolObjectClosure* is_alive, bool process_strings, - bool process_symbols, bool process_string_dedup) { - if (!process_strings && !process_symbols && !process_string_dedup) { + if (!process_strings && !process_string_dedup) { // Nothing to clean. return; } - G1StringAndSymbolCleaningTask g1_unlink_task(is_alive, process_strings, process_symbols, process_string_dedup); + G1StringCleaningTask g1_unlink_task(is_alive, process_strings, process_string_dedup); workers()->run_task(&g1_unlink_task); - } class G1RedirtyLoggedCardsTask : public AbstractGangTask { @@ -4045,7 +4016,7 @@ process_discovered_references(per_thread_states); // FIXME - // CM's reference processing also cleans up the string and symbol tables. + // CM's reference processing also cleans up the string table. // Should we do that here also? We could, but it is a serial operation // and could significantly increase the pause time.
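G1StringCleaningTask::work() keeps the usual gang-task accumulation pattern: each worker counts into locals and publishes once via Atomic::add, so the hot loop stays contention-free. A standard-C++ sketch of the same pattern (std::thread and std::atomic standing in for the HotSpot WorkGang and Atomic APIs):

#include <atomic>
#include <thread>
#include <vector>

std::atomic<int> g_processed{0};

void worker(int items) {
  int local = 0;
  for (int i = 0; i < items; i++) local++;  // per-worker cleaning work
  g_processed.fetch_add(local);             // one shared update per worker
}

int main() {
  std::vector<std::thread> gang;
  for (int w = 0; w < 4; w++) gang.emplace_back(worker, 1000);
  for (auto& t : gang) t.join();
  return g_processed.load() == 4000 ? 0 : 1;
}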
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp Thu Aug 16 22:05:57 2018 +0200 @@ -1324,9 +1324,8 @@ // Partial cleaning used when class unloading is disabled. // Let the caller choose what structures to clean out: // - StringTable - // - SymbolTable // - StringDeduplication structures - void partial_cleaning(BoolObjectClosure* is_alive, bool unlink_strings, bool unlink_symbols, bool unlink_string_dedup); + void partial_cleaning(BoolObjectClosure* is_alive, bool unlink_strings, bool unlink_string_dedup); // Complete cleaning used when class unloading is enabled. // Cleans out all structures handled by partial_cleaning and also the CodeCache.
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp Thu Aug 16 22:05:57 2018 +0200 @@ -24,7 +24,6 @@ #include "precompiled.hpp" #include "classfile/metadataOnStackMark.hpp" -#include "classfile/symbolTable.hpp" #include "code/codeCache.hpp" #include "gc/g1/g1BarrierSet.hpp" #include "gc/g1/g1CollectedHeap.inline.hpp" @@ -1578,8 +1577,8 @@ // Is alive closure. G1CMIsAliveClosure g1_is_alive(_g1h); - // Inner scope to exclude the cleaning of the string and symbol - // tables from the displayed time. + // Inner scope to exclude the cleaning of the string table + // from the displayed time. { GCTraceTime(Debug, gc, phases) debug("Reference Processing", _gc_timer_cm); @@ -1673,16 +1672,16 @@ WeakProcessor::weak_oops_do(&g1_is_alive, &do_nothing_cl); } - // Unload Klasses, String, Symbols, Code Cache, etc. + // Unload Klasses, String, Code Cache, etc. if (ClassUnloadingWithConcurrentMark) { GCTraceTime(Debug, gc, phases) debug("Class Unloading", _gc_timer_cm); bool purged_classes = SystemDictionary::do_unloading(_gc_timer_cm, false /* Defer cleaning */); _g1h->complete_cleaning(&g1_is_alive, purged_classes); } else { GCTraceTime(Debug, gc, phases) debug("Cleanup", _gc_timer_cm); - // No need to clean string table and symbol table as they are treated as strong roots when + // No need to clean string table as it is treated as strong roots when // class unloading is disabled. - _g1h->partial_cleaning(&g1_is_alive, false, false, G1StringDedup::is_enabled()); + _g1h->partial_cleaning(&g1_is_alive, false, G1StringDedup::is_enabled()); } }
--- a/src/hotspot/share/gc/g1/g1FullCollector.cpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/gc/g1/g1FullCollector.cpp Thu Aug 16 22:05:57 2018 +0200 @@ -226,8 +226,8 @@ _heap->complete_cleaning(&_is_alive, purged_class); } else { GCTraceTime(Debug, gc, phases) debug("Phase 1: String and Symbol Tables Cleanup", scope()->timer()); - // If no class unloading just clean out strings and symbols. - _heap->partial_cleaning(&_is_alive, true, true, G1StringDedup::is_enabled()); + // If no class unloading just clean out strings. + _heap->partial_cleaning(&_is_alive, true, G1StringDedup::is_enabled()); } scope()->tracer()->report_object_count_after_gc(&_is_alive);
--- a/src/hotspot/share/gc/g1/g1SATBMarkQueueFilter.cpp Thu Aug 09 22:06:11 2018 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,107 +0,0 @@ -/* - * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#include "precompiled.hpp" -#include "gc/g1/g1CollectedHeap.inline.hpp" -#include "gc/g1/g1SATBMarkQueueFilter.hpp" -#include "gc/g1/heapRegion.hpp" -#include "gc/g1/satbMarkQueue.hpp" -#include "oops/oop.hpp" -#include "utilities/debug.hpp" -#include "utilities/globalDefinitions.hpp" - -G1SATBMarkQueueFilter::G1SATBMarkQueueFilter(G1CollectedHeap* g1h) : _g1h(g1h) {} - -// Return true if a SATB buffer entry refers to an object that -// requires marking. -// -// The entry must point into the G1 heap. In particular, it must not -// be a NULL pointer. NULL pointers are pre-filtered and never -// inserted into a SATB buffer. -// -// An entry that is below the NTAMS pointer for the containing heap -// region requires marking. Such an entry must point to a valid object. -// -// An entry that is at least the NTAMS pointer for the containing heap -// region might be any of the following, none of which should be marked. -// -// * A reference to an object allocated since marking started. -// According to SATB, such objects are implicitly kept live and do -// not need to be dealt with via SATB buffer processing. -// -// * A reference to a young generation object. Young objects are -// handled separately and are not marked by concurrent marking. -// -// * A stale reference to a young generation object. If a young -// generation object reference is recorded and not filtered out -// before being moved by a young collection, the reference becomes -// stale. -// -// * A stale reference to an eagerly reclaimed humongous object. If a -// humongous object is recorded and then reclaimed, the reference -// becomes stale. -// -// The stale reference cases are implicitly handled by the NTAMS -// comparison. Because of the possibility of stale references, buffer -// processing must be somewhat circumspect and not assume entries -// in an unfiltered buffer refer to valid objects. - -static inline bool requires_marking(const void* entry, G1CollectedHeap* g1h) { - // Includes rejection of NULL pointers. 
- assert(g1h->is_in_reserved(entry), - "Non-heap pointer in SATB buffer: " PTR_FORMAT, p2i(entry)); - - HeapRegion* region = g1h->heap_region_containing(entry); - assert(region != NULL, "No region for " PTR_FORMAT, p2i(entry)); - if (entry >= region->next_top_at_mark_start()) { - return false; - } - - assert(oopDesc::is_oop(oop(entry), true /* ignore mark word */), - "Invalid oop in SATB buffer: " PTR_FORMAT, p2i(entry)); - - return true; -} - -static inline bool discard_entry(const void* entry, G1CollectedHeap* g1h) { - return !requires_marking(entry, g1h) || g1h->is_marked_next((oop)entry); -} - -// Workaround for not yet having std::bind. -class G1SATBMarkQueueFilterFn { - G1CollectedHeap* _g1h; - -public: - G1SATBMarkQueueFilterFn(G1CollectedHeap* g1h) : _g1h(g1h) {} - - // Return true if entry should be filtered out (removed), false if - // it should be retained. - bool operator()(const void* entry) const { - return discard_entry(entry, _g1h); - } -}; - -void G1SATBMarkQueueFilter::filter(SATBMarkQueue* queue) { - queue->apply_filter(G1SATBMarkQueueFilterFn(_g1h)); -}
--- a/src/hotspot/share/gc/g1/g1SATBMarkQueueFilter.hpp Thu Aug 09 22:06:11 2018 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,41 +0,0 @@ -/* - * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef SHARE_GC_G1_G1SATBMARKQUEUEFILTER_HPP -#define SHARE_GC_G1_G1SATBMARKQUEUEFILTER_HPP - -#include "gc/g1/satbMarkQueue.hpp" - -class G1CollectedHeap; - -class G1SATBMarkQueueFilter : public SATBMarkQueueFilter { - G1CollectedHeap* _g1h; - -public: - G1SATBMarkQueueFilter(G1CollectedHeap* g1h); - - virtual void filter(SATBMarkQueue* queue); -}; - -#endif // SHARE_GC_G1_G1SATBMARKQUEUEFILTER_HPP
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/share/gc/g1/g1SATBMarkQueueSet.cpp Thu Aug 16 22:05:57 2018 +0200 @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "gc/g1/g1CollectedHeap.inline.hpp" +#include "gc/g1/g1SATBMarkQueueSet.hpp" +#include "gc/g1/g1ThreadLocalData.hpp" +#include "gc/g1/heapRegion.hpp" +#include "gc/g1/satbMarkQueue.hpp" +#include "oops/oop.hpp" +#include "utilities/debug.hpp" +#include "utilities/globalDefinitions.hpp" + +G1SATBMarkQueueSet::G1SATBMarkQueueSet() : _g1h(NULL) {} + +void G1SATBMarkQueueSet::initialize(G1CollectedHeap* g1h, + Monitor* cbl_mon, Mutex* fl_lock, + int process_completed_threshold, + uint buffer_enqueue_threshold_percentage, + Mutex* lock) { + SATBMarkQueueSet::initialize(cbl_mon, fl_lock, + process_completed_threshold, + buffer_enqueue_threshold_percentage, + lock); + _g1h = g1h; +} + +void G1SATBMarkQueueSet::handle_zero_index_for_thread(JavaThread* t) { + G1ThreadLocalData::satb_mark_queue(t).handle_zero_index(); +} + +SATBMarkQueue& G1SATBMarkQueueSet::satb_queue_for_thread(JavaThread* const t) const{ + return G1ThreadLocalData::satb_mark_queue(t); +} + +// Return true if a SATB buffer entry refers to an object that +// requires marking. +// +// The entry must point into the G1 heap. In particular, it must not +// be a NULL pointer. NULL pointers are pre-filtered and never +// inserted into a SATB buffer. +// +// An entry that is below the NTAMS pointer for the containing heap +// region requires marking. Such an entry must point to a valid object. +// +// An entry that is at least the NTAMS pointer for the containing heap +// region might be any of the following, none of which should be marked. +// +// * A reference to an object allocated since marking started. +// According to SATB, such objects are implicitly kept live and do +// not need to be dealt with via SATB buffer processing. +// +// * A reference to a young generation object. Young objects are +// handled separately and are not marked by concurrent marking. +// +// * A stale reference to a young generation object. If a young +// generation object reference is recorded and not filtered out +// before being moved by a young collection, the reference becomes +// stale. +// +// * A stale reference to an eagerly reclaimed humongous object. If a +// humongous object is recorded and then reclaimed, the reference +// becomes stale. 
+// +// The stale reference cases are implicitly handled by the NTAMS +// comparison. Because of the possibility of stale references, buffer +// processing must be somewhat circumspect and not assume entries +// in an unfiltered buffer refer to valid objects. + +static inline bool requires_marking(const void* entry, G1CollectedHeap* g1h) { + // Includes rejection of NULL pointers. + assert(g1h->is_in_reserved(entry), + "Non-heap pointer in SATB buffer: " PTR_FORMAT, p2i(entry)); + + HeapRegion* region = g1h->heap_region_containing(entry); + assert(region != NULL, "No region for " PTR_FORMAT, p2i(entry)); + if (entry >= region->next_top_at_mark_start()) { + return false; + } + + assert(oopDesc::is_oop(oop(entry), true /* ignore mark word */), + "Invalid oop in SATB buffer: " PTR_FORMAT, p2i(entry)); + + return true; +} + +static inline bool discard_entry(const void* entry, G1CollectedHeap* g1h) { + return !requires_marking(entry, g1h) || g1h->is_marked_next((oop)entry); +} + +// Workaround for not yet having std::bind. +class G1SATBMarkQueueFilterFn { + G1CollectedHeap* _g1h; + +public: + G1SATBMarkQueueFilterFn(G1CollectedHeap* g1h) : _g1h(g1h) {} + + // Return true if entry should be filtered out (removed), false if + // it should be retained. + bool operator()(const void* entry) const { + return discard_entry(entry, _g1h); + } +}; + +void G1SATBMarkQueueSet::filter(SATBMarkQueue* queue) { + assert(_g1h != NULL, "SATB queue set not initialized"); + apply_filter(G1SATBMarkQueueFilterFn(_g1h), queue); +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/share/gc/g1/g1SATBMarkQueueSet.hpp Thu Aug 16 22:05:57 2018 +0200 @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_GC_G1_G1SATBMARKQUEUE_HPP +#define SHARE_VM_GC_G1_G1SATBMARKQUEUE_HPP + +#include "gc/g1/satbMarkQueue.hpp" + +class G1CollectedHeap; +class JavaThread; + +class G1SATBMarkQueueSet : public SATBMarkQueueSet { + G1CollectedHeap* _g1h; + +public: + G1SATBMarkQueueSet(); + + void initialize(G1CollectedHeap* g1h, + Monitor* cbl_mon, Mutex* fl_lock, + int process_completed_threshold, + uint buffer_enqueue_threshold_percentage, + Mutex* lock); + + static void handle_zero_index_for_thread(JavaThread* t); + virtual SATBMarkQueue& satb_queue_for_thread(JavaThread* const t) const; + virtual void filter(SATBMarkQueue* queue); +}; + +#endif // SHARE_VM_GC_G1_G1SATBMARKQUEUE_HPP
--- a/src/hotspot/share/gc/g1/satbMarkQueue.cpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/gc/g1/satbMarkQueue.cpp Thu Aug 16 22:05:57 2018 +0200 @@ -23,14 +23,13 @@ */ #include "precompiled.hpp" -#include "jvm.h" -#include "gc/g1/g1CollectedHeap.inline.hpp" -#include "gc/g1/g1ThreadLocalData.hpp" #include "gc/g1/satbMarkQueue.hpp" #include "gc/shared/collectedHeap.hpp" +#include "logging/log.hpp" #include "memory/allocation.inline.hpp" #include "oops/oop.inline.hpp" #include "runtime/mutexLocker.hpp" +#include "runtime/os.hpp" #include "runtime/safepoint.hpp" #include "runtime/thread.hpp" #include "runtime/threadSMR.hpp" @@ -41,7 +40,8 @@ // them with their active field set to false. If a thread is // created during a cycle and its SATB queue needs to be activated // before the thread starts running, we'll need to set its active - // field to true. This is done in G1SBarrierSet::on_thread_attach(). + // field to true. This must be done in the collector-specific + // BarrierSet::on_thread_attach() implementation. PtrQueue(qset, permanent, false /* active */) { } @@ -60,8 +60,6 @@ assert(_lock == NULL || _lock->owned_by_self(), "we should have taken the lock before calling this"); - // If G1SATBBufferEnqueueingThresholdPercent == 0 we could skip filtering. - // This method should only be called if there is a non-NULL buffer // that is full. assert(index() == 0, "pre-condition"); @@ -69,10 +67,15 @@ filter(); - size_t cap = capacity(); - size_t percent_used = ((cap - index()) * 100) / cap; - bool should_enqueue = percent_used > G1SATBBufferEnqueueingThresholdPercent; - return should_enqueue; + SATBMarkQueueSet* satb_qset = static_cast<SATBMarkQueueSet*>(qset()); + size_t threshold = satb_qset->buffer_enqueue_threshold(); + // Ensure we'll enqueue completely full buffers. + assert(threshold > 0, "enqueue threshold = 0"); + // Ensure we won't enqueue empty buffers. + assert(threshold <= capacity(), + "enqueue threshold " SIZE_FORMAT " exceeds capacity " SIZE_FORMAT, + threshold, capacity()); + return index() < threshold; } void SATBMarkQueue::apply_closure_and_empty(SATBBufferClosure* cl) { @@ -105,20 +108,20 @@ SATBMarkQueueSet::SATBMarkQueueSet() : PtrQueueSet(), _shared_satb_queue(this, true /* permanent */), - _filter(NULL) + _buffer_enqueue_threshold(0) {} -void SATBMarkQueueSet::initialize(SATBMarkQueueFilter* filter, - Monitor* cbl_mon, Mutex* fl_lock, +void SATBMarkQueueSet::initialize(Monitor* cbl_mon, Mutex* fl_lock, int process_completed_threshold, + uint buffer_enqueue_threshold_percentage, Mutex* lock) { PtrQueueSet::initialize(cbl_mon, fl_lock, process_completed_threshold, -1); _shared_satb_queue.set_lock(lock); - _filter = filter; -} - -void SATBMarkQueueSet::handle_zero_index_for_thread(JavaThread* t) { - G1ThreadLocalData::satb_mark_queue(t).handle_zero_index(); + assert(buffer_size() != 0, "buffer size not initialized"); + // Minimum threshold of 1 ensures enqueuing of completely full buffers. + size_t size = buffer_size(); + size_t enqueue_qty = (size * buffer_enqueue_threshold_percentage) / 100; + _buffer_enqueue_threshold = MAX2(size - enqueue_qty, (size_t)1); } #ifdef ASSERT @@ -127,7 +130,7 @@ log_error(gc, verify)("Actual SATB active states:"); log_error(gc, verify)(" Queue set: %s", is_active() ? "ACTIVE" : "INACTIVE"); for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) { - log_error(gc, verify)(" Thread \"%s\" queue: %s", t->name(), G1ThreadLocalData::satb_mark_queue(t).is_active() ? "ACTIVE" : "INACTIVE"); + log_error(gc, verify)(" Thread \"%s\" queue: %s", t->name(), satb_queue_for_thread(t).is_active() ? "ACTIVE" : "INACTIVE"); } log_error(gc, verify)(" Shared queue: %s", shared_satb_queue()->is_active() ? "ACTIVE" : "INACTIVE"); } @@ -141,7 +144,7 @@ // Verify thread queue states for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) { - if (G1ThreadLocalData::satb_mark_queue(t).is_active() != expected_active) { + if (satb_queue_for_thread(t).is_active() != expected_active) { dump_active_states(expected_active); guarantee(false, "Thread SATB queue has an unexpected active state"); } @@ -162,14 +165,14 @@ #endif // ASSERT _all_active = active; for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) { - G1ThreadLocalData::satb_mark_queue(t).set_active(active); + satb_queue_for_thread(t).set_active(active); } shared_satb_queue()->set_active(active); } void SATBMarkQueueSet::filter_thread_buffers() { for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) { - G1ThreadLocalData::satb_mark_queue(t).filter(); + satb_queue_for_thread(t).filter(); } shared_satb_queue()->filter(); } @@ -215,15 +218,15 @@ int i = 0; while (nd != NULL) { void** buf = BufferNode::make_buffer_from_node(nd); - jio_snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Enqueued: %d", i); + os::snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Enqueued: %d", i); print_satb_buffer(buffer, buf, nd->index(), buffer_size()); nd = nd->next(); i += 1; } for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) { - jio_snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Thread: %s", t->name()); - G1ThreadLocalData::satb_mark_queue(t).print(buffer); + os::snprintf(buffer, SATB_PRINTER_BUFFER_SIZE, "Thread: %s", t->name()); + satb_queue_for_thread(t).print(buffer); } shared_satb_queue()->print("Shared"); @@ -254,7 +257,7 @@ assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint."); // So we can safely manipulate these queues. for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) { - G1ThreadLocalData::satb_mark_queue(t).reset(); + satb_queue_for_thread(t).reset(); } shared_satb_queue()->reset(); }
--- a/src/hotspot/share/gc/g1/satbMarkQueue.hpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/gc/g1/satbMarkQueue.hpp Thu Aug 16 22:05:57 2018 +0200 @@ -49,13 +49,13 @@ // Filter out unwanted entries from the buffer. inline void filter(); -public: - SATBMarkQueue(SATBMarkQueueSet* qset, bool permanent = false); - // Removes entries from the buffer that are no longer needed. template<typename Filter> inline void apply_filter(Filter filter_out); +public: + SATBMarkQueue(SATBMarkQueueSet* qset, bool permanent = false); + // Process queue entries and free resources. void flush(); @@ -90,30 +90,31 @@ }; -class SATBMarkQueueFilter : public CHeapObj<mtGC> { -public: - virtual ~SATBMarkQueueFilter() {} - virtual void filter(SATBMarkQueue* queue) = 0; -}; - class SATBMarkQueueSet: public PtrQueueSet { SATBMarkQueue _shared_satb_queue; - SATBMarkQueueFilter* _filter; + size_t _buffer_enqueue_threshold; #ifdef ASSERT void dump_active_states(bool expected_active); void verify_active_states(bool expected_active); #endif // ASSERT -public: +protected: SATBMarkQueueSet(); + ~SATBMarkQueueSet() {} - void initialize(SATBMarkQueueFilter* filter, - Monitor* cbl_mon, Mutex* fl_lock, + template<typename Filter> + void apply_filter(Filter filter, SATBMarkQueue* queue) { + queue->apply_filter(filter); + } + + void initialize(Monitor* cbl_mon, Mutex* fl_lock, int process_completed_threshold, + uint buffer_enqueue_threshold_percentage, Mutex* lock); - static void handle_zero_index_for_thread(JavaThread* t); +public: + virtual SATBMarkQueue& satb_queue_for_thread(JavaThread* const t) const = 0; // Apply "set_active(active)" to all SATB queues in the set. It should be // called only with the world stopped. The method will assert that the @@ -121,9 +122,8 @@ // set itself, has an active value same as expected_active. void set_active_all_threads(bool active, bool expected_active); - void filter(SATBMarkQueue* queue) { - _filter->filter(queue); - } + size_t buffer_enqueue_threshold() const { return _buffer_enqueue_threshold; } + virtual void filter(SATBMarkQueue* queue) = 0; // Filter all the currently-active SATB buffers. void filter_thread_buffers();
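With filter() virtualized and apply_filter() made a protected template, a collector wires in its policy by subclassing, exactly as the new G1SATBMarkQueueSet above does. A minimal toy subclass (not part of the changeset; the predicate is deliberately trivial) showing the intended division of labor:

    // Toy sketch: the subclass supplies the predicate, the base class owns
    // the buffer walk via the protected apply_filter() template.
    class ToySATBMarkQueueSet : public SATBMarkQueueSet {
      struct DiscardNull {
        // Return true to discard an entry, false to retain it.
        bool operator()(const void* entry) const { return entry == NULL; }
      };
    public:
      virtual SATBMarkQueue& satb_queue_for_thread(JavaThread* const t) const; // per-collector TLS lookup
      virtual void filter(SATBMarkQueue* queue) {
        apply_filter(DiscardNull(), queue);
      }
    };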
--- a/src/hotspot/share/gc/shared/c1/barrierSetC1.cpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/gc/shared/c1/barrierSetC1.cpp Thu Aug 16 22:05:57 2018 +0200 @@ -90,6 +90,13 @@ load_at_resolved(access, result); } +void BarrierSetC1::load(LIRAccess& access, LIR_Opr result) { + DecoratorSet decorators = access.decorators(); + bool in_heap = (decorators & IN_HEAP) != 0; + assert(!in_heap, "consider using load_at"); + load_at_resolved(access, result); +} + LIR_Opr BarrierSetC1::atomic_cmpxchg_at(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value) { DecoratorSet decorators = access.decorators(); bool in_heap = (decorators & IN_HEAP) != 0; @@ -159,13 +166,16 @@ bool is_volatile = (((decorators & MO_SEQ_CST) != 0) || AlwaysAtomicAccesses) && os::is_MP(); bool needs_patching = (decorators & C1_NEEDS_PATCHING) != 0; bool mask_boolean = (decorators & C1_MASK_BOOLEAN) != 0; + bool in_native = (decorators & IN_NATIVE) != 0; if (support_IRIW_for_not_multiple_copy_atomic_cpu && is_volatile) { __ membar(); } LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none; - if (is_volatile && !needs_patching) { + if (in_native) { + __ move_wide(access.resolved_addr()->as_address_ptr(), result); + } else if (is_volatile && !needs_patching) { gen->volatile_field_load(access.resolved_addr()->as_address_ptr(), result, access.access_emit_info()); } else { __ load(access.resolved_addr()->as_address_ptr(), result, access.access_emit_info(), patch_code);
--- a/src/hotspot/share/gc/shared/c1/barrierSetC1.hpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/gc/shared/c1/barrierSetC1.hpp Thu Aug 16 22:05:57 2018 +0200 @@ -127,6 +127,7 @@ public: virtual void store_at(LIRAccess& access, LIR_Opr value); virtual void load_at(LIRAccess& access, LIR_Opr result); + virtual void load(LIRAccess& access, LIR_Opr result); virtual LIR_Opr atomic_cmpxchg_at(LIRAccess& access, LIRItem& cmp_value, LIRItem& new_value);
--- a/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/gc/shared/threadLocalAllocBuffer.cpp Thu Aug 16 22:05:57 2018 +0200 @@ -226,7 +226,9 @@ // Assuming each thread's active tlab is, on average, // 1/2 full at a GC _target_refills = 100 / (2 * TLABWasteTargetPercent); - _target_refills = MAX2(_target_refills, (unsigned)1U); + // Clamp the initial target refills to at least 2, to avoid a GC during + // VM initialization that would cause the VM to abort. + _target_refills = MAX2(_target_refills, 2U); _global_stats = new GlobalTLABStats();
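The clamp only matters for unusually large waste targets; a quick check with assumed flag values:

    // Standalone model of the computation above (flag values assumed):
    unsigned target_refills(unsigned waste_target_percent) {
      unsigned refills = 100 / (2 * waste_target_percent); // default 1 -> 50; extreme 50 -> 1
      return MAX2(refills, 2U);                            // 1 is now raised to 2
    }

With the default TLABWasteTargetPercent = 1 the raw value is 50 and the clamp is a no-op; only at very large waste targets does the raw value drop to 1, which the new minimum of 2 corrects.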
--- a/src/hotspot/share/gc/z/zGlobals.hpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/gc/z/zGlobals.hpp Thu Aug 16 22:05:57 2018 +0200 @@ -117,11 +117,8 @@ // Marked state extern uintptr_t ZAddressMetadataMarked; -// Address space for mark stack allocations -const size_t ZMarkStackSpaceSizeShift = 40; // 1TB -const size_t ZMarkStackSpaceSize = (size_t)1 << ZMarkStackSpaceSizeShift; -const uintptr_t ZMarkStackSpaceStart = ZAddressSpaceEnd + ZMarkStackSpaceSize; -const uintptr_t ZMarkStackSpaceEnd = ZMarkStackSpaceStart + ZMarkStackSpaceSize; +// Mark stack space +extern uintptr_t ZMarkStackSpaceStart; const size_t ZMarkStackSpaceExpandSize = (size_t)1 << 25; // 32M // Mark stack and magazine sizes
--- a/src/hotspot/share/gc/z/zMark.hpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/gc/z/zMark.hpp Thu Aug 16 22:05:57 2018 +0200 @@ -25,6 +25,7 @@ #define SHARE_GC_Z_ZMARK_HPP #include "gc/z/zMarkStack.hpp" +#include "gc/z/zMarkStackAllocator.hpp" #include "gc/z/zMarkTerminate.hpp" #include "oops/oopsHierarchy.hpp" #include "utilities/globalDefinitions.hpp"
--- a/src/hotspot/share/gc/z/zMarkStack.cpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/gc/z/zMarkStack.cpp Thu Aug 16 22:05:57 2018 +0200 @@ -22,173 +22,11 @@ */ #include "precompiled.hpp" -#include "gc/z/zErrno.hpp" -#include "gc/z/zGlobals.hpp" -#include "gc/z/zLock.inline.hpp" #include "gc/z/zMarkStack.inline.hpp" +#include "gc/z/zMarkStackAllocator.hpp" #include "logging/log.hpp" -#include "runtime/atomic.hpp" #include "utilities/debug.hpp" -#include <sys/mman.h> -#include <sys/types.h> - -ZMarkStackSpace::ZMarkStackSpace() : - _expand_lock(), - _top(0), - _end(0) { - assert(ZMarkStacksMax >= ZMarkStackSpaceExpandSize, "ZMarkStacksMax too small"); - assert(ZMarkStacksMax <= ZMarkStackSpaceSize, "ZMarkStacksMax too large"); - - // Reserve address space - const void* res = mmap((void*)ZMarkStackSpaceStart, ZMarkStackSpaceSize, - PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0); - if (res != (void*)ZMarkStackSpaceStart) { - log_error(gc, marking)("Failed to reserve address space for marking stacks"); - return; - } - - // Successfully initialized - _top = _end = ZMarkStackSpaceStart; -} - -bool ZMarkStackSpace::is_initialized() const { - return _top != 0; -} - -void ZMarkStackSpace::expand() { - const size_t max = ZMarkStackSpaceStart + ZMarkStacksMax; - if (_end + ZMarkStackSpaceExpandSize > max) { - // Expansion limit reached. This is a fatal error since we - // currently can't recover from running out of mark stack space. - fatal("Mark stack overflow (current size " SIZE_FORMAT "M, max size " SIZE_FORMAT "M)," - " use -XX:ZMarkStacksMax=<size> to increase this limit", - (_end - ZMarkStackSpaceStart) / M, ZMarkStacksMax / M); - } - - void* const res = mmap((void*)_end, ZMarkStackSpaceExpandSize, - PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_FIXED, -1, 0); - if (res == MAP_FAILED) { - // Failed to map memory. This is a fatal error since we - // currently can't recover from running out of mark stack space. - ZErrno err; - fatal("Failed to map memory for marking stacks (%s)", err.to_string()); - } -} - -uintptr_t ZMarkStackSpace::alloc_space(size_t size) { - uintptr_t top = _top; - - for (;;) { - const uintptr_t new_top = top + size; - if (new_top > _end) { - // Not enough space left - return 0; - } - - const uintptr_t prev_top = Atomic::cmpxchg(new_top, &_top, top); - if (prev_top == top) { - // Success - return top; - } - - // Retry - top = prev_top; - } -} - -uintptr_t ZMarkStackSpace::expand_and_alloc_space(size_t size) { - ZLocker locker(&_expand_lock); - - // Retry allocation before expanding - uintptr_t addr = alloc_space(size); - if (addr != 0) { - return addr; - } - - // Expand stack space - expand(); - - log_debug(gc, marking)("Expanding mark stack space: " SIZE_FORMAT "M->" SIZE_FORMAT "M", - (_end - ZMarkStackSpaceStart) / M, - (_end - ZMarkStackSpaceStart + ZMarkStackSpaceExpandSize) / M); - - // Increment top before end to make sure another - // thread can't steal out newly expanded space. 
- addr = Atomic::add(size, &_top) - size; - _end += ZMarkStackSpaceExpandSize; - - return addr; -} - -uintptr_t ZMarkStackSpace::alloc(size_t size) { - const uintptr_t addr = alloc_space(size); - if (addr != 0) { - return addr; - } - - return expand_and_alloc_space(size); -} - -ZMarkStackAllocator::ZMarkStackAllocator() : - _freelist(), - _space() { - guarantee(sizeof(ZMarkStack) == ZMarkStackSize, "Size mismatch"); - guarantee(sizeof(ZMarkStackMagazine) <= ZMarkStackSize, "Size mismatch"); - - // Prime free list to avoid an immediate space - // expansion when marking starts. - if (_space.is_initialized()) { - prime_freelist(); - } -} - -bool ZMarkStackAllocator::is_initialized() const { - return _space.is_initialized(); -} - -void ZMarkStackAllocator::prime_freelist() { - for (size_t size = 0; size < ZMarkStackSpaceExpandSize; size += ZMarkStackMagazineSize) { - const uintptr_t addr = _space.alloc(ZMarkStackMagazineSize); - ZMarkStackMagazine* const magazine = create_magazine_from_space(addr, ZMarkStackMagazineSize); - free_magazine(magazine); - } -} - -ZMarkStackMagazine* ZMarkStackAllocator::create_magazine_from_space(uintptr_t addr, size_t size) { - assert(is_aligned(size, ZMarkStackSize), "Invalid size"); - - // Use first stack as magazine - ZMarkStackMagazine* const magazine = new ((void*)addr) ZMarkStackMagazine(); - for (size_t i = ZMarkStackSize; i < size; i += ZMarkStackSize) { - ZMarkStack* const stack = new ((void*)(addr + i)) ZMarkStack(); - const bool success = magazine->push(stack); - assert(success, "Magazine should never get full"); - } - - return magazine; -} - -ZMarkStackMagazine* ZMarkStackAllocator::alloc_magazine() { - // Try allocating from the free list first - ZMarkStackMagazine* const magazine = _freelist.pop_atomic(); - if (magazine != NULL) { - return magazine; - } - - // Allocate new magazine - const uintptr_t addr = _space.alloc(ZMarkStackMagazineSize); - if (addr == 0) { - return NULL; - } - - return create_magazine_from_space(addr, ZMarkStackMagazineSize); -} - -void ZMarkStackAllocator::free_magazine(ZMarkStackMagazine* magazine) { - _freelist.push_atomic(magazine); -} - ZMarkStripe::ZMarkStripe() : _published(), _overflowed() {}
--- a/src/hotspot/share/gc/z/zMarkStack.hpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/gc/z/zMarkStack.hpp Thu Aug 16 22:05:57 2018 +0200 @@ -25,9 +25,7 @@ #define SHARE_GC_Z_ZMARKSTACK_HPP #include "gc/z/zGlobals.hpp" -#include "gc/z/zLock.hpp" #include "gc/z/zMarkStackEntry.hpp" -#include "memory/allocation.hpp" #include "utilities/globalDefinitions.hpp" template <typename T, size_t S> @@ -73,42 +71,6 @@ typedef ZStack<ZMarkStack*, ZMarkStackMagazineSlots> ZMarkStackMagazine; typedef ZStackList<ZMarkStackMagazine> ZMarkStackMagazineList; -class ZMarkStackSpace { -private: - ZLock _expand_lock; - volatile uintptr_t _top; - volatile uintptr_t _end; - - void expand(); - - uintptr_t alloc_space(size_t size); - uintptr_t expand_and_alloc_space(size_t size); - -public: - ZMarkStackSpace(); - - bool is_initialized() const; - - uintptr_t alloc(size_t size); -}; - -class ZMarkStackAllocator { -private: - ZMarkStackMagazineList _freelist ATTRIBUTE_ALIGNED(ZCacheLineSize); - ZMarkStackSpace _space ATTRIBUTE_ALIGNED(ZCacheLineSize); - - void prime_freelist(); - ZMarkStackMagazine* create_magazine_from_space(uintptr_t addr, size_t size); - -public: - ZMarkStackAllocator(); - - bool is_initialized() const; - - ZMarkStackMagazine* alloc_magazine(); - void free_magazine(ZMarkStackMagazine* magazine); -}; - class ZMarkStripe { private: ZMarkStackList _published ATTRIBUTE_ALIGNED(ZCacheLineSize); @@ -144,6 +106,8 @@ ZMarkStripe* stripe_for_addr(uintptr_t addr); }; +class ZMarkStackAllocator; + class ZMarkThreadLocalStacks { private: ZMarkStackMagazine* _magazine;
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/share/gc/z/zMarkStackAllocator.cpp Thu Aug 16 22:05:57 2018 +0200 @@ -0,0 +1,185 @@ +/* + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/z/zLock.inline.hpp" +#include "gc/z/zMarkStack.inline.hpp" +#include "gc/z/zMarkStackAllocator.hpp" +#include "logging/log.hpp" +#include "runtime/atomic.hpp" +#include "runtime/os.hpp" +#include "utilities/debug.hpp" + +uintptr_t ZMarkStackSpaceStart; + +ZMarkStackSpace::ZMarkStackSpace() : + _expand_lock(), + _start(0), + _top(0), + _end(0) { + assert(ZMarkStackSpaceLimit >= ZMarkStackSpaceExpandSize, "ZMarkStackSpaceLimit too small"); + + // Reserve address space + const size_t size = ZMarkStackSpaceLimit; + const size_t alignment = (size_t)os::vm_allocation_granularity(); + const uintptr_t addr = (uintptr_t)os::reserve_memory(size, NULL, alignment, mtGC); + if (addr == 0) { + log_error(gc, marking)("Failed to reserve address space for mark stacks"); + return; + } + + // Successfully initialized + _start = _top = _end = addr; + + // Register mark stack space start + ZMarkStackSpaceStart = _start; +} + +bool ZMarkStackSpace::is_initialized() const { + return _start != 0; +} + +uintptr_t ZMarkStackSpace::alloc_space(size_t size) { + uintptr_t top = Atomic::load(&_top); + + for (;;) { + const uintptr_t end = Atomic::load(&_end); + const uintptr_t new_top = top + size; + if (new_top > end) { + // Not enough space left + return 0; + } + + const uintptr_t prev_top = Atomic::cmpxchg(new_top, &_top, top); + if (prev_top == top) { + // Success + return top; + } + + // Retry + top = prev_top; + } +} + +uintptr_t ZMarkStackSpace::expand_and_alloc_space(size_t size) { + ZLocker locker(&_expand_lock); + + // Retry allocation before expanding + uintptr_t addr = alloc_space(size); + if (addr != 0) { + return addr; + } + + // Check expansion limit + const size_t expand_size = ZMarkStackSpaceExpandSize; + const size_t old_size = _end - _start; + const size_t new_size = old_size + expand_size; + if (new_size > ZMarkStackSpaceLimit) { + // Expansion limit reached. This is a fatal error since we + // currently can't recover from running out of mark stack space. + fatal("Mark stack space exhausted. Use -XX:ZMarkStackSpaceLimit=<size> to increase the " + "maximum number of bytes allocated for mark stacks. Current limit is " SIZE_FORMAT "M.", + ZMarkStackSpaceLimit / M); + } + + log_debug(gc, marking)("Expanding mark stack space: " SIZE_FORMAT "M->" SIZE_FORMAT "M", + old_size / M, new_size / M); + + // Expand + os::commit_memory_or_exit((char*)_end, expand_size, false /* executable */, "Mark stack space"); + + // Increment top before end to make sure another + // thread can't steal out newly expanded space. + addr = Atomic::add(size, &_top) - size; + Atomic::add(expand_size, &_end); + + return addr; +} + +uintptr_t ZMarkStackSpace::alloc(size_t size) { + const uintptr_t addr = alloc_space(size); + if (addr != 0) { + return addr; + } + + return expand_and_alloc_space(size); +} + +ZMarkStackAllocator::ZMarkStackAllocator() : + _freelist(), + _space() { + guarantee(sizeof(ZMarkStack) == ZMarkStackSize, "Size mismatch"); + guarantee(sizeof(ZMarkStackMagazine) <= ZMarkStackSize, "Size mismatch"); + + // Prime free list to avoid an immediate space + // expansion when marking starts. + if (_space.is_initialized()) { + prime_freelist(); + } +} + +bool ZMarkStackAllocator::is_initialized() const { + return _space.is_initialized(); +} + +void ZMarkStackAllocator::prime_freelist() { + for (size_t size = 0; size < ZMarkStackSpaceExpandSize; size += ZMarkStackMagazineSize) { + const uintptr_t addr = _space.alloc(ZMarkStackMagazineSize); + ZMarkStackMagazine* const magazine = create_magazine_from_space(addr, ZMarkStackMagazineSize); + free_magazine(magazine); + } +} + +ZMarkStackMagazine* ZMarkStackAllocator::create_magazine_from_space(uintptr_t addr, size_t size) { + assert(is_aligned(size, ZMarkStackSize), "Invalid size"); + + // Use first stack as magazine + ZMarkStackMagazine* const magazine = new ((void*)addr) ZMarkStackMagazine(); + for (size_t i = ZMarkStackSize; i < size; i += ZMarkStackSize) { + ZMarkStack* const stack = new ((void*)(addr + i)) ZMarkStack(); + const bool success = magazine->push(stack); + assert(success, "Magazine should never get full"); + } + + return magazine; +} + +ZMarkStackMagazine* ZMarkStackAllocator::alloc_magazine() { + // Try allocating from the free list first + ZMarkStackMagazine* const magazine = _freelist.pop_atomic(); + if (magazine != NULL) { + return magazine; + } + + // Allocate new magazine + const uintptr_t addr = _space.alloc(ZMarkStackMagazineSize); + if (addr == 0) { + return NULL; + } + + return create_magazine_from_space(addr, ZMarkStackMagazineSize); +} + +void ZMarkStackAllocator::free_magazine(ZMarkStackMagazine* magazine) { + _freelist.push_atomic(magazine); +}
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/share/gc/z/zMarkStackAllocator.hpp Thu Aug 16 22:05:57 2018 +0200 @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_Z_ZMARKSTACKALLOCATOR_HPP +#define SHARE_GC_Z_ZMARKSTACKALLOCATOR_HPP + +#include "gc/z/zGlobals.hpp" +#include "gc/z/zLock.hpp" +#include "utilities/globalDefinitions.hpp" + +class ZMarkStackSpace { +private: + ZLock _expand_lock; + uintptr_t _start; + volatile uintptr_t _top; + volatile uintptr_t _end; + + void expand(); + + uintptr_t alloc_space(size_t size); + uintptr_t expand_and_alloc_space(size_t size); + +public: + ZMarkStackSpace(); + + bool is_initialized() const; + + uintptr_t alloc(size_t size); +}; + +class ZMarkStackAllocator { +private: + ZMarkStackMagazineList _freelist ATTRIBUTE_ALIGNED(ZCacheLineSize); + ZMarkStackSpace _space ATTRIBUTE_ALIGNED(ZCacheLineSize); + + void prime_freelist(); + ZMarkStackMagazine* create_magazine_from_space(uintptr_t addr, size_t size); + +public: + ZMarkStackAllocator(); + + bool is_initialized() const; + + ZMarkStackMagazine* alloc_magazine(); + void free_magazine(ZMarkStackMagazine* magazine); +}; + +#endif // SHARE_GC_Z_ZMARKSTACKALLOCATOR_HPP
--- a/src/hotspot/share/gc/z/zRootsIterator.cpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/gc/z/zRootsIterator.cpp Thu Aug 16 22:05:57 2018 +0200 @@ -24,7 +24,6 @@ #include "precompiled.hpp" #include "classfile/classLoaderData.hpp" #include "classfile/stringTable.hpp" -#include "classfile/symbolTable.hpp" #include "classfile/systemDictionary.hpp" #include "code/codeCache.hpp" #include "compiler/oopMap.hpp" @@ -74,7 +73,6 @@ static const ZStatSubPhase ZSubPhasePauseWeakRootsJNIWeakHandles("Pause Weak Roots JNIWeakHandles"); static const ZStatSubPhase ZSubPhasePauseWeakRootsJVMTIWeakExport("Pause Weak Roots JVMTIWeakExport"); static const ZStatSubPhase ZSubPhasePauseWeakRootsJFRWeak("Pause Weak Roots JFRWeak"); -static const ZStatSubPhase ZSubPhasePauseWeakRootsSymbolTable("Pause Weak Roots SymbolTable"); static const ZStatSubPhase ZSubPhasePauseWeakRootsStringTable("Pause Weak Roots StringTable"); static const ZStatSubPhase ZSubPhaseConcurrentWeakRoots("Concurrent Weak Roots"); @@ -302,11 +300,9 @@ _jfr_weak(this), _vm_weak_handles(this), _jni_weak_handles(this), - _symbol_table(this), _string_table(this) { assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); ZStatTimer timer(ZSubPhasePauseWeakRootsSetup); - SymbolTable::clear_parallel_claimed_index(); StringTable::reset_dead_counter(); } @@ -337,12 +333,6 @@ #endif } -void ZWeakRootsIterator::do_symbol_table(BoolObjectClosure* is_alive, OopClosure* cl) { - ZStatTimer timer(ZSubPhasePauseWeakRootsSymbolTable); - int dummy; - SymbolTable::possibly_parallel_unlink(&dummy, &dummy); -} - class ZStringTableDeadCounterBoolObjectClosure : public BoolObjectClosure { private: BoolObjectClosure* const _cl; @@ -375,9 +365,6 @@ void ZWeakRootsIterator::weak_oops_do(BoolObjectClosure* is_alive, OopClosure* cl) { ZStatTimer timer(ZSubPhasePauseWeakRoots); - if (ZSymbolTableUnloading) { - _symbol_table.weak_oops_do(is_alive, cl); - } if (ZWeakRoots) { _jvmti_weak_export.weak_oops_do(is_alive, cl); _jfr_weak.weak_oops_do(is_alive, cl);
--- a/src/hotspot/share/gc/z/zRootsIterator.hpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/gc/z/zRootsIterator.hpp Thu Aug 16 22:05:57 2018 +0200 @@ -130,14 +130,12 @@ void do_jni_weak_handles(BoolObjectClosure* is_alive, OopClosure* cl); void do_jvmti_weak_export(BoolObjectClosure* is_alive, OopClosure* cl); void do_jfr_weak(BoolObjectClosure* is_alive, OopClosure* cl); - void do_symbol_table(BoolObjectClosure* is_alive, OopClosure* cl); void do_string_table(BoolObjectClosure* is_alive, OopClosure* cl); ZSerialWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_jvmti_weak_export> _jvmti_weak_export; ZSerialWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_jfr_weak> _jfr_weak; ZParallelWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_vm_weak_handles> _vm_weak_handles; ZParallelWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_jni_weak_handles> _jni_weak_handles; - ZParallelWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_symbol_table> _symbol_table; ZParallelWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_string_table> _string_table; public:
--- a/src/hotspot/share/gc/z/z_globals.hpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/gc/z/z_globals.hpp Thu Aug 16 22:05:57 2018 +0200 @@ -53,9 +53,9 @@ "Allow Java threads to stall and wait for GC to complete " \ "instead of immediately throwing an OutOfMemoryError") \ \ - product(size_t, ZMarkStacksMax, NOT_LP64(512*M) LP64_ONLY(8*G), \ - "Maximum number of bytes allocated for marking stacks") \ - range(32*M, NOT_LP64(512*M) LP64_ONLY(1024*G)) \ + product(size_t, ZMarkStackSpaceLimit, 8*G, \ + "Maximum number of bytes allocated for mark stacks") \ + range(32*M, 1024*G) \ \ product(uint, ZCollectionInterval, 0, \ "Force GC at a fixed time interval (in seconds)") \ @@ -79,9 +79,6 @@ diagnostic(bool, ZVerifyForwarding, false, \ "Verify forwarding tables") \ \ - diagnostic(bool, ZSymbolTableUnloading, false, \ - "Unload unused VM symbols") \ - \ diagnostic(bool, ZWeakRoots, true, \ "Treat JNI WeakGlobalRefs and StringTable as weak roots") \ \
--- a/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/jvmci/jvmciCompilerToVM.cpp Thu Aug 16 22:05:57 2018 +0200 @@ -382,7 +382,12 @@ err_msg("Expected interface type, got %s", klass->external_name())); } InstanceKlass* iklass = InstanceKlass::cast(klass); - JVMCIKlassHandle handle(THREAD, iklass->implementor()); + JVMCIKlassHandle handle(THREAD); + { + // Need Compile_lock around implementor() + MutexLocker locker(Compile_lock); + handle = iklass->implementor(); + } oop implementor = CompilerToVM::get_jvmci_type(handle, CHECK_NULL); return JNIHandles::make_local(THREAD, implementor); C2V_END
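The braces are the point of this hunk: implementor() now asserts that Compile_lock is held (see the instanceKlass.cpp hunk below), while the lock presumably must be released again before get_jvmci_type() runs, since that call can reach back into the VM. The distilled pattern, restating the hunk:

    JVMCIKlassHandle handle(THREAD);
    {
      MutexLocker locker(Compile_lock);  // implementor() asserts this lock is held
      handle = iklass->implementor();
    }                                    // released before the call below
    oop implementor = CompilerToVM::get_jvmci_type(handle, CHECK_NULL);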
--- a/src/hotspot/share/logging/logPrefix.hpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/logging/logPrefix.hpp Thu Aug 16 22:05:57 2018 +0200 @@ -80,6 +80,7 @@ LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, reloc)) \ LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, start)) \ LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, stringtable)) \ + LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, symboltable)) \ LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, sweep)) \ LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, task)) \ LOG_PREFIX(GCId::print_prefix, LOG_TAGS(gc, task, start)) \
--- a/src/hotspot/share/logging/logTag.hpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/logging/logTag.hpp Thu Aug 16 22:05:57 2018 +0200 @@ -147,6 +147,7 @@ LOG_TAG(stats) \ LOG_TAG(stringdedup) \ LOG_TAG(stringtable) \ + LOG_TAG(symboltable) \ LOG_TAG(stackmap) \ LOG_TAG(subclass) \ LOG_TAG(survivor) \
--- a/src/hotspot/share/memory/arena.hpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/memory/arena.hpp Thu Aug 16 22:05:57 2018 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -199,12 +199,18 @@ } // Fast delete in area. Common case is: NOP (except for storage reclaimed) - void Afree(void *ptr, size_t size) { + bool Afree(void *ptr, size_t size) { #ifdef ASSERT if (ZapResourceArea) memset(ptr, badResourceValue, size); // zap freed memory - if (UseMallocOnly) return; + if (UseMallocOnly) return true; #endif - if (((char*)ptr) + size == _hwm) _hwm = (char*)ptr; + if (((char*)ptr) + size == _hwm) { + _hwm = (char*)ptr; + return true; + } else { + // Unable to fast free, so we just drop it. + return false; + } } void *Arealloc( void *old_ptr, size_t old_size, size_t new_size,
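The new bool return makes the LIFO nature of arena fast-free visible to callers. An illustrative sequence (assuming both blocks land in the same chunk of an Arena named arena; names are for illustration only):

    char* a = (char*) arena.Amalloc(64);
    char* b = (char*) arena.Amalloc(64);  // b now abuts the high-water mark
    bool freed_a = arena.Afree(a, 64);    // false: a is not at _hwm, its space is dropped
    bool freed_b = arena.Afree(b, 64);    // true:  b + 64 == _hwm, the arena rolls back

Freeing in reverse allocation order would have reclaimed both blocks, since each rollback moves _hwm down to the previous allocation.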
--- a/src/hotspot/share/memory/heapShared.cpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/memory/heapShared.cpp Thu Aug 16 22:05:57 2018 +0200 @@ -104,6 +104,13 @@ } assert(relocated_k->is_shared(), "must be a shared class"); + + if (_k == relocated_k) { + // Don't add the Klass containing the sub-graph to its own klass + // initialization list. + return; + } + if (relocated_k->is_instance_klass()) { assert(InstanceKlass::cast(relocated_k)->is_shared_boot_class(), "must be boot class"); @@ -498,7 +505,13 @@ #define do_module_object_graph(archive_object_graph_do) \ archive_object_graph_do(SystemDictionary::ArchivedModuleGraph_klass(), jdk_internal_module_ArchivedModuleGraph::archivedSystemModules_offset(), T_OBJECT, CHECK); \ archive_object_graph_do(SystemDictionary::ArchivedModuleGraph_klass(), jdk_internal_module_ArchivedModuleGraph::archivedModuleFinder_offset(), T_OBJECT, CHECK); \ - archive_object_graph_do(SystemDictionary::ArchivedModuleGraph_klass(), jdk_internal_module_ArchivedModuleGraph::archivedMainModule_offset(), T_OBJECT, CHECK) + archive_object_graph_do(SystemDictionary::ArchivedModuleGraph_klass(), jdk_internal_module_ArchivedModuleGraph::archivedMainModule_offset(), T_OBJECT, CHECK); \ + archive_object_graph_do(SystemDictionary::ArchivedModuleGraph_klass(), jdk_internal_module_ArchivedModuleGraph::archivedConfiguration_offset(), T_OBJECT, CHECK); \ + archive_object_graph_do(SystemDictionary::ImmutableCollections_ListN_klass(), java_util_ImmutableCollections_ListN::EMPTY_LIST_offset(), T_OBJECT, CHECK); \ + archive_object_graph_do(SystemDictionary::ImmutableCollections_MapN_klass(), java_util_ImmutableCollections_MapN::EMPTY_MAP_offset(), T_OBJECT, CHECK); \ + archive_object_graph_do(SystemDictionary::ImmutableCollections_SetN_klass(), java_util_ImmutableCollections_SetN::EMPTY_SET_offset(), T_OBJECT, CHECK); \ + archive_object_graph_do(SystemDictionary::Integer_IntegerCache_klass(), java_lang_Integer_IntegerCache::archivedCache_offset(), T_OBJECT, CHECK); \ + archive_object_graph_do(SystemDictionary::Configuration_klass(), java_lang_module_Configuration::EMPTY_CONFIGURATION_offset(), T_OBJECT, CHECK) void HeapShared::archive_module_graph_objects(Thread* THREAD) { do_module_object_graph(archive_reachable_objects_from_static_field);
--- a/src/hotspot/share/memory/metaspaceShared.cpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/memory/metaspaceShared.cpp Thu Aug 16 22:05:57 2018 +0200 @@ -423,47 +423,12 @@ StringTable::serialize(soc); soc->do_tag(--tag); - serialize_well_known_classes(soc); + JavaClasses::serialize_offsets(soc); soc->do_tag(--tag); soc->do_tag(666); } -void MetaspaceShared::serialize_well_known_classes(SerializeClosure* soc) { - java_lang_Class::serialize(soc); - java_lang_String::serialize(soc); - java_lang_System::serialize(soc); - java_lang_ClassLoader::serialize(soc); - java_lang_Throwable::serialize(soc); - java_lang_Thread::serialize(soc); - java_lang_ThreadGroup::serialize(soc); - java_lang_AssertionStatusDirectives::serialize(soc); - java_lang_ref_SoftReference::serialize(soc); - java_lang_invoke_MethodHandle::serialize(soc); - java_lang_invoke_DirectMethodHandle::serialize(soc); - java_lang_invoke_MemberName::serialize(soc); - java_lang_invoke_ResolvedMethodName::serialize(soc); - java_lang_invoke_LambdaForm::serialize(soc); - java_lang_invoke_MethodType::serialize(soc); - java_lang_invoke_CallSite::serialize(soc); - java_lang_invoke_MethodHandleNatives_CallSiteContext::serialize(soc); - java_security_AccessControlContext::serialize(soc); - java_lang_reflect_AccessibleObject::serialize(soc); - java_lang_reflect_Method::serialize(soc); - java_lang_reflect_Constructor::serialize(soc); - java_lang_reflect_Field::serialize(soc); - java_nio_Buffer::serialize(soc); - reflect_ConstantPool::serialize(soc); - reflect_UnsafeStaticFieldAccessorImpl::serialize(soc); - java_lang_reflect_Parameter::serialize(soc); - java_lang_Module::serialize(soc); - java_lang_StackTraceElement::serialize(soc); - java_lang_StackFrameInfo::serialize(soc); - java_lang_LiveStackFrameInfo::serialize(soc); - java_util_concurrent_locks_AbstractOwnableSynchronizer::serialize(soc); - jdk_internal_module_ArchivedModuleGraph::serialize(soc); -} - address MetaspaceShared::cds_i2i_entry_code_buffers(size_t total_size) { if (DumpSharedSpaces) { if (_cds_i2i_entry_code_buffers == NULL) {
--- a/src/hotspot/share/memory/metaspaceShared.hpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/memory/metaspaceShared.hpp Thu Aug 16 22:05:57 2018 +0200 @@ -204,7 +204,6 @@ static void patch_cpp_vtable_pointers(); static bool is_valid_shared_method(const Method* m) NOT_CDS_RETURN_(false); static void serialize(SerializeClosure* sc) NOT_CDS_RETURN; - static void serialize_well_known_classes(SerializeClosure* soc) NOT_CDS_RETURN; static MetaspaceSharedStats* stats() { return &_stats;
--- a/src/hotspot/share/oops/instanceKlass.cpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/oops/instanceKlass.cpp Thu Aug 16 22:05:57 2018 +0200 @@ -1051,6 +1051,38 @@ } } +Klass* InstanceKlass::implementor() const { + assert_locked_or_safepoint(Compile_lock); + Klass** k = adr_implementor(); + if (k == NULL) { + return NULL; + } else { + return *k; + } +} + +void InstanceKlass::set_implementor(Klass* k) { + assert_lock_strong(Compile_lock); + assert(is_interface(), "not interface"); + Klass** addr = adr_implementor(); + assert(addr != NULL, "null addr"); + if (addr != NULL) { + *addr = k; + } +} + +int InstanceKlass::nof_implementors() const { + assert_lock_strong(Compile_lock); + Klass* k = implementor(); + if (k == NULL) { + return 0; + } else if (k != this) { + return 1; + } else { + return 2; + } +} + // The embedded _implementor field can only record one implementor. // When there are more than one implementors, the _implementor field // is set to the interface Klass* itself. Following are the possible @@ -1061,7 +1093,7 @@ // // The _implementor field only exists for interfaces. void InstanceKlass::add_implementor(Klass* k) { - assert(Compile_lock->owned_by_self(), ""); + assert_lock_strong(Compile_lock); assert(is_interface(), "not interface"); // Filter out my subinterfaces. // (Note: Interfaces are never on the subklass list.) @@ -2270,7 +2302,10 @@ if (is_linked()) { unlink_class(); } - init_implementor(); + { + MutexLocker ml(Compile_lock); + init_implementor(); + } constants()->remove_unshareable_info(); @@ -3089,6 +3124,7 @@ st->cr(); if (is_interface()) { + MutexLocker ml(Compile_lock); st->print_cr(BULLET"nof implementors: %d", nof_implementors()); if (nof_implementors() == 1) { st->print_cr(BULLET"implementor: "); @@ -3496,14 +3532,8 @@ guarantee(sib->super() == super, "siblings should have same superklass"); } - // Verify implementor fields - Klass* im = implementor(); - if (im != NULL) { - guarantee(is_interface(), "only interfaces should have implementor set"); - guarantee(im->is_klass(), "should be klass"); - guarantee(!im->is_interface() || im == this, - "implementors cannot be interfaces"); - } + // Verify implementor fields requires the Compile_lock, but this is sometimes + // called inside a safepoint, so don't verify. // Verify local interfaces if (local_interfaces()) {
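The moved accessors preserve the long-standing sentinel encoding of the single embedded _implementor slot; restated as a standalone decoder (K is a hypothetical implementing class, I the interface itself):

    // Restates nof_implementors() above:
    //   NULL -> 0 implementors
    //   K    -> exactly 1, recorded directly
    //   I    -> 2 or more; the slot can only record one Klass, so it is
    //           pointed at the interface itself as a "many" sentinel
    int decode_nof_implementors(const Klass* slot, const Klass* self) {
      if (slot == NULL) return 0;
      if (slot != self) return 1;
      return 2;
    }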
--- a/src/hotspot/share/oops/instanceKlass.hpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/oops/instanceKlass.hpp Thu Aug 16 22:05:57 2018 +0200 @@ -1014,36 +1014,9 @@ #endif // Access to the implementor of an interface. - Klass* implementor() const - { - Klass** k = adr_implementor(); - if (k == NULL) { - return NULL; - } else { - return *k; - } - } - - void set_implementor(Klass* k) { - assert(is_interface(), "not interface"); - Klass** addr = adr_implementor(); - assert(addr != NULL, "null addr"); - if (addr != NULL) { - *addr = k; - } - } - - int nof_implementors() const { - Klass* k = implementor(); - if (k == NULL) { - return 0; - } else if (k != this) { - return 1; - } else { - return 2; - } - } - + Klass* implementor() const; + void set_implementor(Klass* k); + int nof_implementors() const; void add_implementor(Klass* k); // k is a new class that implements this interface void init_implementor(); // initialize
--- a/src/hotspot/share/oops/symbol.cpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/oops/symbol.cpp Thu Aug 16 22:05:57 2018 +0200 @@ -318,4 +318,4 @@ } // SymbolTable prints this in its statistics -NOT_PRODUCT(int Symbol::_total_count = 0;) +NOT_PRODUCT(size_t Symbol::_total_count = 0;)
--- a/src/hotspot/share/oops/symbol.hpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/oops/symbol.hpp Thu Aug 16 22:05:57 2018 +0200 @@ -256,7 +256,7 @@ // only for getting its vtable pointer. Symbol() { } - static int _total_count; + static size_t _total_count; #endif };
--- a/src/hotspot/share/opto/library_call.cpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/opto/library_call.cpp Thu Aug 16 22:05:57 2018 +0200 @@ -1815,7 +1815,7 @@ set_result(_gvn.transform(new MulDNode(base, base))); return true; } - return StubRoutines::dexp() != NULL ? + return StubRoutines::dpow() != NULL ? runtime_math(OptoRuntime::Math_DD_D_Type(), StubRoutines::dpow(), "dpow") : runtime_math(OptoRuntime::Math_DD_D_Type(), FN_PTR(SharedRuntime::dpow), "POW"); }
--- a/src/hotspot/share/runtime/compilationPolicy.cpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/runtime/compilationPolicy.cpp Thu Aug 16 22:05:57 2018 +0200 @@ -37,9 +37,9 @@ #include "runtime/frame.hpp" #include "runtime/handles.inline.hpp" #include "runtime/rframe.hpp" -#include "runtime/simpleThresholdPolicy.hpp" #include "runtime/stubRoutines.hpp" #include "runtime/thread.hpp" +#include "runtime/tieredThresholdPolicy.hpp" #include "runtime/timer.hpp" #include "runtime/vframe.hpp" #include "runtime/vm_operations.hpp" @@ -68,7 +68,7 @@ break; case 2: #ifdef TIERED - CompilationPolicy::set_policy(new SimpleThresholdPolicy()); + CompilationPolicy::set_policy(new TieredThresholdPolicy()); #else Unimplemented(); #endif
--- a/src/hotspot/share/runtime/globals.hpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/runtime/globals.hpp Thu Aug 16 22:05:57 2018 +0200 @@ -2078,12 +2078,6 @@ "(-1 means no change)") \ range(-1, 127) \ \ - product(bool, CompilerThreadHintNoPreempt, false, \ - "(Solaris only) Give compiler threads an extra quanta") \ - \ - product(bool, VMThreadHintNoPreempt, false, \ - "(Solaris only) Give VM thread an extra quanta") \ - \ product(intx, JavaPriority1_To_OSPriority, -1, \ "Map Java priorities to OS priorities") \ range(-1, 127) \
--- a/src/hotspot/share/runtime/mutex.cpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/runtime/mutex.cpp Thu Aug 16 22:05:57 2018 +0200 @@ -370,11 +370,6 @@ // CONSIDER: Delay += 1 + (Delay/4); Delay &= 0x7FF ; } - // Consider checking _owner's schedctl state, if OFFPROC abort spin. - // If the owner is OFFPROC then it's unlike that the lock will be dropped - // in a timely fashion, which suggests that spinning would not be fruitful - // or profitable. - // Stall for "Delay" time units - iterations in the current implementation. // Avoid generating coherency traffic while stalled. // Possible ways to delay: @@ -553,7 +548,6 @@ // Only the holder of OnDeck can manipulate EntryList or detach the RATs from cxq. // Avoid ABA - allow multiple concurrent producers (enqueue via push-CAS) // but only one concurrent consumer (detacher of RATs). - // Consider protecting this critical section with schedctl on Solaris. // Unlike a normal lock, however, the exiting thread "locks" OnDeck, // picks a successor and marks that thread as OnDeck. That successor // thread will then clear OnDeck once it eventually acquires the outer lock.
--- a/src/hotspot/share/runtime/objectMonitor.cpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/runtime/objectMonitor.cpp Thu Aug 16 22:05:57 2018 +0200 @@ -2088,8 +2088,7 @@ // NotRunnable() -- informed spinning // // Don't bother spinning if the owner is not eligible to drop the lock. -// Peek at the owner's schedctl.sc_state and Thread._thread_values and -// spin only if the owner thread is _thread_in_Java or _thread_in_vm. +// Spin only if the owner thread is _thread_in_Java or _thread_in_vm. // The thread must be runnable in order to drop the lock in timely fashion. // If the _owner is not runnable then spinning will not likely be // successful (profitable). @@ -2097,7 +2096,7 @@ // Beware -- the thread referenced by _owner could have died // so a simply fetch from _owner->_thread_state might trap. // Instead, we use SafeFetchXX() to safely LD _owner->_thread_state. -// Because of the lifecycle issues the schedctl and _thread_state values +// Because of the lifecycle issues, the _thread_state values // observed by NotRunnable() might be garbage. NotRunnable must // tolerate this and consider the observed _thread_state value // as advisory. @@ -2105,18 +2104,12 @@ // Beware too, that _owner is sometimes a BasicLock address and sometimes // a thread pointer. // Alternately, we might tag the type (thread pointer vs basiclock pointer) -// with the LSB of _owner. Another option would be to probablistically probe +// with the LSB of _owner. Another option would be to probabilistically probe // the putative _owner->TypeTag value. // // Checking _thread_state isn't perfect. Even if the thread is // in_java it might be blocked on a page-fault or have been preempted -// and sitting on a ready/dispatch queue. _thread state in conjunction -// with schedctl.sc_state gives us a good picture of what the -// thread is doing, however. -// -// TODO: check schedctl.sc_state. -// We'll need to use SafeFetch32() to read from the schedctl block. -// See RFE #5004247 and http://sac.sfbay.sun.com/Archives/CaseLog/arc/PSARC/2005/351/ +// and sitting on a ready/dispatch queue. // // The return value from NotRunnable() is *advisory* -- the // result is based on sampling and is not necessarily coherent.
--- a/src/hotspot/share/runtime/os.hpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/runtime/os.hpp Thu Aug 16 22:05:57 2018 +0200 @@ -897,7 +897,6 @@ static int java_to_os_priority[CriticalPriority + 1]; // Hint to the underlying OS that a task switch would not be good. // Void return because it's a hint and can fail. - static void hint_no_preempt(); static const char* native_thread_creation_failed_msg() { return OS_NATIVE_THREAD_CREATION_FAILED_MSG; }
--- a/src/hotspot/share/runtime/safepoint.cpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/runtime/safepoint.cpp Thu Aug 16 22:05:57 2018 +0200 @@ -354,26 +354,24 @@ // See the comments in synchronizer.cpp for additional remarks on spinning. // // In the future we might: - // 1. Modify the safepoint scheme to avoid potentially unbounded spinning. + // -- Modify the safepoint scheme to avoid potentially unbounded spinning. // This is tricky as the path used by a thread exiting the JVM (say on // on JNI call-out) simply stores into its state field. The burden // is placed on the VM thread, which must poll (spin). - // 2. Find something useful to do while spinning. If the safepoint is GC-related + // -- Find something useful to do while spinning. If the safepoint is GC-related // we might aggressively scan the stacks of threads that are already safe. - // 3. Use Solaris schedctl to examine the state of the still-running mutators. - // If all the mutators are ONPROC there's no reason to sleep or yield. - // 4. YieldTo() any still-running mutators that are ready but OFFPROC. - // 5. Check system saturation. If the system is not fully saturated then + // -- YieldTo() any still-running mutators that are ready but OFFPROC. + // -- Check system saturation. If the system is not fully saturated then // simply spin and avoid sleep/yield. - // 6. As still-running mutators rendezvous they could unpark the sleeping + // -- As still-running mutators rendezvous they could unpark the sleeping // VMthread. This works well for still-running mutators that become // safe. The VMthread must still poll for mutators that call-out. - // 7. Drive the policy on time-since-begin instead of iterations. - // 8. Consider making the spin duration a function of the # of CPUs: + // -- Drive the policy on time-since-begin instead of iterations. + // -- Consider making the spin duration a function of the # of CPUs: // Spin = (((ncpus-1) * M) + K) + F(still_running) // Alternately, instead of counting iterations of the outer loop // we could count the # of threads visited in the inner loop, above. - // 9. On windows consider using the return value from SwitchThreadTo() + // -- On windows consider using the return value from SwitchThreadTo() // to drive subsequent spin/SwitchThreadTo()/Sleep(N) decisions. if (int(iterations) == -1) { // overflow - something is wrong. @@ -561,20 +559,6 @@ // Start suspended threads jtiwh.rewind(); for (; JavaThread *current = jtiwh.next(); ) { - // A problem occurring on Solaris is when attempting to restart threads - // the first #cpus - 1 go well, but then the VMThread is preempted when we get - // to the next one (since it has been running the longest). We then have - // to wait for a cpu to become available before we can continue restarting - // threads. - // FIXME: This causes the performance of the VM to degrade when active and with - // large numbers of threads. Apparently this is due to the synchronous nature - // of suspending threads. - // - // TODO-FIXME: the comments above are vestigial and no longer apply. - // Furthermore, using solaris' schedctl in this particular context confers no benefit - if (VMThreadHintNoPreempt) { - os::hint_no_preempt(); - } ThreadSafepointState* cur_state = current->safepoint_state(); assert(cur_state->type() != ThreadSafepointState::_running, "Thread not suspended at safepoint"); cur_state->restart();
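Most items in the reworked list are advisory, but the CPU-scaled duration is concrete enough to evaluate: Spin = (((ncpus - 1) * M) + K) + F(still_running). A toy evaluation with assumed constants (M, K, and the linear F below are placeholders, not HotSpot values):

    #include <cstdio>

    static long spin_budget(int ncpus, int still_running) {
      const long M = 1000;                   // per-extra-CPU term (assumed)
      const long K = 4096;                   // fixed base (assumed)
      return (long)(ncpus - 1) * M + K + 256L * still_running;
    }

    int main() {
      std::printf("%ld\n", spin_budget(8, 0));    // prints 11096
      std::printf("%ld\n", spin_budget(8, 16));   // prints 15192
    }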
--- a/src/hotspot/share/runtime/serviceThread.cpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/runtime/serviceThread.cpp Thu Aug 16 22:05:57 2018 +0200 @@ -24,6 +24,7 @@ #include "precompiled.hpp" #include "classfile/stringTable.hpp" +#include "classfile/symbolTable.hpp" #include "runtime/interfaceSupport.inline.hpp" #include "runtime/javaCalls.hpp" #include "runtime/serviceThread.hpp" @@ -84,6 +85,7 @@ bool has_dcmd_notification_event = false; bool acs_notify = false; bool stringtable_work = false; + bool symboltable_work = false; JvmtiDeferredEvent jvmti_event; { // Need state transition ThreadBlockInVM so that this thread @@ -101,7 +103,8 @@ !(has_jvmti_events = JvmtiDeferredEventQueue::has_events()) && !(has_gc_notification_event = GCNotifier::has_event()) && !(has_dcmd_notification_event = DCmdFactory::has_pending_jmx_notification()) && - !(stringtable_work = StringTable::has_work())) { + !(stringtable_work = StringTable::has_work()) && + !(symboltable_work = SymbolTable::has_work())) { // wait until one of the sensors has pending requests, or there is a // pending JVMTI event or JMX GC notification to post Service_lock->wait(Mutex::_no_safepoint_check_flag); @@ -116,6 +119,10 @@ StringTable::do_concurrent_work(jt); } + if (symboltable_work) { + SymbolTable::do_concurrent_work(jt); + } + if (has_jvmti_events) { jvmti_event.post(); }
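The serviceThread.cpp hunks follow a fixed recipe: sample every has-work flag while holding Service_lock, wait when nothing is pending, then run the actual work after the lock is released. A self-contained sketch of that recipe with standard-library stand-ins (std::mutex for Service_lock; all names illustrative):

    #include <condition_variable>
    #include <mutex>

    static std::mutex              service_lock;
    static std::condition_variable service_cv;
    static bool stringtable_pending = false;
    static bool symboltable_pending = false;

    void post_symboltable_work() {           // producer side (e.g. SymbolTable)
      std::lock_guard<std::mutex> guard(service_lock);
      symboltable_pending = true;
      service_cv.notify_one();
    }

    void service_loop_once() {               // consumer side (the service thread)
      bool do_strings, do_symbols;
      {
        std::unique_lock<std::mutex> guard(service_lock);
        service_cv.wait(guard, [] { return stringtable_pending || symboltable_pending; });
        do_strings = stringtable_pending; stringtable_pending = false;
        do_symbols = symboltable_pending; symboltable_pending = false;
      }
      // The potentially long-running cleanup happens outside the lock,
      // mirroring StringTable/SymbolTable::do_concurrent_work(jt).
      if (do_strings) { /* ... */ }
      if (do_symbols) { /* ... */ }
    }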
--- a/src/hotspot/share/runtime/simpleThresholdPolicy.cpp Thu Aug 09 22:06:11 2018 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,904 +0,0 @@ -/* - * Copyright (c) 2010, 2018, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#include "precompiled.hpp" -#include "compiler/compileBroker.hpp" -#include "memory/resourceArea.hpp" -#include "runtime/arguments.hpp" -#include "runtime/handles.inline.hpp" -#include "runtime/safepointVerifiers.hpp" -#include "runtime/simpleThresholdPolicy.hpp" -#include "runtime/simpleThresholdPolicy.inline.hpp" -#include "code/scopeDesc.hpp" -#if INCLUDE_JVMCI -#include "jvmci/jvmciRuntime.hpp" -#endif - -#ifdef TIERED - -void SimpleThresholdPolicy::print_counters(const char* prefix, const methodHandle& mh) { - int invocation_count = mh->invocation_count(); - int backedge_count = mh->backedge_count(); - MethodData* mdh = mh->method_data(); - int mdo_invocations = 0, mdo_backedges = 0; - int mdo_invocations_start = 0, mdo_backedges_start = 0; - if (mdh != NULL) { - mdo_invocations = mdh->invocation_count(); - mdo_backedges = mdh->backedge_count(); - mdo_invocations_start = mdh->invocation_count_start(); - mdo_backedges_start = mdh->backedge_count_start(); - } - tty->print(" %stotal=%d,%d %smdo=%d(%d),%d(%d)", prefix, - invocation_count, backedge_count, prefix, - mdo_invocations, mdo_invocations_start, - mdo_backedges, mdo_backedges_start); - tty->print(" %smax levels=%d,%d", prefix, - mh->highest_comp_level(), mh->highest_osr_comp_level()); -} - -// Print an event. 
-void SimpleThresholdPolicy::print_event(EventType type, const methodHandle& mh, const methodHandle& imh, - int bci, CompLevel level) { - bool inlinee_event = mh() != imh(); - - ttyLocker tty_lock; - tty->print("%lf: [", os::elapsedTime()); - - switch(type) { - case CALL: - tty->print("call"); - break; - case LOOP: - tty->print("loop"); - break; - case COMPILE: - tty->print("compile"); - break; - case REMOVE_FROM_QUEUE: - tty->print("remove-from-queue"); - break; - case UPDATE_IN_QUEUE: - tty->print("update-in-queue"); - break; - case REPROFILE: - tty->print("reprofile"); - break; - case MAKE_NOT_ENTRANT: - tty->print("make-not-entrant"); - break; - default: - tty->print("unknown"); - } - - tty->print(" level=%d ", level); - - ResourceMark rm; - char *method_name = mh->name_and_sig_as_C_string(); - tty->print("[%s", method_name); - if (inlinee_event) { - char *inlinee_name = imh->name_and_sig_as_C_string(); - tty->print(" [%s]] ", inlinee_name); - } - else tty->print("] "); - tty->print("@%d queues=%d,%d", bci, CompileBroker::queue_size(CompLevel_full_profile), - CompileBroker::queue_size(CompLevel_full_optimization)); - - print_specific(type, mh, imh, bci, level); - - if (type != COMPILE) { - print_counters("", mh); - if (inlinee_event) { - print_counters("inlinee ", imh); - } - tty->print(" compilable="); - bool need_comma = false; - if (!mh->is_not_compilable(CompLevel_full_profile)) { - tty->print("c1"); - need_comma = true; - } - if (!mh->is_not_osr_compilable(CompLevel_full_profile)) { - if (need_comma) tty->print(","); - tty->print("c1-osr"); - need_comma = true; - } - if (!mh->is_not_compilable(CompLevel_full_optimization)) { - if (need_comma) tty->print(","); - tty->print("c2"); - need_comma = true; - } - if (!mh->is_not_osr_compilable(CompLevel_full_optimization)) { - if (need_comma) tty->print(","); - tty->print("c2-osr"); - } - tty->print(" status="); - if (mh->queued_for_compilation()) { - tty->print("in-queue"); - } else tty->print("idle"); - } - tty->print_cr("]"); -} - -void SimpleThresholdPolicy::initialize() { - int count = CICompilerCount; -#ifdef _LP64 - // Turn on ergonomic compiler count selection - if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) { - FLAG_SET_DEFAULT(CICompilerCountPerCPU, true); - } - if (CICompilerCountPerCPU) { - // Simple log n seems to grow too slowly for tiered, try something faster: log n * log log n - int log_cpu = log2_intptr(os::active_processor_count()); - int loglog_cpu = log2_intptr(MAX2(log_cpu, 1)); - count = MAX2(log_cpu * loglog_cpu * 3 / 2, 2); - FLAG_SET_ERGO(intx, CICompilerCount, count); - } -#else - // On 32-bit systems, the number of compiler threads is limited to 3. - // On these systems, the virtual address space available to the JVM - // is usually limited to 2-4 GB (the exact value depends on the platform). - // As the compilers (especially C2) can consume a large amount of - // memory, scaling the number of compiler threads with the number of - // available cores can result in the exhaustion of the address space - /// available to the VM and thus cause the VM to crash. 
- if (FLAG_IS_DEFAULT(CICompilerCount)) { - count = 3; - FLAG_SET_ERGO(intx, CICompilerCount, count); - } -#endif - - if (TieredStopAtLevel < CompLevel_full_optimization) { - // No C2 compiler thread required - set_c1_count(count); - } else { - set_c1_count(MAX2(count / 3, 1)); - set_c2_count(MAX2(count - c1_count(), 1)); - } - assert(count == c1_count() + c2_count(), "inconsistent compiler thread count"); - - // Some inlining tuning -#ifdef X86 - if (FLAG_IS_DEFAULT(InlineSmallCode)) { - FLAG_SET_DEFAULT(InlineSmallCode, 2000); - } -#endif - -#if defined SPARC || defined AARCH64 - if (FLAG_IS_DEFAULT(InlineSmallCode)) { - FLAG_SET_DEFAULT(InlineSmallCode, 2500); - } -#endif - - set_increase_threshold_at_ratio(); - set_start_time(os::javaTimeMillis()); -} - -void SimpleThresholdPolicy::set_carry_if_necessary(InvocationCounter *counter) { - if (!counter->carry() && counter->count() > InvocationCounter::count_limit / 2) { - counter->set_carry_flag(); - } -} - -// Set carry flags on the counters if necessary -void SimpleThresholdPolicy::handle_counter_overflow(Method* method) { - MethodCounters *mcs = method->method_counters(); - if (mcs != NULL) { - set_carry_if_necessary(mcs->invocation_counter()); - set_carry_if_necessary(mcs->backedge_counter()); - } - MethodData* mdo = method->method_data(); - if (mdo != NULL) { - set_carry_if_necessary(mdo->invocation_counter()); - set_carry_if_necessary(mdo->backedge_counter()); - } -} - -// Called with the queue locked and with at least one element -CompileTask* SimpleThresholdPolicy::select_task(CompileQueue* compile_queue) { - CompileTask *max_blocking_task = NULL; - CompileTask *max_task = NULL; - Method* max_method = NULL; - jlong t = os::javaTimeMillis(); - // Iterate through the queue and find a method with a maximum rate. - for (CompileTask* task = compile_queue->first(); task != NULL;) { - CompileTask* next_task = task->next(); - Method* method = task->method(); - update_rate(t, method); - if (max_task == NULL) { - max_task = task; - max_method = method; - } else { - // If a method has been stale for some time, remove it from the queue. - // Blocking tasks and tasks submitted from whitebox API don't become stale - if (task->can_become_stale() && is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method)) { - if (PrintTieredEvents) { - print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel)task->comp_level()); - } - compile_queue->remove_and_mark_stale(task); - method->clear_queued_for_compilation(); - task = next_task; - continue; - } - - // Select a method with a higher rate - if (compare_methods(method, max_method)) { - max_task = task; - max_method = method; - } - } - - if (task->is_blocking()) { - if (max_blocking_task == NULL || compare_methods(method, max_blocking_task->method())) { - max_blocking_task = task; - } - } - - task = next_task; - } - - if (max_blocking_task != NULL) { - // In blocking compilation mode, the CompileBroker will make - // compilations submitted by a JVMCI compiler thread non-blocking. These - // compilations should be scheduled after all blocking compilations - // to service non-compiler related compilations sooner and reduce the - // chance of such compilations timing out. 
- max_task = max_blocking_task; - max_method = max_task->method(); - } - - if (max_task != NULL && max_task->comp_level() == CompLevel_full_profile && - TieredStopAtLevel > CompLevel_full_profile && - max_method != NULL && is_method_profiled(max_method)) { - max_task->set_comp_level(CompLevel_limited_profile); - if (PrintTieredEvents) { - print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level()); - } - } - - return max_task; -} - -void SimpleThresholdPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) { - for (ScopeDesc* sd = trap_scope;; sd = sd->sender()) { - if (PrintTieredEvents) { - methodHandle mh(sd->method()); - print_event(REPROFILE, mh, mh, InvocationEntryBci, CompLevel_none); - } - MethodData* mdo = sd->method()->method_data(); - if (mdo != NULL) { - mdo->reset_start_counters(); - } - if (sd->is_top()) break; - } -} - -nmethod* SimpleThresholdPolicy::event(const methodHandle& method, const methodHandle& inlinee, - int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread) { - if (comp_level == CompLevel_none && - JvmtiExport::can_post_interpreter_events() && - thread->is_interp_only_mode()) { - return NULL; - } - if (CompileTheWorld || ReplayCompiles) { - // Don't trigger other compiles in testing mode - return NULL; - } - - handle_counter_overflow(method()); - if (method() != inlinee()) { - handle_counter_overflow(inlinee()); - } - - if (PrintTieredEvents) { - print_event(bci == InvocationEntryBci ? CALL : LOOP, method, inlinee, bci, comp_level); - } - - if (bci == InvocationEntryBci) { - method_invocation_event(method, inlinee, comp_level, nm, thread); - } else { - // method == inlinee if the event originated in the main method - method_back_branch_event(method, inlinee, bci, comp_level, nm, thread); - // Check if event led to a higher level OSR compilation - nmethod* osr_nm = inlinee->lookup_osr_nmethod_for(bci, comp_level, false); - if (osr_nm != NULL && osr_nm->comp_level() > comp_level) { - // Perform OSR with new nmethod - return osr_nm; - } - } - return NULL; -} - -// Check if the method can be compiled, change level if necessary -void SimpleThresholdPolicy::compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread) { - assert(level <= TieredStopAtLevel, "Invalid compilation level"); - if (level == CompLevel_none) { - return; - } - if (level == CompLevel_aot) { - if (mh->has_aot_code()) { - if (PrintTieredEvents) { - print_event(COMPILE, mh, mh, bci, level); - } - MutexLocker ml(Compile_lock); - NoSafepointVerifier nsv; - if (mh->has_aot_code() && mh->code() != mh->aot_code()) { - mh->aot_code()->make_entrant(); - if (mh->has_compiled_code()) { - mh->code()->make_not_entrant(); - } - Method::set_code(mh, mh->aot_code()); - } - } - return; - } - - // Check if the method can be compiled. If it cannot be compiled with C1, continue profiling - // in the interpreter and then compile with C2 (the transition function will request that, - // see common() ). If the method cannot be compiled with C2 but still can with C1, compile it with - // pure C1. 
- if (!can_be_compiled(mh, level)) { - if (level == CompLevel_full_optimization && can_be_compiled(mh, CompLevel_simple)) { - compile(mh, bci, CompLevel_simple, thread); - } - return; - } - if (bci != InvocationEntryBci && mh->is_not_osr_compilable(level)) { - return; - } - if (!CompileBroker::compilation_is_in_queue(mh)) { - if (PrintTieredEvents) { - print_event(COMPILE, mh, mh, bci, level); - } - submit_compile(mh, bci, level, thread); - } -} - -// Update the rate and submit compile -void SimpleThresholdPolicy::submit_compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread) { - int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count(); - update_rate(os::javaTimeMillis(), mh()); - CompileBroker::compile_method(mh, bci, level, mh, hot_count, CompileTask::Reason_Tiered, thread); -} - -// Print an event. -void SimpleThresholdPolicy::print_specific(EventType type, const methodHandle& mh, const methodHandle& imh, - int bci, CompLevel level) { - tty->print(" rate="); - if (mh->prev_time() == 0) tty->print("n/a"); - else tty->print("%f", mh->rate()); - - tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback), - threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback)); - -} - -// update_rate() is called from select_task() while holding a compile queue lock. -void SimpleThresholdPolicy::update_rate(jlong t, Method* m) { - // Skip update if counters are absent. - // Can't allocate them since we are holding compile queue lock. - if (m->method_counters() == NULL) return; - - if (is_old(m)) { - // We don't remove old methods from the queue, - // so we can just zero the rate. - m->set_rate(0); - return; - } - - // We don't update the rate if we've just came out of a safepoint. - // delta_s is the time since last safepoint in milliseconds. - jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint(); - jlong delta_t = t - (m->prev_time() != 0 ? m->prev_time() : start_time()); // milliseconds since the last measurement - // How many events were there since the last time? - int event_count = m->invocation_count() + m->backedge_count(); - int delta_e = event_count - m->prev_event_count(); - - // We should be running for at least 1ms. - if (delta_s >= TieredRateUpdateMinTime) { - // And we must've taken the previous point at least 1ms before. - if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) { - m->set_prev_time(t); - m->set_prev_event_count(event_count); - m->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond - } else { - if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) { - // If nothing happened for 25ms, zero the rate. Don't modify prev values. - m->set_rate(0); - } - } - } -} - -// Check if this method has been stale from a given number of milliseconds. -// See select_task(). -bool SimpleThresholdPolicy::is_stale(jlong t, jlong timeout, Method* m) { - jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint(); - jlong delta_t = t - m->prev_time(); - if (delta_t > timeout && delta_s > timeout) { - int event_count = m->invocation_count() + m->backedge_count(); - int delta_e = event_count - m->prev_event_count(); - // Return true if there were no events. - return delta_e == 0; - } - return false; -} - -// We don't remove old methods from the compile queue even if they have -// very low activity. See select_task(). 
-bool SimpleThresholdPolicy::is_old(Method* method) { - return method->invocation_count() > 50000 || method->backedge_count() > 500000; -} - -double SimpleThresholdPolicy::weight(Method* method) { - return (double)(method->rate() + 1) * - (method->invocation_count() + 1) * (method->backedge_count() + 1); -} - -// Apply heuristics and return true if x should be compiled before y -bool SimpleThresholdPolicy::compare_methods(Method* x, Method* y) { - if (x->highest_comp_level() > y->highest_comp_level()) { - // recompilation after deopt - return true; - } else - if (x->highest_comp_level() == y->highest_comp_level()) { - if (weight(x) > weight(y)) { - return true; - } - } - return false; -} - -// Is method profiled enough? -bool SimpleThresholdPolicy::is_method_profiled(Method* method) { - MethodData* mdo = method->method_data(); - if (mdo != NULL) { - int i = mdo->invocation_count_delta(); - int b = mdo->backedge_count_delta(); - return call_predicate_helper<CompLevel_full_profile>(i, b, 1, method); - } - return false; -} - -double SimpleThresholdPolicy::threshold_scale(CompLevel level, int feedback_k) { - double queue_size = CompileBroker::queue_size(level); - int comp_count = compiler_count(level); - double k = queue_size / (feedback_k * comp_count) + 1; - - // Increase C1 compile threshold when the code cache is filled more - // than specified by IncreaseFirstTierCompileThresholdAt percentage. - // The main intention is to keep enough free space for C2 compiled code - // to achieve peak performance if the code cache is under stress. - if ((TieredStopAtLevel == CompLevel_full_optimization) && (level != CompLevel_full_optimization)) { - double current_reverse_free_ratio = CodeCache::reverse_free_ratio(CodeCache::get_code_blob_type(level)); - if (current_reverse_free_ratio > _increase_threshold_at_ratio) { - k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio); - } - } - return k; -} - -// Call and loop predicates determine whether a transition to a higher -// compilation level should be performed (pointers to predicate functions -// are passed to common()). -// Tier?LoadFeedback is basically a coefficient that determines of -// how many methods per compiler thread can be in the queue before -// the threshold values double. 
-bool SimpleThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level, Method* method) { - switch(cur_level) { - case CompLevel_aot: { - double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback); - return loop_predicate_helper<CompLevel_aot>(i, b, k, method); - } - case CompLevel_none: - case CompLevel_limited_profile: { - double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback); - return loop_predicate_helper<CompLevel_none>(i, b, k, method); - } - case CompLevel_full_profile: { - double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback); - return loop_predicate_helper<CompLevel_full_profile>(i, b, k, method); - } - default: - return true; - } -} - -bool SimpleThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level, Method* method) { - switch(cur_level) { - case CompLevel_aot: { - double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback); - return call_predicate_helper<CompLevel_aot>(i, b, k, method); - } - case CompLevel_none: - case CompLevel_limited_profile: { - double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback); - return call_predicate_helper<CompLevel_none>(i, b, k, method); - } - case CompLevel_full_profile: { - double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback); - return call_predicate_helper<CompLevel_full_profile>(i, b, k, method); - } - default: - return true; - } -} - -// Determine is a method is mature. -bool SimpleThresholdPolicy::is_mature(Method* method) { - if (is_trivial(method)) return true; - MethodData* mdo = method->method_data(); - if (mdo != NULL) { - int i = mdo->invocation_count(); - int b = mdo->backedge_count(); - double k = ProfileMaturityPercentage / 100.0; - return call_predicate_helper<CompLevel_full_profile>(i, b, k, method) || - loop_predicate_helper<CompLevel_full_profile>(i, b, k, method); - } - return false; -} - -// If a method is old enough and is still in the interpreter we would want to -// start profiling without waiting for the compiled method to arrive. -// We also take the load on compilers into the account. -bool SimpleThresholdPolicy::should_create_mdo(Method* method, CompLevel cur_level) { - if (cur_level == CompLevel_none && - CompileBroker::queue_size(CompLevel_full_optimization) <= - Tier3DelayOn * compiler_count(CompLevel_full_optimization)) { - int i = method->invocation_count(); - int b = method->backedge_count(); - double k = Tier0ProfilingStartPercentage / 100.0; - return call_predicate_helper<CompLevel_none>(i, b, k, method) || loop_predicate_helper<CompLevel_none>(i, b, k, method); - } - return false; -} - -// Inlining control: if we're compiling a profiled method with C1 and the callee -// is known to have OSRed in a C2 version, don't inline it. -bool SimpleThresholdPolicy::should_not_inline(ciEnv* env, ciMethod* callee) { - CompLevel comp_level = (CompLevel)env->comp_level(); - if (comp_level == CompLevel_full_profile || - comp_level == CompLevel_limited_profile) { - return callee->highest_osr_comp_level() == CompLevel_full_optimization; - } - return false; -} - -// Create MDO if necessary. 
-void SimpleThresholdPolicy::create_mdo(const methodHandle& mh, JavaThread* THREAD) { - if (mh->is_native() || - mh->is_abstract() || - mh->is_accessor() || - mh->is_constant_getter()) { - return; - } - if (mh->method_data() == NULL) { - Method::build_interpreter_method_data(mh, CHECK_AND_CLEAR); - } -} - - -/* - * Method states: - * 0 - interpreter (CompLevel_none) - * 1 - pure C1 (CompLevel_simple) - * 2 - C1 with invocation and backedge counting (CompLevel_limited_profile) - * 3 - C1 with full profiling (CompLevel_full_profile) - * 4 - C2 (CompLevel_full_optimization) - * - * Common state transition patterns: - * a. 0 -> 3 -> 4. - * The most common path. But note that even in this straightforward case - * profiling can start at level 0 and finish at level 3. - * - * b. 0 -> 2 -> 3 -> 4. - * This case occurs when the load on C2 is deemed too high. So, instead of transitioning - * into state 3 directly and over-profiling while a method is in the C2 queue we transition to - * level 2 and wait until the load on C2 decreases. This path is disabled for OSRs. - * - * c. 0 -> (3->2) -> 4. - * In this case we enqueue a method for compilation at level 3, but the C1 queue is long enough - * to enable the profiling to fully occur at level 0. In this case we change the compilation level - * of the method to 2 while the request is still in-queue, because it'll allow it to run much faster - * without full profiling while c2 is compiling. - * - * d. 0 -> 3 -> 1 or 0 -> 2 -> 1. - * After a method was once compiled with C1 it can be identified as trivial and be compiled to - * level 1. These transition can also occur if a method can't be compiled with C2 but can with C1. - * - * e. 0 -> 4. - * This can happen if a method fails C1 compilation (it will still be profiled in the interpreter) - * or because of a deopt that didn't require reprofiling (compilation won't happen in this case because - * the compiled version already exists). - * - * Note that since state 0 can be reached from any other state via deoptimization different loops - * are possible. - * - */ - -// Common transition function. Given a predicate determines if a method should transition to another level. -CompLevel SimpleThresholdPolicy::common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback) { - CompLevel next_level = cur_level; - int i = method->invocation_count(); - int b = method->backedge_count(); - - if (is_trivial(method)) { - next_level = CompLevel_simple; - } else { - switch(cur_level) { - default: break; - case CompLevel_aot: { - // If we were at full profile level, would we switch to full opt? - if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) { - next_level = CompLevel_full_optimization; - } else if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <= - Tier3DelayOff * compiler_count(CompLevel_full_optimization) && - (this->*p)(i, b, cur_level, method))) { - next_level = CompLevel_full_profile; - } - } - break; - case CompLevel_none: - // If we were at full profile level, would we switch to full opt? - if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) { - next_level = CompLevel_full_optimization; - } else if ((this->*p)(i, b, cur_level, method)) { -#if INCLUDE_JVMCI - if (EnableJVMCI && UseJVMCICompiler) { - // Since JVMCI takes a while to warm up, its queue inevitably backs up during - // early VM execution. 
As of 2014-06-13, JVMCI's inliner assumes that the root - // compilation method and all potential inlinees have mature profiles (which - // includes type profiling). If it sees immature profiles, JVMCI's inliner - // can perform pathologically bad (e.g., causing OutOfMemoryErrors due to - // exploring/inlining too many graphs). Since a rewrite of the inliner is - // in progress, we simply disable the dialing back heuristic for now and will - // revisit this decision once the new inliner is completed. - next_level = CompLevel_full_profile; - } else -#endif - { - // C1-generated fully profiled code is about 30% slower than the limited profile - // code that has only invocation and backedge counters. The observation is that - // if C2 queue is large enough we can spend too much time in the fully profiled code - // while waiting for C2 to pick the method from the queue. To alleviate this problem - // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long - // we choose to compile a limited profiled version and then recompile with full profiling - // when the load on C2 goes down. - if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) > - Tier3DelayOn * compiler_count(CompLevel_full_optimization)) { - next_level = CompLevel_limited_profile; - } else { - next_level = CompLevel_full_profile; - } - } - } - break; - case CompLevel_limited_profile: - if (is_method_profiled(method)) { - // Special case: we got here because this method was fully profiled in the interpreter. - next_level = CompLevel_full_optimization; - } else { - MethodData* mdo = method->method_data(); - if (mdo != NULL) { - if (mdo->would_profile()) { - if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <= - Tier3DelayOff * compiler_count(CompLevel_full_optimization) && - (this->*p)(i, b, cur_level, method))) { - next_level = CompLevel_full_profile; - } - } else { - next_level = CompLevel_full_optimization; - } - } else { - // If there is no MDO we need to profile - if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <= - Tier3DelayOff * compiler_count(CompLevel_full_optimization) && - (this->*p)(i, b, cur_level, method))) { - next_level = CompLevel_full_profile; - } - } - } - break; - case CompLevel_full_profile: - { - MethodData* mdo = method->method_data(); - if (mdo != NULL) { - if (mdo->would_profile()) { - int mdo_i = mdo->invocation_count_delta(); - int mdo_b = mdo->backedge_count_delta(); - if ((this->*p)(mdo_i, mdo_b, cur_level, method)) { - next_level = CompLevel_full_optimization; - } - } else { - next_level = CompLevel_full_optimization; - } - } - } - break; - } - } - return MIN2(next_level, (CompLevel)TieredStopAtLevel); -} - -// Determine if a method should be compiled with a normal entry point at a different level. -CompLevel SimpleThresholdPolicy::call_event(Method* method, CompLevel cur_level, JavaThread * thread) { - CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(), - common(&SimpleThresholdPolicy::loop_predicate, method, cur_level, true)); - CompLevel next_level = common(&SimpleThresholdPolicy::call_predicate, method, cur_level); - - // If OSR method level is greater than the regular method level, the levels should be - // equalized by raising the regular method level in order to avoid OSRs during each - // invocation of the method. 
- if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) { - MethodData* mdo = method->method_data(); - guarantee(mdo != NULL, "MDO should not be NULL"); - if (mdo->invocation_count() >= 1) { - next_level = CompLevel_full_optimization; - } - } else { - next_level = MAX2(osr_level, next_level); - } -#if INCLUDE_JVMCI - if (UseJVMCICompiler) { - next_level = JVMCIRuntime::adjust_comp_level(method, false, next_level, thread); - } -#endif - return next_level; -} - -// Determine if we should do an OSR compilation of a given method. -CompLevel SimpleThresholdPolicy::loop_event(Method* method, CompLevel cur_level, JavaThread* thread) { - CompLevel next_level = common(&SimpleThresholdPolicy::loop_predicate, method, cur_level, true); - if (cur_level == CompLevel_none) { - // If there is a live OSR method that means that we deopted to the interpreter - // for the transition. - CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level); - if (osr_level > CompLevel_none) { - return osr_level; - } - } -#if INCLUDE_JVMCI - if (UseJVMCICompiler) { - next_level = JVMCIRuntime::adjust_comp_level(method, true, next_level, thread); - } -#endif - return next_level; -} - -bool SimpleThresholdPolicy::maybe_switch_to_aot(const methodHandle& mh, CompLevel cur_level, CompLevel next_level, JavaThread* thread) { - if (UseAOT && !delay_compilation_during_startup()) { - if (cur_level == CompLevel_full_profile || cur_level == CompLevel_none) { - // If the current level is full profile or interpreter and we're switching to any other level, - // activate the AOT code back first so that we won't waste time overprofiling. - compile(mh, InvocationEntryBci, CompLevel_aot, thread); - // Fall through for JIT compilation. - } - if (next_level == CompLevel_limited_profile && cur_level != CompLevel_aot && mh->has_aot_code()) { - // If the next level is limited profile, use the aot code (if there is any), - // since it's essentially the same thing. - compile(mh, InvocationEntryBci, CompLevel_aot, thread); - // Not need to JIT, we're done. - return true; - } - } - return false; -} - - -// Handle the invocation event. -void SimpleThresholdPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh, - CompLevel level, CompiledMethod* nm, JavaThread* thread) { - if (should_create_mdo(mh(), level)) { - create_mdo(mh, thread); - } - CompLevel next_level = call_event(mh(), level, thread); - if (next_level != level) { - if (maybe_switch_to_aot(mh, level, next_level, thread)) { - // No JITting necessary - return; - } - if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) { - compile(mh, InvocationEntryBci, next_level, thread); - } - } -} - -// Handle the back branch event. Notice that we can compile the method -// with a regular entry from here. 
-void SimpleThresholdPolicy::method_back_branch_event(const methodHandle& mh, const methodHandle& imh, - int bci, CompLevel level, CompiledMethod* nm, JavaThread* thread) { - if (should_create_mdo(mh(), level)) { - create_mdo(mh, thread); - } - // Check if MDO should be created for the inlined method - if (should_create_mdo(imh(), level)) { - create_mdo(imh, thread); - } - - if (is_compilation_enabled()) { - CompLevel next_osr_level = loop_event(imh(), level, thread); - CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level(); - // At the very least compile the OSR version - if (!CompileBroker::compilation_is_in_queue(imh) && (next_osr_level != level)) { - compile(imh, bci, next_osr_level, thread); - } - - // Use loop event as an opportunity to also check if there's been - // enough calls. - CompLevel cur_level, next_level; - if (mh() != imh()) { // If there is an enclosing method - if (level == CompLevel_aot) { - // Recompile the enclosing method to prevent infinite OSRs. Stay at AOT level while it's compiling. - if (max_osr_level != CompLevel_none && !CompileBroker::compilation_is_in_queue(mh)) { - compile(mh, InvocationEntryBci, MIN2((CompLevel)TieredStopAtLevel, CompLevel_full_profile), thread); - } - } else { - // Current loop event level is not AOT - guarantee(nm != NULL, "Should have nmethod here"); - cur_level = comp_level(mh()); - next_level = call_event(mh(), cur_level, thread); - - if (max_osr_level == CompLevel_full_optimization) { - // The inlinee OSRed to full opt, we need to modify the enclosing method to avoid deopts - bool make_not_entrant = false; - if (nm->is_osr_method()) { - // This is an osr method, just make it not entrant and recompile later if needed - make_not_entrant = true; - } else { - if (next_level != CompLevel_full_optimization) { - // next_level is not full opt, so we need to recompile the - // enclosing method without the inlinee - cur_level = CompLevel_none; - make_not_entrant = true; - } - } - if (make_not_entrant) { - if (PrintTieredEvents) { - int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci; - print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level); - } - nm->make_not_entrant(); - } - } - // Fix up next_level if necessary to avoid deopts - if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) { - next_level = CompLevel_full_profile; - } - if (cur_level != next_level) { - if (!maybe_switch_to_aot(mh, cur_level, next_level, thread) && !CompileBroker::compilation_is_in_queue(mh)) { - compile(mh, InvocationEntryBci, next_level, thread); - } - } - } - } else { - cur_level = comp_level(mh()); - next_level = call_event(mh(), cur_level, thread); - if (next_level != cur_level) { - if (!maybe_switch_to_aot(mh, cur_level, next_level, thread) && !CompileBroker::compilation_is_in_queue(mh)) { - compile(mh, InvocationEntryBci, next_level, thread); - } - } - } - } -} - -#endif
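That deletes simpleThresholdPolicy.cpp; its logic reappears as tieredThresholdPolicy.cpp later in this changeset. One detail worth a worked example is the ergonomic compiler count from initialize() above: count = max(log2(n) * log2(log2(n)) * 3 / 2, 2) over active processors, chosen because plain log n grows too slowly for tiered compilation. A standalone rendering of that arithmetic (log2_int stands in for HotSpot's log2_intptr):

    #include <algorithm>
    #include <cstdio>

    static int log2_int(int x) {             // floor(log2(x)), x >= 1
      int l = 0;
      while (x >>= 1) l++;
      return l;
    }

    int main() {
      for (int ncpus : {1, 4, 16, 64, 256}) {
        int log_cpu    = log2_int(ncpus);
        int loglog_cpu = log2_int(std::max(log_cpu, 1));
        int count      = std::max(log_cpu * loglog_cpu * 3 / 2, 2);
        std::printf("%3d cpus -> %d compiler threads\n", ncpus, count);
        // prints: 1 -> 2, 4 -> 3, 16 -> 12, 64 -> 18, 256 -> 36
      }
    }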
--- a/src/hotspot/share/runtime/simpleThresholdPolicy.hpp Thu Aug 09 22:06:11 2018 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,277 +0,0 @@ -/* - * Copyright (c) 2010, 2018, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef SHARE_VM_RUNTIME_SIMPLETHRESHOLDPOLICY_HPP -#define SHARE_VM_RUNTIME_SIMPLETHRESHOLDPOLICY_HPP - -#include "code/nmethod.hpp" -#include "oops/methodData.hpp" -#include "runtime/compilationPolicy.hpp" -#include "utilities/globalDefinitions.hpp" - -#ifdef TIERED - -class CompileTask; -class CompileQueue; -/* - * The system supports 5 execution levels: - * * level 0 - interpreter - * * level 1 - C1 with full optimization (no profiling) - * * level 2 - C1 with invocation and backedge counters - * * level 3 - C1 with full profiling (level 2 + MDO) - * * level 4 - C2 - * - * Levels 0, 2 and 3 periodically notify the runtime about the current value of the counters - * (invocation counters and backedge counters). The frequency of these notifications is - * different at each level. These notifications are used by the policy to decide what transition - * to make. - * - * Execution starts at level 0 (interpreter), then the policy can decide either to compile the - * method at level 3 or level 2. The decision is based on the following factors: - * 1. The length of the C2 queue determines the next level. The observation is that level 2 - * is generally faster than level 3 by about 30%, therefore we would want to minimize the time - * a method spends at level 3. We should only spend the time at level 3 that is necessary to get - * adequate profiling. So, if the C2 queue is long enough it is more beneficial to go first to - * level 2, because if we transitioned to level 3 we would be stuck there until our C2 compile - * request makes its way through the long queue. When the load on C2 recedes we are going to - * recompile at level 3 and start gathering profiling information. - * 2. The length of C1 queue is used to dynamically adjust the thresholds, so as to introduce - * additional filtering if the compiler is overloaded. The rationale is that by the time a - * method gets compiled it can become unused, so it doesn't make sense to put too much onto the - * queue. - * - * After profiling is completed at level 3 the transition is made to level 4. Again, the length - * of the C2 queue is used as a feedback to adjust the thresholds. - * - * After the first C1 compile some basic information is determined about the code like the number - * of the blocks and the number of the loops. 
Based on that it can be decided that a method - * is trivial and compiling it with C1 will yield the same code. In this case the method is - * compiled at level 1 instead of 4. - * - * We also support profiling at level 0. If C1 is slow enough to produce the level 3 version of - * the code and the C2 queue is sufficiently small we can decide to start profiling in the - * interpreter (and continue profiling in the compiled code once the level 3 version arrives). - * If the profiling at level 0 is fully completed before level 3 version is produced, a level 2 - * version is compiled instead in order to run faster waiting for a level 4 version. - * - * Compile queues are implemented as priority queues - for each method in the queue we compute - * the event rate (the number of invocation and backedge counter increments per unit of time). - * When getting an element off the queue we pick the one with the largest rate. Maintaining the - * rate also allows us to remove stale methods (the ones that got on the queue but stopped - * being used shortly after that). -*/ - -/* Command line options: - * - Tier?InvokeNotifyFreqLog and Tier?BackedgeNotifyFreqLog control the frequency of method - * invocation and backedge notifications. Basically every n-th invocation or backedge a mutator thread - * makes a call into the runtime. - * - * - Tier?InvocationThreshold, Tier?CompileThreshold, Tier?BackEdgeThreshold, Tier?MinInvocationThreshold control - * compilation thresholds. - * Level 2 thresholds are not used and are provided for option-compatibility and potential future use. - * Other thresholds work as follows: - * - * Transition from interpreter (level 0) to C1 with full profiling (level 3) happens when - * the following predicate is true (X is the level): - * - * i > TierXInvocationThreshold * s || (i > TierXMinInvocationThreshold * s && i + b > TierXCompileThreshold * s), - * - * where $i$ is the number of method invocations, $b$ number of backedges and $s$ is the scaling - * coefficient that will be discussed further. - * The intuition is to equalize the time that is spend profiling each method. - * The same predicate is used to control the transition from level 3 to level 4 (C2). It should be - * noted though that the thresholds are relative. Moreover i and b for the 0->3 transition come - * from Method* and for 3->4 transition they come from MDO (since profiled invocations are - * counted separately). Finally, if a method does not contain anything worth profiling, a transition - * from level 3 to level 4 occurs without considering thresholds (e.g., with fewer invocations than - * what is specified by Tier4InvocationThreshold). - * - * OSR transitions are controlled simply with b > TierXBackEdgeThreshold * s predicates. - * - * - Tier?LoadFeedback options are used to automatically scale the predicates described above depending - * on the compiler load. The scaling coefficients are computed as follows: - * - * s = queue_size_X / (TierXLoadFeedback * compiler_count_X) + 1, - * - * where queue_size_X is the current size of the compiler queue of level X, and compiler_count_X - * is the number of level X compiler threads. - * - * Basically these parameters describe how many methods should be in the compile queue - * per compiler thread before the scaling coefficient increases by one. - * - * This feedback provides the mechanism to automatically control the flow of compilation requests - * depending on the machine speed, mutator load and other external factors. 
- * - * - Tier3DelayOn and Tier3DelayOff parameters control another important feedback loop. - * Consider the following observation: a method compiled with full profiling (level 3) - * is about 30% slower than a method at level 2 (just invocation and backedge counters, no MDO). - * Normally, the following transitions will occur: 0->3->4. The problem arises when the C2 queue - * gets congested and the 3->4 transition is delayed. While the method is the C2 queue it continues - * executing at level 3 for much longer time than is required by the predicate and at suboptimal speed. - * The idea is to dynamically change the behavior of the system in such a way that if a substantial - * load on C2 is detected we would first do the 0->2 transition allowing a method to run faster. - * And then when the load decreases to allow 2->3 transitions. - * - * Tier3Delay* parameters control this switching mechanism. - * Tier3DelayOn is the number of methods in the C2 queue per compiler thread after which the policy - * no longer does 0->3 transitions but does 0->2 transitions instead. - * Tier3DelayOff switches the original behavior back when the number of methods in the C2 queue - * per compiler thread falls below the specified amount. - * The hysteresis is necessary to avoid jitter. - * - * - TieredCompileTaskTimeout is the amount of time an idle method can spend in the compile queue. - * Basically, since we use the event rate d(i + b)/dt as a value of priority when selecting a method to - * compile from the compile queue, we also can detect stale methods for which the rate has been - * 0 for some time in the same iteration. Stale methods can appear in the queue when an application - * abruptly changes its behavior. - * - * - TieredStopAtLevel, is used mostly for testing. It allows to bypass the policy logic and stick - * to a given level. For example it's useful to set TieredStopAtLevel = 1 in order to compile everything - * with pure c1. - * - * - Tier0ProfilingStartPercentage allows the interpreter to start profiling when the inequalities in the - * 0->3 predicate are already exceeded by the given percentage but the level 3 version of the - * method is still not ready. We can even go directly from level 0 to 4 if c1 doesn't produce a compiled - * version in time. This reduces the overall transition to level 4 and decreases the startup time. - * Note that this behavior is also guarded by the Tier3Delay mechanism: when the c2 queue is too long - * these is not reason to start profiling prematurely. - * - * - TieredRateUpdateMinTime and TieredRateUpdateMaxTime are parameters of the rate computation. - * Basically, the rate is not computed more frequently than TieredRateUpdateMinTime and is considered - * to be zero if no events occurred in TieredRateUpdateMaxTime. - */ - -class SimpleThresholdPolicy : public CompilationPolicy { - jlong _start_time; - int _c1_count, _c2_count; - - // Check if the counter is big enough and set carry (effectively infinity). - inline void set_carry_if_necessary(InvocationCounter *counter); - // Set carry flags in the counters (in Method* and MDO). - inline void handle_counter_overflow(Method* method); - // Call and loop predicates determine whether a transition to a higher compilation - // level should be performed (pointers to predicate functions are passed to common_TF(). - // Predicates also take compiler load into account. 
- typedef bool (SimpleThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level, Method* method); - bool call_predicate(int i, int b, CompLevel cur_level, Method* method); - bool loop_predicate(int i, int b, CompLevel cur_level, Method* method); - // Common transition function. Given a predicate determines if a method should transition to another level. - CompLevel common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback = false); - // Transition functions. - // call_event determines if a method should be compiled at a different - // level with a regular invocation entry. - CompLevel call_event(Method* method, CompLevel cur_level, JavaThread* thread); - // loop_event checks if a method should be OSR compiled at a different - // level. - CompLevel loop_event(Method* method, CompLevel cur_level, JavaThread* thread); - void print_counters(const char* prefix, const methodHandle& mh); - // Has a method been long around? - // We don't remove old methods from the compile queue even if they have - // very low activity (see select_task()). - inline bool is_old(Method* method); - // Was a given method inactive for a given number of milliseconds. - // If it is, we would remove it from the queue (see select_task()). - inline bool is_stale(jlong t, jlong timeout, Method* m); - // Compute the weight of the method for the compilation scheduling - inline double weight(Method* method); - // Apply heuristics and return true if x should be compiled before y - inline bool compare_methods(Method* x, Method* y); - // Compute event rate for a given method. The rate is the number of event (invocations + backedges) - // per millisecond. - inline void update_rate(jlong t, Method* m); - // Compute threshold scaling coefficient - inline double threshold_scale(CompLevel level, int feedback_k); - // If a method is old enough and is still in the interpreter we would want to - // start profiling without waiting for the compiled method to arrive. This function - // determines whether we should do that. - inline bool should_create_mdo(Method* method, CompLevel cur_level); - // Create MDO if necessary. - void create_mdo(const methodHandle& mh, JavaThread* thread); - // Is method profiled enough? - bool is_method_profiled(Method* method); - - double _increase_threshold_at_ratio; - - bool maybe_switch_to_aot(const methodHandle& mh, CompLevel cur_level, CompLevel next_level, JavaThread* thread); - -protected: - int c1_count() const { return _c1_count; } - int c2_count() const { return _c2_count; } - void set_c1_count(int x) { _c1_count = x; } - void set_c2_count(int x) { _c2_count = x; } - - enum EventType { CALL, LOOP, COMPILE, REMOVE_FROM_QUEUE, UPDATE_IN_QUEUE, REPROFILE, MAKE_NOT_ENTRANT }; - void print_event(EventType type, const methodHandle& mh, const methodHandle& imh, int bci, CompLevel level); - // Print policy-specific information if necessary - virtual void print_specific(EventType type, const methodHandle& mh, const methodHandle& imh, int bci, CompLevel level); - // Check if the method can be compiled, change level if necessary - void compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread); - // Submit a given method for compilation - virtual void submit_compile(const methodHandle& mh, int bci, CompLevel level, JavaThread* thread); - // Simple methods are as good being compiled with C1 as C2. - // This function tells if it's such a function. - inline bool is_trivial(Method* method); - - // Predicate helpers are used by .*_predicate() methods as well as others. 
- // They check the given counter values, multiplied by the scale against the thresholds. - template<CompLevel level> static inline bool call_predicate_helper(int i, int b, double scale, Method* method); - template<CompLevel level> static inline bool loop_predicate_helper(int i, int b, double scale, Method* method); - - // Get a compilation level for a given method. - static CompLevel comp_level(Method* method); - virtual void method_invocation_event(const methodHandle& method, const methodHandle& inlinee, - CompLevel level, CompiledMethod* nm, JavaThread* thread); - virtual void method_back_branch_event(const methodHandle& method, const methodHandle& inlinee, - int bci, CompLevel level, CompiledMethod* nm, JavaThread* thread); - - void set_increase_threshold_at_ratio() { _increase_threshold_at_ratio = 100 / (100 - (double)IncreaseFirstTierCompileThresholdAt); } - void set_start_time(jlong t) { _start_time = t; } - jlong start_time() const { return _start_time; } - -public: - SimpleThresholdPolicy() : _start_time(0), _c1_count(0), _c2_count(0) { } - virtual int compiler_count(CompLevel comp_level) { - if (is_c1_compile(comp_level)) return c1_count(); - if (is_c2_compile(comp_level)) return c2_count(); - return 0; - } - virtual CompLevel initial_compile_level() { return MIN2((CompLevel)TieredStopAtLevel, CompLevel_initial_compile); } - virtual void do_safepoint_work() { } - virtual void delay_compilation(Method* method) { } - virtual void disable_compilation(Method* method) { } - virtual void reprofile(ScopeDesc* trap_scope, bool is_osr); - virtual nmethod* event(const methodHandle& method, const methodHandle& inlinee, - int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, JavaThread* thread); - // Select task is called by CompileBroker. We should return a task or NULL. - virtual CompileTask* select_task(CompileQueue* compile_queue); - // Tell the runtime if we think a given method is adequately profiled. - virtual bool is_mature(Method* method); - // Initialize: set compiler thread count - virtual void initialize(); - virtual bool should_not_inline(ciEnv* env, ciMethod* callee); -}; - -#endif // TIERED - -#endif // SHARE_VM_RUNTIME_SIMPLETHRESHOLDPOLICY_HPP
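The header's option commentary is concrete enough to execute. Below is a numeric sketch of the 0->3 predicate with load-feedback scaling; the Tier3 constants are, to the best of my knowledge, the shipped defaults, but treat them as assumptions:

    #include <cstdio>

    static bool tier0_to_tier3(int i, int b, int queue_size, int c1_threads) {
      const double Tier3InvocationThreshold    = 200;   // assumed defaults
      const double Tier3MinInvocationThreshold = 100;
      const double Tier3CompileThreshold       = 2000;
      const double Tier3LoadFeedback           = 5;
      double s = queue_size / (Tier3LoadFeedback * c1_threads) + 1;
      return i > Tier3InvocationThreshold * s ||
             (i > Tier3MinInvocationThreshold * s && i + b > Tier3CompileThreshold * s);
    }

    int main() {
      // An empty queue keeps s = 1; a backlog of 20 tasks on 2 C1 threads
      // makes s = 3, tripling every threshold and delaying the transition.
      std::printf("%d\n", tier0_to_tier3(250, 0, 0, 2));   // 1: past raw threshold
      std::printf("%d\n", tier0_to_tier3(250, 0, 20, 2));  // 0: scaled threshold is 600
    }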
--- a/src/hotspot/share/runtime/simpleThresholdPolicy.inline.hpp Thu Aug 09 22:06:11 2018 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,108 +0,0 @@ -/* - * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - * - */ - -#ifndef SHARE_VM_RUNTIME_SIMPLETHRESHOLDPOLICY_INLINE_HPP -#define SHARE_VM_RUNTIME_SIMPLETHRESHOLDPOLICY_INLINE_HPP - -#include "compiler/compilerOracle.hpp" -#include "oops/method.inline.hpp" - -#ifdef TIERED - -template<CompLevel level> -bool SimpleThresholdPolicy::call_predicate_helper(int i, int b, double scale, Method* method) { - double threshold_scaling; - if (CompilerOracle::has_option_value(method, "CompileThresholdScaling", threshold_scaling)) { - scale *= threshold_scaling; - } - switch(level) { - case CompLevel_aot: - return (i >= Tier3AOTInvocationThreshold * scale) || - (i >= Tier3AOTMinInvocationThreshold * scale && i + b >= Tier3AOTCompileThreshold * scale); - case CompLevel_none: - case CompLevel_limited_profile: - return (i >= Tier3InvocationThreshold * scale) || - (i >= Tier3MinInvocationThreshold * scale && i + b >= Tier3CompileThreshold * scale); - case CompLevel_full_profile: - return (i >= Tier4InvocationThreshold * scale) || - (i >= Tier4MinInvocationThreshold * scale && i + b >= Tier4CompileThreshold * scale); - } - return true; -} - -template<CompLevel level> -bool SimpleThresholdPolicy::loop_predicate_helper(int i, int b, double scale, Method* method) { - double threshold_scaling; - if (CompilerOracle::has_option_value(method, "CompileThresholdScaling", threshold_scaling)) { - scale *= threshold_scaling; - } - switch(level) { - case CompLevel_aot: - return b >= Tier3AOTBackEdgeThreshold * scale; - case CompLevel_none: - case CompLevel_limited_profile: - return b >= Tier3BackEdgeThreshold * scale; - case CompLevel_full_profile: - return b >= Tier4BackEdgeThreshold * scale; - } - return true; -} - -// Simple methods are as good being compiled with C1 as C2. -// Determine if a given method is such a case. 
-bool SimpleThresholdPolicy::is_trivial(Method* method) { - if (method->is_accessor() || - method->is_constant_getter()) { - return true; - } -#if INCLUDE_JVMCI - if (UseJVMCICompiler) { - AbstractCompiler* comp = CompileBroker::compiler(CompLevel_full_optimization); - if (TieredCompilation && comp != NULL && comp->is_trivial(method)) { - return true; - } - } -#endif - if (method->has_loops() || method->code_size() >= 15) { - return false; - } - MethodData* mdo = method->method_data(); - if (mdo != NULL && !mdo->would_profile() && - (method->code_size() < 5 || (mdo->num_blocks() < 4))) { - return true; - } - return false; -} - -inline CompLevel SimpleThresholdPolicy::comp_level(Method* method) { - CompiledMethod *nm = method->code(); - if (nm != NULL && nm->is_in_use()) { - return (CompLevel)nm->comp_level(); - } - return CompLevel_none; -} - -#endif // TIERED - -#endif // SHARE_VM_RUNTIME_SIMPLETHRESHOLDPOLICY_INLINE_HPP
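The two predicate helpers deleted above (and re-added under the new class name further down) boil down to a pair of inequalities over the invocation counter i and the backedge counter b. A self-contained sketch of the tier-3 call predicate follows, with the Tier3* product defaults hard-coded; the threshold values are quoted from memory and should be checked against globals.hpp rather than trusted:

// Sketch only: mirrors call_predicate_helper<CompLevel_none>() above with
// assumed defaults for the -XX:Tier3InvocationThreshold family of flags.
#include <cstdio>

const int Tier3InvocationThreshold    = 200;   // assumed default
const int Tier3MinInvocationThreshold = 100;   // assumed default
const int Tier3CompileThreshold       = 2000;  // assumed default

// i = invocation counter, b = backedge counter,
// scale = CompileThresholdScaling (1.0 unless overridden).
static bool tier3_call_predicate(int i, int b, double scale) {
  return (i >= Tier3InvocationThreshold * scale) ||
         (i >= Tier3MinInvocationThreshold * scale &&
          i + b >= Tier3CompileThreshold * scale);
}

int main() {
  // 150 calls alone stay below the 200 threshold, but 150 calls plus 1900
  // back-edges trip the combined i + b test, so the method goes to tier 3.
  std::printf("%d\n", tier3_call_predicate(150, 1900, 1.0));  // prints 1
  // With -XX:CompileThresholdScaling=0.5 every threshold is halved, so
  // 120 plain invocations are already enough on their own.
  std::printf("%d\n", tier3_call_predicate(120, 0, 0.5));     // prints 1
  return 0;
}

The scale parameter here is where the per-method CompilerOracle lookup of "CompileThresholdScaling", visible at the top of both helpers, multiplies in.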
--- a/src/hotspot/share/runtime/thread.cpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/runtime/thread.cpp Thu Aug 16 22:05:57 2018 +0200 @@ -282,7 +282,6 @@ _hashStateW = 273326509; _OnTrap = 0; - _schedctl = NULL; _Stalled = 0; _TypeTag = 0x2BAD; @@ -4802,7 +4801,6 @@ // (List, LOCKBIT:1). We could also add a SUCCBIT or an explicit _succ variable // to provide the usual futile-wakeup optimization. // See RTStt for details. -// * Consider schedctl.sc_nopreempt to cover the critical section. //
--- a/src/hotspot/share/runtime/thread.hpp Thu Aug 09 22:06:11 2018 +0200 +++ b/src/hotspot/share/runtime/thread.hpp Thu Aug 16 22:05:57 2018 +0200 @@ -726,8 +726,6 @@ jint _hashStateX; // thread-specific hashCode generator state jint _hashStateY; jint _hashStateZ; - void * _schedctl; - volatile jint rng[4]; // RNG for spin loop
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/hotspot/share/runtime/tieredThresholdPolicy.cpp Thu Aug 16 22:05:57 2018 +0200 @@ -0,0 +1,978 @@ +/* + * Copyright (c) 2010, 2018, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#include "precompiled.hpp" +#include "compiler/compileBroker.hpp" +#include "compiler/compilerOracle.hpp" +#include "memory/resourceArea.hpp" +#include "runtime/arguments.hpp" +#include "runtime/handles.inline.hpp" +#include "runtime/safepointVerifiers.hpp" +#include "runtime/tieredThresholdPolicy.hpp" +#include "code/scopeDesc.hpp" +#include "oops/method.inline.hpp" +#if INCLUDE_JVMCI +#include "jvmci/jvmciRuntime.hpp" +#endif + +#ifdef TIERED + +template<CompLevel level> +bool TieredThresholdPolicy::call_predicate_helper(int i, int b, double scale, Method* method) { + double threshold_scaling; + if (CompilerOracle::has_option_value(method, "CompileThresholdScaling", threshold_scaling)) { + scale *= threshold_scaling; + } + switch(level) { + case CompLevel_aot: + return (i >= Tier3AOTInvocationThreshold * scale) || + (i >= Tier3AOTMinInvocationThreshold * scale && i + b >= Tier3AOTCompileThreshold * scale); + case CompLevel_none: + case CompLevel_limited_profile: + return (i >= Tier3InvocationThreshold * scale) || + (i >= Tier3MinInvocationThreshold * scale && i + b >= Tier3CompileThreshold * scale); + case CompLevel_full_profile: + return (i >= Tier4InvocationThreshold * scale) || + (i >= Tier4MinInvocationThreshold * scale && i + b >= Tier4CompileThreshold * scale); + } + return true; +} + +template<CompLevel level> +bool TieredThresholdPolicy::loop_predicate_helper(int i, int b, double scale, Method* method) { + double threshold_scaling; + if (CompilerOracle::has_option_value(method, "CompileThresholdScaling", threshold_scaling)) { + scale *= threshold_scaling; + } + switch(level) { + case CompLevel_aot: + return b >= Tier3AOTBackEdgeThreshold * scale; + case CompLevel_none: + case CompLevel_limited_profile: + return b >= Tier3BackEdgeThreshold * scale; + case CompLevel_full_profile: + return b >= Tier4BackEdgeThreshold * scale; + } + return true; +} + +// Simple methods are as good being compiled with C1 as C2. +// Determine if a given method is such a case. 
+bool TieredThresholdPolicy::is_trivial(Method* method) {
+  if (method->is_accessor() ||
+      method->is_constant_getter()) {
+    return true;
+  }
+#if INCLUDE_JVMCI
+  if (UseJVMCICompiler) {
+    AbstractCompiler* comp = CompileBroker::compiler(CompLevel_full_optimization);
+    if (TieredCompilation && comp != NULL && comp->is_trivial(method)) {
+      return true;
+    }
+  }
+#endif
+  if (method->has_loops() || method->code_size() >= 15) {
+    return false;
+  }
+  MethodData* mdo = method->method_data();
+  if (mdo != NULL && !mdo->would_profile() &&
+      (method->code_size() < 5 || (mdo->num_blocks() < 4))) {
+    return true;
+  }
+  return false;
+}
+
+CompLevel TieredThresholdPolicy::comp_level(Method* method) {
+  CompiledMethod *nm = method->code();
+  if (nm != NULL && nm->is_in_use()) {
+    return (CompLevel)nm->comp_level();
+  }
+  return CompLevel_none;
+}
+
+void TieredThresholdPolicy::print_counters(const char* prefix, const methodHandle& mh) {
+  int invocation_count = mh->invocation_count();
+  int backedge_count = mh->backedge_count();
+  MethodData* mdh = mh->method_data();
+  int mdo_invocations = 0, mdo_backedges = 0;
+  int mdo_invocations_start = 0, mdo_backedges_start = 0;
+  if (mdh != NULL) {
+    mdo_invocations = mdh->invocation_count();
+    mdo_backedges = mdh->backedge_count();
+    mdo_invocations_start = mdh->invocation_count_start();
+    mdo_backedges_start = mdh->backedge_count_start();
+  }
+  tty->print(" %stotal=%d,%d %smdo=%d(%d),%d(%d)", prefix,
+             invocation_count, backedge_count, prefix,
+             mdo_invocations, mdo_invocations_start,
+             mdo_backedges, mdo_backedges_start);
+  tty->print(" %smax levels=%d,%d", prefix,
+             mh->highest_comp_level(), mh->highest_osr_comp_level());
+}
+
+// Print an event.
+void TieredThresholdPolicy::print_event(EventType type, const methodHandle& mh, const methodHandle& imh,
+                                        int bci, CompLevel level) {
+  bool inlinee_event = mh() != imh();
+
+  ttyLocker tty_lock;
+  tty->print("%lf: [", os::elapsedTime());
+
+  switch(type) {
+  case CALL:
+    tty->print("call");
+    break;
+  case LOOP:
+    tty->print("loop");
+    break;
+  case COMPILE:
+    tty->print("compile");
+    break;
+  case REMOVE_FROM_QUEUE:
+    tty->print("remove-from-queue");
+    break;
+  case UPDATE_IN_QUEUE:
+    tty->print("update-in-queue");
+    break;
+  case REPROFILE:
+    tty->print("reprofile");
+    break;
+  case MAKE_NOT_ENTRANT:
+    tty->print("make-not-entrant");
+    break;
+  default:
+    tty->print("unknown");
+  }
+
+  tty->print(" level=%d ", level);
+
+  ResourceMark rm;
+  char *method_name = mh->name_and_sig_as_C_string();
+  tty->print("[%s", method_name);
+  if (inlinee_event) {
+    char *inlinee_name = imh->name_and_sig_as_C_string();
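Pieced together from the format strings in print_event() above, a call event line begins as the sketch below shows. The hunk is cut off inside the function, so everything after the method name (inlinee, counters, queue sizes) is left out here; the timestamp and signature are invented, and these lines are emitted when event tracing is enabled, via -XX:+PrintTieredEvents if memory serves:

// Reproduces only the visible prefix of a print_event() line, with printf
// standing in for tty->print; all values here are invented for illustration.
#include <cstdio>

int main() {
  double elapsed = 0.813;  // stand-in for os::elapsedTime()
  std::printf("%lf: [", elapsed);                      // "0.813000: ["
  std::printf("call");                                 // EventType == CALL
  std::printf(" level=%d ", 3);                        // CompLevel_full_profile
  std::printf("[%s", "java/lang/String.hashCode()I");  // method_name
  std::printf("\n");  // the real function goes on to print counters etc.
  return 0;
}
// Output: 0.813000: [call level=3 [java/lang/String.hashCode()I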