changeset 51098:e2acd1ba1ee5

Merge
author prr
date Wed, 06 Jun 2018 09:41:16 -0700
parents f8c15a2f2ae9 64e4b1686141
children 143c539c00dc
files
  src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.inline.hpp
  src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.inline.hpp
  src/hotspot/os_cpu/bsd_zero/orderAccess_bsd_zero.inline.hpp
  src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.inline.hpp
  src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.inline.hpp
  src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.inline.hpp
  src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.inline.hpp
  src/hotspot/os_cpu/linux_sparc/orderAccess_linux_sparc.inline.hpp
  src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.inline.hpp
  src/hotspot/os_cpu/linux_zero/orderAccess_linux_zero.inline.hpp
  src/hotspot/os_cpu/solaris_sparc/orderAccess_solaris_sparc.inline.hpp
  src/hotspot/os_cpu/solaris_x86/orderAccess_solaris_x86.inline.hpp
  src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.inline.hpp
  src/hotspot/share/runtime/orderAccess.inline.hpp
  src/jdk.internal.opt/share/classes/jdk/internal/joptsimple/MissingRequiredOptionException.java
  src/jdk.internal.opt/share/classes/jdk/internal/joptsimple/UnacceptableNumberOfNonOptionsException.java
  src/jdk.internal.opt/share/classes/jdk/internal/joptsimple/internal/Objects.java
  src/jdk.javadoc/share/classes/jdk/javadoc/internal/doclets/formats/html/resources/jquery/jquery-1.10.2.js
  test/hotspot/jtreg/runtime/appcds/jigsaw/limitmods/LimitModsHelper.java
  test/hotspot/jtreg/runtime/appcds/jigsaw/limitmods/LimitModsTests.java
  test/hotspot/jtreg/runtime/appcds/test-classes/jdk/dynalink/DynamicLinker.jasm
diffstat 961 files changed, 86928 insertions(+), 21529 deletions(-)
--- a/make/CompileJavaModules.gmk	Mon Jun 04 16:11:21 2018 +0200
+++ b/make/CompileJavaModules.gmk	Wed Jun 06 09:41:16 2018 -0700
@@ -325,6 +325,10 @@
 
 ################################################################################
 
+jdk.internal.opt_COPY += .properties
+
+################################################################################
+
 jdk.jcmd_COPY += _options
 
 ################################################################################
--- a/make/data/currency/CurrencyData.properties	Mon Jun 04 16:11:21 2018 +0200
+++ b/make/data/currency/CurrencyData.properties	Wed Jun 06 09:41:16 2018 -0700
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -32,7 +32,7 @@
 # Version of the currency code information in this class.
 # It is a serial number that accompanies with each amendment.
 
-dataVersion=164
+dataVersion=167
 
 # List of all valid ISO 4217 currency codes.
 # To ensure compatibility, do not remove codes.
@@ -47,7 +47,7 @@
     HRK191-HTG332-HUF348-IDR360-IEP372-ILS376-INR356-IQD368-IRR364-ISK352-\
     ITL380-JMD388-JOD400-JPY392-KES404-KGS417-KHR116-KMF174-KPW408-KRW410-\
     KWD414-KYD136-KZT398-LAK418-LBP422-LKR144-LRD430-LSL426-LTL440-LUF442-\
-    LVL428-LYD434-MAD504-MDL498-MGA969-MGF450-MKD807-MMK104-MNT496-MOP446-MRO478-\
+    LVL428-LYD434-MAD504-MDL498-MGA969-MGF450-MKD807-MMK104-MNT496-MOP446-MRO478-MRU929-\
     MTL470-MUR480-MVR462-MWK454-MXN484-MXV979-MYR458-MZM508-MZN943-NAD516-NGN566-\
     NIO558-NLG528-NOK578-NPR524-NZD554-OMR512-PAB590-PEN604-PGK598-PHP608-\
     PKR586-PLN985-PTE620-PYG600-QAR634-ROL946-RON946-RSD941-RUB643-RUR810-RWF646-SAR682-\
@@ -324,7 +324,7 @@
 # LAO PEOPLE'S DEMOCRATIC REPUBLIC (THE)
 LA=LAK
 # LATVIA
-LV=LVL;2013-12-31-22-00-00;EUR
+LV=EUR
 # LEBANON
 LB=LBP
 # LESOTHO
@@ -336,7 +336,7 @@
 # LIECHTENSTEIN
 LI=CHF
 # LITHUANIA
-LT=LTL;2014-12-31-22-00-00;EUR
+LT=EUR
 # LUXEMBOURG
 LU=EUR
 # MACAU
@@ -360,7 +360,7 @@
 # MARTINIQUE
 MQ=EUR
 # MAURITANIA
-MR=MRO
+MR=MRU
 # MAURITIUS
 MU=MUR
 # MAYOTTE
--- a/make/data/lsrdata/language-subtag-registry.txt	Mon Jun 04 16:11:21 2018 +0200
+++ b/make/data/lsrdata/language-subtag-registry.txt	Wed Jun 06 09:41:16 2018 -0700
@@ -1,4 +1,4 @@
-File-Date: 2017-08-15
+File-Date: 2018-04-23
 %%
 Type: language
 Subtag: aa
@@ -378,6 +378,7 @@
 Description: Armenian
 Added: 2005-10-16
 Suppress-Script: Armn
+Comments: see also hyw
 %%
 Type: language
 Subtag: hz
@@ -525,6 +526,7 @@
 %%
 Type: language
 Subtag: km
+Description: Khmer
 Description: Central Khmer
 Added: 2005-10-16
 Suppress-Script: Khmr
@@ -957,6 +959,7 @@
 Description: Serbian
 Added: 2005-10-16
 Macrolanguage: sh
+Comments: see cnr for Montenegrin
 %%
 Type: language
 Subtag: ss
@@ -1531,6 +1534,7 @@
 %%
 Type: language
 Subtag: add
+Description: Lidzonka
 Description: Dzodinka
 Added: 2009-07-29
 %%
@@ -2114,7 +2118,7 @@
 %%
 Type: language
 Subtag: aja
-Description: Aja (Sudan)
+Description: Aja (South Sudan)
 Added: 2009-07-29
 %%
 Type: language
@@ -3097,6 +3101,7 @@
 %%
 Type: language
 Subtag: asf
+Description: Auslan
 Description: Australian Sign Language
 Added: 2009-07-29
 %%
@@ -4240,7 +4245,7 @@
 %%
 Type: language
 Subtag: bdh
-Description: Baka (Sudan)
+Description: Baka (South Sudan)
 Added: 2009-07-29
 %%
 Type: language
@@ -4250,6 +4255,7 @@
 %%
 Type: language
 Subtag: bdj
+Description: Bai (South Sudan)
 Description: Bai
 Added: 2009-07-29
 %%
@@ -5293,7 +5299,7 @@
 %%
 Type: language
 Subtag: blm
-Description: Beli (Sudan)
+Description: Beli (South Sudan)
 Added: 2009-07-29
 %%
 Type: language
@@ -8104,6 +8110,13 @@
 Added: 2009-07-29
 %%
 Type: language
+Subtag: cnr
+Description: Montenegrin
+Added: 2018-01-23
+Macrolanguage: sh
+Comments: see sr for Serbian
+%%
+Type: language
 Subtag: cns
 Description: Central Asmat
 Added: 2009-07-29
@@ -8768,6 +8781,11 @@
 Added: 2009-07-29
 %%
 Type: language
+Subtag: cuy
+Description: Cuitlatec
+Added: 2018-03-08
+%%
+Type: language
 Subtag: cvg
 Description: Chug
 Added: 2009-07-29
@@ -11089,7 +11107,7 @@
 %%
 Type: language
 Subtag: fap
-Description: Palor
+Description: Paloor
 Added: 2009-07-29
 %%
 Type: language
@@ -12282,6 +12300,11 @@
 Added: 2009-07-29
 %%
 Type: language
+Subtag: gkd
+Description: Magɨ (Madang Province)
+Added: 2018-03-08
+%%
+Type: language
 Subtag: gke
 Description: Ndai
 Added: 2009-07-29
@@ -12494,6 +12517,11 @@
 Added: 2009-07-29
 %%
 Type: language
+Subtag: gnj
+Description: Ngen
+Added: 2018-03-08
+%%
+Type: language
 Subtag: gnk
 Description: //Gana
 Description: ǁGana
@@ -13224,6 +13252,11 @@
 Added: 2009-07-29
 %%
 Type: language
+Subtag: gyo
+Description: Gyalsumdo
+Added: 2018-03-08
+%%
+Type: language
 Subtag: gyr
 Description: Guarayu
 Added: 2009-07-29
@@ -13584,6 +13617,11 @@
 Added: 2009-07-29
 %%
 Type: language
+Subtag: hkn
+Description: Mel-Khaonh
+Added: 2018-03-08
+%%
+Type: language
 Subtag: hks
 Description: Hong Kong Sign Language
 Description: Heung Kong Sau Yue
@@ -14238,6 +14276,12 @@
 Added: 2009-07-29
 %%
 Type: language
+Subtag: hyw
+Description: Western Armenian
+Added: 2018-03-08
+Comments: see also hy
+%%
+Type: language
 Subtag: hyx
 Description: Armenian (family)
 Added: 2009-07-29
@@ -14860,6 +14904,7 @@
 %%
 Type: language
 Subtag: iri
+Description: Rigwe
 Description: Irigwe
 Added: 2009-07-29
 %%
@@ -20313,7 +20358,7 @@
 %%
 Type: language
 Subtag: lno
-Description: Lango (Sudan)
+Description: Lango (South Sudan)
 Added: 2009-07-29
 %%
 Type: language
@@ -20579,6 +20624,7 @@
 Subtag: lsg
 Description: Lyons Sign Language
 Added: 2009-07-29
+Deprecated: 2018-03-08
 %%
 Type: language
 Subtag: lsh
@@ -20850,6 +20896,11 @@
 Added: 2009-07-29
 %%
 Type: language
+Subtag: lws
+Description: Malawian Sign Language
+Added: 2018-03-08
+%%
+Type: language
 Subtag: lwt
 Description: Lewotobi
 Added: 2009-07-29
@@ -20904,6 +20955,7 @@
 Subtag: maa
 Description: San Jerónimo Tecóatl Mazatec
 Added: 2009-07-29
+Comments: see also pbm
 %%
 Type: language
 Subtag: mab
@@ -23799,11 +23851,13 @@
 Subtag: mwx
 Description: Mediak
 Added: 2009-07-29
+Deprecated: 2018-03-08
 %%
 Type: language
 Subtag: mwy
 Description: Mosiro
 Added: 2009-07-29
+Deprecated: 2018-03-08
 %%
 Type: language
 Subtag: mwz
@@ -24527,6 +24581,8 @@
 Subtag: ncp
 Description: Ndaktup
 Added: 2009-07-29
+Deprecated: 2018-03-08
+Preferred-Value: kdz
 %%
 Type: language
 Subtag: ncq
@@ -25458,6 +25514,11 @@
 Added: 2009-07-29
 %%
 Type: language
+Subtag: nlm
+Description: Mankiyali
+Added: 2018-03-08
+%%
+Type: language
 Subtag: nln
 Description: Durango Nahuatl
 Added: 2009-07-29
@@ -26693,6 +26754,11 @@
 Added: 2009-07-29
 %%
 Type: language
+Subtag: nzd
+Description: Nzadi
+Added: 2018-03-08
+%%
+Type: language
 Subtag: nzi
 Description: Nzima
 Added: 2005-10-16
@@ -27757,6 +27823,12 @@
 Added: 2009-07-29
 %%
 Type: language
+Subtag: pbm
+Description: Puebla Mazatec
+Added: 2018-03-08
+Comments: see also maa
+%%
+Type: language
 Subtag: pbn
 Description: Kpasam
 Added: 2009-07-29
@@ -30902,6 +30974,7 @@
 %%
 Type: language
 Subtag: scp
+Description: Hyolmo
 Description: Helambu Sherpa
 Added: 2009-07-29
 %%
@@ -33049,6 +33122,7 @@
 %%
 Type: language
 Subtag: sxg
+Description: Shuhi
 Description: Shixing
 Added: 2009-07-29
 %%
@@ -33835,6 +33909,11 @@
 Added: 2009-07-29
 %%
 Type: language
+Subtag: tez
+Description: Tetserret
+Added: 2018-03-08
+%%
+Type: language
 Subtag: tfi
 Description: Tofin Gbe
 Added: 2009-07-29
@@ -34399,7 +34478,7 @@
 Type: language
 Subtag: tlh
 Description: Klingon
-Description: tlhIngan-Hol
+Description: tlhIngan Hol
 Added: 2005-10-16
 %%
 Type: language
@@ -42199,6 +42278,7 @@
 %%
 Type: extlang
 Subtag: asf
+Description: Auslan
 Description: Australian Sign Language
 Added: 2009-07-29
 Preferred-Value: asf
@@ -42927,7 +43007,7 @@
 Subtag: lsg
 Description: Lyons Sign Language
 Added: 2009-07-29
-Preferred-Value: lsg
+Deprecated: 2018-03-08
 Prefix: sgn
 %%
 Type: extlang
@@ -42983,6 +43063,13 @@
 Macrolanguage: lv
 %%
 Type: extlang
+Subtag: lws
+Description: Malawian Sign Language
+Added: 2018-03-08
+Preferred-Value: lws
+Prefix: sgn
+%%
+Type: extlang
 Subtag: lzh
 Description: Literary Chinese
 Added: 2009-07-29
@@ -44493,6 +44580,11 @@
 Added: 2006-10-17
 %%
 Type: script
+Subtag: Rohg
+Description: Hanifi Rohingya
+Added: 2017-12-13
+%%
+Type: script
 Subtag: Roro
 Description: Rongorongo
 Added: 2005-10-16
@@ -44563,6 +44655,16 @@
 Added: 2005-10-16
 %%
 Type: script
+Subtag: Sogd
+Description: Sogdian
+Added: 2017-12-13
+%%
+Type: script
+Subtag: Sogo
+Description: Old Sogdian
+Added: 2017-12-13
+%%
+Type: script
 Subtag: Sora
 Description: Sora Sompeng
 Added: 2011-01-07
@@ -46412,15 +46514,26 @@
   not brought into effect until 2009
 %%
 Type: variant
+Subtag: aranes
+Description: Aranese
+Added: 2018-04-22
+Prefix: oc
+Comments: Occitan variant spoken in the Val d'Aran
+%%
+Type: variant
 Subtag: arevela
 Description: Eastern Armenian
 Added: 2006-09-18
+Deprecated: 2018-03-24
+Preferred-Value: hy
 Prefix: hy
 %%
 Type: variant
 Subtag: arevmda
 Description: Western Armenian
 Added: 2006-09-18
+Deprecated: 2018-03-24
+Preferred-Value: hyw
 Prefix: hy
 %%
 Type: variant
@@ -46431,6 +46544,13 @@
 Prefix: tw
 %%
 Type: variant
+Subtag: auvern
+Description: Auvergnat
+Added: 2018-04-22
+Prefix: oc
+Comments: Occitan variant spoken in Auvergne
+%%
+Type: variant
 Subtag: baku1926
 Description: Unified Turkic Latin Alphabet (Historical)
 Added: 2007-04-18
@@ -46510,6 +46630,13 @@
 Comments: Jargon embedded in American English
 %%
 Type: variant
+Subtag: cisaup
+Description: Cisalpine
+Added: 2018-04-22
+Prefix: oc
+Comments: Occitan variant spoken in northwestern Italy
+%%
+Type: variant
 Subtag: colb1945
 Description: Portuguese-Brazilian Orthographic Convention of 1945
   (Convenção Ortográfica Luso-Brasileira de 1945)
@@ -46528,6 +46655,12 @@
 Prefix: en
 %%
 Type: variant
+Subtag: creiss
+Description: Occitan variants of the Croissant area
+Added: 2018-04-22
+Prefix: oc
+%%
+Type: variant
 Subtag: dajnko
 Description: Slovene in Dajnko alphabet
 Added: 2012-06-27
@@ -46556,6 +46689,11 @@
 Added: 2006-12-11
 %%
 Type: variant
+Subtag: fonkirsh
+Description: Kirshenbaum Phonetic Alphabet
+Added: 2018-04-22
+%%
+Type: variant
 Subtag: fonnapa
 Description: North American Phonetic Alphabet
 Description: Americanist Phonetic Notation
@@ -46573,6 +46711,36 @@
 Comments: Indicates that the content is transcribed according to X-SAMPA
 %%
 Type: variant
+Subtag: gascon
+Description: Gascon
+Added: 2018-04-22
+Prefix: oc
+Comments: Occitan variant spoken in Gascony
+%%
+Type: variant
+Subtag: grclass
+Description: Classical Occitan orthography
+Added: 2018-04-22
+Prefix: oc
+Comments: Classical written standard for Occitan developed in 1935 by
+  Alibèrt
+%%
+Type: variant
+Subtag: grital
+Description: Italian-inspired Occitan orthography
+Added: 2018-04-22
+Prefix: oc
+%%
+Type: variant
+Subtag: grmistr
+Description: Mistralian or Mistralian-inspired Occitan orthography
+Added: 2018-04-22
+Prefix: oc
+Comments: Written standard developed by Romanilha in 1853 and used by
+  Mistral and the Félibres, including derived standards such as Escolo
+  dóu Po, Escolo Gaston Febus, and others
+%%
+Type: variant
 Subtag: hepburn
 Description: Hepburn romanization
 Added: 2009-10-01
@@ -46617,6 +46785,13 @@
 Prefix: sa
 %%
 Type: variant
+Subtag: ivanchov
+Description: Bulgarian in 1899 orthography
+Added: 2017-12-13
+Prefix: bg
+Comments: Bulgarian orthography introduced by Todor Ivanchov in 1899
+%%
+Type: variant
 Subtag: jauer
 Description: Jauer dialect of Romansh
 Added: 2010-06-29
@@ -46659,6 +46834,20 @@
 Prefix: sa
 %%
 Type: variant
+Subtag: lemosin
+Description: Limousin
+Added: 2018-04-22
+Prefix: oc
+Comments: Occitan variant spoken in Limousin
+%%
+Type: variant
+Subtag: lengadoc
+Description: Languedocien
+Added: 2018-04-22
+Prefix: oc
+Comments: Occitan variant spoken in Languedoc
+%%
+Type: variant
 Subtag: lipaw
 Description: The Lipovaz dialect of Resian
 Description: The Lipovec dialect of Resian
@@ -46712,6 +46901,13 @@
 Prefix: en-CA
 %%
 Type: variant
+Subtag: nicard
+Description: Niçard
+Added: 2018-04-22
+Prefix: oc
+Comments: Occitan variant spoken in Nice
+%%
+Type: variant
 Subtag: njiva
 Description: The Gniva dialect of Resian
 Description: The Njiva dialect of Resian
@@ -46798,6 +46994,13 @@
 Prefix: el
 %%
 Type: variant
+Subtag: provenc
+Description: Provençal
+Added: 2018-04-22
+Prefix: oc
+Comments: Occitan variant spoken in Provence
+%%
+Type: variant
 Subtag: puter
 Description: Puter idiom of Romansh
 Added: 2010-06-29
@@ -46959,6 +47162,13 @@
   "idioms" of the Romansh language.
 %%
 Type: variant
+Subtag: vivaraup
+Description: Vivaro-Alpine
+Added: 2018-04-22
+Prefix: oc
+Comments: Occitan variant spoken in northeastern Occitania
+%%
+Type: variant
 Subtag: wadegile
 Description: Wade-Giles romanization
 Added: 2008-10-03
--- a/make/gensrc/Gensrc-jdk.internal.vm.compiler.gmk	Mon Jun 04 16:11:21 2018 +0200
+++ b/make/gensrc/Gensrc-jdk.internal.vm.compiler.gmk	Wed Jun 06 09:41:16 2018 -0700
@@ -103,7 +103,7 @@
 $(GENSRC_DIR)/_gensrc_proc_done: $(PROC_SRCS) $(PROCESSOR_JARS)
 	$(call MakeDir, $(@D))
 	$(eval $(call ListPathsSafely,PROC_SRCS,$(@D)/_gensrc_proc_files))
-	$(JAVA_SMALL) $(NEW_JAVAC) \
+	$(JAVA) $(NEW_JAVAC) \
 	    -XDignore.symbol.file \
 	    --upgrade-module-path $(JDK_OUTPUTDIR)/modules --system none \
 	    $(ADD_EXPORTS) \
--- a/src/hotspot/cpu/aarch64/aarch64.ad	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/aarch64/aarch64.ad	Wed Jun 06 09:41:16 2018 -0700
@@ -3792,69 +3792,7 @@
   return false;
 }
 
-// Transform:
-// (AddP base (AddP base address (LShiftL index con)) offset)
-// into:
-// (AddP base (AddP base offset) (LShiftL index con))
-// to take full advantage of ARM's addressing modes
 void Compile::reshape_address(AddPNode* addp) {
-  Node *addr = addp->in(AddPNode::Address);
-  if (addr->is_AddP() && addr->in(AddPNode::Base) == addp->in(AddPNode::Base)) {
-    const AddPNode *addp2 = addr->as_AddP();
-    if ((addp2->in(AddPNode::Offset)->Opcode() == Op_LShiftL &&
-         addp2->in(AddPNode::Offset)->in(2)->is_Con() &&
-         size_fits_all_mem_uses(addp, addp2->in(AddPNode::Offset)->in(2)->get_int())) ||
-        addp2->in(AddPNode::Offset)->Opcode() == Op_ConvI2L) {
-
-      // Any use that can't embed the address computation?
-      for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) {
-        Node* u = addp->fast_out(i);
-        if (!u->is_Mem()) {
-          return;
-        }
-        if (u->is_LoadVector() || u->is_StoreVector() || u->Opcode() == Op_StoreCM) {
-          return;
-        }
-        if (addp2->in(AddPNode::Offset)->Opcode() != Op_ConvI2L) {
-          int scale = 1 << addp2->in(AddPNode::Offset)->in(2)->get_int();
-          if (VM_Version::expensive_load(u->as_Mem()->memory_size(), scale)) {
-            return;
-          }
-        }
-      }
-
-      Node* off = addp->in(AddPNode::Offset);
-      Node* addr2 = addp2->in(AddPNode::Address);
-      Node* base = addp->in(AddPNode::Base);
-
-      Node* new_addr = NULL;
-      // Check whether the graph already has the new AddP we need
-      // before we create one (no GVN available here).
-      for (DUIterator_Fast imax, i = addr2->fast_outs(imax); i < imax; i++) {
-        Node* u = addr2->fast_out(i);
-        if (u->is_AddP() &&
-            u->in(AddPNode::Base) == base &&
-            u->in(AddPNode::Address) == addr2 &&
-            u->in(AddPNode::Offset) == off) {
-          new_addr = u;
-          break;
-        }
-      }
-
-      if (new_addr == NULL) {
-        new_addr = new AddPNode(base, addr2, off);
-      }
-      Node* new_off = addp2->in(AddPNode::Offset);
-      addp->set_req(AddPNode::Address, new_addr);
-      if (addr->outcnt() == 0) {
-        addr->disconnect_inputs(NULL, this);
-      }
-      addp->set_req(AddPNode::Offset, new_off);
-      if (off->outcnt() == 0) {
-        off->disconnect_inputs(NULL, this);
-      }
-    }
-  }
 }
 
 // helper for encoding java_to_runtime calls on sim
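Note on the hunk above: reshape_address() becomes a no-op on AArch64. A condensed sketch of the rewrite the deleted body performed, using the AddPNode input slots named in the removed code:

    // before: addp = (AddP base (AddP base addr2 (LShiftL idx con)) off)
    // after:  addp = (AddP base (AddP base addr2 off) (LShiftL idx con))
    //
    // Hoisting the constant offset into the inner AddP let the matcher fold
    // (LShiftL idx con) into a scaled-index addressing mode, as the deleted
    // header comment explained; the transform is now dropped entirely.
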
--- a/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/aarch64/c1_CodeStubs_aarch64.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -24,6 +24,7 @@
  */
 
 #include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "c1/c1_CodeStubs.hpp"
 #include "c1/c1_FrameMap.hpp"
 #include "c1/c1_LIRAssembler.hpp"
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -24,6 +24,7 @@
  */
 
 #include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "asm/assembler.hpp"
 #include "c1/c1_CodeStubs.hpp"
 #include "c1/c1_Compilation.hpp"
--- a/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/aarch64/c1_LIRGenerator_aarch64.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -24,6 +24,7 @@
  */
 
 #include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "c1/c1_Compilation.hpp"
 #include "c1/c1_FrameMap.hpp"
 #include "c1/c1_Instruction.hpp"
--- a/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/aarch64/gc/g1/g1BarrierSetAssembler_aarch64.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -29,6 +29,7 @@
 #include "gc/g1/c1/g1BarrierSetC1.hpp"
 #include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1BarrierSetAssembler.hpp"
+#include "gc/g1/g1BarrierSetRuntime.hpp"
 #include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1ThreadLocalData.hpp"
 #include "gc/g1/heapRegion.hpp"
@@ -60,9 +61,9 @@
       __ mov(c_rarg1, count);
     }
     if (UseCompressedOops) {
-      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry), 2);
+      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry), 2);
     } else {
-      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry), 2);
+      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry), 2);
     }
     __ pop(saved_regs, sp);
   }
@@ -78,7 +79,7 @@
   __ lsr(scratch, scratch, LogBytesPerHeapOop);  // convert to element count
   __ mov(c_rarg0, start);
   __ mov(c_rarg1, scratch);
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry), 2);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry), 2);
   __ pop(saved_regs, sp);
 }
 
@@ -161,9 +162,9 @@
 
   if (expand_call) {
     assert(pre_val != c_rarg1, "smashed arg");
-    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
+    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
   } else {
-    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
+    __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
   }
 
   __ pop(saved, sp);
@@ -245,7 +246,7 @@
   // save the live input values
   RegSet saved = RegSet::of(store_addr, new_val);
   __ push(saved, sp);
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, thread);
   __ pop(saved, sp);
 
   __ bind(done);
@@ -398,7 +399,7 @@
   __ bind(runtime);
   __ push_call_clobbered_registers();
   __ load_parameter(0, pre_val);
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
   __ pop_call_clobbered_registers();
   __ bind(done);
 
@@ -468,7 +469,7 @@
 
   __ bind(runtime);
   __ push_call_clobbered_registers();
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, thread);
   __ pop_call_clobbered_registers();
   __ bind(done);
   __ epilogue();
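This hunk, and the matching arm, ppc, s390, sparc, and x86 hunks below, retarget the generated slow-path calls from SharedRuntime::g1_wb_pre/g1_wb_post and G1BarrierSet::write_ref_array_* to a dedicated G1BarrierSetRuntime holder. A minimal sketch of the consolidated declarations, inferred from the call sites in this patch rather than copied from the real header:

    // Sketch only: G1 runtime entry points called from generated code.
    class G1BarrierSetRuntime : public AllStatic {
    public:
      // Arraycopy stub slow paths.
      static void write_ref_array_pre_oop_entry(oop* dst, size_t length);
      static void write_ref_array_pre_narrow_oop_entry(narrowOop* dst, size_t length);
      static void write_ref_array_post_entry(HeapWord* dst, size_t length);
      // Per-field pre/post barriers (formerly SharedRuntime::g1_wb_pre/post).
      static void write_ref_field_pre_entry(oopDesc* orig, JavaThread* thread);
      static void write_ref_field_post_entry(volatile jbyte* card_addr, JavaThread* thread);
    };
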
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -24,6 +24,7 @@
  */
 
 #include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "gc/shared/barrierSet.hpp"
 #include "gc/shared/barrierSetAssembler.hpp"
 #include "interp_masm_aarch64.hpp"
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -27,7 +27,6 @@
 #define CPU_AARCH64_VM_INTERP_MASM_AARCH64_64_HPP
 
 #include "asm/macroAssembler.hpp"
-#include "asm/macroAssembler.inline.hpp"
 #include "interpreter/invocationCounter.hpp"
 #include "runtime/frame.hpp"
 
--- a/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -24,6 +24,7 @@
  */
 
 #include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "interpreter/interp_masm.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interpreterRuntime.hpp"
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -1020,7 +1020,7 @@
   address trampoline_call(Address entry, CodeBuffer *cbuf = NULL);
 
   static bool far_branches() {
-    return ReservedCodeCacheSize > branch_range;
+    return ReservedCodeCacheSize > branch_range || UseAOT;
   }
 
   // Jumps that can reach anywhere in the code cache.
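The added UseAOT term forces far branches whenever AOT code may be loaded: AOT-compiled methods live outside the reserved code cache, so a direct branch can be out of range even when ReservedCodeCacheSize fits within branch_range. Far calls go through the trampoline_call() declared above; an illustrative sketch of the trampoline shape (not emitted verbatim by this patch):

    //   bl   Lstub          // near call, always within the same code blob
    // Lstub:
    //   ldr  x8, Ltarget    // load the full 64-bit destination address
    //   br   x8             // indirect branch reaches anywhere, AOT included
    // Ltarget:
    //   .quad <destination entry point>
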
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -234,8 +234,12 @@
   }
 
 #if INCLUDE_AOT
+  // Return true iff a call from instr to target is out of range.
+  // Used for calls from JIT- to AOT-compiled code.
   static bool is_far_call(address instr, address target) {
-    return !Assembler::reachable_from_branch_at(instr, target);
+    // On AArch64 we use trampolines which can reach anywhere in the
+    // address space, so calls are never out of range.
+    return false;
   }
 #endif
 
--- a/src/hotspot/cpu/aarch64/register_definitions_aarch64.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/aarch64/register_definitions_aarch64.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -25,6 +25,7 @@
 
 #include "precompiled.hpp"
 #include "asm/assembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "asm/register.hpp"
 #include "register_aarch64.hpp"
 # include "interp_masm_aarch64.hpp"
--- a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -24,7 +24,7 @@
  */
 
 #include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "gc/shared/barrierSetAssembler.hpp"
 #include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/interpreter.hpp"
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -24,7 +24,7 @@
  */
 
 #include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "gc/shared/barrierSetAssembler.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interpreterRuntime.hpp"
--- a/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/aarch64/vtableStubs_aarch64.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -24,7 +24,7 @@
  */
 
 #include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "assembler_aarch64.inline.hpp"
 #include "code/vtableStubs.hpp"
 #include "interp_masm_aarch64.hpp"
--- a/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/arm/c1_CodeStubs_arm.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -23,7 +23,7 @@
  */
 
 #include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "c1/c1_CodeStubs.hpp"
 #include "c1/c1_FrameMap.hpp"
 #include "c1/c1_LIRAssembler.hpp"
--- a/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/arm/c1_LIRAssembler_arm.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "c1/c1_Compilation.hpp"
 #include "c1/c1_LIRAssembler.hpp"
 #include "c1/c1_MacroAssembler.hpp"
--- a/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/arm/c1_LIRGenerator_arm.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "c1/c1_Compilation.hpp"
 #include "c1/c1_FrameMap.hpp"
 #include "c1/c1_Instruction.hpp"
--- a/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/arm/c1_Runtime1_arm.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -23,7 +23,7 @@
  */
 
 #include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "c1/c1_Defs.hpp"
 #include "c1/c1_LIRAssembler.hpp"
 #include "c1/c1_MacroAssembler.hpp"
--- a/src/hotspot/cpu/arm/gc/g1/g1BarrierSetAssembler_arm.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/arm/gc/g1/g1BarrierSetAssembler_arm.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -26,6 +26,7 @@
 #include "asm/macroAssembler.inline.hpp"
 #include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1BarrierSetAssembler.hpp"
+#include "gc/g1/g1BarrierSetRuntime.hpp"
 #include "gc/g1/g1ThreadLocalData.hpp"
 #include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1ThreadLocalData.hpp"
@@ -74,7 +75,7 @@
       __ mov(R0, addr);
     }
 #ifdef AARCH64
-    __ zero_extend(R1, count, 32); // G1BarrierSet::write_ref_array_pre_*_entry takes size_t
+    __ zero_extend(R1, count, 32); // G1BarrierSetRuntime::write_ref_array_pre_*_entry takes size_t
 #else
     if (count != R1) {
       __ mov(R1, count);
@@ -82,9 +83,9 @@
 #endif // AARCH64
 
     if (UseCompressedOops) {
-      __ call(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry));
+      __ call(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry));
     } else {
-      __ call(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry));
+      __ call(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry));
     }
 
 #ifdef AARCH64
@@ -106,7 +107,7 @@
     __ mov(R0, addr);
   }
 #ifdef AARCH64
-  __ zero_extend(R1, count, 32); // G1BarrierSet::write_ref_array_post_entry takes size_t
+  __ zero_extend(R1, count, 32); // G1BarrierSetRuntime::write_ref_array_post_entry takes size_t
 #else
   if (count != R1) {
     __ mov(R1, count);
@@ -120,7 +121,7 @@
   __ push(R9);
 #endif // !R9_IS_SCRATCHED
 #endif // !AARCH64
-  __ call(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry));
+  __ call(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry));
 #ifndef AARCH64
 #if R9_IS_SCRATCHED
   __ pop(R9);
@@ -205,7 +206,7 @@
   }
   __ mov(R1, Rthread);
 
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), R0, R1);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), R0, R1);
 
 #ifdef AARCH64
   if (store_addr != noreg) {
@@ -296,7 +297,7 @@
     __ mov(R0, card_addr);
   }
   __ mov(R1, Rthread);
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), R0, R1);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), R0, R1);
 
   __ bind(done);
 }
@@ -467,7 +468,7 @@
 
   assert(r_pre_val_0 == c_rarg0, "pre_val should be in R0");
   __ mov(c_rarg1, Rthread);
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), c_rarg0, c_rarg1);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), c_rarg0, c_rarg1);
 
   __ restore_live_registers_without_return();
 
@@ -574,7 +575,7 @@
 
   assert(r_card_addr_0 == c_rarg0, "card_addr should be in R0");
   __ mov(c_rarg1, Rthread);
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), c_rarg0, c_rarg1);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), c_rarg0, c_rarg1);
 
   __ restore_live_registers_without_return();
 
--- a/src/hotspot/cpu/arm/interp_masm_arm.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/arm/interp_masm_arm.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "jvm.h"
+#include "asm/macroAssembler.inline.hpp"
 #include "gc/shared/barrierSet.hpp"
 #include "gc/shared/cardTable.hpp"
 #include "gc/shared/cardTableBarrierSet.inline.hpp"
--- a/src/hotspot/cpu/arm/interp_masm_arm.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/arm/interp_masm_arm.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,7 +26,6 @@
 #define CPU_ARM_VM_INTERP_MASM_ARM_HPP
 
 #include "asm/macroAssembler.hpp"
-#include "asm/macroAssembler.inline.hpp"
 #include "interpreter/invocationCounter.hpp"
 #include "runtime/frame.hpp"
 #include "prims/jvmtiExport.hpp"
--- a/src/hotspot/cpu/arm/interpreterRT_arm.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/arm/interpreterRT_arm.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "interpreter/interp_masm.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interpreterRuntime.hpp"
--- a/src/hotspot/cpu/arm/relocInfo_arm.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/arm/relocInfo_arm.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
 #include "nativeInst_arm.hpp"
 #include "oops/compressedOops.inline.hpp"
 #include "oops/oop.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/safepoint.hpp"
 
 void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
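This include swap recurs throughout the merge: the platform-specific orderAccess_*.inline.hpp headers (all listed in the files field above) are folded into runtime/orderAccess.hpp, so callers now include the plain header. A minimal usage sketch, assuming load_acquire/release_store keep their pre-merge signatures:

    #include "runtime/orderAccess.hpp"

    // Writer publishes a flag with release semantics ...
    void publish(volatile bool* ready) {
      OrderAccess::release_store(ready, true);
    }

    // ... and the reader pairs it with an acquire load.
    bool consume(volatile bool* ready) {
      return OrderAccess::load_acquire(ready);
    }
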
--- a/src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/arm/templateInterpreterGenerator_arm.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "asm/assembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/interp_masm.hpp"
 #include "interpreter/interpreter.hpp"
--- a/src/hotspot/cpu/arm/templateTable_arm.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/arm/templateTable_arm.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -23,7 +23,7 @@
  */
 
 #include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "gc/shared/barrierSetAssembler.hpp"
 #include "interpreter/interp_masm.hpp"
 #include "interpreter/interpreter.hpp"
--- a/src/hotspot/cpu/arm/vtableStubs_arm.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/arm/vtableStubs_arm.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 #include "asm/assembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "assembler_arm.inline.hpp"
 #include "code/vtableStubs.hpp"
 #include "interp_masm_arm.hpp"
--- a/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/ppc/c1_CodeStubs_ppc.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -24,6 +24,7 @@
  */
 
 #include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "c1/c1_CodeStubs.hpp"
 #include "c1/c1_FrameMap.hpp"
 #include "c1/c1_LIRAssembler.hpp"
--- a/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -24,6 +24,7 @@
  */
 
 #include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "c1/c1_Compilation.hpp"
 #include "c1/c1_LIRAssembler.hpp"
 #include "c1/c1_MacroAssembler.hpp"
--- a/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/ppc/c1_LIRGenerator_ppc.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -24,6 +24,7 @@
  */
 
 #include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "c1/c1_Compilation.hpp"
 #include "c1/c1_FrameMap.hpp"
 #include "c1/c1_Instruction.hpp"
--- a/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/ppc/c1_MacroAssembler_ppc.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -24,6 +24,7 @@
  */
 
 #include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "c1/c1_MacroAssembler.hpp"
 #include "c1/c1_Runtime1.hpp"
 #include "classfile/systemDictionary.hpp"
--- a/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/ppc/c1_Runtime1_ppc.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -24,6 +24,7 @@
  */
 
 #include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "c1/c1_Defs.hpp"
 #include "c1/c1_MacroAssembler.hpp"
 #include "c1/c1_Runtime1.hpp"
--- a/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/ppc/gc/g1/g1BarrierSetAssembler_ppc.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -27,6 +27,7 @@
 #include "asm/macroAssembler.inline.hpp"
 #include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1BarrierSetAssembler.hpp"
+#include "gc/g1/g1BarrierSetRuntime.hpp"
 #include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1ThreadLocalData.hpp"
 #include "gc/g1/heapRegion.hpp"
@@ -72,9 +73,9 @@
     if (preserve2 != noreg) { __ std(preserve2, frame_size - (++slot_nr) * wordSize, R1_SP); }
 
     if (UseCompressedOops) {
-      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry), to, count);
+      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry), to, count);
     } else {
-      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry), to, count);
+      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry), to, count);
     }
 
     slot_nr = 0;
@@ -98,7 +99,7 @@
   __ save_LR_CR(R0);
   __ push_frame(frame_size, R0);
   if (preserve != noreg) { __ std(preserve, frame_size - 1 * wordSize, R1_SP); }
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry), addr, count);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry), addr, count);
   if (preserve != noreg) { __ ld(preserve, frame_size - 1 * wordSize, R1_SP); }
   __ addi(R1_SP, R1_SP, frame_size); // pop_frame();
   __ restore_LR_CR(R0);
@@ -191,7 +192,7 @@
   }
 
   if (pre_val->is_volatile() && preloaded) { __ mr(nv_save, pre_val); } // Save pre_val across C call if it was preloaded.
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, R16_thread);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, R16_thread);
   if (pre_val->is_volatile() && preloaded) { __ mr(pre_val, nv_save); } // restore
 
   if (needs_frame) {
@@ -272,7 +273,7 @@
   __ bind(runtime);
 
   // Save the live input values.
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), Rcard_addr, R16_thread);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), Rcard_addr, R16_thread);
 
   __ bind(filtered);
 }
--- a/src/hotspot/cpu/ppc/nativeInst_ppc.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/ppc/nativeInst_ppc.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2015 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -30,7 +30,7 @@
 #include "oops/compressedOops.inline.hpp"
 #include "oops/oop.hpp"
 #include "runtime/handles.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "utilities/ostream.hpp"
--- a/src/hotspot/cpu/s390/assembler_s390.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/s390/assembler_s390.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -2967,6 +2967,7 @@
 
   // branch never (nop)
   inline void z_nop();
+  inline void nop(); // Used by shared code.
 
   // ===============================================================================================
 
--- a/src/hotspot/cpu/s390/assembler_s390.inline.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/s390/assembler_s390.inline.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -1311,6 +1311,7 @@
 
 // branch never (nop), branch always
 inline void Assembler::z_nop() { z_bcr(bcondNop, Z_R0); }
+inline void Assembler::nop() { z_nop(); }
 inline void Assembler::z_br(Register r2) { assert(r2 != Z_R0, "nop if target is Z_R0, use z_nop() instead"); z_bcr(bcondAlways, r2 ); }
 
 inline void Assembler::z_exrl(Register r1, Label& L) { z_exrl(r1, target(L)); }  // z10
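The nop() alias lets shared, platform-independent code emit a no-op without knowing the s390 encoding; the equivalent C1-local definition is removed from c1_MacroAssembler_s390.hpp below. A trivial usage sketch (pad_with_nops is hypothetical; only Assembler::nop() comes from this patch):

    // Hypothetical caller of the new shared-code entry point.
    void pad_with_nops(Assembler* a, int count) {
      for (int i = 0; i < count; i++) {
        a->nop(); // expands to z_nop(), a BCR with the never-taken condition
      }
    }
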
--- a/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/s390/c1_CodeStubs_s390.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -24,6 +24,7 @@
  */
 
 #include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "c1/c1_CodeStubs.hpp"
 #include "c1/c1_FrameMap.hpp"
 #include "c1/c1_LIRAssembler.hpp"
--- a/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/s390/c1_LIRAssembler_s390.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -24,6 +24,7 @@
  */
 
 #include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "c1/c1_Compilation.hpp"
 #include "c1/c1_LIRAssembler.hpp"
 #include "c1/c1_MacroAssembler.hpp"
--- a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -24,6 +24,7 @@
  */
 
 #include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "c1/c1_MacroAssembler.hpp"
 #include "c1/c1_Runtime1.hpp"
 #include "classfile/systemDictionary.hpp"
--- a/src/hotspot/cpu/s390/c1_MacroAssembler_s390.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/s390/c1_MacroAssembler_s390.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -95,8 +95,6 @@
   void invalidate_registers(Register preserve1 = noreg, Register preserve2 = noreg,
                             Register preserve3 = noreg) PRODUCT_RETURN;
 
-  void nop() { z_nop(); }
-
   // This platform only uses signal-based null checks. The Label is not needed.
   void null_check(Register r, Label *Lnull = NULL) { MacroAssembler::null_check(r); }
 
--- a/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/s390/c1_Runtime1_s390.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -24,6 +24,7 @@
  */
 
 #include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "c1/c1_Defs.hpp"
 #include "c1/c1_MacroAssembler.hpp"
 #include "c1/c1_Runtime1.hpp"
--- a/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/s390/gc/g1/g1BarrierSetAssembler_s390.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -29,6 +29,7 @@
 #include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1BarrierSetAssembler.hpp"
+#include "gc/g1/g1BarrierSetRuntime.hpp"
 #include "gc/g1/g1ThreadLocalData.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "interpreter/interp_masm.hpp"
@@ -66,9 +67,9 @@
     RegisterSaver::save_live_registers(masm, RegisterSaver::arg_registers); // Creates frame.
 
     if (UseCompressedOops) {
-      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry), addr, count);
+      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry), addr, count);
     } else {
-      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry), addr, count);
+      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry), addr, count);
     }
 
     RegisterSaver::restore_live_registers(masm, RegisterSaver::arg_registers);
@@ -79,7 +80,7 @@
 
 void G1BarrierSetAssembler::gen_write_ref_array_post_barrier(MacroAssembler* masm, DecoratorSet decorators,
                                                              Register addr, Register count, bool do_return) {
-  address entry_point = CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry);
+  address entry_point = CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry);
   if (!do_return) {
     assert_different_registers(addr,  Z_R0_scratch);  // would be destroyed by push_frame()
     assert_different_registers(count, Z_R0_scratch);  // would be destroyed by push_frame()
@@ -234,7 +235,7 @@
   __ push_frame_abi160(0); // Will use Z_R0 as tmp.
 
   // Rpre_val may be destroyed by push_frame().
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), Rpre_save, Z_thread);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), Rpre_save, Z_thread);
 
   __ pop_frame();
   __ restore_return_pc();
@@ -359,7 +360,7 @@
   }
 
   // Save the live input values.
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), Rcard_addr, Z_thread);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), Rcard_addr, Z_thread);
 
   if (needs_frame) {
     __ pop_frame();
--- a/src/hotspot/cpu/sparc/c1_CodeStubs_sparc.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/sparc/c1_CodeStubs_sparc.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "c1/c1_CodeStubs.hpp"
 #include "c1/c1_FrameMap.hpp"
 #include "c1/c1_LIRAssembler.hpp"
--- a/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/sparc/c1_LIRAssembler_sparc.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "c1/c1_Compilation.hpp"
 #include "c1/c1_LIRAssembler.hpp"
 #include "c1/c1_MacroAssembler.hpp"
--- a/src/hotspot/cpu/sparc/c1_LIRGenerator_sparc.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/sparc/c1_LIRGenerator_sparc.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "c1/c1_Compilation.hpp"
 #include "c1/c1_FrameMap.hpp"
 #include "c1/c1_Instruction.hpp"
--- a/src/hotspot/cpu/sparc/c1_MacroAssembler_sparc.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/sparc/c1_MacroAssembler_sparc.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "c1/c1_MacroAssembler.hpp"
 #include "c1/c1_Runtime1.hpp"
 #include "classfile/systemDictionary.hpp"
--- a/src/hotspot/cpu/sparc/c1_Runtime1_sparc.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/sparc/c1_Runtime1_sparc.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "c1/c1_Defs.hpp"
 #include "c1/c1_MacroAssembler.hpp"
 #include "c1/c1_Runtime1.hpp"
--- a/src/hotspot/cpu/sparc/gc/g1/g1BarrierSetAssembler_sparc.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/sparc/gc/g1/g1BarrierSetAssembler_sparc.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -26,6 +26,7 @@
 #include "asm/macroAssembler.inline.hpp"
 #include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1BarrierSetAssembler.hpp"
+#include "gc/g1/g1BarrierSetRuntime.hpp"
 #include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1ThreadLocalData.hpp"
 #include "gc/g1/heapRegion.hpp"
@@ -68,8 +69,8 @@
     }
     __ mov(addr->after_save(), O0);
     // Get the count into O1
-    address slowpath = UseCompressedOops ? CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry)
-                                         : CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry);
+    address slowpath = UseCompressedOops ? CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry)
+                                         : CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry);
     __ call(slowpath);
     __ delayed()->mov(count->after_save(), O1);
     if (addr->is_global()) {
@@ -90,7 +91,7 @@
   // Get some new fresh output registers.
   __ save_frame(0);
   __ mov(addr->after_save(), O0);
-  __ call(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry));
+  __ call(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry));
   __ delayed()->mov(count->after_save(), O1);
   __ restore();
 }
--- a/src/hotspot/cpu/sparc/gc/shared/barrierSetAssembler_sparc.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/sparc/gc/shared/barrierSetAssembler_sparc.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "gc/shared/barrierSetAssembler.hpp"
 #include "interpreter/interp_masm.hpp"
 #include "runtime/jniHandles.hpp"
--- a/src/hotspot/cpu/sparc/interp_masm_sparc.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/sparc/interp_masm_sparc.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "interp_masm_sparc.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interpreterRuntime.hpp"
--- a/src/hotspot/cpu/sparc/interp_masm_sparc.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/sparc/interp_masm_sparc.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -25,7 +25,7 @@
 #ifndef CPU_SPARC_VM_INTERP_MASM_SPARC_HPP
 #define CPU_SPARC_VM_INTERP_MASM_SPARC_HPP
 
-#include "asm/macroAssembler.inline.hpp"
+#include "asm/macroAssembler.hpp"
 #include "interpreter/invocationCounter.hpp"
 
 // This file specializes the assember with interpreter-specific macros
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -3338,6 +3338,12 @@
   _masm->bind(_label);
 }
 
+void MacroAssembler::bang_stack_with_offset(int offset) {
+  // stack grows down, caller passes positive offset
+  assert(offset > 0, "must bang with negative offset");
+  set((-offset)+STACK_BIAS, G3_scratch);
+  st(G0, SP, G3_scratch);
+}
 
 // Writes to stack successive pages until offset reached to check for
 // stack overflow + shadow pages.  This clobbers tsp and scratch.
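bang_stack_with_offset() moves out of line here, from macroAssembler_sparc.inline.hpp (removed below) into the .cpp, with its body unchanged. For context, a sketch of how shadow-page banging typically drives it; the loop and the zone accessors are assumptions for illustration, not code from this patch:

    // Hypothetical driver: touch each page of the shadow zone below SP.
    void bang_shadow_pages(MacroAssembler* masm) {
      const int page_size = os::vm_page_size();
      const int shadow_pages = (int)(JavaThread::stack_shadow_zone_size() / page_size);
      for (int pages = 1; pages <= shadow_pages; pages++) {
        masm->bang_stack_with_offset(pages * page_size); // stores G0 at SP - offset
      }
    }
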
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -1303,7 +1303,7 @@
   // Stack overflow checking
 
   // Note: this clobbers G3_scratch
-  inline void bang_stack_with_offset(int offset);
+  void bang_stack_with_offset(int offset);
 
   // Writes to stack successive pages until offset reached to check for
   // stack overflow + shadow pages.  Clobbers tsp and scratch registers.
--- a/src/hotspot/cpu/sparc/macroAssembler_sparc.inline.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/sparc/macroAssembler_sparc.inline.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -724,12 +724,4 @@
   if (a.has_index()) { assert(offset == 0, ""); swap(a.base(), a.index(), d        ); }
   else               {                          swap(a.base(), a.disp() + offset, d); }
 }
-
-inline void MacroAssembler::bang_stack_with_offset(int offset) {
-  // stack grows down, caller passes positive offset
-  assert(offset > 0, "must bang with negative offset");
-  set((-offset)+STACK_BIAS, G3_scratch);
-  st(G0, SP, G3_scratch);
-}
-
 #endif // CPU_SPARC_VM_MACROASSEMBLER_SPARC_INLINE_HPP
--- a/src/hotspot/cpu/sparc/memset_with_concurrent_readers_sparc.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/sparc/memset_with_concurrent_readers_sparc.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -24,6 +24,7 @@
 
 #include "precompiled.hpp"
 
+#include "asm/macroAssembler.inline.hpp"
 #include "gc/shared/memset_with_concurrent_readers.hpp"
 #include "runtime/prefetch.inline.hpp"
 #include "utilities/align.hpp"
--- a/src/hotspot/cpu/sparc/methodHandles_sparc.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/sparc/methodHandles_sparc.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -24,7 +24,7 @@
 
 #include "precompiled.hpp"
 #include "jvm.h"
-#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "classfile/javaClasses.inline.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interp_masm.hpp"
--- a/src/hotspot/cpu/sparc/relocInfo_sparc.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/sparc/relocInfo_sparc.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
  */
 
 #include "precompiled.hpp"
-#include "asm/assembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "code/relocInfo.hpp"
 #include "nativeInst_sparc.hpp"
 #include "oops/compressedOops.inline.hpp"
--- a/src/hotspot/cpu/sparc/templateInterpreterGenerator_sparc.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/sparc/templateInterpreterGenerator_sparc.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -23,7 +23,7 @@
  */
 
 #include "precompiled.hpp"
-#include "asm/macroAssembler.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "gc/shared/barrierSetAssembler.hpp"
 #include "interpreter/bytecodeHistogram.hpp"
 #include "interpreter/interpreter.hpp"
--- a/src/hotspot/cpu/sparc/templateTable_sparc.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/sparc/templateTable_sparc.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "asm/macroAssembler.inline.hpp"
 #include "gc/shared/barrierSetAssembler.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interpreterRuntime.hpp"
--- a/src/hotspot/cpu/x86/gc/g1/g1BarrierSetAssembler_x86.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/x86/gc/g1/g1BarrierSetAssembler_x86.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -26,6 +26,7 @@
 #include "asm/macroAssembler.inline.hpp"
 #include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1BarrierSetAssembler.hpp"
+#include "gc/g1/g1BarrierSetRuntime.hpp"
 #include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1ThreadLocalData.hpp"
 #include "gc/g1/heapRegion.hpp"
@@ -80,12 +81,12 @@
       __ movptr(c_rarg1, count);
     }
     if (UseCompressedOops) {
-      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_narrow_oop_entry), 2);
+      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry), 2);
     } else {
-      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry), 2);
+      __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry), 2);
     }
 #else
-    __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_pre_oop_entry),
+    __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_pre_oop_entry),
                     addr, count);
 #endif
     __ popa();
@@ -107,9 +108,9 @@
     __ mov(c_rarg0, addr);
     __ mov(c_rarg1, count);
   }
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry), 2);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry), 2);
 #else
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSet::write_ref_array_post_entry),
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_array_post_entry),
                   addr, count);
 #endif
   __ popa();
@@ -238,9 +239,9 @@
     __ push(thread);
     __ push(pre_val);
 #endif
-    __ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), 2);
+    __ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), 2);
   } else {
-    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
+    __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), pre_val, thread);
   }
 
   NOT_LP64( __ pop(thread); )
@@ -333,10 +334,10 @@
   __ push(store_addr);
   __ push(new_val);
 #ifdef _LP64
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, r15_thread);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, r15_thread);
 #else
   __ push(thread);
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, thread);
   __ pop(thread);
 #endif
   __ pop(new_val);
@@ -500,7 +501,7 @@
 
   // load the pre-value
   __ load_parameter(0, rcx);
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), rcx, thread);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), rcx, thread);
 
   __ restore_live_registers(true);
 
@@ -577,7 +578,7 @@
 
   __ save_live_registers_no_oop_map(true);
 
-  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
+  __ call_VM_leaf(CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), card_addr, thread);
 
   __ restore_live_registers(true);
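
The hunks above reroute every leaf call from SharedRuntime::g1_wb_pre / g1_wb_post (and the G1BarrierSet array entries) to a dedicated G1BarrierSetRuntime class, keeping GC runtime entry points out of SharedRuntime. A minimal sketch of that grouping follows; the signatures are assumptions inferred from how the call sites above load c_rarg0/c_rarg1, not a quote of the real header:

    // Sketch only: G1's runtime entry points grouped on an AllStatic holder,
    // as the rewritten call sites above suggest.
    class G1BarrierSetRuntime : public AllStatic {
    public:
      // SATB pre-write barrier: log the value about to be overwritten.
      static void write_ref_field_pre_entry(oopDesc* orig, JavaThread* thread);
      // Post-write barrier: enqueue the card covering the updated field.
      static void write_ref_field_post_entry(volatile jbyte* card_addr,
                                             JavaThread* thread);
      // Bulk variants used around oop-array copies.
      static void write_ref_array_pre_oop_entry(oop* dst, size_t length);
      static void write_ref_array_pre_narrow_oop_entry(narrowOop* dst, size_t length);
      static void write_ref_array_post_entry(HeapWord* dst, size_t length);
    };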
 
--- a/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/x86/gc/shared/barrierSetAssembler_x86.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -34,6 +34,7 @@
   bool on_heap = (decorators & IN_HEAP) != 0;
   bool on_root = (decorators & IN_ROOT) != 0;
   bool oop_not_null = (decorators & OOP_NOT_NULL) != 0;
+  bool atomic = (decorators & MO_RELAXED) != 0;
 
   switch (type) {
   case T_OBJECT:
@@ -58,6 +59,37 @@
     }
     break;
   }
+  case T_BOOLEAN: __ load_unsigned_byte(dst, src);  break;
+  case T_BYTE:    __ load_signed_byte(dst, src);    break;
+  case T_CHAR:    __ load_unsigned_short(dst, src); break;
+  case T_SHORT:   __ load_signed_short(dst, src);   break;
+  case T_INT:     __ movl  (dst, src);              break;
+  case T_ADDRESS: __ movptr(dst, src);              break;
+  case T_FLOAT:
+    assert(dst == noreg, "only to ftos");
+    __ load_float(src);
+    break;
+  case T_DOUBLE:
+    assert(dst == noreg, "only to dtos");
+    __ load_double(src);
+    break;
+  case T_LONG:
+    assert(dst == noreg, "only to ltos");
+#ifdef _LP64
+    __ movq(rax, src);
+#else
+    if (atomic) {
+      __ fild_d(src);               // Must load atomically
+      __ subptr(rsp,2*wordSize);    // Make space for store
+      __ fistp_d(Address(rsp,0));
+      __ pop(rax);
+      __ pop(rdx);
+    } else {
+      __ movl(rax, src);
+      __ movl(rdx, src.plus_disp(wordSize));
+    }
+#endif
+    break;
   default: Unimplemented();
   }
 }
@@ -67,6 +99,7 @@
   bool on_heap = (decorators & IN_HEAP) != 0;
   bool on_root = (decorators & IN_ROOT) != 0;
   bool oop_not_null = (decorators & OOP_NOT_NULL) != 0;
+  bool atomic = (decorators & MO_RELAXED) != 0;
 
   switch (type) {
   case T_OBJECT:
@@ -106,6 +139,50 @@
     }
     break;
   }
+  case T_BOOLEAN:
+    __ andl(val, 0x1);  // boolean is true if LSB is 1
+    __ movb(dst, val);
+    break;
+  case T_BYTE:
+    __ movb(dst, val);
+    break;
+  case T_SHORT:
+    __ movw(dst, val);
+    break;
+  case T_CHAR:
+    __ movw(dst, val);
+    break;
+  case T_INT:
+    __ movl(dst, val);
+    break;
+  case T_LONG:
+    assert(val == noreg, "only tos");
+#ifdef _LP64
+    __ movq(dst, rax);
+#else
+    if (atomic) {
+      __ push(rdx);
+      __ push(rax);                 // Must update atomically with FIST
+      __ fild_d(Address(rsp,0));    // So load into FPU register
+      __ fistp_d(dst);              // and put into memory atomically
+      __ addptr(rsp, 2*wordSize);
+    } else {
+      __ movptr(dst, rax);
+      __ movptr(dst.plus_disp(wordSize), rdx);
+    }
+#endif
+    break;
+  case T_FLOAT:
+    assert(val == noreg, "only tos");
+    __ store_float(dst);
+    break;
+  case T_DOUBLE:
+    assert(val == noreg, "only tos");
+    __ store_double(dst);
+    break;
+  case T_ADDRESS:
+    __ movptr(dst, val);
+    break;
   default: Unimplemented();
   }
 }
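
The new 32-bit T_LONG paths above route through the x87 unit because a pair of 32-bit moves can tear, while FILD/FISTP move all 64 bits in one load and one store. The same trick in isolation — a standalone sketch assuming GCC/Clang inline assembly on 32-bit x86, not HotSpot code:

    #include <cstdint>

    // fildll/fistpll each touch memory exactly once with a 64-bit access,
    // which is atomic on x86 for naturally aligned operands; two movl's are not.
    static inline int64_t atomic_load64(const volatile int64_t* src) {
      int64_t result;
      __asm__ __volatile__("fildll %1\n\t"   // push the 64-bit integer onto the FPU stack
                           "fistpll %0"      // pop it into the destination
                           : "=m" (result)
                           : "m" (*src)
                           : "st");
      return result;
    }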
--- a/src/hotspot/cpu/x86/interp_masm_x86.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/x86/interp_masm_x86.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -26,7 +26,6 @@
 #define CPU_X86_VM_INTERP_MASM_X86_HPP
 
 #include "asm/macroAssembler.hpp"
-#include "asm/macroAssembler.inline.hpp"
 #include "interpreter/invocationCounter.hpp"
 #include "runtime/frame.hpp"
 
--- a/src/hotspot/cpu/x86/methodHandles_x86.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/x86/methodHandles_x86.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -175,7 +175,9 @@
   __ verify_oop(method_temp);
   __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::method_offset_in_bytes())), temp2);
   __ verify_oop(method_temp);
-  __ movptr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())));
+  __ access_load_at(T_ADDRESS, IN_HEAP, method_temp,
+                    Address(method_temp, NONZERO(java_lang_invoke_ResolvedMethodName::vmtarget_offset_in_bytes())),
+                    noreg, noreg);
 
   if (VerifyMethodHandles && !for_compiler_entry) {
     // make sure recv is already on stack
@@ -390,7 +392,7 @@
         verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
       }
       __ load_heap_oop(rbx_method, member_vmtarget);
-      __ movptr(rbx_method, vmtarget_method);
+      __ access_load_at(T_ADDRESS, IN_HEAP, rbx_method, vmtarget_method, noreg, noreg);
       break;
 
     case vmIntrinsics::_linkToStatic:
@@ -398,7 +400,7 @@
         verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
       }
       __ load_heap_oop(rbx_method, member_vmtarget);
-      __ movptr(rbx_method, vmtarget_method);
+      __ access_load_at(T_ADDRESS, IN_HEAP, rbx_method, vmtarget_method, noreg, noreg);
       break;
 
     case vmIntrinsics::_linkToVirtual:
@@ -412,7 +414,7 @@
 
       // pick out the vtable index from the MemberName, and then we can discard it:
       Register temp2_index = temp2;
-      __ movptr(temp2_index, member_vmindex);
+      __ access_load_at(T_ADDRESS, IN_HEAP, temp2_index, member_vmindex, noreg, noreg);
 
       if (VerifyMethodHandles) {
         Label L_index_ok;
@@ -446,7 +448,7 @@
       __ verify_klass_ptr(temp3_intf);
 
       Register rbx_index = rbx_method;
-      __ movptr(rbx_index, member_vmindex);
+      __ access_load_at(T_ADDRESS, IN_HEAP, rbx_index, member_vmindex, noreg, noreg);
       if (VerifyMethodHandles) {
         Label L;
         __ cmpl(rbx_index, 0);
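
Each raw movptr above becomes a call through the GC-agnostic access layer, so a collector can interpose on these loads. For orientation, a sketch of the dispatch those calls reach — inferred from the call sites, not the verbatim HotSpot body:

    // Sketch: MacroAssembler::access_load_at forwards to the active GC's
    // BarrierSetAssembler, which may wrap or replace the plain load.
    void MacroAssembler::access_load_at(BasicType type, DecoratorSet decorators,
                                        Register dst, Address src,
                                        Register tmp1, Register tmp_thread) {
      BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
      decorators = AccessInternal::decorator_fixup(decorators);
      if ((decorators & AS_RAW) != 0) {
        // Bypass the GC hook: call the shared base implementation directly.
        bs->BarrierSetAssembler::load_at(this, decorators, type, dst, src, tmp1, tmp_thread);
      } else {
        bs->load_at(this, decorators, type, dst, src, tmp1, tmp_thread);
      }
    }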
--- a/src/hotspot/cpu/x86/templateTable_x86.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/x86/templateTable_x86.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -770,9 +770,10 @@
   // rax: index
   // rdx: array
   index_check(rdx, rax); // kills rbx
-  __ movl(rax, Address(rdx, rax,
-                       Address::times_4,
-                       arrayOopDesc::base_offset_in_bytes(T_INT)));
+  __ access_load_at(T_INT, IN_HEAP | IN_HEAP_ARRAY, rax,
+                    Address(rdx, rax, Address::times_4,
+                            arrayOopDesc::base_offset_in_bytes(T_INT)),
+                    noreg, noreg);
 }
 
 void TemplateTable::laload() {
@@ -782,8 +783,10 @@
   index_check(rdx, rax); // kills rbx
   NOT_LP64(__ mov(rbx, rax));
   // rbx,: index
-  __ movptr(rax, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize));
-  NOT_LP64(__ movl(rdx, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize)));
+  __ access_load_at(T_LONG, IN_HEAP | IN_HEAP_ARRAY, noreg /* ltos */,
+                    Address(rdx, rbx, Address::times_8,
+                            arrayOopDesc::base_offset_in_bytes(T_LONG)),
+                    noreg, noreg);
 }
 
 
@@ -793,9 +796,11 @@
   // rax: index
   // rdx: array
   index_check(rdx, rax); // kills rbx
-  __ load_float(Address(rdx, rax,
-                        Address::times_4,
-                        arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
+  __ access_load_at(T_FLOAT, IN_HEAP | IN_HEAP_ARRAY, noreg /* ftos */,
+                    Address(rdx, rax,
+                            Address::times_4,
+                            arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
+                    noreg, noreg);
 }
 
 void TemplateTable::daload() {
@@ -803,9 +808,11 @@
   // rax: index
   // rdx: array
   index_check(rdx, rax); // kills rbx
-  __ load_double(Address(rdx, rax,
-                         Address::times_8,
-                         arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
+  __ access_load_at(T_DOUBLE, IN_HEAP | IN_HEAP_ARRAY, noreg /* dtos */,
+                    Address(rdx, rax,
+                            Address::times_8,
+                            arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
+                    noreg, noreg);
 }
 
 void TemplateTable::aaload() {
@@ -826,7 +833,9 @@
   // rax: index
   // rdx: array
   index_check(rdx, rax); // kills rbx
-  __ load_signed_byte(rax, Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
+  __ access_load_at(T_BYTE, IN_HEAP | IN_HEAP_ARRAY, rax,
+                    Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)),
+                    noreg, noreg);
 }
 
 void TemplateTable::caload() {
@@ -834,7 +843,9 @@
   // rax: index
   // rdx: array
   index_check(rdx, rax); // kills rbx
-  __ load_unsigned_short(rax, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
+  __ access_load_at(T_CHAR, IN_HEAP | IN_HEAP_ARRAY, rax,
+                    Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)),
+                    noreg, noreg);
 }
 
 // iload followed by caload frequent pair
@@ -847,10 +858,9 @@
   // rax: index
   // rdx: array
   index_check(rdx, rax); // kills rbx
-  __ load_unsigned_short(rax,
-                         Address(rdx, rax,
-                                 Address::times_2,
-                                 arrayOopDesc::base_offset_in_bytes(T_CHAR)));
+  __ access_load_at(T_CHAR, IN_HEAP | IN_HEAP_ARRAY, rax,
+                    Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)),
+                    noreg, noreg);
 }
 
 
@@ -859,7 +869,9 @@
   // rax: index
   // rdx: array
   index_check(rdx, rax); // kills rbx
-  __ load_signed_short(rax, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)));
+  __ access_load_at(T_SHORT, IN_HEAP | IN_HEAP_ARRAY, rax,
+                    Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)),
+                    noreg, noreg);
 }
 
 void TemplateTable::iload(int n) {
@@ -1051,10 +1063,10 @@
   // rbx: index
   // rdx: array
   index_check(rdx, rbx); // prefer index in rbx
-  __ movl(Address(rdx, rbx,
-                  Address::times_4,
-                  arrayOopDesc::base_offset_in_bytes(T_INT)),
-          rax);
+  __ access_store_at(T_INT, IN_HEAP | IN_HEAP_ARRAY,
+                     Address(rdx, rbx, Address::times_4,
+                             arrayOopDesc::base_offset_in_bytes(T_INT)),
+                     rax, noreg, noreg);
 }
 
 void TemplateTable::lastore() {
@@ -1065,8 +1077,10 @@
   // rdx: high(value)
   index_check(rcx, rbx);  // prefer index in rbx,
   // rbx,: index
-  __ movptr(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize), rax);
-  NOT_LP64(__ movl(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize), rdx));
+  __ access_store_at(T_LONG, IN_HEAP | IN_HEAP_ARRAY,
+                     Address(rcx, rbx, Address::times_8,
+                             arrayOopDesc::base_offset_in_bytes(T_LONG)),
+                     noreg /* ltos */, noreg, noreg);
 }
 
 
@@ -1077,7 +1091,10 @@
   // rbx:  index
   // rdx:  array
   index_check(rdx, rbx); // prefer index in rbx
-  __ store_float(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
+  __ access_store_at(T_FLOAT, IN_HEAP | IN_HEAP_ARRAY,
+                     Address(rdx, rbx, Address::times_4,
+                             arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
+                     noreg /* ftos */, noreg, noreg);
 }
 
 void TemplateTable::dastore() {
@@ -1087,7 +1104,10 @@
   // rbx:  index
   // rdx:  array
   index_check(rdx, rbx); // prefer index in rbx
-  __ store_double(Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
+  __ access_store_at(T_DOUBLE, IN_HEAP | IN_HEAP_ARRAY,
+                     Address(rdx, rbx, Address::times_8,
+                             arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
+                     noreg /* dtos */, noreg, noreg);
 }
 
 void TemplateTable::aastore() {
@@ -1160,10 +1180,10 @@
   __ jccb(Assembler::zero, L_skip);
   __ andl(rax, 1);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
   __ bind(L_skip);
-  __ movb(Address(rdx, rbx,
-                  Address::times_1,
-                  arrayOopDesc::base_offset_in_bytes(T_BYTE)),
-          rax);
+  __ access_store_at(T_BYTE, IN_HEAP | IN_HEAP_ARRAY,
+                     Address(rdx, rbx, Address::times_1,
+                             arrayOopDesc::base_offset_in_bytes(T_BYTE)),
+                     rax, noreg, noreg);
 }
 
 void TemplateTable::castore() {
@@ -1173,10 +1193,10 @@
   // rbx: index
   // rdx: array
   index_check(rdx, rbx);  // prefer index in rbx
-  __ movw(Address(rdx, rbx,
-                  Address::times_2,
-                  arrayOopDesc::base_offset_in_bytes(T_CHAR)),
-          rax);
+  __ access_store_at(T_CHAR, IN_HEAP | IN_HEAP_ARRAY,
+                     Address(rdx, rbx, Address::times_2,
+                             arrayOopDesc::base_offset_in_bytes(T_CHAR)),
+                     rax, noreg, noreg);
 }
 
 
@@ -2852,7 +2872,6 @@
   if (!is_static) pop_and_check_object(obj);
 
   const Address field(obj, off, Address::times_1, 0*wordSize);
-  NOT_LP64(const Address hi(obj, off, Address::times_1, 1*wordSize));
 
   Label Done, notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
 
@@ -2864,7 +2883,7 @@
 
   __ jcc(Assembler::notZero, notByte);
   // btos
-  __ load_signed_byte(rax, field);
+  __ access_load_at(T_BYTE, IN_HEAP, rax, field, noreg, noreg);
   __ push(btos);
   // Rewrite bytecode to be faster
   if (!is_static && rc == may_rewrite) {
@@ -2877,7 +2896,7 @@
   __ jcc(Assembler::notEqual, notBool);
 
   // ztos (same code as btos)
-  __ load_signed_byte(rax, field);
+  __ access_load_at(T_BOOLEAN, IN_HEAP, rax, field, noreg, noreg);
   __ push(ztos);
   // Rewrite bytecode to be faster
   if (!is_static && rc == may_rewrite) {
@@ -2901,7 +2920,7 @@
   __ cmpl(flags, itos);
   __ jcc(Assembler::notEqual, notInt);
   // itos
-  __ movl(rax, field);
+  __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
   __ push(itos);
   // Rewrite bytecode to be faster
   if (!is_static && rc == may_rewrite) {
@@ -2913,7 +2932,7 @@
   __ cmpl(flags, ctos);
   __ jcc(Assembler::notEqual, notChar);
   // ctos
-  __ load_unsigned_short(rax, field);
+  __ access_load_at(T_CHAR, IN_HEAP, rax, field, noreg, noreg);
   __ push(ctos);
   // Rewrite bytecode to be faster
   if (!is_static && rc == may_rewrite) {
@@ -2925,7 +2944,7 @@
   __ cmpl(flags, stos);
   __ jcc(Assembler::notEqual, notShort);
   // stos
-  __ load_signed_short(rax, field);
+  __ access_load_at(T_SHORT, IN_HEAP, rax, field, noreg, noreg);
   __ push(stos);
   // Rewrite bytecode to be faster
   if (!is_static && rc == may_rewrite) {
@@ -2937,19 +2956,9 @@
   __ cmpl(flags, ltos);
   __ jcc(Assembler::notEqual, notLong);
   // ltos
-
-#ifndef _LP64
-  // Generate code as if volatile.  There just aren't enough registers to
-  // save that information and this code is faster than the test.
-  __ fild_d(field);                // Must load atomically
-  __ subptr(rsp,2*wordSize);    // Make space for store
-  __ fistp_d(Address(rsp,0));
-  __ pop(rax);
-  __ pop(rdx);
-#else
-  __ movq(rax, field);
-#endif
-
+  // Generate code as if volatile (x86_32).  There just aren't enough registers to
+  // save that information and this code is faster than the test.
+  __ access_load_at(T_LONG, IN_HEAP | MO_RELAXED, noreg /* ltos */, field, noreg, noreg);
   __ push(ltos);
   // Rewrite bytecode to be faster
   LP64_ONLY(if (!is_static && rc == may_rewrite) patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx));
@@ -2960,7 +2969,7 @@
   __ jcc(Assembler::notEqual, notFloat);
   // ftos
 
-  __ load_float(field);
+  __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
   __ push(ftos);
   // Rewrite bytecode to be faster
   if (!is_static && rc == may_rewrite) {
@@ -2974,7 +2983,7 @@
   __ jcc(Assembler::notEqual, notDouble);
 #endif
   // dtos
-  __ load_double(field);
+  __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
   __ push(dtos);
   // Rewrite bytecode to be faster
   if (!is_static && rc == may_rewrite) {
@@ -3133,7 +3142,7 @@
   {
     __ pop(btos);
     if (!is_static) pop_and_check_object(obj);
-    __ movb(field, rax);
+    __ access_store_at(T_BYTE, IN_HEAP, field, rax, noreg, noreg);
     if (!is_static && rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
     }
@@ -3148,8 +3157,7 @@
   {
     __ pop(ztos);
     if (!is_static) pop_and_check_object(obj);
-    __ andl(rax, 0x1);
-    __ movb(field, rax);
+    __ access_store_at(T_BOOLEAN, IN_HEAP, field, rax, noreg, noreg);
     if (!is_static && rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_zputfield, bc, rbx, true, byte_no);
     }
@@ -3180,7 +3188,7 @@
   {
     __ pop(itos);
     if (!is_static) pop_and_check_object(obj);
-    __ movl(field, rax);
+    __ access_store_at(T_INT, IN_HEAP, field, rax, noreg, noreg);
     if (!is_static && rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
     }
@@ -3195,7 +3203,7 @@
   {
     __ pop(ctos);
     if (!is_static) pop_and_check_object(obj);
-    __ movw(field, rax);
+    __ access_store_at(T_CHAR, IN_HEAP, field, rax, noreg, noreg);
     if (!is_static && rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
     }
@@ -3210,7 +3218,7 @@
   {
     __ pop(stos);
     if (!is_static) pop_and_check_object(obj);
-    __ movw(field, rax);
+    __ access_store_at(T_SHORT, IN_HEAP, field, rax, noreg, noreg);
     if (!is_static && rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
     }
@@ -3226,7 +3234,7 @@
   {
     __ pop(ltos);
     if (!is_static) pop_and_check_object(obj);
-    __ movq(field, rax);
+    __ access_store_at(T_LONG, IN_HEAP, field, noreg /* ltos */, noreg, noreg);
     if (!is_static && rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
     }
@@ -3242,11 +3250,7 @@
     if (!is_static) pop_and_check_object(obj);
 
     // Replace with real volatile test
-    __ push(rdx);
-    __ push(rax);                 // Must update atomically with FIST
-    __ fild_d(Address(rsp,0));    // So load into FPU register
-    __ fistp_d(field);            // and put into memory atomically
-    __ addptr(rsp, 2*wordSize);
+    __ access_store_at(T_LONG, IN_HEAP | MO_RELAXED, field, noreg /* ltos */, noreg, noreg);
     // volatile_barrier();
     volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
                                                  Assembler::StoreStore));
@@ -3257,8 +3261,7 @@
 
     __ pop(ltos);  // overwrites rdx
     if (!is_static) pop_and_check_object(obj);
-    __ movptr(hi, rdx);
-    __ movptr(field, rax);
+    __ access_store_at(T_LONG, IN_HEAP, field, noreg /* ltos */, noreg, noreg);
     // Don't rewrite to _fast_lputfield for potential volatile case.
     __ jmp(notVolatile);
   }
@@ -3272,7 +3275,7 @@
   {
     __ pop(ftos);
     if (!is_static) pop_and_check_object(obj);
-    __ store_float(field);
+    __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg);
     if (!is_static && rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
     }
@@ -3289,7 +3292,7 @@
   {
     __ pop(dtos);
     if (!is_static) pop_and_check_object(obj);
-    __ store_double(field);
+    __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg);
     if (!is_static && rc == may_rewrite) {
       patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
     }
@@ -3422,30 +3425,31 @@
     break;
   case Bytecodes::_fast_lputfield:
 #ifdef _LP64
-  __ movq(field, rax);
+    __ access_store_at(T_LONG, IN_HEAP, field, noreg /* ltos */, noreg, noreg);
 #else
   __ stop("should not be rewritten");
 #endif
     break;
   case Bytecodes::_fast_iputfield:
-    __ movl(field, rax);
+    __ access_store_at(T_INT, IN_HEAP, field, rax, noreg, noreg);
     break;
   case Bytecodes::_fast_zputfield:
-    __ andl(rax, 0x1);  // boolean is true if LSB is 1
-    // fall through to bputfield
+    __ access_store_at(T_BOOLEAN, IN_HEAP, field, rax, noreg, noreg);
+    break;
   case Bytecodes::_fast_bputfield:
-    __ movb(field, rax);
+    __ access_store_at(T_BYTE, IN_HEAP, field, rax, noreg, noreg);
     break;
   case Bytecodes::_fast_sputfield:
-    // fall through
+    __ access_store_at(T_SHORT, IN_HEAP, field, rax, noreg, noreg);
+    break;
   case Bytecodes::_fast_cputfield:
-    __ movw(field, rax);
+    __ access_store_at(T_CHAR, IN_HEAP, field, rax, noreg, noreg);
     break;
   case Bytecodes::_fast_fputfield:
-    __ store_float(field);
+    __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg);
     break;
   case Bytecodes::_fast_dputfield:
-    __ store_double(field);
+    __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg);
     break;
   default:
     ShouldNotReachHere();
@@ -3512,28 +3516,28 @@
     break;
   case Bytecodes::_fast_lgetfield:
 #ifdef _LP64
-  __ movq(rax, field);
+    __ access_load_at(T_LONG, IN_HEAP, noreg /* ltos */, field, noreg, noreg);
 #else
   __ stop("should not be rewritten");
 #endif
     break;
   case Bytecodes::_fast_igetfield:
-    __ movl(rax, field);
+    __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
     break;
   case Bytecodes::_fast_bgetfield:
-    __ movsbl(rax, field);
+    __ access_load_at(T_BYTE, IN_HEAP, rax, field, noreg, noreg);
     break;
   case Bytecodes::_fast_sgetfield:
-    __ load_signed_short(rax, field);
+    __ access_load_at(T_SHORT, IN_HEAP, rax, field, noreg, noreg);
     break;
   case Bytecodes::_fast_cgetfield:
-    __ load_unsigned_short(rax, field);
+    __ access_load_at(T_CHAR, IN_HEAP, rax, field, noreg, noreg);
     break;
   case Bytecodes::_fast_fgetfield:
-    __ load_float(field);
+    __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
     break;
   case Bytecodes::_fast_dgetfield:
-    __ load_double(field);
+    __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
     break;
   default:
     ShouldNotReachHere();
@@ -3566,14 +3570,14 @@
   const Address field = Address(rax, rbx, Address::times_1, 0*wordSize);
   switch (state) {
   case itos:
-    __ movl(rax, field);
+    __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
     break;
   case atos:
     do_oop_load(_masm, field, rax);
     __ verify_oop(rax);
     break;
   case ftos:
-    __ load_float(field);
+    __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
     break;
   default:
     ShouldNotReachHere();
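
The store side used by the rewritten putfield and array-store paths mirrors the load-side dispatch sketched after the methodHandles hunks; again inferred from the call sites rather than quoted. Note the tos convention visible above: for ltos/ftos/dtos the value travels in the tos state (rax:rdx or the FPU/XMM top), so callers pass noreg for src.

    // Sketch: the store-side twin of access_load_at.
    void MacroAssembler::access_store_at(BasicType type, DecoratorSet decorators,
                                         Address dst, Register src,
                                         Register tmp1, Register tmp2) {
      BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
      decorators = AccessInternal::decorator_fixup(decorators);
      if ((decorators & AS_RAW) != 0) {
        bs->BarrierSetAssembler::store_at(this, decorators, type, dst, src, tmp1, tmp2);
      } else {
        bs->store_at(this, decorators, type, dst, src, tmp1, tmp2);
      }
    }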
--- a/src/hotspot/cpu/zero/cppInterpreter_zero.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/zero/cppInterpreter_zero.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -43,7 +43,7 @@
 #include "runtime/frame.inline.hpp"
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/jniHandles.inline.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 #include "runtime/synchronizer.hpp"
--- a/src/hotspot/cpu/zero/interp_masm_zero.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/zero/interp_masm_zero.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -26,7 +26,8 @@
 #ifndef CPU_ZERO_VM_INTERP_MASM_ZERO_HPP
 #define CPU_ZERO_VM_INTERP_MASM_ZERO_HPP
 
-#include "assembler_zero.inline.hpp"
+#include "asm/codeBuffer.hpp"
+#include "asm/macroAssembler.hpp"
 #include "interpreter/invocationCounter.hpp"
 
 // This file specializes the assembler with interpreter-specific macros
--- a/src/hotspot/cpu/zero/relocInfo_zero.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/zero/relocInfo_zero.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2009, 2010, 2011 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -24,8 +24,7 @@
  */
 
 #include "precompiled.hpp"
-#include "asm/assembler.inline.hpp"
-#include "assembler_zero.inline.hpp"
+#include "asm/codeBuffer.hpp"
 #include "code/relocInfo.hpp"
 #include "nativeInst_zero.hpp"
 #include "oops/oop.inline.hpp"
--- a/src/hotspot/cpu/zero/vtableStubs_zero.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/cpu/zero/vtableStubs_zero.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -24,18 +24,8 @@
  */
 
 #include "precompiled.hpp"
-#include "asm/assembler.hpp"
-#include "assembler_zero.inline.hpp"
 #include "code/vtableStubs.hpp"
-#include "interp_masm_zero.hpp"
-#include "memory/resourceArea.hpp"
-#include "oops/instanceKlass.hpp"
-#include "oops/klassVtable.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "vmreg_zero.inline.hpp"
-#ifdef COMPILER2
-#include "opto/runtime.hpp"
-#endif
+#include "utilities/debug.hpp"
 
 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
   ShouldNotCallThis();
--- a/src/hotspot/os/aix/os_aix.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/os/aix/os_aix.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -59,7 +59,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/objectMonitor.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/os.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/perfMemory.hpp"
--- a/src/hotspot/os/bsd/os_bsd.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/os/bsd/os_bsd.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -49,7 +49,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/objectMonitor.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/perfMemory.hpp"
 #include "runtime/semaphore.hpp"
--- a/src/hotspot/os/linux/os_linux.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/os/linux/os_linux.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -51,7 +51,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/objectMonitor.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/perfMemory.hpp"
 #include "runtime/sharedRuntime.hpp"
--- a/src/hotspot/os/solaris/os_solaris.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/os/solaris/os_solaris.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -49,7 +49,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/objectMonitor.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/perfMemory.hpp"
 #include "runtime/sharedRuntime.hpp"
--- a/src/hotspot/os/windows/os_windows.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/os/windows/os_windows.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -52,7 +52,7 @@
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/objectMonitor.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/osThread.hpp"
 #include "runtime/perfMemory.hpp"
 #include "runtime/sharedRuntime.hpp"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_AIX_OJDKPPC_VM_ORDERACCESS_AIX_PPC_HPP
+#define OS_CPU_AIX_OJDKPPC_VM_ORDERACCESS_AIX_PPC_HPP
+
+// Included in orderAccess.hpp header file.
+
+// Compiler version last used for testing: xlc 12
+// Please update this information when this file changes
+
+// Implementation of class OrderAccess.
+
+//
+// Machine barrier instructions:
+//
+// - sync            Two-way memory barrier, aka fence.
+// - lwsync          orders  Store|Store,
+//                            Load|Store,
+//                            Load|Load,
+//                   but not Store|Load
+// - eieio           orders  Store|Store
+// - isync           Invalidates speculatively executed instructions,
+//                   but isync may complete before storage accesses
+//                   associated with instructions preceding isync have
+//                   been performed.
+//
+// Semantic barrier instructions:
+// (as defined in orderAccess.hpp)
+//
+// - release         orders Store|Store,       (maps to lwsync)
+//                           Load|Store
+// - acquire         orders  Load|Store,       (maps to lwsync)
+//                           Load|Load
+// - fence           orders Store|Store,       (maps to sync)
+//                           Load|Store,
+//                           Load|Load,
+//                          Store|Load
+//
+
+#define inlasm_sync()     __asm__ __volatile__ ("sync"   : : : "memory");
+#define inlasm_lwsync()   __asm__ __volatile__ ("lwsync" : : : "memory");
+#define inlasm_eieio()    __asm__ __volatile__ ("eieio"  : : : "memory");
+#define inlasm_isync()    __asm__ __volatile__ ("isync"  : : : "memory");
+// Use twi-isync for load_acquire (faster than lwsync).
+// ATTENTION: seems like xlC 10.1 has problems with this inline assembler macro (VerifyMethodHandles found "bad vminfo in AMH.conv"):
+// #define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory");
+#define inlasm_acquire_reg(X) inlasm_lwsync();
+
+inline void OrderAccess::loadload()   { inlasm_lwsync(); }
+inline void OrderAccess::storestore() { inlasm_lwsync(); }
+inline void OrderAccess::loadstore()  { inlasm_lwsync(); }
+inline void OrderAccess::storeload()  { inlasm_sync();   }
+
+inline void OrderAccess::acquire()    { inlasm_lwsync(); }
+inline void OrderAccess::release()    { inlasm_lwsync(); }
+inline void OrderAccess::fence()      { inlasm_sync();   }
+
+template<size_t byte_size>
+struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
+{
+  template <typename T>
+  T operator()(const volatile T* p) const { register T t = Atomic::load(p); inlasm_acquire_reg(t); return t; }
+};
+
+#undef inlasm_sync
+#undef inlasm_lwsync
+#undef inlasm_eieio
+#undef inlasm_isync
+
+#endif // OS_CPU_AIX_OJDKPPC_VM_ORDERACCESS_AIX_PPC_HPP
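
A concrete pairing implied by the mapping table above — a sketch with hypothetical data/flag variables, not code from this changeset:

    static volatile int data = 0;
    static volatile int flag = 0;

    // Writer: release (lwsync on PPC) orders the data store before the flag store.
    void publish() {
      data = 42;
      OrderAccess::release();   // Store|Store
      flag = 1;
    }

    // Reader: acquire (lwsync on PPC) orders the flag load before the data load,
    // so a reader that observes flag == 1 also observes data == 42.
    int consume() {
      while (flag == 0) { /* spin */ }
      OrderAccess::acquire();   // Load|Load
      return data;
    }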
--- a/src/hotspot/os_cpu/aix_ppc/orderAccess_aix_ppc.inline.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2014 SAP SE. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_AIX_OJDKPPC_VM_ORDERACCESS_AIX_PPC_INLINE_HPP
-#define OS_CPU_AIX_OJDKPPC_VM_ORDERACCESS_AIX_PPC_INLINE_HPP
-
-#include "runtime/orderAccess.hpp"
-
-// Compiler version last used for testing: xlc 12
-// Please update this information when this file changes
-
-// Implementation of class OrderAccess.
-
-//
-// Machine barrier instructions:
-//
-// - sync            Two-way memory barrier, aka fence.
-// - lwsync          orders  Store|Store,
-//                            Load|Store,
-//                            Load|Load,
-//                   but not Store|Load
-// - eieio           orders  Store|Store
-// - isync           Invalidates speculatively executed instructions,
-//                   but isync may complete before storage accesses
-//                   associated with instructions preceding isync have
-//                   been performed.
-//
-// Semantic barrier instructions:
-// (as defined in orderAccess.hpp)
-//
-// - release         orders Store|Store,       (maps to lwsync)
-//                           Load|Store
-// - acquire         orders  Load|Store,       (maps to lwsync)
-//                           Load|Load
-// - fence           orders Store|Store,       (maps to sync)
-//                           Load|Store,
-//                           Load|Load,
-//                          Store|Load
-//
-
-#define inlasm_sync()     __asm__ __volatile__ ("sync"   : : : "memory");
-#define inlasm_lwsync()   __asm__ __volatile__ ("lwsync" : : : "memory");
-#define inlasm_eieio()    __asm__ __volatile__ ("eieio"  : : : "memory");
-#define inlasm_isync()    __asm__ __volatile__ ("isync"  : : : "memory");
-// Use twi-isync for load_acquire (faster than lwsync).
-// ATTENTION: seems like xlC 10.1 has problems with this inline assembler macro (VerifyMethodHandles found "bad vminfo in AMH.conv"):
-// #define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory");
-#define inlasm_acquire_reg(X) inlasm_lwsync();
-
-inline void OrderAccess::loadload()   { inlasm_lwsync(); }
-inline void OrderAccess::storestore() { inlasm_lwsync(); }
-inline void OrderAccess::loadstore()  { inlasm_lwsync(); }
-inline void OrderAccess::storeload()  { inlasm_sync();   }
-
-inline void OrderAccess::acquire()    { inlasm_lwsync(); }
-inline void OrderAccess::release()    { inlasm_lwsync(); }
-inline void OrderAccess::fence()      { inlasm_sync();   }
-
-template<size_t byte_size>
-struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
-{
-  template <typename T>
-  T operator()(const volatile T* p) const { register T t = Atomic::load(p); inlasm_acquire_reg(t); return t; }
-};
-
-#undef inlasm_sync
-#undef inlasm_lwsync
-#undef inlasm_eieio
-#undef inlasm_isync
-
-#endif // OS_CPU_AIX_OJDKPPC_VM_ORDERACCESS_AIX_PPC_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -0,0 +1,112 @@
+/*
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_HPP
+#define OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_HPP
+
+// Included in orderAccess.hpp header file.
+
+// Compiler version last used for testing: clang 5.1
+// Please update this information when this file changes
+
+// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
+static inline void compiler_barrier() {
+  __asm__ volatile ("" : : : "memory");
+}
+
+// x86 is TSO and hence only needs a fence for storeload
+// However, a compiler barrier is still needed to prevent reordering
+// between volatile and non-volatile memory accesses.
+
+// Implementation of class OrderAccess.
+
+inline void OrderAccess::loadload()   { compiler_barrier(); }
+inline void OrderAccess::storestore() { compiler_barrier(); }
+inline void OrderAccess::loadstore()  { compiler_barrier(); }
+inline void OrderAccess::storeload()  { fence();            }
+
+inline void OrderAccess::acquire()    { compiler_barrier(); }
+inline void OrderAccess::release()    { compiler_barrier(); }
+
+inline void OrderAccess::fence() {
+  // always use locked addl since mfence is sometimes expensive
+#ifdef AMD64
+  __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
+#else
+  __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
+#endif
+  compiler_barrier();
+}
+
+template<>
+struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile (  "xchgb (%2),%0"
+                      : "=q" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
+
+template<>
+struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile (  "xchgw (%2),%0"
+                      : "=r" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
+
+template<>
+struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile (  "xchgl (%2),%0"
+                      : "=r" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
+
+#ifdef AMD64
+template<>
+struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile (  "xchgq (%2), %0"
+                      : "=r" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
+#endif // AMD64
+
+#endif // OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_HPP
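
Because x86 is TSO, the one reordering the hardware permits is a store passing a later load — which is why storeload() is the only barrier above that emits a real instruction. The classic pattern that needs it, as a sketch with hypothetical flags:

    static volatile int me_interested = 0;      // hypothetical Dekker-style flags
    static volatile int other_interested = 0;

    bool try_enter() {
      me_interested = 1;
      OrderAccess::storeload();      // locked addl: my store must not pass the load below
      return other_interested == 0;  // enter only if the peer is not racing in
    }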
--- a/src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.inline.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,116 +0,0 @@
-/*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP
-#define OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/orderAccess.hpp"
-#include "runtime/os.hpp"
-
-// Compiler version last used for testing: clang 5.1
-// Please update this information when this file changes
-
-// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
-static inline void compiler_barrier() {
-  __asm__ volatile ("" : : : "memory");
-}
-
-// x86 is TSO and hence only needs a fence for storeload
-// However, a compiler barrier is still needed to prevent reordering
-// between volatile and non-volatile memory accesses.
-
-// Implementation of class OrderAccess.
-
-inline void OrderAccess::loadload()   { compiler_barrier(); }
-inline void OrderAccess::storestore() { compiler_barrier(); }
-inline void OrderAccess::loadstore()  { compiler_barrier(); }
-inline void OrderAccess::storeload()  { fence();            }
-
-inline void OrderAccess::acquire()    { compiler_barrier(); }
-inline void OrderAccess::release()    { compiler_barrier(); }
-
-inline void OrderAccess::fence() {
-  if (os::is_MP()) {
-    // always use locked addl since mfence is sometimes expensive
-#ifdef AMD64
-    __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
-#else
-    __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
-#endif
-  }
-  compiler_barrier();
-}
-
-template<>
-struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm__ volatile (  "xchgb (%2),%0"
-                      : "=q" (v)
-                      : "0" (v), "r" (p)
-                      : "memory");
-  }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm__ volatile (  "xchgw (%2),%0"
-                      : "=r" (v)
-                      : "0" (v), "r" (p)
-                      : "memory");
-  }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm__ volatile (  "xchgl (%2),%0"
-                      : "=r" (v)
-                      : "0" (v), "r" (p)
-                      : "memory");
-  }
-};
-
-#ifdef AMD64
-template<>
-struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm__ volatile (  "xchgq (%2), %0"
-                      : "=r" (v)
-                      : "0" (v), "r" (p)
-                      : "memory");
-  }
-};
-#endif // AMD64
-
-#endif // OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/bsd_zero/orderAccess_bsd_zero.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2007, 2008, 2009 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_BSD_ZERO_VM_ORDERACCESS_BSD_ZERO_HPP
+#define OS_CPU_BSD_ZERO_VM_ORDERACCESS_BSD_ZERO_HPP
+
+// Included in orderAccess.hpp header file.
+
+#ifdef ARM
+
+/*
+ * ARM Kernel helper for memory barrier.
+ * Using __asm __volatile ("":::"memory") does not work reliably on ARM,
+ * and gcc's __sync_synchronize() implementation does not use the kernel
+ * helper in all gcc versions, so it is unreliable as well.
+ */
+typedef void (__kernel_dmb_t) (void);
+#define __kernel_dmb (*(__kernel_dmb_t *) 0xffff0fa0)
+
+#define FULL_MEM_BARRIER __kernel_dmb()
+#define LIGHT_MEM_BARRIER __kernel_dmb()
+
+#else // ARM
+
+#define FULL_MEM_BARRIER __sync_synchronize()
+
+#ifdef PPC
+
+#ifdef __NO_LWSYNC__
+#define LIGHT_MEM_BARRIER __asm __volatile ("sync":::"memory")
+#else
+#define LIGHT_MEM_BARRIER __asm __volatile ("lwsync":::"memory")
+#endif
+
+#else // PPC
+
+#define LIGHT_MEM_BARRIER __asm __volatile ("":::"memory")
+
+#endif // PPC
+
+#endif // ARM
+
+// Note: What is meant by LIGHT_MEM_BARRIER is a barrier which is sufficient
+// to provide TSO semantics, i.e. StoreStore | LoadLoad | LoadStore.
+
+inline void OrderAccess::loadload()   { LIGHT_MEM_BARRIER; }
+inline void OrderAccess::storestore() { LIGHT_MEM_BARRIER; }
+inline void OrderAccess::loadstore()  { LIGHT_MEM_BARRIER; }
+inline void OrderAccess::storeload()  { FULL_MEM_BARRIER;  }
+
+inline void OrderAccess::acquire()    { LIGHT_MEM_BARRIER; }
+inline void OrderAccess::release()    { LIGHT_MEM_BARRIER; }
+inline void OrderAccess::fence()      { FULL_MEM_BARRIER;  }
+
+#endif // OS_CPU_BSD_ZERO_VM_ORDERACCESS_BSD_ZERO_HPP
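
The 0xffff0fa0 constant above is the fixed address of the ARM kernel's user-mode memory-barrier helper in the vector page; invoking it is an ordinary indirect call. A standalone sketch of the same mechanism (assumption: ARM Linux-style kuser ABI, as the macros above rely on):

    // The kernel maps a helper at this fixed address and keeps it correct for
    // the running CPU (a DMB on ARMv7, a CP15 sequence on older cores).
    typedef void (kernel_dmb_t)(void);
    static kernel_dmb_t* const kernel_dmb =
        reinterpret_cast<kernel_dmb_t*>(0xffff0fa0);

    void full_memory_barrier() {
      kernel_dmb();   // what FULL_MEM_BARRIER expands to on ARM above
    }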
--- a/src/hotspot/os_cpu/bsd_zero/orderAccess_bsd_zero.inline.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,77 +0,0 @@
-/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2007, 2008, 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_BSD_ZERO_VM_ORDERACCESS_BSD_ZERO_INLINE_HPP
-#define OS_CPU_BSD_ZERO_VM_ORDERACCESS_BSD_ZERO_INLINE_HPP
-
-#include "runtime/orderAccess.hpp"
-
-#ifdef ARM
-
-/*
- * ARM Kernel helper for memory barrier.
- * Using __asm __volatile ("":::"memory") does not work reliable on ARM
- * and gcc __sync_synchronize(); implementation does not use the kernel
- * helper for all gcc versions so it is unreliable to use as well.
- */
-typedef void (__kernel_dmb_t) (void);
-#define __kernel_dmb (*(__kernel_dmb_t *) 0xffff0fa0)
-
-#define FULL_MEM_BARRIER __kernel_dmb()
-#define LIGHT_MEM_BARRIER __kernel_dmb()
-
-#else // ARM
-
-#define FULL_MEM_BARRIER __sync_synchronize()
-
-#ifdef PPC
-
-#ifdef __NO_LWSYNC__
-#define LIGHT_MEM_BARRIER __asm __volatile ("sync":::"memory")
-#else
-#define LIGHT_MEM_BARRIER __asm __volatile ("lwsync":::"memory")
-#endif
-
-#else // PPC
-
-#define LIGHT_MEM_BARRIER __asm __volatile ("":::"memory")
-
-#endif // PPC
-
-#endif // ARM
-
-// Note: What is meant by LIGHT_MEM_BARRIER is a barrier which is sufficient
-// to provide TSO semantics, i.e. StoreStore | LoadLoad | LoadStore.
-
-inline void OrderAccess::loadload()   { LIGHT_MEM_BARRIER; }
-inline void OrderAccess::storestore() { LIGHT_MEM_BARRIER; }
-inline void OrderAccess::loadstore()  { LIGHT_MEM_BARRIER; }
-inline void OrderAccess::storeload()  { FULL_MEM_BARRIER;  }
-
-inline void OrderAccess::acquire()    { LIGHT_MEM_BARRIER; }
-inline void OrderAccess::release()    { LIGHT_MEM_BARRIER; }
-inline void OrderAccess::fence()      { FULL_MEM_BARRIER;  }
-
-#endif // OS_CPU_BSD_ZERO_VM_ORDERACCESS_BSD_ZERO_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_AARCH64_VM_ORDERACCESS_LINUX_AARCH64_HPP
+#define OS_CPU_LINUX_AARCH64_VM_ORDERACCESS_LINUX_AARCH64_HPP
+
+// Included in orderAccess.hpp header file.
+
+#include "vm_version_aarch64.hpp"
+
+// Implementation of class OrderAccess.
+
+inline void OrderAccess::loadload()   { acquire(); }
+inline void OrderAccess::storestore() { release(); }
+inline void OrderAccess::loadstore()  { acquire(); }
+inline void OrderAccess::storeload()  { fence(); }
+
+inline void OrderAccess::acquire() {
+  READ_MEM_BARRIER;
+}
+
+inline void OrderAccess::release() {
+  WRITE_MEM_BARRIER;
+}
+
+inline void OrderAccess::fence() {
+  FULL_MEM_BARRIER;
+}
+
+template<size_t byte_size>
+struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
+{
+  template <typename T>
+  T operator()(const volatile T* p) const { T data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
+};
+
+template<size_t byte_size>
+struct OrderAccess::PlatformOrderedStore<byte_size, RELEASE_X>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const { __atomic_store(p, &v, __ATOMIC_RELEASE); }
+};
+
+template<size_t byte_size>
+struct OrderAccess::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const { release_store(p, v); fence(); }
+};
+
+#endif // OS_CPU_LINUX_AARCH64_VM_ORDERACCESS_LINUX_AARCH64_HPP
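
The aarch64 specializations above lean on the GCC/Clang __atomic builtins, which reduce to the single-instruction acquire/release forms. A standalone sketch:

    #include <cstdint>

    uint64_t load_acquire(const volatile uint64_t* p) {
      uint64_t v;
      __atomic_load(p, &v, __ATOMIC_ACQUIRE);   // AArch64: ldar
      return v;
    }

    void store_release(volatile uint64_t* p, uint64_t v) {
      __atomic_store(p, &v, __ATOMIC_RELEASE);  // AArch64: stlr
    }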
--- a/src/hotspot/os_cpu/linux_aarch64/orderAccess_linux_aarch64.inline.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,74 +0,0 @@
-/*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_LINUX_AARCH64_VM_ORDERACCESS_LINUX_AARCH64_INLINE_HPP
-#define OS_CPU_LINUX_AARCH64_VM_ORDERACCESS_LINUX_AARCH64_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/orderAccess.hpp"
-#include "runtime/os.hpp"
-#include "vm_version_aarch64.hpp"
-
-// Implementation of class OrderAccess.
-
-inline void OrderAccess::loadload()   { acquire(); }
-inline void OrderAccess::storestore() { release(); }
-inline void OrderAccess::loadstore()  { acquire(); }
-inline void OrderAccess::storeload()  { fence(); }
-
-inline void OrderAccess::acquire() {
-  READ_MEM_BARRIER;
-}
-
-inline void OrderAccess::release() {
-  WRITE_MEM_BARRIER;
-}
-
-inline void OrderAccess::fence() {
-  FULL_MEM_BARRIER;
-}
-
-template<size_t byte_size>
-struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
-{
-  template <typename T>
-  T operator()(const volatile T* p) const { T data; __atomic_load(p, &data, __ATOMIC_ACQUIRE); return data; }
-};
-
-template<size_t byte_size>
-struct OrderAccess::PlatformOrderedStore<byte_size, RELEASE_X>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const { __atomic_store(p, &v, __ATOMIC_RELEASE); }
-};
-
-template<size_t byte_size>
-struct OrderAccess::PlatformOrderedStore<byte_size, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const { release_store(p, v); fence(); }
-};
-
-#endif // OS_CPU_LINUX_AARCH64_VM_ORDERACCESS_LINUX_AARCH64_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -0,0 +1,248 @@
+/*
+ * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_HPP
+#define OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_HPP
+
+// Included in orderAccess.hpp header file.
+
+#include "runtime/os.hpp"
+#include "vm_version_arm.hpp"
+
+// Implementation of class OrderAccess.
+// - we define the high level barriers below and use the general
+//   implementation in orderAccess.hpp, with customizations
+//   on AARCH64 via the PlatformOrdered* template specializations
+
+// Memory Ordering on ARM is weak.
+//
+// Implement all 4 memory ordering barriers by DMB, since it is a
+// lighter version of DSB.
+// dmb_sy implies full system shareability domain. RD/WR access type.
+// dmb_st implies full system shareability domain. WR only access type.
+//
+// NOP on < ARMv6 (MP not supported)
+//
+// Non-mcr instructions can be used if we build for ARMv7 or a higher arch
+//    __asm__ __volatile__ ("dmb" : : : "memory");
+//    __asm__ __volatile__ ("dsb" : : : "memory");
+//
+// inline void _OrderAccess_dsb() {
+//    volatile intptr_t dummy = 0;
+//    if (os::is_MP()) {
+//      __asm__ volatile (
+//        "mcr p15, 0, %0, c7, c10, 4"
+//        : : "r" (dummy) : "memory");
+//    }
+// }
+
+inline static void dmb_sy() {
+   if (!os::is_MP()) {
+     return;
+   }
+#ifdef AARCH64
+   __asm__ __volatile__ ("dmb sy" : : : "memory");
+#else
+   if (VM_Version::arm_arch() >= 7) {
+#ifdef __thumb__
+     __asm__ volatile (
+     "dmb sy": : : "memory");
+#else
+     __asm__ volatile (
+     ".word 0xF57FF050 | 0xf" : : : "memory");
+#endif
+   } else {
+     intptr_t zero = 0;
+     __asm__ volatile (
+       "mcr p15, 0, %0, c7, c10, 5"
+       : : "r" (zero) : "memory");
+   }
+#endif
+}
+
+inline static void dmb_st() {
+   if (!os::is_MP()) {
+     return;
+   }
+#ifdef AARCH64
+   __asm__ __volatile__ ("dmb st" : : : "memory");
+#else
+   if (VM_Version::arm_arch() >= 7) {
+#ifdef __thumb__
+     __asm__ volatile (
+     "dmb st": : : "memory");
+#else
+     __asm__ volatile (
+     ".word 0xF57FF050 | 0xe" : : : "memory");
+#endif
+   } else {
+     intptr_t zero = 0;
+     __asm__ volatile (
+       "mcr p15, 0, %0, c7, c10, 5"
+       : : "r" (zero) : "memory");
+   }
+#endif
+}
+
+// Load-Load/Store barrier
+inline static void dmb_ld() {
+#ifdef AARCH64
+   if (!os::is_MP()) {
+     return;
+   }
+   __asm__ __volatile__ ("dmb ld" : : : "memory");
+#else
+   dmb_sy();
+#endif
+}
+
+
+inline void OrderAccess::loadload()   { dmb_ld(); }
+inline void OrderAccess::loadstore()  { dmb_ld(); }
+inline void OrderAccess::acquire()    { dmb_ld(); }
+inline void OrderAccess::storestore() { dmb_st(); }
+inline void OrderAccess::storeload()  { dmb_sy(); }
+inline void OrderAccess::release()    { dmb_sy(); }
+inline void OrderAccess::fence()      { dmb_sy(); }
+
+// Specializations for AArch64
+// TODO-AARCH64: evaluate effectiveness of ldar*/stlr* implementations compared to 32-bit ARM approach
+
+#ifdef AARCH64
+
+template<>
+struct OrderAccess::PlatformOrderedLoad<1, X_ACQUIRE>
+{
+  template <typename T>
+  T operator()(const volatile T* p) const {
+    volatile T result;
+    __asm__ volatile(
+      "ldarb %w[res], [%[ptr]]"
+      : [res] "=&r" (result)
+      : [ptr] "r" (p)
+      : "memory");
+    return result;
+  }
+};
+
+template<>
+struct OrderAccess::PlatformOrderedLoad<2, X_ACQUIRE>
+{
+  template <typename T>
+  T operator()(const volatile T* p) const {
+    volatile T result;
+    __asm__ volatile(
+      "ldarh %w[res], [%[ptr]]"
+      : [res] "=&r" (result)
+      : [ptr] "r" (p)
+      : "memory");
+    return result;
+  }
+};
+
+template<>
+struct OrderAccess::PlatformOrderedLoad<4, X_ACQUIRE>
+{
+  template <typename T>
+  T operator()(const volatile T* p) const {
+    volatile T result;
+    __asm__ volatile(
+      "ldar %w[res], [%[ptr]]"
+      : [res] "=&r" (result)
+      : [ptr] "r" (p)
+      : "memory");
+    return result;
+  }
+};
+
+template<>
+struct OrderAccess::PlatformOrderedLoad<8, X_ACQUIRE>
+{
+  template <typename T>
+  T operator()(const volatile T* p) const {
+    volatile T result;
+    __asm__ volatile(
+      "ldar %[res], [%[ptr]]"
+      : [res] "=&r" (result)
+      : [ptr] "r" (p)
+      : "memory");
+    return result;
+  }
+};
+
+template<>
+struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile(
+      "stlrb %w[val], [%[ptr]]"
+      :
+      : [ptr] "r" (p), [val] "r" (v)
+      : "memory");
+  }
+};
+
+template<>
+struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile(
+      "stlrh %w[val], [%[ptr]]"
+      :
+      : [ptr] "r" (p), [val] "r" (v)
+      : "memory");
+  }
+};
+
+template<>
+struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile(
+      "stlr %w[val], [%[ptr]]"
+      :
+      : [ptr] "r" (p), [val] "r" (v)
+      : "memory");
+  }
+};
+
+template<>
+struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile(
+      "stlr %[val], [%[ptr]]"
+      :
+      : [ptr] "r" (p), [val] "r" (v)
+      : "memory");
+  }
+};
+
+#endif // AARCH64
+
+#endif // OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_HPP
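
Two notes on the ARM file above. The .word literals are the raw ARM-state
encodings of dmb (0xF57FF05x, where the low nibble selects the option:
0xf = sy, 0xe = st), so the file still assembles when the toolchain targets
a pre-ARMv7 architecture. And because 32-bit ARM has no store-release
instruction, a release store is composed from a barrier followed by a plain
store; a sketch of that composition (release_store_sketch is an illustrative
name, mirroring the OrderAccess::release() -> dmb_sy() mapping above):

    // Sketch only: how a release store decomposes on 32-bit ARM.
    template <typename T>
    inline void release_store_sketch(volatile T* p, T v) {
      dmb_sy();   // order all earlier loads and stores before the store below
      *p = v;     // plain store; stlr only exists from ARMv8 on
    }
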
--- a/src/hotspot/os_cpu/linux_arm/orderAccess_linux_arm.inline.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,247 +0,0 @@
-/*
- * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_INLINE_HPP
-#define OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_INLINE_HPP
-
-#include "runtime/orderAccess.hpp"
-#include "runtime/os.hpp"
-#include "vm_version_arm.hpp"
-
-// Implementation of class OrderAccess.
-// - we define the high level barriers below and use the general
-//   implementation in orderAccess.inline.hpp, with customizations
-//   on AARCH64 via the specialized_* template functions
-
-// Memory Ordering on ARM is weak.
-//
-// Implement all 4 memory ordering barriers by DMB, since it is a
-// lighter version of DSB.
-// dmb_sy implies full system shareability domain. RD/WR access type.
-// dmb_st implies full system shareability domain. WR only access type.
-//
-// NOP on < ARMv6 (MP not supported)
-//
-// Non mcr instructions can be used if we build for armv7 or higher arch
-//    __asm__ __volatile__ ("dmb" : : : "memory");
-//    __asm__ __volatile__ ("dsb" : : : "memory");
-//
-// inline void _OrderAccess_dsb() {
-//    volatile intptr_t dummy = 0;
-//    if (os::is_MP()) {
-//      __asm__ volatile (
-//        "mcr p15, 0, %0, c7, c10, 4"
-//        : : "r" (dummy) : "memory");
-//   }
-// }
-
-inline static void dmb_sy() {
-   if (!os::is_MP()) {
-     return;
-   }
-#ifdef AARCH64
-   __asm__ __volatile__ ("dmb sy" : : : "memory");
-#else
-   if (VM_Version::arm_arch() >= 7) {
-#ifdef __thumb__
-     __asm__ volatile (
-     "dmb sy": : : "memory");
-#else
-     __asm__ volatile (
-     ".word 0xF57FF050 | 0xf" : : : "memory");
-#endif
-   } else {
-     intptr_t zero = 0;
-     __asm__ volatile (
-       "mcr p15, 0, %0, c7, c10, 5"
-       : : "r" (zero) : "memory");
-   }
-#endif
-}
-
-inline static void dmb_st() {
-   if (!os::is_MP()) {
-     return;
-   }
-#ifdef AARCH64
-   __asm__ __volatile__ ("dmb st" : : : "memory");
-#else
-   if (VM_Version::arm_arch() >= 7) {
-#ifdef __thumb__
-     __asm__ volatile (
-     "dmb st": : : "memory");
-#else
-     __asm__ volatile (
-     ".word 0xF57FF050 | 0xe" : : : "memory");
-#endif
-   } else {
-     intptr_t zero = 0;
-     __asm__ volatile (
-       "mcr p15, 0, %0, c7, c10, 5"
-       : : "r" (zero) : "memory");
-   }
-#endif
-}
-
-// Load-Load/Store barrier
-inline static void dmb_ld() {
-#ifdef AARCH64
-   if (!os::is_MP()) {
-     return;
-   }
-   __asm__ __volatile__ ("dmb ld" : : : "memory");
-#else
-   dmb_sy();
-#endif
-}
-
-
-inline void OrderAccess::loadload()   { dmb_ld(); }
-inline void OrderAccess::loadstore()  { dmb_ld(); }
-inline void OrderAccess::acquire()    { dmb_ld(); }
-inline void OrderAccess::storestore() { dmb_st(); }
-inline void OrderAccess::storeload()  { dmb_sy(); }
-inline void OrderAccess::release()    { dmb_sy(); }
-inline void OrderAccess::fence()      { dmb_sy(); }
-
-// specializations for Aarch64
-// TODO-AARCH64: evaluate effectiveness of ldar*/stlr* implementations compared to 32-bit ARM approach
-
-#ifdef AARCH64
-
-template<>
-struct OrderAccess::PlatformOrderedLoad<1, X_ACQUIRE>
-{
-  template <typename T>
-  T operator()(const volatile T* p) const {
-    volatile T result;
-    __asm__ volatile(
-      "ldarb %w[res], [%[ptr]]"
-      : [res] "=&r" (result)
-      : [ptr] "r" (p)
-      : "memory");
-    return result;
-  }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedLoad<2, X_ACQUIRE>
-{
-  template <typename T>
-  T operator()(const volatile T* p) const {
-    volatile T result;
-    __asm__ volatile(
-      "ldarh %w[res], [%[ptr]]"
-      : [res] "=&r" (result)
-      : [ptr] "r" (p)
-      : "memory");
-    return result;
-  }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedLoad<4, X_ACQUIRE>
-{
-  template <typename T>
-  T operator()(const volatile T* p) const {
-    volatile T result;
-    __asm__ volatile(
-      "ldar %w[res], [%[ptr]]"
-      : [res] "=&r" (result)
-      : [ptr] "r" (p)
-      : "memory");
-    return result;
-  }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedLoad<8, X_ACQUIRE>
-{
-  template <typename T>
-  T operator()(const volatile T* p) const {
-    volatile T result;
-    __asm__ volatile(
-      "ldar %[res], [%[ptr]]"
-      : [res] "=&r" (result)
-      : [ptr] "r" (p)
-      : "memory");
-    return result;
-  }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm__ volatile(
-      "stlrb %w[val], [%[ptr]]"
-      :
-      : [ptr] "r" (p), [val] "r" (v)
-      : "memory");
-  }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm__ volatile(
-      "stlrh %w[val], [%[ptr]]"
-      :
-      : [ptr] "r" (p), [val] "r" (v)
-      : "memory");
-  }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm__ volatile(
-      "stlr %w[val], [%[ptr]]"
-      :
-      : [ptr] "r" (p), [val] "r" (v)
-      : "memory");
-  }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm__ volatile(
-      "stlr %[val], [%[ptr]]"
-      :
-      : [ptr] "r" (p), [val] "r" (v)
-      : "memory");
-  }
-};
-
-#endif // AARCH64
-
-#endif // OS_CPU_LINUX_ARM_VM_ORDERACCESS_LINUX_ARM_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2014 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_HPP
+#define OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_HPP
+
+// Included in orderAccess.hpp header file.
+
+#ifndef PPC64
+#error "OrderAccess currently only implemented for PPC64"
+#endif
+
+// Compiler version last used for testing: gcc 4.1.2
+// Please update this information when this file changes
+
+// Implementation of class OrderAccess.
+
+//
+// Machine barrier instructions:
+//
+// - sync            Two-way memory barrier, aka fence.
+// - lwsync          orders  Store|Store,
+//                            Load|Store,
+//                            Load|Load,
+//                   but not Store|Load
+// - eieio           orders  Store|Store
+// - isync           Invalidates speculatively executed instructions,
+//                   but isync may complete before storage accesses
+//                   associated with instructions preceding isync have
+//                   been performed.
+//
+// Semantic barrier instructions:
+// (as defined in orderAccess.hpp)
+//
+// - release         orders Store|Store,       (maps to lwsync)
+//                           Load|Store
+// - acquire         orders  Load|Store,       (maps to lwsync)
+//                           Load|Load
+// - fence           orders Store|Store,       (maps to sync)
+//                           Load|Store,
+//                           Load|Load,
+//                          Store|Load
+//
+
+#define inlasm_sync()     __asm__ __volatile__ ("sync"   : : : "memory");
+#define inlasm_lwsync()   __asm__ __volatile__ ("lwsync" : : : "memory");
+#define inlasm_eieio()    __asm__ __volatile__ ("eieio"  : : : "memory");
+#define inlasm_isync()    __asm__ __volatile__ ("isync"  : : : "memory");
+// Use twi-isync for load_acquire (faster than lwsync).
+#define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory");
+
+inline void   OrderAccess::loadload()   { inlasm_lwsync(); }
+inline void   OrderAccess::storestore() { inlasm_lwsync(); }
+inline void   OrderAccess::loadstore()  { inlasm_lwsync(); }
+inline void   OrderAccess::storeload()  { inlasm_sync();   }
+
+inline void   OrderAccess::acquire()    { inlasm_lwsync(); }
+inline void   OrderAccess::release()    { inlasm_lwsync(); }
+inline void   OrderAccess::fence()      { inlasm_sync();   }
+
+
+template<size_t byte_size>
+struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
+{
+  template <typename T>
+  T operator()(const volatile T* p) const { register T t = Atomic::load(p); inlasm_acquire_reg(t); return t; }
+};
+
+#undef inlasm_sync
+#undef inlasm_lwsync
+#undef inlasm_eieio
+#undef inlasm_isync
+#undef inlasm_acquire_reg
+
+#endif // OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_HPP
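
The twi/isync acquire idiom above is worth unpacking: twi 0,%0,0 is a
trap-word-immediate that never traps (trap condition 0) but reads the
just-loaded register, creating a data dependency on the load; the following
isync then discards speculatively executed instructions, so later accesses
cannot complete ahead of the load. A standalone sketch of the same sequence
(load_acquire_sketch is an illustrative name):

    // Sketch of the twi/isync load-acquire idiom (PPC64, GCC inline asm).
    inline int load_acquire_sketch(const volatile int* p) {
      int v = *p;
      __asm__ __volatile__ ("twi 0,%0,0\n\t"   // never traps, but consumes v
                            "isync"            // squash speculation past the load
                            : : "r" (v) : "memory");
      return v;
    }
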
--- a/src/hotspot/os_cpu/linux_ppc/orderAccess_linux_ppc.inline.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,97 +0,0 @@
-/*
- * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2014 SAP SE. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_INLINE_HPP
-#define OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_INLINE_HPP
-
-#include "runtime/orderAccess.hpp"
-
-#ifndef PPC64
-#error "OrderAccess currently only implemented for PPC64"
-#endif
-
-// Compiler version last used for testing: gcc 4.1.2
-// Please update this information when this file changes
-
-// Implementation of class OrderAccess.
-
-//
-// Machine barrier instructions:
-//
-// - sync            Two-way memory barrier, aka fence.
-// - lwsync          orders  Store|Store,
-//                            Load|Store,
-//                            Load|Load,
-//                   but not Store|Load
-// - eieio           orders  Store|Store
-// - isync           Invalidates speculatively executed instructions,
-//                   but isync may complete before storage accesses
-//                   associated with instructions preceding isync have
-//                   been performed.
-//
-// Semantic barrier instructions:
-// (as defined in orderAccess.hpp)
-//
-// - release         orders Store|Store,       (maps to lwsync)
-//                           Load|Store
-// - acquire         orders  Load|Store,       (maps to lwsync)
-//                           Load|Load
-// - fence           orders Store|Store,       (maps to sync)
-//                           Load|Store,
-//                           Load|Load,
-//                          Store|Load
-//
-
-#define inlasm_sync()     __asm__ __volatile__ ("sync"   : : : "memory");
-#define inlasm_lwsync()   __asm__ __volatile__ ("lwsync" : : : "memory");
-#define inlasm_eieio()    __asm__ __volatile__ ("eieio"  : : : "memory");
-#define inlasm_isync()    __asm__ __volatile__ ("isync"  : : : "memory");
-// Use twi-isync for load_acquire (faster than lwsync).
-#define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory");
-
-inline void   OrderAccess::loadload()   { inlasm_lwsync(); }
-inline void   OrderAccess::storestore() { inlasm_lwsync(); }
-inline void   OrderAccess::loadstore()  { inlasm_lwsync(); }
-inline void   OrderAccess::storeload()  { inlasm_sync();   }
-
-inline void   OrderAccess::acquire()    { inlasm_lwsync(); }
-inline void   OrderAccess::release()    { inlasm_lwsync(); }
-inline void   OrderAccess::fence()      { inlasm_sync();   }
-
-
-template<size_t byte_size>
-struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
-{
-  template <typename T>
-  T operator()(const volatile T* p) const { register T t = Atomic::load(p); inlasm_acquire_reg(t); return t; }
-};
-
-#undef inlasm_sync
-#undef inlasm_lwsync
-#undef inlasm_eieio
-#undef inlasm_isync
-#undef inlasm_acquire_reg
-
-#endif // OS_CPU_LINUX_PPC_VM_ORDERACCESS_LINUX_PPC_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016 SAP SE. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_S390_VM_ORDERACCESS_LINUX_S390_HPP
+#define OS_CPU_LINUX_S390_VM_ORDERACCESS_LINUX_S390_HPP
+
+// Included in orderAccess.hpp header file.
+
+#include "vm_version_s390.hpp"
+
+// Implementation of class OrderAccess.
+
+//
+// machine barrier instructions:
+//
+//   - z_sync            two-way memory barrier, aka fence
+//
+// semantic barrier instructions:
+// (as defined in orderAccess.hpp)
+//
+//   - z_release         orders Store|Store,    (maps to compiler barrier)
+//                               Load|Store
+//   - z_acquire         orders  Load|Store,    (maps to compiler barrier)
+//                               Load|Load
+//   - z_fence           orders Store|Store,    (maps to z_sync)
+//                               Load|Store,
+//                               Load|Load,
+//                              Store|Load
+//
+
+
+// Only load-after-store ordering is not guaranteed on z/Architecture,
+// i.e. only 'fence' needs a real barrier instruction.
+
+// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions.
+#define inlasm_compiler_barrier() __asm__ volatile ("" : : : "memory");
+// "bcr 15, 0" is used as two way memory barrier.
+#define inlasm_zarch_sync() __asm__ __volatile__ ("bcr 15, 0" : : : "memory");
+
+// Release and acquire need no machine instruction on z/Architecture,
+// but OrderAccess::release and OrderAccess::acquire must still forbid
+// gcc's reordering optimizations.
+#define inlasm_zarch_release() inlasm_compiler_barrier()
+#define inlasm_zarch_acquire() inlasm_compiler_barrier()
+#define inlasm_zarch_fence()   inlasm_zarch_sync()
+
+inline void OrderAccess::loadload()   { inlasm_compiler_barrier(); }
+inline void OrderAccess::storestore() { inlasm_compiler_barrier(); }
+inline void OrderAccess::loadstore()  { inlasm_compiler_barrier(); }
+inline void OrderAccess::storeload()  { inlasm_zarch_sync(); }
+
+inline void OrderAccess::acquire()    { inlasm_zarch_acquire(); }
+inline void OrderAccess::release()    { inlasm_zarch_release(); }
+inline void OrderAccess::fence()      { inlasm_zarch_sync(); }
+
+template<size_t byte_size>
+struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
+{
+  template <typename T>
+  T operator()(const volatile T* p) const { register T t = *p; inlasm_zarch_acquire(); return t; }
+};
+
+#undef inlasm_compiler_barrier
+#undef inlasm_zarch_sync
+#undef inlasm_zarch_release
+#undef inlasm_zarch_acquire
+#undef inlasm_zarch_fence
+
+#endif // OS_CPU_LINUX_S390_VM_ORDERACCESS_LINUX_S390_HPP
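
z/Architecture is strongly ordered except for store-then-load, which is why
every barrier above other than storeload()/fence() reduces to a compiler
barrier. A sketch of the classic pattern where the bcr 15,0 barrier is
actually required, a Dekker-style handshake (the flag names and function are
illustrative, not JDK code):

    // Sketch: store-then-load is the one ordering z/Architecture does not
    // guarantee, so this handshake needs the bcr 15,0 full barrier.
    volatile int flag_a = 0;
    volatile int flag_b = 0;

    inline bool try_enter_a() {
      flag_a = 1;
      __asm__ __volatile__ ("bcr 15, 0" : : : "memory");  // StoreLoad fence
      return flag_b == 0;  // without the fence this load could pass the store
    }
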
--- a/src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.inline.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2016 SAP SE. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_LINUX_S390_VM_ORDERACCESS_LINUX_S390_INLINE_HPP
-#define OS_CPU_LINUX_S390_VM_ORDERACCESS_LINUX_S390_INLINE_HPP
-
-#include "runtime/orderAccess.hpp"
-#include "vm_version_s390.hpp"
-
-// Implementation of class OrderAccess.
-
-//
-// machine barrier instructions:
-//
-//   - z_sync            two-way memory barrier, aka fence
-//
-// semantic barrier instructions:
-// (as defined in orderAccess.hpp)
-//
-//   - z_release         orders Store|Store,    (maps to compiler barrier)
-//                               Load|Store
-//   - z_acquire         orders  Load|Store,    (maps to compiler barrier)
-//                               Load|Load
-//   - z_fence           orders Store|Store,    (maps to z_sync)
-//                               Load|Store,
-//                               Load|Load,
-//                              Store|Load
-//
-
-
-// Only load-after-store-order is not guaranteed on z/Architecture, i.e. only 'fence'
-// is needed.
-
-// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions.
-#define inlasm_compiler_barrier() __asm__ volatile ("" : : : "memory");
-// "bcr 15, 0" is used as two way memory barrier.
-#define inlasm_zarch_sync() __asm__ __volatile__ ("bcr 15, 0" : : : "memory");
-
-// Release and acquire are empty on z/Architecture, but potential
-// optimizations of gcc must be forbidden by OrderAccess::release and
-// OrderAccess::acquire.
-#define inlasm_zarch_release() inlasm_compiler_barrier()
-#define inlasm_zarch_acquire() inlasm_compiler_barrier()
-#define inlasm_zarch_fence()   inlasm_zarch_sync()
-
-inline void OrderAccess::loadload()   { inlasm_compiler_barrier(); }
-inline void OrderAccess::storestore() { inlasm_compiler_barrier(); }
-inline void OrderAccess::loadstore()  { inlasm_compiler_barrier(); }
-inline void OrderAccess::storeload()  { inlasm_zarch_sync(); }
-
-inline void OrderAccess::acquire()    { inlasm_zarch_acquire(); }
-inline void OrderAccess::release()    { inlasm_zarch_release(); }
-inline void OrderAccess::fence()      { inlasm_zarch_sync(); }
-
-template<size_t byte_size>
-struct OrderAccess::PlatformOrderedLoad<byte_size, X_ACQUIRE>
-{
-  template <typename T>
-  T operator()(const volatile T* p) const { register T t = *p; inlasm_zarch_acquire(); return t; }
-};
-
-#undef inlasm_compiler_barrier
-#undef inlasm_zarch_sync
-#undef inlasm_zarch_release
-#undef inlasm_zarch_acquire
-#undef inlasm_zarch_fence
-
-#endif // OS_CPU_LINUX_S390_VM_ORDERACCESS_LINUX_S390_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_sparc/orderAccess_linux_sparc.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_SPARC_VM_ORDERACCESS_LINUX_SPARC_HPP
+#define OS_CPU_LINUX_SPARC_VM_ORDERACCESS_LINUX_SPARC_HPP
+
+// Included in orderAccess.hpp header file.
+
+// Implementation of class OrderAccess.
+
+// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
+static inline void compiler_barrier() {
+  __asm__ volatile ("" : : : "memory");
+}
+
+// Assume TSO.
+
+inline void OrderAccess::loadload()   { compiler_barrier(); }
+inline void OrderAccess::storestore() { compiler_barrier(); }
+inline void OrderAccess::loadstore()  { compiler_barrier(); }
+inline void OrderAccess::storeload()  { fence();            }
+
+inline void OrderAccess::acquire()    { compiler_barrier(); }
+inline void OrderAccess::release()    { compiler_barrier(); }
+
+inline void OrderAccess::fence() {
+  __asm__ volatile ("membar  #StoreLoad" : : : "memory");
+}
+
+#endif // OS_CPU_LINUX_SPARC_VM_ORDERACCESS_LINUX_SPARC_HPP
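
Under the TSO model assumed above, the hardware already preserves every
ordering except Store->Load, so acquire() and release() only have to stop
the compiler from reordering or caching accesses; the empty asm with a
"memory" clobber emits no instruction at all. A sketch of what that buys
(acquire_load_sketch is an illustrative name):

    // Sketch: an acquire load on a TSO machine needs no barrier instruction,
    // only a compiler barrier so gcc cannot move accesses across it.
    inline int acquire_load_sketch(const volatile int* p) {
      int v = *p;
      __asm__ volatile ("" : : : "memory");  // zero instructions emitted
      return v;
    }
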
--- a/src/hotspot/os_cpu/linux_sparc/orderAccess_linux_sparc.inline.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,51 +0,0 @@
-/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_LINUX_SPARC_VM_ORDERACCESS_LINUX_SPARC_INLINE_HPP
-#define OS_CPU_LINUX_SPARC_VM_ORDERACCESS_LINUX_SPARC_INLINE_HPP
-
-#include "runtime/orderAccess.hpp"
-
-// Implementation of class OrderAccess.
-
-// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
-static inline void compiler_barrier() {
-  __asm__ volatile ("" : : : "memory");
-}
-
-// Assume TSO.
-
-inline void OrderAccess::loadload()   { compiler_barrier(); }
-inline void OrderAccess::storestore() { compiler_barrier(); }
-inline void OrderAccess::loadstore()  { compiler_barrier(); }
-inline void OrderAccess::storeload()  { fence();            }
-
-inline void OrderAccess::acquire()    { compiler_barrier(); }
-inline void OrderAccess::release()    { compiler_barrier(); }
-
-inline void OrderAccess::fence() {
-  __asm__ volatile ("membar  #StoreLoad" : : : "memory");
-}
-
-#endif // OS_CPU_LINUX_SPARC_VM_ORDERACCESS_LINUX_SPARC_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_HPP
+#define OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_HPP
+
+// Included in orderAccess.hpp header file.
+
+// Compiler version last used for testing: gcc 4.8.2
+// Please update this information when this file changes
+
+// Implementation of class OrderAccess.
+
+// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
+static inline void compiler_barrier() {
+  __asm__ volatile ("" : : : "memory");
+}
+
+inline void OrderAccess::loadload()   { compiler_barrier(); }
+inline void OrderAccess::storestore() { compiler_barrier(); }
+inline void OrderAccess::loadstore()  { compiler_barrier(); }
+inline void OrderAccess::storeload()  { fence();            }
+
+inline void OrderAccess::acquire()    { compiler_barrier(); }
+inline void OrderAccess::release()    { compiler_barrier(); }
+
+inline void OrderAccess::fence() {
+  // Always use locked addl since mfence is sometimes expensive.
+#ifdef AMD64
+  __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
+#else
+  __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
+#endif
+  compiler_barrier();
+}
+
+template<>
+struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile (  "xchgb (%2),%0"
+                      : "=q" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
+
+template<>
+struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile (  "xchgw (%2),%0"
+                      : "=r" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
+
+template<>
+struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile (  "xchgl (%2),%0"
+                      : "=r" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
+
+#ifdef AMD64
+template<>
+struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm__ volatile (  "xchgq (%2), %0"
+                      : "=r" (v)
+                      : "0" (v), "r" (p)
+                      : "memory");
+  }
+};
+#endif // AMD64
+
+#endif // OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_HPP
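
The PlatformOrderedStore specializations above exploit the fact that an x86
xchg with a memory operand is implicitly locked: a single instruction both
performs the store and acts as a full fence, which is how they implement
RELEASE_X_FENCE without a separate barrier. An equivalent standalone sketch
for a 4-byte value (release_store_fence_sketch is an illustrative name):

    // Sketch: xchg's implicit lock prefix makes the store a full fence.
    inline void release_store_fence_sketch(volatile int* p, int v) {
      __asm__ volatile ("xchgl %0, %1"
                        : "+r" (v), "+m" (*p)
                        : : "memory");
    }
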
--- a/src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.inline.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,112 +0,0 @@
-/*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
-#define OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/orderAccess.hpp"
-#include "runtime/os.hpp"
-
-// Compiler version last used for testing: gcc 4.8.2
-// Please update this information when this file changes
-
-// Implementation of class OrderAccess.
-
-// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
-static inline void compiler_barrier() {
-  __asm__ volatile ("" : : : "memory");
-}
-
-inline void OrderAccess::loadload()   { compiler_barrier(); }
-inline void OrderAccess::storestore() { compiler_barrier(); }
-inline void OrderAccess::loadstore()  { compiler_barrier(); }
-inline void OrderAccess::storeload()  { fence();            }
-
-inline void OrderAccess::acquire()    { compiler_barrier(); }
-inline void OrderAccess::release()    { compiler_barrier(); }
-
-inline void OrderAccess::fence() {
-  if (os::is_MP()) {
-    // always use locked addl since mfence is sometimes expensive
-#ifdef AMD64
-    __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
-#else
-    __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
-#endif
-  }
-  compiler_barrier();
-}
-
-template<>
-struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm__ volatile (  "xchgb (%2),%0"
-                      : "=q" (v)
-                      : "0" (v), "r" (p)
-                      : "memory");
-  }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm__ volatile (  "xchgw (%2),%0"
-                      : "=r" (v)
-                      : "0" (v), "r" (p)
-                      : "memory");
-  }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm__ volatile (  "xchgl (%2),%0"
-                      : "=r" (v)
-                      : "0" (v), "r" (p)
-                      : "memory");
-  }
-};
-
-#ifdef AMD64
-template<>
-struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm__ volatile (  "xchgq (%2), %0"
-                      : "=r" (v)
-                      : "0" (v), "r" (p)
-                      : "memory");
-  }
-};
-#endif // AMD64
-
-#endif // OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/linux_zero/orderAccess_linux_zero.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright 2007, 2008, 2009 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_LINUX_ZERO_VM_ORDERACCESS_LINUX_ZERO_HPP
+#define OS_CPU_LINUX_ZERO_VM_ORDERACCESS_LINUX_ZERO_HPP
+
+// Included in orderAccess.hpp header file.
+
+#ifdef ARM
+
+/*
+ * ARM Kernel helper for memory barrier.
+ * Using __asm __volatile ("":::"memory") does not work reliably on ARM,
+ * and the gcc __sync_synchronize() implementation does not use the kernel
+ * helper in all gcc versions, so it is unreliable as well.
+ */
+typedef void (__kernel_dmb_t) (void);
+#define __kernel_dmb (*(__kernel_dmb_t *) 0xffff0fa0)
+
+#define FULL_MEM_BARRIER __kernel_dmb()
+#define LIGHT_MEM_BARRIER __kernel_dmb()
+
+#else // ARM
+
+#define FULL_MEM_BARRIER __sync_synchronize()
+
+#ifdef PPC
+
+#ifdef __NO_LWSYNC__
+#define LIGHT_MEM_BARRIER __asm __volatile ("sync":::"memory")
+#else
+#define LIGHT_MEM_BARRIER __asm __volatile ("lwsync":::"memory")
+#endif
+
+#else // PPC
+
+#ifdef ALPHA
+
+#define LIGHT_MEM_BARRIER __sync_synchronize()
+
+#else // ALPHA
+
+#define LIGHT_MEM_BARRIER __asm __volatile ("":::"memory")
+
+#endif // ALPHA
+
+#endif // PPC
+
+#endif // ARM
+
+// Note: What is meant by LIGHT_MEM_BARRIER is a barrier which is sufficient
+// to provide TSO semantics, i.e. StoreStore | LoadLoad | LoadStore.
+
+inline void OrderAccess::loadload()   { LIGHT_MEM_BARRIER; }
+inline void OrderAccess::storestore() { LIGHT_MEM_BARRIER; }
+inline void OrderAccess::loadstore()  { LIGHT_MEM_BARRIER; }
+inline void OrderAccess::storeload()  { FULL_MEM_BARRIER;  }
+
+inline void OrderAccess::acquire()    { LIGHT_MEM_BARRIER; }
+inline void OrderAccess::release()    { LIGHT_MEM_BARRIER; }
+
+inline void OrderAccess::fence()      { FULL_MEM_BARRIER;  }
+
+#endif // OS_CPU_LINUX_ZERO_VM_ORDERACCESS_LINUX_ZERO_HPP
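
Two notes on the Zero file above: the magic address 0xffff0fa0 is the ARM
Linux kuser memory-barrier helper, a kernel-provided entry point that always
performs whatever barrier the running CPU requires; and LIGHT_MEM_BARRIER
is, per the comment, anything strong enough for TSO. For comparison only,
an approximate C++11 rendering of the light/full split (a sketch, not what
Zero itself uses):

    #include <atomic>

    // Approximate C++11 equivalents of the macros above (sketch only).
    inline void light_mem_barrier() {  // LoadLoad | StoreStore | LoadStore
      std::atomic_thread_fence(std::memory_order_acq_rel);
    }

    inline void full_mem_barrier() {   // additionally orders StoreLoad
      std::atomic_thread_fence(std::memory_order_seq_cst);
    }
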
--- a/src/hotspot/os_cpu/linux_zero/orderAccess_linux_zero.inline.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,86 +0,0 @@
-/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2007, 2008, 2009 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_LINUX_ZERO_VM_ORDERACCESS_LINUX_ZERO_INLINE_HPP
-#define OS_CPU_LINUX_ZERO_VM_ORDERACCESS_LINUX_ZERO_INLINE_HPP
-
-#include "runtime/orderAccess.hpp"
-
-#ifdef ARM
-
-/*
- * ARM Kernel helper for memory barrier.
- * Using __asm __volatile ("":::"memory") does not work reliable on ARM
- * and gcc __sync_synchronize(); implementation does not use the kernel
- * helper for all gcc versions so it is unreliable to use as well.
- */
-typedef void (__kernel_dmb_t) (void);
-#define __kernel_dmb (*(__kernel_dmb_t *) 0xffff0fa0)
-
-#define FULL_MEM_BARRIER __kernel_dmb()
-#define LIGHT_MEM_BARRIER __kernel_dmb()
-
-#else // ARM
-
-#define FULL_MEM_BARRIER __sync_synchronize()
-
-#ifdef PPC
-
-#ifdef __NO_LWSYNC__
-#define LIGHT_MEM_BARRIER __asm __volatile ("sync":::"memory")
-#else
-#define LIGHT_MEM_BARRIER __asm __volatile ("lwsync":::"memory")
-#endif
-
-#else // PPC
-
-#ifdef ALPHA
-
-#define LIGHT_MEM_BARRIER __sync_synchronize()
-
-#else // ALPHA
-
-#define LIGHT_MEM_BARRIER __asm __volatile ("":::"memory")
-
-#endif // ALPHA
-
-#endif // PPC
-
-#endif // ARM
-
-// Note: What is meant by LIGHT_MEM_BARRIER is a barrier which is sufficient
-// to provide TSO semantics, i.e. StoreStore | LoadLoad | LoadStore.
-
-inline void OrderAccess::loadload()   { LIGHT_MEM_BARRIER; }
-inline void OrderAccess::storestore() { LIGHT_MEM_BARRIER; }
-inline void OrderAccess::loadstore()  { LIGHT_MEM_BARRIER; }
-inline void OrderAccess::storeload()  { FULL_MEM_BARRIER;  }
-
-inline void OrderAccess::acquire()    { LIGHT_MEM_BARRIER; }
-inline void OrderAccess::release()    { LIGHT_MEM_BARRIER; }
-
-inline void OrderAccess::fence()      { FULL_MEM_BARRIER;  }
-
-#endif // OS_CPU_LINUX_ZERO_VM_ORDERACCESS_LINUX_ZERO_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/solaris_sparc/orderAccess_solaris_sparc.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_SOLARIS_SPARC_VM_ORDERACCESS_SOLARIS_SPARC_HPP
+#define OS_CPU_SOLARIS_SPARC_VM_ORDERACCESS_SOLARIS_SPARC_HPP
+
+// Included in orderAccess.hpp header file.
+
+// Compiler version last used for testing: solaris studio 12u3
+// Please update this information when this file changes
+
+// Implementation of class OrderAccess.
+
+// Assume TSO.
+
+// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
+inline void compiler_barrier() {
+  __asm__ volatile ("" : : : "memory");
+}
+
+inline void OrderAccess::loadload()   { compiler_barrier(); }
+inline void OrderAccess::storestore() { compiler_barrier(); }
+inline void OrderAccess::loadstore()  { compiler_barrier(); }
+inline void OrderAccess::storeload()  { fence();            }
+
+inline void OrderAccess::acquire()    { compiler_barrier(); }
+inline void OrderAccess::release()    { compiler_barrier(); }
+
+inline void OrderAccess::fence() {
+  __asm__ volatile ("membar  #StoreLoad" : : : "memory");
+}
+
+#endif // OS_CPU_SOLARIS_SPARC_VM_ORDERACCESS_SOLARIS_SPARC_HPP
--- a/src/hotspot/os_cpu/solaris_sparc/orderAccess_solaris_sparc.inline.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_SOLARIS_SPARC_VM_ORDERACCESS_SOLARIS_SPARC_INLINE_HPP
-#define OS_CPU_SOLARIS_SPARC_VM_ORDERACCESS_SOLARIS_SPARC_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/orderAccess.hpp"
-
-// Compiler version last used for testing: solaris studio 12u3
-// Please update this information when this file changes
-
-// Implementation of class OrderAccess.
-
-// Assume TSO.
-
-// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
-inline void compiler_barrier() {
-  __asm__ volatile ("" : : : "memory");
-}
-
-inline void OrderAccess::loadload()   { compiler_barrier(); }
-inline void OrderAccess::storestore() { compiler_barrier(); }
-inline void OrderAccess::loadstore()  { compiler_barrier(); }
-inline void OrderAccess::storeload()  { fence();            }
-
-inline void OrderAccess::acquire()    { compiler_barrier(); }
-inline void OrderAccess::release()    { compiler_barrier(); }
-
-inline void OrderAccess::fence() {
-  __asm__ volatile ("membar  #StoreLoad" : : : "memory");
-}
-
-#endif // OS_CPU_SOLARIS_SPARC_VM_ORDERACCESS_SOLARIS_SPARC_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/solaris_x86/orderAccess_solaris_x86.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_HPP
+#define OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_HPP
+
+// Included in orderAccess.hpp header file.
+
+// Compiler version last used for testing: solaris studio 12u3
+// Please update this information when this file changes
+
+// Implementation of class OrderAccess.
+
+// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
+inline void compiler_barrier() {
+  __asm__ volatile ("" : : : "memory");
+}
+
+inline void OrderAccess::loadload()   { compiler_barrier(); }
+inline void OrderAccess::storestore() { compiler_barrier(); }
+inline void OrderAccess::loadstore()  { compiler_barrier(); }
+inline void OrderAccess::storeload()  { fence();            }
+
+inline void OrderAccess::acquire()    { compiler_barrier(); }
+inline void OrderAccess::release()    { compiler_barrier(); }
+
+inline void OrderAccess::fence() {
+#ifdef AMD64
+  __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
+#else
+  __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
+#endif
+  compiler_barrier();
+}
+
+#endif // OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_HPP
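
As with linux_x86 earlier, comparing the new solaris_x86 fence() with the
deleted .inline.hpp below shows a small behavioral change riding along with
the rename: the os::is_MP() guard is gone, so the locked addl now executes
unconditionally. The idiom targets the top of the stack because a locked
read-modify-write to any address is a full fence on x86, and (%esp)/(%rsp)
is very likely already in cache. A standalone sketch (fence_sketch is an
illustrative name):

    // Sketch of the locked-add fence idiom in isolation (GCC inline asm).
    inline void fence_sketch() {
    #ifdef __x86_64__
      __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
    #else
      __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
    #endif
    }
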
--- a/src/hotspot/os_cpu/solaris_x86/orderAccess_solaris_x86.inline.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_INLINE_HPP
-#define OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/orderAccess.hpp"
-#include "runtime/os.hpp"
-
-// Compiler version last used for testing: solaris studio 12u3
-// Please update this information when this file changes
-
-// Implementation of class OrderAccess.
-
-// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
-inline void compiler_barrier() {
-  __asm__ volatile ("" : : : "memory");
-}
-
-inline void OrderAccess::loadload()   { compiler_barrier(); }
-inline void OrderAccess::storestore() { compiler_barrier(); }
-inline void OrderAccess::loadstore()  { compiler_barrier(); }
-inline void OrderAccess::storeload()  { fence();            }
-
-inline void OrderAccess::acquire()    { compiler_barrier(); }
-inline void OrderAccess::release()    { compiler_barrier(); }
-
-inline void OrderAccess::fence() {
-  if (os::is_MP()) {
-#ifdef AMD64
-    __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
-#else
-    __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
-#endif
-  }
-  compiler_barrier();
-}
-
-#endif // OS_CPU_SOLARIS_X86_VM_ORDERACCESS_SOLARIS_X86_INLINE_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_HPP
+#define OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_HPP
+
+// Included in orderAccess.hpp header file.
+
+#include <intrin.h>
+
+// Compiler version last used for testing: Microsoft Visual Studio 2010
+// Please update this information when this file changes
+
+// Implementation of class OrderAccess.
+
+// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
+inline void compiler_barrier() {
+  _ReadWriteBarrier();
+}
+
+// Note that in MSVC, volatile memory accesses are explicitly
+// guaranteed to have acquire/release semantics (w.r.t. compiler
+// reordering) and therefore do not even need a compiler barrier
+// for normal acquire/release accesses. All generalized
+// bound calls like release_store go through OrderAccess::load
+// and OrderAccess::store, which perform volatile memory accesses.
+template<> inline void ScopedFence<X_ACQUIRE>::postfix()       { }
+template<> inline void ScopedFence<RELEASE_X>::prefix()        { }
+template<> inline void ScopedFence<RELEASE_X_FENCE>::prefix()  { }
+template<> inline void ScopedFence<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }
+
+inline void OrderAccess::loadload()   { compiler_barrier(); }
+inline void OrderAccess::storestore() { compiler_barrier(); }
+inline void OrderAccess::loadstore()  { compiler_barrier(); }
+inline void OrderAccess::storeload()  { fence(); }
+
+inline void OrderAccess::acquire()    { compiler_barrier(); }
+inline void OrderAccess::release()    { compiler_barrier(); }
+
+inline void OrderAccess::fence() {
+#ifdef AMD64
+  StubRoutines_fence();
+#else
+  __asm {
+    lock add dword ptr [esp], 0;
+  }
+#endif // AMD64
+  compiler_barrier();
+}
+
+#ifndef AMD64
+template<>
+struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm {
+      mov edx, p;
+      mov al, v;
+      xchg al, byte ptr [edx];
+    }
+  }
+};
+
+template<>
+struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm {
+      mov edx, p;
+      mov ax, v;
+      xchg ax, word ptr [edx];
+    }
+  }
+};
+
+template<>
+struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
+{
+  template <typename T>
+  void operator()(T v, volatile T* p) const {
+    __asm {
+      mov edx, p;
+      mov eax, v;
+      xchg eax, dword ptr [edx];
+    }
+  }
+};
+#endif // AMD64
+
+#endif // OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_HPP
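The 32-bit PlatformOrderedStore specializations above rely on the fact that x86 xchg with a memory operand carries an implicit LOCK prefix, so each store doubles as a full fence; that is why they are bound to RELEASE_X_FENCE. For comparison only, a hedged GCC-syntax equivalent of the 4-byte case (this helper is illustrative and not part of the change):

    // xchg to memory implies LOCK on x86: store + full fence in one instruction.
    inline void release_store_fence_4(volatile int* p, int v) {
      __asm__ volatile ("xchgl %0, %1"
                        : "+r" (v), "+m" (*p)
                        :
                        : "memory");
    }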
--- a/src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.inline.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,116 +0,0 @@
-/*
- * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP
-#define OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP
-
-#include <intrin.h>
-#include "runtime/atomic.hpp"
-#include "runtime/orderAccess.hpp"
-#include "runtime/os.hpp"
-
-// Compiler version last used for testing: Microsoft Visual Studio 2010
-// Please update this information when this file changes
-
-// Implementation of class OrderAccess.
-
-// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
-inline void compiler_barrier() {
-  _ReadWriteBarrier();
-}
-
-// Note that in MSVC, volatile memory accesses are explicitly
-// guaranteed to have acquire release semantics (w.r.t. compiler
-// reordering) and therefore does not even need a compiler barrier
-// for normal acquire release accesses. And all generalized
-// bound calls like release_store go through OrderAccess::load
-// and OrderAccess::store which do volatile memory accesses.
-template<> inline void ScopedFence<X_ACQUIRE>::postfix()       { }
-template<> inline void ScopedFence<RELEASE_X>::prefix()        { }
-template<> inline void ScopedFence<RELEASE_X_FENCE>::prefix()  { }
-template<> inline void ScopedFence<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }
-
-inline void OrderAccess::loadload()   { compiler_barrier(); }
-inline void OrderAccess::storestore() { compiler_barrier(); }
-inline void OrderAccess::loadstore()  { compiler_barrier(); }
-inline void OrderAccess::storeload()  { fence(); }
-
-inline void OrderAccess::acquire()    { compiler_barrier(); }
-inline void OrderAccess::release()    { compiler_barrier(); }
-
-inline void OrderAccess::fence() {
-#ifdef AMD64
-  StubRoutines_fence();
-#else
-  if (os::is_MP()) {
-    __asm {
-      lock add dword ptr [esp], 0;
-    }
-  }
-#endif // AMD64
-  compiler_barrier();
-}
-
-#ifndef AMD64
-template<>
-struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm {
-      mov edx, p;
-      mov al, v;
-      xchg al, byte ptr [edx];
-    }
-  }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm {
-      mov edx, p;
-      mov ax, v;
-      xchg ax, word ptr [edx];
-    }
-  }
-};
-
-template<>
-struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
-{
-  template <typename T>
-  void operator()(T v, volatile T* p) const {
-    __asm {
-      mov edx, p;
-      mov eax, v;
-      xchg eax, dword ptr [edx];
-    }
-  }
-};
-#endif // AMD64
-
-#endif // OS_CPU_WINDOWS_X86_VM_ORDERACCESS_WINDOWS_X86_INLINE_HPP
--- a/src/hotspot/share/aot/aotCompiledMethod.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/aot/aotCompiledMethod.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -75,7 +75,7 @@
   return (address*) ((address)fr->unextended_sp() + _meta->orig_pc_offset());
 }
 
-bool AOTCompiledMethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) {
+bool AOTCompiledMethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive) {
   return false;
 }
 
@@ -245,7 +245,7 @@
 // more conservative than for nmethods.
 void AOTCompiledMethod::flush_evol_dependents_on(InstanceKlass* dependee) {
   if (is_java_method()) {
-    cleanup_inline_caches();
+    clear_inline_caches();
     mark_for_deoptimization();
     make_not_entrant();
   }
--- a/src/hotspot/share/aot/aotCompiledMethod.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/aot/aotCompiledMethod.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -284,8 +284,8 @@
   bool is_aot_runtime_stub() const { return _method == NULL; }
 
 protected:
-  virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred);
-  virtual bool do_unloading_jvmci(bool unloading_occurred) { return false; }
+  virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive);
+  virtual bool do_unloading_jvmci() { return false; }
 
 };
 
--- a/src/hotspot/share/c1/c1_LIRAssembler.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/c1/c1_LIRAssembler.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "asm/assembler.inline.hpp"
 #include "c1/c1_Compilation.hpp"
 #include "c1/c1_Instruction.hpp"
 #include "c1/c1_InstructionPrinter.hpp"
--- a/src/hotspot/share/c1/c1_MacroAssembler.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/c1/c1_MacroAssembler.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -26,7 +26,6 @@
 #define SHARE_VM_C1_C1_MACROASSEMBLER_HPP
 
 #include "asm/macroAssembler.hpp"
-#include "asm/macroAssembler.inline.hpp"
 #include "utilities/macros.hpp"
 
 class CodeEmitInfo;
--- a/src/hotspot/share/classfile/classLoader.inline.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/classfile/classLoader.inline.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -26,7 +26,7 @@
 #define SHARE_VM_CLASSFILE_CLASSLOADER_INLINE_HPP
 
 #include "classfile/classLoader.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 
 // Next entry in class path
 inline ClassPathEntry* ClassPathEntry::next() const { return OrderAccess::load_acquire(&_next); }
--- a/src/hotspot/share/classfile/dictionary.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/classfile/dictionary.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -35,7 +35,7 @@
 #include "memory/resourceArea.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/safepointVerifiers.hpp"
 #include "utilities/hashtable.inline.hpp"
 
--- a/src/hotspot/share/classfile/dictionary.inline.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/classfile/dictionary.inline.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -26,7 +26,7 @@
 #define SHARE_VM_CLASSFILE_DICTIONARY_INLINE_HPP
 
 #include "classfile/dictionary.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 
 inline ProtectionDomainEntry* DictionaryEntry::pd_set_acquire() const {
   return OrderAccess::load_acquire(&_pd_set);
--- a/src/hotspot/share/classfile/javaClasses.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/classfile/javaClasses.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -310,7 +310,8 @@
   Handle h_obj = basic_create(length, is_latin1, CHECK_NH);
   if (length > 0) {
     if (!has_multibyte) {
-      strncpy((char*)value(h_obj())->byte_at_addr(0), utf8_str, length);
+      const jbyte* src = reinterpret_cast<const jbyte*>(utf8_str);
+      ArrayAccess<>::arraycopy_from_native(src, value(h_obj()), typeArrayOopDesc::element_offset<jbyte>(0), length);
     } else if (is_latin1) {
       UTF8::convert_to_unicode(utf8_str, value(h_obj())->byte_at_addr(0), length);
     } else {
@@ -356,7 +357,8 @@
   Handle h_obj = basic_create(length, is_latin1, CHECK_NH);
   if (length > 0) {
     if (!has_multibyte) {
-      strncpy((char*)value(h_obj())->byte_at_addr(0), utf8_str, length);
+      const jbyte* src = reinterpret_cast<const jbyte*>(utf8_str);
+      ArrayAccess<>::arraycopy_from_native(src, value(h_obj()), typeArrayOopDesc::element_offset<jbyte>(0), length);
     } else if (is_latin1) {
       UTF8::convert_to_unicode(utf8_str, value(h_obj())->byte_at_addr(0), length);
     } else {
@@ -4255,7 +4257,7 @@
 int java_lang_AssertionStatusDirectives::packageEnabled_offset;
 int java_lang_AssertionStatusDirectives::deflt_offset;
 int java_nio_Buffer::_limit_offset;
-int java_util_concurrent_locks_AbstractOwnableSynchronizer::_owner_offset = 0;
+int java_util_concurrent_locks_AbstractOwnableSynchronizer::_owner_offset;
 int reflect_ConstantPool::_oop_offset;
 int reflect_UnsafeStaticFieldAccessorImpl::_base_offset;
 
@@ -4397,13 +4399,12 @@
 }
 #endif
 
-void java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(TRAPS) {
-  if (_owner_offset != 0) return;
-
-  SystemDictionary::load_abstract_ownable_synchronizer_klass(CHECK);
-  InstanceKlass* k = SystemDictionary::abstract_ownable_synchronizer_klass();
-  compute_offset(_owner_offset, k,
-                 "exclusiveOwnerThread", vmSymbols::thread_signature());
+#define AOS_FIELDS_DO(macro) \
+  macro(_owner_offset, k, "exclusiveOwnerThread", thread_signature, false)
+
+void java_util_concurrent_locks_AbstractOwnableSynchronizer::compute_offsets() {
+  InstanceKlass* k = SystemDictionary::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass();
+  AOS_FIELDS_DO(FIELD_COMPUTE_OFFSET);
 }
 
 oop java_util_concurrent_locks_AbstractOwnableSynchronizer::get_owner_threadObj(oop obj) {
@@ -4471,6 +4472,7 @@
   java_lang_StackTraceElement::compute_offsets();
   java_lang_StackFrameInfo::compute_offsets();
   java_lang_LiveStackFrameInfo::compute_offsets();
+  java_util_concurrent_locks_AbstractOwnableSynchronizer::compute_offsets();
 
   // generated interpreter code wants to know about the offsets we just computed:
   AbstractAssembler::update_delayed_values();
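Replacing the raw strncpy with ArrayAccess<>::arraycopy_from_native routes the copy through the GC-aware Access API, so collectors that interpose barriers on heap stores see the write instead of a bare memory copy. The call shape, exactly as used in the hunks above (utf8_str, h_obj and length come from the surrounding code):

    const jbyte* src = reinterpret_cast<const jbyte*>(utf8_str);  // native buffer
    ArrayAccess<>::arraycopy_from_native(src,
                                         value(h_obj()),          // destination typeArrayOop
                                         typeArrayOopDesc::element_offset<jbyte>(0),
                                         length);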
--- a/src/hotspot/share/classfile/javaClasses.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/classfile/javaClasses.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -1483,7 +1483,7 @@
  private:
   static int  _owner_offset;
  public:
-  static void initialize(TRAPS);
+  static void compute_offsets();
   static oop  get_owner_threadObj(oop obj);
 };
 
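The new compute_offsets() hooks AbstractOwnableSynchronizer into the same FIELD_COMPUTE_OFFSET machinery the other javaClasses entries use. Assuming that macro keeps its usual shape in javaClasses.cpp, the AOS_FIELDS_DO expansion boils down to the explicit call the removed initialize(TRAPS) used to make:

    // Hypothetical expansion of AOS_FIELDS_DO(FIELD_COMPUTE_OFFSET):
    compute_offset(_owner_offset, k, "exclusiveOwnerThread",
                   vmSymbols::thread_signature(), /*is_static*/ false);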
--- a/src/hotspot/share/classfile/moduleEntry.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/classfile/moduleEntry.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -25,7 +25,7 @@
 #include "precompiled.hpp"
 #include "jni.h"
 #include "classfile/classLoaderData.inline.hpp"
-#include "classfile/javaClasses.hpp"
+#include "classfile/javaClasses.inline.hpp"
 #include "classfile/moduleEntry.hpp"
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
@@ -236,10 +236,17 @@
   // The java.lang.Module for this loader's
   // corresponding unnamed module can be found in the java.lang.ClassLoader object.
   oop module = java_lang_ClassLoader::unnamedModule(cld->class_loader());
+
+  // Ensure that the unnamed module was correctly set when the class loader was constructed.
+  // The guarantee causes a recognizable crash if user code has circumvented calling the ClassLoader constructor.
+  ResourceMark rm;
+  guarantee(java_lang_Module::is_instance(module),
+            "The unnamed module for ClassLoader %s is null or not an instance of java.lang.Module. The class loader has not been initialized correctly.",
+            cld->loader_name());
+
   ModuleEntry* unnamed_module = new_unnamed_module_entry(Handle(Thread::current(), module), cld);
 
-  // Store pointer to the ModuleEntry in the unnamed module's java.lang.Module
-  // object.
+  // Store pointer to the ModuleEntry in the unnamed module's java.lang.Module object.
   java_lang_Module::set_module_entry(module, unnamed_module);
 
   return unnamed_module;
--- a/src/hotspot/share/classfile/systemDictionary.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/classfile/systemDictionary.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -76,7 +76,7 @@
 #include "runtime/java.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/mutexLocker.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/signature.hpp"
 #include "services/classLoadingService.hpp"
@@ -110,9 +110,6 @@
 
 bool        SystemDictionary::_has_checkPackageAccess     =  false;
 
-// lazily initialized klass variables
-InstanceKlass* volatile SystemDictionary::_abstract_ownable_synchronizer_klass = NULL;
-
 // Default ProtectionDomainCacheSize value
 
 const int defaultProtectionDomainCacheSize = 1009;
@@ -1897,22 +1894,6 @@
 }
 
 // ----------------------------------------------------------------------------
-// Lazily load klasses
-
-void SystemDictionary::load_abstract_ownable_synchronizer_klass(TRAPS) {
-  // if multiple threads calling this function, only one thread will load
-  // the class.  The other threads will find the loaded version once the
-  // class is loaded.
-  Klass* aos = _abstract_ownable_synchronizer_klass;
-  if (aos == NULL) {
-    Klass* k = resolve_or_fail(vmSymbols::java_util_concurrent_locks_AbstractOwnableSynchronizer(), true, CHECK);
-    // Force a fence to prevent any read before the write completes
-    OrderAccess::fence();
-    _abstract_ownable_synchronizer_klass = InstanceKlass::cast(k);
-  }
-}
-
-// ----------------------------------------------------------------------------
 // Initialization
 
 void SystemDictionary::initialize(TRAPS) {
--- a/src/hotspot/share/classfile/systemDictionary.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/classfile/systemDictionary.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -199,6 +199,9 @@
   do_klass(StackFrameInfo_klass,                        java_lang_StackFrameInfo,                  Opt                 ) \
   do_klass(LiveStackFrameInfo_klass,                    java_lang_LiveStackFrameInfo,              Opt                 ) \
                                                                                                                          \
+  /* support for stack dump lock analysis */                                                                             \
+  do_klass(java_util_concurrent_locks_AbstractOwnableSynchronizer_klass, java_util_concurrent_locks_AbstractOwnableSynchronizer, Pre ) \
+                                                                                                                         \
   /* Preload boxing klasses */                                                                                           \
   do_klass(Boolean_klass,                               java_lang_Boolean,                         Pre                 ) \
   do_klass(Character_klass,                             java_lang_Character,                       Pre                 ) \
@@ -449,12 +452,6 @@
   }
   static BasicType box_klass_type(Klass* k);  // inverse of box_klass
 
-  // methods returning lazily loaded klasses
-  // The corresponding method to load the class must be called before calling them.
-  static InstanceKlass* abstract_ownable_synchronizer_klass() { return check_klass(_abstract_ownable_synchronizer_klass); }
-
-  static void load_abstract_ownable_synchronizer_klass(TRAPS);
-
 protected:
   // Returns the class loader data to be used when looking up/updating the
   // system dictionary.
@@ -729,9 +726,6 @@
   // Variables holding commonly used klasses (preloaded)
   static InstanceKlass* _well_known_klasses[];
 
-  // Lazily loaded klasses
-  static InstanceKlass* volatile _abstract_ownable_synchronizer_klass;
-
   // table of box klasses (int_klass, etc.)
   static InstanceKlass* _box_klasses[T_VOID+1];
 
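With AbstractOwnableSynchronizer registered as a Pre well-known klass, the do_klass entry generates a direct accessor, so the lazy load_abstract_ownable_synchronizer_klass path removed above is no longer needed. Callers now fetch the klass the way javaClasses.cpp does:

    // Resolved during bootstrap; no lazy loading or explicit fence needed.
    InstanceKlass* k =
        SystemDictionary::java_util_concurrent_locks_AbstractOwnableSynchronizer_klass();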
--- a/src/hotspot/share/classfile/systemDictionaryShared.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/classfile/systemDictionaryShared.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -87,8 +87,8 @@
     assert(src != NULL, "No Manifest data");
     typeArrayOop buf = oopFactory::new_byteArray(size, CHECK_NH);
     typeArrayHandle bufhandle(THREAD, buf);
-    char* dst = (char*)(buf->byte_at_addr(0));
-    memcpy(dst, src, (size_t)size);
+    ArrayAccess<>::arraycopy_from_native(reinterpret_cast<const jbyte*>(src),
+                                         buf, typeArrayOopDesc::element_offset<jbyte>(0), size);
 
     Handle bais = JavaCalls::construct_new_instance(SystemDictionary::ByteArrayInputStream_klass(),
                       vmSymbols::byte_array_void_signature(),
--- a/src/hotspot/share/classfile/verifier.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/classfile/verifier.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -47,7 +47,7 @@
 #include "runtime/interfaceSupport.inline.hpp"
 #include "runtime/javaCalls.hpp"
 #include "runtime/jniHandles.inline.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/os.hpp"
 #include "runtime/safepointVerifiers.hpp"
 #include "runtime/thread.hpp"
--- a/src/hotspot/share/code/codeCache.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/code/codeCache.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -685,8 +685,15 @@
   assert_locked_or_safepoint(CodeCache_lock);
   CompiledMethodIterator iter;
   while(iter.next_alive()) {
-    iter.method()->do_unloading(is_alive, unloading_occurred);
+    iter.method()->do_unloading(is_alive);
   }
+
+  // Now that all the unloaded nmethods are known, clean up the caches
+  // before the CLDG is purged.
+  // This is another code cache walk, but it is moved here from gc_epilogue.
+  // G1 does a parallel walk of the nmethods, so it cleans them up
+  // as it goes and doesn't call this.
+  do_unloading_nmethod_caches(unloading_occurred);
 }
 
 void CodeCache::blobs_do(CodeBlobClosure* f) {
@@ -720,8 +727,11 @@
     assert(cur->on_scavenge_root_list(), "else shouldn't be on this list");
 
     bool is_live = (!cur->is_zombie() && !cur->is_unloaded());
-    if (TraceScavenge) {
-      cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
+    LogTarget(Trace, gc, nmethod) lt;
+    if (lt.is_enabled()) {
+      LogStream ls(lt);
+      CompileTask::print(&ls, cur,
+        is_live ? "scavenge root " : "dead scavenge root", /*short_form:*/ true);
     }
     if (is_live) {
       // Perform cur->oops_do(f), maybe just once per nmethod.
@@ -892,18 +902,26 @@
 #endif
 }
 
-void CodeCache::gc_prologue() {
+void CodeCache::gc_prologue() { }
+
+void CodeCache::gc_epilogue() {
+  prune_scavenge_root_nmethods();
 }
 
-void CodeCache::gc_epilogue() {
+
+void CodeCache::do_unloading_nmethod_caches(bool class_unloading_occurred) {
   assert_locked_or_safepoint(CodeCache_lock);
-  NOT_DEBUG(if (needs_cache_clean())) {
+  // Even if classes are not unloaded, there may have been some nmethods that are
+  // unloaded because oops in them are no longer reachable.
+  NOT_DEBUG(if (needs_cache_clean() || class_unloading_occurred)) {
     CompiledMethodIterator iter;
     while(iter.next_alive()) {
       CompiledMethod* cm = iter.method();
       assert(!cm->is_unloaded(), "Tautology");
-      DEBUG_ONLY(if (needs_cache_clean())) {
-        cm->cleanup_inline_caches();
+      DEBUG_ONLY(if (needs_cache_clean() || class_unloading_occurred)) {
+        // Clean up both unloaded klasses from nmethods and unloaded nmethods
+        // from inline caches.
+        cm->unload_nmethod_caches(/*parallel*/false, class_unloading_occurred);
       }
       DEBUG_ONLY(cm->verify());
       DEBUG_ONLY(cm->verify_oop_relocations());
@@ -911,8 +929,6 @@
   }
 
   set_needs_cache_clean(false);
-  prune_scavenge_root_nmethods();
-
   verify_icholder_relocations();
 }
 
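The TraceScavenge develop flag is replaced throughout this change by unified logging under gc+nmethod. The recurring pattern guards stream construction behind an enabled check, so -Xlog:gc+nmethod=trace costs nothing when disabled:

    LogTarget(Trace, gc, nmethod) lt;
    if (lt.is_enabled()) {
      LogStream ls(lt);
      ls.print_cr("...");   // formatted output goes through the LogStream
    }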
--- a/src/hotspot/share/code/codeCache.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/code/codeCache.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -168,9 +168,10 @@
   static void gc_epilogue();
   static void gc_prologue();
   static void verify_oops();
-  // If "unloading_occurred" is true, then unloads (i.e., breaks root links
+  // If any oops are not marked, this method unloads (i.e., breaks root links
   // to) any unmarked codeBlobs in the cache.  Sets "marked_for_unloading"
   // to "true" iff some code got unloaded.
+  // "unloading_occurred" controls whether metadata should be cleaned because of class unloading.
   static void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
   static void asserted_non_scavengable_nmethods_do(CodeBlobClosure* f = NULL) PRODUCT_RETURN;
 
@@ -223,8 +224,10 @@
 
   static bool needs_cache_clean()                     { return _needs_cache_clean; }
   static void set_needs_cache_clean(bool v)           { _needs_cache_clean = v;    }
+
   static void clear_inline_caches();                  // clear all inline caches
-  static void cleanup_inline_caches();
+  static void cleanup_inline_caches();                // clean unloaded/zombie nmethods from inline caches
+  static void do_unloading_nmethod_caches(bool class_unloading_occurred);  // clean all nmethod caches for unloading, including inline caches
 
   // Returns true if an own CodeHeap for the given CodeBlobType is available
   static bool heap_available(int code_blob_type);
--- a/src/hotspot/share/code/compiledIC.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/code/compiledIC.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -552,7 +552,8 @@
 
 // ----------------------------------------------------------------------------
 
-void CompiledStaticCall::set_to_clean() {
+void CompiledStaticCall::set_to_clean(bool in_use) {
+  // in_use is unused but needed to match the template function in CompiledMethod
   assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
   // Reset call site
   MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
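Giving CompiledStaticCall::set_to_clean a (defaulted) in_use parameter matches the CompiledIC signature, so the shared clean_if_nmethod_is_unloaded template in compiledMethod.cpp can treat both call kinds uniformly. A sketch of the call sites (ic, csc and from are that template's locals):

    ic->set_to_clean(from->is_alive());   // CompiledIC honors in_use
    csc->set_to_clean(from->is_alive());  // CompiledStaticCall accepts and ignores it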
--- a/src/hotspot/share/code/compiledIC.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/code/compiledIC.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -358,7 +358,7 @@
   virtual address destination() const = 0;
 
   // Clean static call (will force resolving on next use)
-  void set_to_clean();
+  void set_to_clean(bool in_use = true);
 
   // Set state. The entry must be the same, as computed by compute_entry.
   // Computation and setting is split up, since the actions are separate during
--- a/src/hotspot/share/code/compiledMethod.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/code/compiledMethod.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -28,6 +28,8 @@
 #include "code/scopeDesc.hpp"
 #include "code/codeCache.hpp"
 #include "interpreter/bytecode.inline.hpp"
+#include "logging/log.hpp"
+#include "logging/logTag.hpp"
 #include "memory/resourceArea.hpp"
 #include "oops/methodData.hpp"
 #include "oops/method.inline.hpp"
@@ -222,9 +224,7 @@
                        pd->return_oop());
 }
 
-void CompiledMethod::cleanup_inline_caches(bool clean_all/*=false*/) {
-  assert_locked_or_safepoint(CompiledIC_lock);
-
+address CompiledMethod::oops_reloc_begin() const {
   // If the method is not entrant or zombie then a JMP is plastered over the
   // first few bytes.  If an oop in the old code was there, that oop
   // should not get GC'd.  Skip the first few bytes of oops on
@@ -237,41 +237,7 @@
     // This shouldn't matter, since oops of non-entrant methods are never used.
     // In fact, why are we bothering to look at oops in a non-entrant method??
   }
-
-  // Find all calls in an nmethod and clear the ones that point to non-entrant,
-  // zombie and unloaded nmethods.
-  ResourceMark rm;
-  RelocIterator iter(this, low_boundary);
-  while(iter.next()) {
-    switch(iter.type()) {
-      case relocInfo::virtual_call_type:
-      case relocInfo::opt_virtual_call_type: {
-        CompiledIC *ic = CompiledIC_at(&iter);
-        // Ok, to lookup references to zombies here
-        CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
-        if( cb != NULL && cb->is_compiled() ) {
-          CompiledMethod* nm = cb->as_compiled_method();
-          // Clean inline caches pointing to zombie, non-entrant and unloaded methods
-          if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean(is_alive());
-        }
-        break;
-      }
-      case relocInfo::static_call_type: {
-          CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
-          CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
-          if( cb != NULL && cb->is_compiled() ) {
-            CompiledMethod* cm = cb->as_compiled_method();
-            // Clean inline caches pointing to zombie, non-entrant and unloaded methods
-            if (clean_all || !cm->is_in_use() || (cm->method()->code() != cm)) {
-              csc->set_to_clean();
-            }
-          }
-        break;
-      }
-      default:
-        break;
-    }
-  }
+  return low_boundary;
 }
 
 int CompiledMethod::verify_icholder_relocations() {
@@ -437,17 +403,15 @@
   return OrderAccess::load_acquire(&_unloading_clock);
 }
 
-// Processing of oop references should have been sufficient to keep
-// all strong references alive.  Any weak references should have been
-// cleared as well.  Visit all the metadata and ensure that it's
-// really alive.
-void CompiledMethod::verify_metadata_loaders(address low_boundary) {
+
+// static_stub_Relocations may have dangling references to
+// nmethods, so trim them out here.  Otherwise it looks like
+// compiled code is maintaining a link to dead metadata.
+void CompiledMethod::clean_ic_stubs() {
 #ifdef ASSERT
-    RelocIterator iter(this, low_boundary);
-    while (iter.next()) {
-    // static_stub_Relocations may have dangling references to
-    // Method*s so trim them out here.  Otherwise it looks like
-    // compiled code is maintaining a link to dead metadata.
+  address low_boundary = oops_reloc_begin();
+  RelocIterator iter(this, low_boundary);
+  while (iter.next()) {
     address static_call_addr = NULL;
     if (iter.type() == relocInfo::opt_virtual_call_type) {
       CompiledIC* cic = CompiledIC_at(&iter);
@@ -470,8 +434,6 @@
       }
     }
   }
-  // Check that the metadata embedded in the nmethod is alive
-  metadata_do(check_class);
 #endif
 }
 
@@ -479,67 +441,43 @@
 // GC to unload an nmethod if it contains otherwise unreachable
 // oops.
 
-void CompiledMethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
+void CompiledMethod::do_unloading(BoolObjectClosure* is_alive) {
   // Make sure the oop's ready to receive visitors
   assert(!is_zombie() && !is_unloaded(),
          "should not call follow on zombie or unloaded nmethod");
 
-  // If the method is not entrant then a JMP is plastered over the
-  // first few bytes.  If an oop in the old code was there, that oop
-  // should not get GC'd.  Skip the first few bytes of oops on
-  // not-entrant methods.
-  address low_boundary = verified_entry_point();
-  if (is_not_entrant()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // (See comment above.)
-  }
+  address low_boundary = oops_reloc_begin();
 
-  // Exception cache
-  clean_exception_cache();
-
-  // If class unloading occurred we first iterate over all inline caches and
-  // clear ICs where the cached oop is referring to an unloaded klass or method.
-  // The remaining live cached oops will be traversed in the relocInfo::oop_type
-  // iteration below.
-  if (unloading_occurred) {
-    RelocIterator iter(this, low_boundary);
-    while(iter.next()) {
-      if (iter.type() == relocInfo::virtual_call_type) {
-        CompiledIC *ic = CompiledIC_at(&iter);
-        clean_ic_if_metadata_is_dead(ic);
-      }
-    }
-  }
-
-  if (do_unloading_oops(low_boundary, is_alive, unloading_occurred)) {
+  if (do_unloading_oops(low_boundary, is_alive)) {
     return;
   }
 
 #if INCLUDE_JVMCI
-  if (do_unloading_jvmci(unloading_occurred)) {
+  if (do_unloading_jvmci()) {
     return;
   }
 #endif
 
-  // Ensure that all metadata is still alive
-  verify_metadata_loaders(low_boundary);
+  // Cleanup of the exception cache and inline caches happens
+  // after all the unloaded methods have been found.
 }
 
+// Clean this (not unloaded) nmethod's references to the nmethod at addr, if that one is unloaded.
 template <class CompiledICorStaticCall>
-static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from) {
+static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, CompiledMethod* from,
+                                         bool parallel, bool clean_all) {
   // Ok, to lookup references to zombies here
   CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
   CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
   if (nm != NULL) {
-    if (nm->unloading_clock() != CompiledMethod::global_unloading_clock()) {
+    if (parallel && nm->unloading_clock() != CompiledMethod::global_unloading_clock()) {
       // The nmethod has not been processed yet.
       return true;
     }
 
     // Clean inline caches pointing to both zombie and not_entrant methods
-    if (!nm->is_in_use() || (nm->method()->code() != nm)) {
-      ic->set_to_clean();
+    if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) {
+      ic->set_to_clean(from->is_alive());
       assert(ic->is_clean(), "nmethod " PTR_FORMAT "not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
     }
   }
@@ -547,12 +485,14 @@
   return false;
 }
 
-static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from) {
-  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from);
+static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, CompiledMethod* from,
+                                         bool parallel, bool clean_all = false) {
+  return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), from, parallel, clean_all);
 }
 
-static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from) {
-  return clean_if_nmethod_is_unloaded(csc, csc->destination(), from);
+static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, CompiledMethod* from,
+                                         bool parallel, bool clean_all = false) {
+  return clean_if_nmethod_is_unloaded(csc, csc->destination(), from, parallel, clean_all);
 }
 
 bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
@@ -562,47 +502,79 @@
   assert(!is_zombie() && !is_unloaded(),
          "should not call follow on zombie or unloaded nmethod");
 
-  // If the method is not entrant then a JMP is plastered over the
-  // first few bytes.  If an oop in the old code was there, that oop
-  // should not get GC'd.  Skip the first few bytes of oops on
-  // not-entrant methods.
-  address low_boundary = verified_entry_point();
-  if (is_not_entrant()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // (See comment above.)
+  address low_boundary = oops_reloc_begin();
+
+  if (do_unloading_oops(low_boundary, is_alive)) {
+    return false;
   }
 
-  // Exception cache
-  clean_exception_cache();
+#if INCLUDE_JVMCI
+  if (do_unloading_jvmci()) {
+    return false;
+  }
+#endif
 
+  return unload_nmethod_caches(/*parallel*/true, unloading_occurred);
+}
+
+// Cleans caches in nmethods that point either to classes that are unloaded
+// or to nmethods that are unloaded.
+//
+// Can be called in parallel (currently only by G1) or after all
+// nmethods have been unloaded.  Returns postponed=true in the parallel case for
+// inline caches found that point to nmethods that have not yet been visited during
+// the do_unloading walk.
+bool CompiledMethod::unload_nmethod_caches(bool parallel, bool unloading_occurred) {
+
+  // The exception cache only needs to be cleaned if unloading occurred
+  if (unloading_occurred) {
+    clean_exception_cache();
+  }
+
+  bool postponed = cleanup_inline_caches_impl(parallel, unloading_occurred, /*clean_all*/false);
+
+  // All static stubs need to be cleaned.
+  clean_ic_stubs();
+
+  // Check that the metadata embedded in the nmethod is alive
+  DEBUG_ONLY(metadata_do(check_class));
+
+  return postponed;
+}
+
+// Called for live nmethods to clean up after class unloading, and from the sweeper
+// for all methods.
+bool CompiledMethod::cleanup_inline_caches_impl(bool parallel, bool unloading_occurred, bool clean_all) {
+  assert_locked_or_safepoint(CompiledIC_lock);
   bool postponed = false;
 
-  RelocIterator iter(this, low_boundary);
+  // Find all calls in an nmethod and clear the ones that point to non-entrant,
+  // zombie and unloaded nmethods.
+  RelocIterator iter(this, oops_reloc_begin());
   while(iter.next()) {
 
     switch (iter.type()) {
 
     case relocInfo::virtual_call_type:
       if (unloading_occurred) {
-        // If class unloading occurred we first iterate over all inline caches and
-        // clear ICs where the cached oop is referring to an unloaded klass or method.
+        // If class unloading occurred we first clear ICs where the cached metadata
+        // is referring to an unloaded klass or method.
         clean_ic_if_metadata_is_dead(CompiledIC_at(&iter));
       }
 
-      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this);
+      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, parallel, clean_all);
       break;
 
     case relocInfo::opt_virtual_call_type:
-      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this);
+      postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, parallel, clean_all);
       break;
 
     case relocInfo::static_call_type:
-      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this);
+      postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, parallel, clean_all);
       break;
 
     case relocInfo::oop_type:
-      // handled by do_unloading_oops below
+      // handled by do_unloading_oops already
       break;
 
     case relocInfo::metadata_type:
@@ -613,19 +585,6 @@
     }
   }
 
-  if (do_unloading_oops(low_boundary, is_alive, unloading_occurred)) {
-    return postponed;
-  }
-
-#if INCLUDE_JVMCI
-  if (do_unloading_jvmci(unloading_occurred)) {
-    return postponed;
-  }
-#endif
-
-  // Ensure that all metadata is still alive
-  verify_metadata_loaders(low_boundary);
-
   return postponed;
 }
 
@@ -636,32 +595,21 @@
   assert(!is_zombie(),
          "should not call follow on zombie nmethod");
 
-  // If the method is not entrant then a JMP is plastered over the
-  // first few bytes.  If an oop in the old code was there, that oop
-  // should not get GC'd.  Skip the first few bytes of oops on
-  // not-entrant methods.
-  address low_boundary = verified_entry_point();
-  if (is_not_entrant()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // (See comment above.)
-  }
-
-  RelocIterator iter(this, low_boundary);
+  RelocIterator iter(this, oops_reloc_begin());
   while(iter.next()) {
 
     switch (iter.type()) {
 
     case relocInfo::virtual_call_type:
-      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this);
+      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, true);
       break;
 
     case relocInfo::opt_virtual_call_type:
-      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this);
+      clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), this, true);
       break;
 
     case relocInfo::static_call_type:
-      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this);
+      clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), this, true);
       break;
 
     default:
--- a/src/hotspot/share/code/compiledMethod.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/code/compiledMethod.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -331,8 +331,19 @@
 
   static address get_deopt_original_pc(const frame* fr);
 
-  // Inline cache support
-  void cleanup_inline_caches(bool clean_all = false);
+  // GC unloading support
+  // Cleans unloaded klasses and unloaded nmethods in inline caches
+  bool unload_nmethod_caches(bool parallel, bool class_unloading_occurred);
+
+  // Inline cache support for class unloading and nmethod unloading
+ private:
+  bool cleanup_inline_caches_impl(bool parallel, bool unloading_occurred, bool clean_all);
+ public:
+  bool cleanup_inline_caches(bool clean_all = false) {
+    // Serial version used by sweeper and whitebox test
+    return cleanup_inline_caches_impl(false, false, clean_all);
+  }
+
   virtual void clear_inline_caches();
   void clear_ic_stubs();
 
@@ -364,12 +375,15 @@
   void set_unloading_next(CompiledMethod* next) { _unloading_next = next; }
   CompiledMethod* unloading_next()              { return _unloading_next; }
 
+ protected:
+  address oops_reloc_begin() const;
+ private:
   void static clean_ic_if_metadata_is_dead(CompiledIC *ic);
 
-  // Check that all metadata is still alive
-  void verify_metadata_loaders(address low_boundary);
+  void clean_ic_stubs();
 
-  virtual void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
+ public:
+  virtual void do_unloading(BoolObjectClosure* is_alive);
   //  The parallel versions are used by G1.
   virtual bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
   virtual void do_unloading_parallel_postponed();
@@ -381,9 +395,9 @@
   unsigned char unloading_clock();
 
 protected:
-  virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) = 0;
+  virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive) = 0;
 #if INCLUDE_JVMCI
-  virtual bool do_unloading_jvmci(bool unloading_occurred) = 0;
+  virtual bool do_unloading_jvmci() = 0;
 #endif
 
 private:
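After this refactor both cleaning paths funnel into cleanup_inline_caches_impl: the sweeper/whitebox path through the serial wrapper above, and the GC path through unload_nmethod_caches once the unloading walk has run. A sketch of the two entry points (cm is a live CompiledMethod*):

    cm->cleanup_inline_caches(/*clean_all*/ true);                     // sweeper: serial
    cm->unload_nmethod_caches(/*parallel*/ false, unloading_occurred); // GC epilogue path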
--- a/src/hotspot/share/code/nmethod.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/code/nmethod.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -51,7 +51,7 @@
 #include "runtime/frame.inline.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/jniHandles.inline.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/os.hpp"
 #include "runtime/safepointVerifiers.hpp"
 #include "runtime/sharedRuntime.hpp"
@@ -946,21 +946,8 @@
 void nmethod::verify_clean_inline_caches() {
   assert_locked_or_safepoint(CompiledIC_lock);
 
-  // If the method is not entrant or zombie then a JMP is plastered over the
-  // first few bytes.  If an oop in the old code was there, that oop
-  // should not get GC'd.  Skip the first few bytes of oops on
-  // not-entrant methods.
-  address low_boundary = verified_entry_point();
-  if (!is_in_use()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // This means that the low_boundary is going to be a little too high.
-    // This shouldn't matter, since oops of non-entrant methods are never used.
-    // In fact, why are we bothering to look at oops in a non-entrant method??
-  }
-
   ResourceMark rm;
-  RelocIterator iter(this, low_boundary);
+  RelocIterator iter(this, oops_reloc_begin());
   while(iter.next()) {
     switch(iter.type()) {
       case relocInfo::virtual_call_type:
@@ -1041,13 +1028,17 @@
   flush_dependencies(/*delete_immediately*/false);
 
   // Break cycle between nmethod & method
-  LogTarget(Trace, class, unload) lt;
+  LogTarget(Trace, class, unload, nmethod) lt;
   if (lt.is_enabled()) {
     LogStream ls(lt);
-    ls.print_cr("making nmethod " INTPTR_FORMAT
-                  " unloadable, Method*(" INTPTR_FORMAT
-                  "), cause(" INTPTR_FORMAT ")",
-                  p2i(this), p2i(_method), p2i(cause));
+    ls.print("making nmethod " INTPTR_FORMAT
+             " unloadable, Method*(" INTPTR_FORMAT
+             "), cause(" INTPTR_FORMAT ") ",
+             p2i(this), p2i(_method), p2i(cause));
+    if (cause != NULL) {
+      cause->print_value_on(&ls);
+    }
+    ls.cr();
   }
   // Unlink the osr method, so we do not look this up again
   if (is_osr_method()) {
@@ -1378,17 +1369,15 @@
 
 
 // If this oop is not live, the nmethod can be unloaded.
-bool nmethod::can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred) {
+bool nmethod::can_unload(BoolObjectClosure* is_alive, oop* root) {
   assert(root != NULL, "just checking");
   oop obj = *root;
   if (obj == NULL || is_alive->do_object_b(obj)) {
       return false;
   }
 
-  // If ScavengeRootsInCode is true, an nmethod might be unloaded
-  // simply because one of its constant oops has gone dead.
+  // An nmethod might be unloaded simply because one of its constant oops has gone dead.
   // No actual classes need to be unloaded in order for this to occur.
-  assert(unloading_occurred || ScavengeRootsInCode, "Inconsistency in unloading");
   make_unloaded(obj);
   return true;
 }
@@ -1466,7 +1455,7 @@
   set_unload_reported();
 }
 
-bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *is_alive, bool unloading_occurred) {
+bool nmethod::unload_if_dead_at(RelocIterator* iter_at_oop, BoolObjectClosure *is_alive) {
   assert(iter_at_oop->type() == relocInfo::oop_type, "Wrong relocation type");
 
   oop_Relocation* r = iter_at_oop->oop_reloc();
@@ -1477,7 +1466,7 @@
          "oop must be found in exactly one place");
   if (r->oop_is_immediate() && r->oop_value() != NULL) {
     // Unload this nmethod if the oop is dead.
-    if (can_unload(is_alive, r->oop_addr(), unloading_occurred)) {
+    if (can_unload(is_alive, r->oop_addr())) {
      return true;
     }
   }
@@ -1485,18 +1474,18 @@
   return false;
 }
 
-bool nmethod::do_unloading_scopes(BoolObjectClosure* is_alive, bool unloading_occurred) {
+bool nmethod::do_unloading_scopes(BoolObjectClosure* is_alive) {
   // Scopes
   for (oop* p = oops_begin(); p < oops_end(); p++) {
     if (*p == Universe::non_oop_word())  continue;  // skip non-oops
-    if (can_unload(is_alive, p, unloading_occurred)) {
+    if (can_unload(is_alive, p)) {
       return true;
     }
   }
   return false;
 }
 
-bool nmethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) {
+bool nmethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive) {
   // Compiled code
 
   // Prevent extra code cache walk for platforms that don't have immediate oops.
@@ -1504,18 +1493,18 @@
     RelocIterator iter(this, low_boundary);
     while (iter.next()) {
       if (iter.type() == relocInfo::oop_type) {
-        if (unload_if_dead_at(&iter, is_alive, unloading_occurred)) {
+        if (unload_if_dead_at(&iter, is_alive)) {
           return true;
         }
       }
     }
   }
 
-  return do_unloading_scopes(is_alive, unloading_occurred);
+  return do_unloading_scopes(is_alive);
 }
 
 #if INCLUDE_JVMCI
-bool nmethod::do_unloading_jvmci(bool unloading_occurred) {
+bool nmethod::do_unloading_jvmci() {
   if (_jvmci_installed_code != NULL) {
     if (JNIHandles::is_global_weak_cleared(_jvmci_installed_code)) {
       if (_jvmci_installed_code_triggers_unloading) {
@@ -1533,15 +1522,9 @@
 
 // Iterate over metadata calling this function.   Used by RedefineClasses
 void nmethod::metadata_do(void f(Metadata*)) {
-  address low_boundary = verified_entry_point();
-  if (is_not_entrant()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // (See comment above.)
-  }
   {
     // Visit all immediate references that are embedded in the instruction stream.
-    RelocIterator iter(this, low_boundary);
+    RelocIterator iter(this, oops_reloc_begin());
     while (iter.next()) {
       if (iter.type() == relocInfo::metadata_type ) {
         metadata_Relocation* r = iter.metadata_reloc();
@@ -1588,20 +1571,9 @@
   assert(allow_zombie || !is_zombie(), "should not call follow on zombie nmethod");
   assert(!is_unloaded(), "should not call follow on unloaded nmethod");
 
-  // If the method is not entrant or zombie then a JMP is plastered over the
-  // first few bytes.  If an oop in the old code was there, that oop
-  // should not get GC'd.  Skip the first few bytes of oops on
-  // not-entrant methods.
-  address low_boundary = verified_entry_point();
-  if (is_not_entrant()) {
-    low_boundary += NativeJump::instruction_size;
-    // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
-    // (See comment above.)
-  }
-
   // Prevent extra code cache walk for platforms that don't have immediate oops.
   if (relocInfo::mustIterateImmediateOopsInCode()) {
-    RelocIterator iter(this, low_boundary);
+    RelocIterator iter(this, oops_reloc_begin());
 
     while (iter.next()) {
       if (iter.type() == relocInfo::oop_type ) {
@@ -1650,7 +1622,11 @@
           break;
       }
       // Mark was clear when we first saw this guy.
-      if (TraceScavenge) { print_on(tty, "oops_do, mark"); }
+      LogTarget(Trace, gc, nmethod) lt;
+      if (lt.is_enabled()) {
+        LogStream ls(lt);
+        CompileTask::print(&ls, this, "oops_do, mark", /*short_form:*/ true);
+      }
       return false;
     }
   }
@@ -1659,7 +1635,7 @@
 }
 
 void nmethod::oops_do_marking_prologue() {
-  if (TraceScavenge) { tty->print_cr("[oops_do_marking_prologue"); }
+  log_trace(gc, nmethod)("oops_do_marking_prologue");
   assert(_oops_do_mark_nmethods == NULL, "must not call oops_do_marking_prologue twice in a row");
   // We use cmpxchg instead of regular assignment here because the user
   // may fork a bunch of threads, and we need them all to see the same state.
@@ -1675,20 +1651,26 @@
     nmethod* next = cur->_oops_do_mark_link;
     cur->_oops_do_mark_link = NULL;
     DEBUG_ONLY(cur->verify_oop_relocations());
-    NOT_PRODUCT(if (TraceScavenge)  cur->print_on(tty, "oops_do, unmark"));
+
+    LogTarget(Trace, gc, nmethod) lt;
+    if (lt.is_enabled()) {
+      LogStream ls(lt);
+      CompileTask::print(&ls, cur, "oops_do, unmark", /*short_form:*/ true);
+    }
     cur = next;
   }
   nmethod* required = _oops_do_mark_nmethods;
   nmethod* observed = Atomic::cmpxchg((nmethod*)NULL, &_oops_do_mark_nmethods, required);
   guarantee(observed == required, "no races in this sequential code");
-  if (TraceScavenge) { tty->print_cr("oops_do_marking_epilogue]"); }
+  log_trace(gc, nmethod)("oops_do_marking_epilogue");
 }
 
 class DetectScavengeRoot: public OopClosure {
   bool     _detected_scavenge_root;
+  nmethod* _print_nm;
 public:
-  DetectScavengeRoot() : _detected_scavenge_root(false)
-  { NOT_PRODUCT(_print_nm = NULL); }
+  DetectScavengeRoot(nmethod* nm) : _detected_scavenge_root(false), _print_nm(nm) {}
+
   bool detected_scavenge_root() { return _detected_scavenge_root; }
   virtual void do_oop(oop* p) {
     if ((*p) != NULL && Universe::heap()->is_scavengable(*p)) {
@@ -1699,21 +1681,25 @@
   virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
 
 #ifndef PRODUCT
-  nmethod* _print_nm;
   void maybe_print(oop* p) {
-    if (_print_nm == NULL)  return;
-    if (!_detected_scavenge_root)  _print_nm->print_on(tty, "new scavenge root");
-    tty->print_cr("" PTR_FORMAT "[offset=%d] detected scavengable oop " PTR_FORMAT " (found at " PTR_FORMAT ")",
-                  p2i(_print_nm), (int)((intptr_t)p - (intptr_t)_print_nm),
-                  p2i(*p), p2i(p));
-    (*p)->print();
+    LogTarget(Trace, gc, nmethod) lt;
+    if (lt.is_enabled()) {
+      LogStream ls(lt);
+      if (!_detected_scavenge_root) {
+        CompileTask::print(&ls, _print_nm, "new scavenge root", /*short_form:*/ true);
+      }
+      ls.print("" PTR_FORMAT "[offset=%d] detected scavengable oop " PTR_FORMAT " (found at " PTR_FORMAT ") ",
+               p2i(_print_nm), (int)((intptr_t)p - (intptr_t)_print_nm),
+               p2i(*p), p2i(p));
+      (*p)->print_value_on(&ls);
+      ls.cr();
+    }
   }
 #endif //PRODUCT
 };
 
 bool nmethod::detect_scavenge_root_oops() {
-  DetectScavengeRoot detect_scavenge_root;
-  NOT_PRODUCT(if (TraceScavenge)  detect_scavenge_root._print_nm = this);
+  DetectScavengeRoot detect_scavenge_root(this);
   oops_do(&detect_scavenge_root);
   return detect_scavenge_root.detected_scavenge_root();
 }
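Note the two unified-logging forms used in nmethod.cpp: the log_trace shorthand for simple formatted one-liners, and the LogTarget/LogStream form when output is built up from print_value_on-style calls. Both appear verbatim in the hunks above:

    log_trace(gc, nmethod)("oops_do_marking_prologue");   // one-shot message

    LogTarget(Trace, gc, nmethod) lt;                     // streamed output
    if (lt.is_enabled()) {
      LogStream ls(lt);
      CompileTask::print(&ls, this, "oops_do, mark", /*short_form:*/ true);
    }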
--- a/src/hotspot/share/code/nmethod.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/code/nmethod.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -484,18 +484,18 @@
 #endif
 
  protected:
-  virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred);
+  virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive);
 #if INCLUDE_JVMCI
   // See comment for _jvmci_installed_code_triggers_unloading field.
   // Returns whether this nmethod was unloaded.
-  virtual bool do_unloading_jvmci(bool unloading_occurred);
+  virtual bool do_unloading_jvmci();
 #endif
 
  private:
-  bool do_unloading_scopes(BoolObjectClosure* is_alive, bool unloading_occurred);
+  bool do_unloading_scopes(BoolObjectClosure* is_alive);
   //  Unload a nmethod if the *root object is dead.
-  bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);
-  bool unload_if_dead_at(RelocIterator *iter_at_oop, BoolObjectClosure* is_alive, bool unloading_occurred);
+  bool can_unload(BoolObjectClosure* is_alive, oop* root);
+  bool unload_if_dead_at(RelocIterator *iter_at_oop, BoolObjectClosure* is_alive);
 
  public:
   void oops_do(OopClosure* f) { oops_do(f, false); }
--- a/src/hotspot/share/gc/cms/adaptiveFreeList.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/gc/cms/adaptiveFreeList.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
 #include "memory/freeList.inline.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/mutex.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/vmThread.hpp"
 
 template <>
--- a/src/hotspot/share/gc/cms/cmsCardTable.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/gc/cms/cmsCardTable.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -34,7 +34,7 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
 #include "runtime/mutexLocker.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/vmThread.hpp"
 
 CMSCardTable::CMSCardTable(MemRegion whole_heap) :
--- a/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/gc/cms/compactibleFreeListSpace.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -45,7 +45,7 @@
 #include "runtime/handles.inline.hpp"
 #include "runtime/init.hpp"
 #include "runtime/java.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/align.hpp"
 #include "utilities/copy.hpp"
--- a/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/gc/cms/concurrentMarkSweepGeneration.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -74,7 +74,7 @@
 #include "runtime/globals_extension.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/java.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/timer.hpp"
 #include "runtime/vmThread.hpp"
 #include "services/memoryService.hpp"
@@ -5142,7 +5142,7 @@
   rp->setup_policy(false);
   verify_work_stacks_empty();
 
-  ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_queues());
+  ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->max_num_queues());
   {
     GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer_cm);
 
--- a/src/hotspot/share/gc/cms/parNewGeneration.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/gc/cms/parNewGeneration.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -958,7 +958,7 @@
   // Can  the mt_degree be set later (at run_task() time would be best)?
   rp->set_active_mt_degree(active_workers);
   ReferenceProcessorStats stats;
-  ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_queues());
+  ReferenceProcessorPhaseTimes pt(_gc_timer, rp->max_num_queues());
   if (rp->processing_is_mt()) {
     ParNewRefProcTaskExecutor task_executor(*this, *_old_gen, thread_state_set);
     stats = rp->process_discovered_references(&is_alive, &keep_alive,
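The pt constructors in these hunks now size their per-worker timing arrays by the reference processor's fixed maximum queue count rather than the currently active MT degree, which set_active_mt_degree() can change between cycles. A minimal sketch of why sizing by the maximum is the safe choice, using simplified stand-ins rather than the real ReferenceProcessor/ReferenceProcessorPhaseTimes classes (all names here are illustrative):

    #include <cstddef>
    #include <vector>

    // Simplified stand-ins; names and fields are illustrative only.
    class RefProcStub {
      const size_t _max_queues;   // fixed at construction time
      size_t _active_queues;      // may be raised or lowered per GC cycle
    public:
      explicit RefProcStub(size_t max) : _max_queues(max), _active_queues(max) {}
      size_t max_num_queues() const { return _max_queues; }
      size_t num_queues() const     { return _active_queues; }
      void set_active_mt_degree(size_t n) {
        _active_queues = (n < _max_queues) ? n : _max_queues;
      }
    };

    class PhaseTimesStub {
      std::vector<double> _worker_ms;
    public:
      // Sizing by max_num_queues() keeps worker indices valid even if the
      // active degree is raised after this object has been constructed.
      explicit PhaseTimesStub(const RefProcStub& rp)
        : _worker_ms(rp.max_num_queues(), 0.0) {}
      void record(size_t worker, double ms) { _worker_ms.at(worker) = ms; }
    };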
--- a/src/hotspot/share/gc/g1/c2/g1BarrierSetC2.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/c2/g1BarrierSetC2.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -25,6 +25,7 @@
 #include "precompiled.hpp"
 #include "gc/g1/c2/g1BarrierSetC2.hpp"
 #include "gc/g1/g1BarrierSet.hpp"
+#include "gc/g1/g1BarrierSetRuntime.hpp"
 #include "gc/g1/g1CardTable.hpp"
 #include "gc/g1/g1ThreadLocalData.hpp"
 #include "gc/g1/heapRegion.hpp"
@@ -33,10 +34,9 @@
 #include "opto/idealKit.hpp"
 #include "opto/macro.hpp"
 #include "opto/type.hpp"
-#include "runtime/sharedRuntime.hpp"
 #include "utilities/macros.hpp"
 
-const TypeFunc *G1BarrierSetC2::g1_wb_pre_Type() {
+const TypeFunc *G1BarrierSetC2::write_ref_field_pre_entry_Type() {
   const Type **fields = TypeTuple::fields(2);
   fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // original field value
   fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL; // thread
@@ -49,7 +49,7 @@
   return TypeFunc::make(domain, range);
 }
 
-const TypeFunc *G1BarrierSetC2::g1_wb_post_Type() {
+const TypeFunc *G1BarrierSetC2::write_ref_field_post_entry_Type() {
   const Type **fields = TypeTuple::fields(2);
   fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL;  // Card addr
   fields[TypeFunc::Parms+1] = TypeRawPtr::NOTNULL;  // thread
@@ -264,8 +264,8 @@
       } __ else_(); {
 
         // logging buffer is full, call the runtime
-        const TypeFunc *tf = g1_wb_pre_Type();
-        __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), "g1_wb_pre", pre_val, tls);
+        const TypeFunc *tf = write_ref_field_pre_entry_Type();
+        __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_pre_entry), "write_ref_field_pre_entry", pre_val, tls);
       } __ end_if();  // (!index)
     } __ end_if();  // (pre_val != NULL)
   } __ end_if();  // (!marking)
@@ -364,7 +364,7 @@
     __ store(__ ctrl(), index_adr, next_index, TypeX_X->basic_type(), Compile::AliasIdxRaw, MemNode::unordered);
 
   } __ else_(); {
-    __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), "g1_wb_post", card_adr, __ thread());
+    __ make_leaf_call(tf, CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry), "write_ref_field_post_entry", card_adr, __ thread());
   } __ end_if();
 
 }
@@ -419,7 +419,7 @@
   Node* dirty_card = __ ConI((jint)G1CardTable::dirty_card_val());
   Node* zeroX = __ ConX(0);
 
-  const TypeFunc *tf = g1_wb_post_Type();
+  const TypeFunc *tf = write_ref_field_post_entry_Type();
 
   // Offsets into the thread
   const int index_offset  = in_bytes(G1ThreadLocalData::dirty_card_queue_index_offset());
@@ -652,7 +652,7 @@
     return false;
   }
 
-  return strcmp(call->_name, "g1_wb_pre") == 0 || strcmp(call->_name, "g1_wb_post") == 0;
+  return strcmp(call->_name, "write_ref_field_pre_entry") == 0 || strcmp(call->_name, "write_ref_field_post_entry") == 0;
 }
 
 void G1BarrierSetC2::eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const {
@@ -747,7 +747,7 @@
           if (r->in(j) != NULL && r->in(j)->is_Proj() &&
               r->in(j)->in(0) != NULL &&
               r->in(j)->in(0)->Opcode() == Op_CallLeaf &&
-              r->in(j)->in(0)->as_Call()->entry_point() == CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post)) {
+              r->in(j)->in(0)->as_Call()->entry_point() == CAST_FROM_FN_PTR(address, G1BarrierSetRuntime::write_ref_field_post_entry)) {
             Node* call = r->in(j)->in(0);
             c = c->in(i == 1 ? 2 : 1);
             if (c != NULL) {
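The IdealKit graph built in this file keeps the classic SATB pre-barrier shape and only retargets the slow path at G1BarrierSetRuntime: test the marking flag, skip null pre-values, try the thread-local log buffer, and call the runtime entry only when the buffer is full. A hedged C++ sketch of that control flow, with simplified thread-local fields standing in for G1ThreadLocalData (all names here are illustrative, not the HotSpot API):

    #include <cstddef>

    typedef class oopDescStub* oop;   // stand-in for HotSpot's oop

    struct ThreadStub {               // stand-in for G1ThreadLocalData fields
      bool   satb_active;             // "concurrent marking in progress" flag
      size_t satb_index;              // bytes remaining in the SATB buffer
      oop*   satb_buf;                // thread-local SATB log buffer
    };

    // Stand-in for G1BarrierSetRuntime::write_ref_field_pre_entry; the real
    // entry enqueues pre_val on the thread's SATB queue.
    static void runtime_pre_entry(oop /*pre_val*/, ThreadStub* /*t*/) {}

    // Shape of the generated pre-barrier: record the field's old value only
    // while marking is active, preferring the thread-local buffer.
    inline void write_ref_field_pre(oop pre_val, ThreadStub* t) {
      if (!t->satb_active) return;          // fast path: marking is off
      if (pre_val == nullptr) return;       // nothing worth logging
      if (t->satb_index != 0) {             // room left in the buffer
        size_t next = t->satb_index - sizeof(oop);
        t->satb_buf[next / sizeof(oop)] = pre_val;
        t->satb_index = next;
      } else {
        runtime_pre_entry(pre_val, t);      // buffer full: take the slow path
      }
    }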
--- a/src/hotspot/share/gc/g1/c2/g1BarrierSetC2.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/c2/g1BarrierSetC2.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -79,8 +79,8 @@
   // Unsafe.getObject should be recorded in an SATB log buffer.
   void insert_pre_barrier(GraphKit* kit, Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar) const;
 
-  static const TypeFunc* g1_wb_pre_Type();
-  static const TypeFunc* g1_wb_post_Type();
+  static const TypeFunc* write_ref_field_pre_entry_Type();
+  static const TypeFunc* write_ref_field_post_entry_Type();
 
   virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const;
 
--- a/src/hotspot/share/gc/g1/g1AllocRegion.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1AllocRegion.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/resourceArea.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "utilities/align.hpp"
 
 G1CollectedHeap* G1AllocRegion::_g1h = NULL;
--- a/src/hotspot/share/gc/g1/g1BarrierSet.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -72,21 +72,6 @@
   }
 }
 
-void G1BarrierSet::write_ref_array_pre_oop_entry(oop* dst, size_t length) {
-  G1BarrierSet *bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
-  bs->write_ref_array_pre(dst, length, false);
-}
-
-void G1BarrierSet::write_ref_array_pre_narrow_oop_entry(narrowOop* dst, size_t length) {
-  G1BarrierSet *bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
-  bs->write_ref_array_pre(dst, length, false);
-}
-
-void G1BarrierSet::write_ref_array_post_entry(HeapWord* dst, size_t length) {
-  G1BarrierSet *bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
-  bs->G1BarrierSet::write_ref_array(dst, length);
-}
-
 template <class T> void
 G1BarrierSet::write_ref_array_pre_work(T* dst, size_t count) {
   if (!_satb_mark_queue_set.is_active()) return;
--- a/src/hotspot/share/gc/g1/g1BarrierSet.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1BarrierSet.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -56,10 +56,6 @@
   virtual void write_ref_array_pre(oop* dst, size_t count, bool dest_uninitialized);
   virtual void write_ref_array_pre(narrowOop* dst, size_t count, bool dest_uninitialized);
 
-  static void write_ref_array_pre_oop_entry(oop* dst, size_t length);
-  static void write_ref_array_pre_narrow_oop_entry(narrowOop* dst, size_t length);
-  static void write_ref_array_post_entry(HeapWord* dst, size_t length);
-
   template <DecoratorSet decorators, typename T>
   void write_ref_field_pre(T* field);
 
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1BarrierSetRuntime.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc/g1/g1BarrierSet.inline.hpp"
+#include "gc/g1/g1BarrierSetRuntime.hpp"
+#include "gc/g1/g1ThreadLocalData.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
+#include "utilities/macros.hpp"
+
+void G1BarrierSetRuntime::write_ref_array_pre_oop_entry(oop* dst, size_t length) {
+  G1BarrierSet *bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
+  bs->write_ref_array_pre(dst, length, false);
+}
+
+void G1BarrierSetRuntime::write_ref_array_pre_narrow_oop_entry(narrowOop* dst, size_t length) {
+  G1BarrierSet *bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
+  bs->write_ref_array_pre(dst, length, false);
+}
+
+void G1BarrierSetRuntime::write_ref_array_post_entry(HeapWord* dst, size_t length) {
+  G1BarrierSet *bs = barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
+  bs->G1BarrierSet::write_ref_array(dst, length);
+}
+
+// G1 pre write barrier slowpath
+JRT_LEAF(void, G1BarrierSetRuntime::write_ref_field_pre_entry(oopDesc* orig, JavaThread *thread))
+  if (orig == NULL) {
+    assert(false, "should be optimized out");
+    return;
+  }
+  assert(oopDesc::is_oop(orig, true /* ignore mark word */), "Error");
+  // store the original value that was in the field reference
+  G1ThreadLocalData::satb_mark_queue(thread).enqueue(orig);
+JRT_END
+
+// G1 post write barrier slowpath
+JRT_LEAF(void, G1BarrierSetRuntime::write_ref_field_post_entry(void* card_addr, JavaThread* thread))
+  G1ThreadLocalData::dirty_card_queue(thread).enqueue(card_addr);
+JRT_END
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/hotspot/share/gc/g1/g1BarrierSetRuntime.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -0,0 +1,48 @@
+/*
+ * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_GC_G1_G1BARRIERSETRUNTIME_HPP
+#define SHARE_GC_G1_G1BARRIERSETRUNTIME_HPP
+
+#include "memory/allocation.hpp"
+#include "oops/oopsHierarchy.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+
+class oopDesc;
+class JavaThread;
+
+class G1BarrierSetRuntime: public AllStatic {
+public:
+  // Arraycopy stub generator
+  static void write_ref_array_pre_oop_entry(oop* dst, size_t length);
+  static void write_ref_array_pre_narrow_oop_entry(narrowOop* dst, size_t length);
+  static void write_ref_array_post_entry(HeapWord* dst, size_t length);
+
+  // C2 slow-path runtime calls.
+  static void write_ref_field_pre_entry(oopDesc* orig, JavaThread *thread);
+  static void write_ref_field_post_entry(void* card_addr, JavaThread* thread);
+};
+
+#endif // SHARE_GC_G1_G1BARRIERSETRUNTIME_HPP
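Taken together, the three "arraycopy stub generator" entries are meant to bracket a raw element copy: the pre entries SATB-log the destination's old references, and the post entry dirties the cards covering the written range. A sketch of the expected calling pattern, assuming a disjoint oop-array copy and using a local memcpy in place of the real copy stub (illustrative only, not the generated stub code):

    #include <cstddef>
    #include <cstring>

    typedef class oopDescStub* oop;
    typedef char* HeapWordPtr;   // stand-in for HotSpot's HeapWord*

    // Stand-ins for the G1BarrierSetRuntime entry points declared above.
    static void write_ref_array_pre_oop_entry(oop* /*dst*/, size_t /*len*/) {}
    static void write_ref_array_post_entry(HeapWordPtr /*dst*/, size_t /*len*/) {}

    // Pre-barrier first (log old destination refs), then the copy itself,
    // then the post-barrier (dirty the card table for the written range).
    static void arraycopy_oops(oop* src, oop* dst, size_t length) {
      write_ref_array_pre_oop_entry(dst, length);
      std::memcpy(dst, src, length * sizeof(oop));   // disjoint ranges assumed
      write_ref_array_post_entry(reinterpret_cast<HeapWordPtr>(dst), length);
    }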
--- a/src/hotspot/share/gc/g1/g1CardTable.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CardTable.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -28,7 +28,7 @@
 #include "gc/shared/memset_with_concurrent_readers.hpp"
 #include "logging/log.hpp"
 #include "runtime/atomic.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 
 bool G1CardTable::mark_card_deferred(size_t card_index) {
   jbyte val = _byte_map[card_index];
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -86,7 +86,7 @@
 #include "runtime/flags/flagSetting.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/init.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/threadSMR.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/align.hpp"
@@ -3355,7 +3355,7 @@
       add_to_postponed_list(nm);
     }
 
-    // Mark that this thread has been cleaned/unloaded.
+    // Mark that this nmethod has been cleaned/unloaded.
     // After this call, it will be safe to ask if this nmethod was unloaded or not.
     nm->set_unloading_clock(CompiledMethod::global_unloading_clock());
   }
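The corrected comment matters: it is the nmethod, not the worker thread, that gets stamped. A hedged sketch of the unloading-clock idea, comparing a per-nmethod stamp against a global epoch so any thread can cheaply ask whether an nmethod was already cleaned this cycle (simplified types, not CompiledMethod's real representation):

    #include <atomic>

    static std::atomic<unsigned> global_unloading_clock{1};

    struct NMethodStub {
      std::atomic<unsigned> unloading_clock{0};

      // Stamp after cleaning; the release pairs with the acquire below so
      // the cleaning work is visible to whoever observes the stamp.
      void mark_cleaned() {
        unloading_clock.store(global_unloading_clock.load(std::memory_order_relaxed),
                              std::memory_order_release);
      }

      bool cleaned_this_cycle() const {
        return unloading_clock.load(std::memory_order_acquire) ==
               global_unloading_clock.load(std::memory_order_relaxed);
      }
    };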
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -32,7 +32,7 @@
 #include "gc/g1/heapRegionManager.inline.hpp"
 #include "gc/g1/heapRegionSet.inline.hpp"
 #include "gc/shared/taskqueue.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 
 G1EvacStats* G1CollectedHeap::alloc_buffer_stats(InCSetState dest) {
   switch (dest.value()) {
--- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -1625,7 +1625,7 @@
     // Reference lists are balanced (see balance_all_queues() and balance_queues()).
     rp->set_active_mt_degree(active_workers);
 
-    ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->num_queues());
+    ReferenceProcessorPhaseTimes pt(_gc_timer_cm, rp->max_num_queues());
 
     // Process the weak references.
     const ReferenceProcessorStats& stats =
--- a/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/g1FullGCReferenceProcessorExecutor.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -78,7 +78,7 @@
   G1FullGCMarker* marker = _collector->marker(0);
   G1IsAliveClosure is_alive(_collector->mark_bitmap());
   G1FullKeepAliveClosure keep_alive(marker);
-  ReferenceProcessorPhaseTimes pt(timer, _reference_processor->num_queues());
+  ReferenceProcessorPhaseTimes pt(timer, _reference_processor->max_num_queues());
   AbstractRefProcTaskExecutor* executor = _reference_processor->processing_is_mt() ? this : NULL;
 
   // Process discovered references, using this executor when multi-threaded
--- a/src/hotspot/share/gc/g1/heapRegion.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/gc/g1/heapRegion.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -43,7 +43,7 @@
 #include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "utilities/growableArray.hpp"
 
 int    HeapRegion::LogOfHRGrainBytes = 0;
--- a/src/hotspot/share/gc/parallel/gcTaskManager.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/gc/parallel/gcTaskManager.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@
 #include "memory/resourceArea.hpp"
 #include "runtime/mutex.hpp"
 #include "runtime/mutexLocker.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 #include "runtime/os.hpp"
 
 //
--- a/src/hotspot/share/gc/parallel/psMarkSweep.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psMarkSweep.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -536,7 +536,7 @@
     GCTraceTime(Debug, gc, phases) t("Reference Processing", _gc_timer);
 
     ref_processor()->setup_policy(clear_all_softrefs);
-    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_queues());
+    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->max_num_queues());
     const ReferenceProcessorStats& stats =
       ref_processor()->process_discovered_references(
         is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, &pt);
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -2111,7 +2111,7 @@
     GCTraceTime(Debug, gc, phases) tm("Reference Processing", &_gc_timer);
 
     ReferenceProcessorStats stats;
-    ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->num_queues());
+    ReferenceProcessorPhaseTimes pt(&_gc_timer, ref_processor()->max_num_queues());
     if (ref_processor()->processing_is_mt()) {
       RefProcTaskExecutor task_executor;
       stats = ref_processor()->process_discovered_references(
--- a/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psPromotionManager.inline.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -213,7 +213,8 @@
     Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);
 
     // Now we have to CAS in the header.
-    if (o->cas_forward_to(new_obj, test_mark)) {
+    // Make copy visible to threads reading the forwardee.
+    if (o->cas_forward_to(new_obj, test_mark, memory_order_release)) {
       // We won any races, we "own" this object.
       assert(new_obj == o->forwardee(), "Sanity");
 
@@ -256,11 +257,12 @@
       }
 
       // don't update this before the unallocation!
-      new_obj = o->forwardee();
+      // Acquire is used here; consume would be sufficient for accessing new_obj.
+      new_obj = o->forwardee_acquire();
     }
   } else {
     assert(o->is_forwarded(), "Sanity");
-    new_obj = o->forwardee();
+    new_obj = o->forwardee_acquire();
   }
 
   // This code must come after the CAS test, or it will print incorrect
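The releasing CAS and the new forwardee_acquire() form a publish/observe pair: the release orders the object copy before the forwarding-pointer install, and the acquire on the reader side makes the copied contents visible to any thread that sees the forwardee. A minimal std::atomic sketch of the same pattern, with a plain atomic pointer standing in for the mark-word encoding:

    #include <atomic>
    #include <cstring>

    struct ObjStub {
      int payload[4];
      std::atomic<ObjStub*> forwardee{nullptr};
    };

    // Copy first, then publish with release so the payload happens-before
    // any acquire load of the forwarding pointer.
    static ObjStub* forward(ObjStub* o, ObjStub* copy) {
      std::memcpy(copy->payload, o->payload, sizeof(o->payload));
      ObjStub* expected = nullptr;
      if (o->forwardee.compare_exchange_strong(expected, copy,
                                               std::memory_order_release,
                                               std::memory_order_relaxed)) {
        return copy;   // we won the race and own the new copy
      }
      // Lost the race: re-read with acquire (the forwardee_acquire() analogue)
      // so the winner's copied payload is guaranteed visible.
      return o->forwardee.load(std::memory_order_acquire);
    }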
--- a/src/hotspot/share/gc/parallel/psScavenge.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/gc/parallel/psScavenge.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -399,7 +399,7 @@
       PSKeepAliveClosure keep_alive(promotion_manager);
       PSEvacuateFollowersClosure evac_followers(promotion_manager);
       ReferenceProcessorStats stats;
-      ReferenceProcessorPhaseTimes pt(&_gc_timer, reference_processor()->num_queues());
+      ReferenceProcessorPhaseTimes pt(&_gc_timer, reference_processor()->max_num_queues());
       if (reference_processor()->processing_is_mt()) {
         PSRefProcTaskExecutor task_executor;
         stats = reference_processor()->process_discovered_references(
--- a/src/hotspot/share/gc/serial/defNewGeneration.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/gc/serial/defNewGeneration.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -629,7 +629,7 @@
   FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
   ReferenceProcessor* rp = ref_processor();
   rp->setup_policy(clear_all_soft_refs);
-  ReferenceProcessorPhaseTimes pt(_gc_timer, rp->num_queues());
+  ReferenceProcessorPhaseTimes pt(_gc_timer, rp->max_num_queues());
   const ReferenceProcessorStats& stats =
   rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
                                     NULL, &pt);
--- a/src/hotspot/share/gc/serial/genMarkSweep.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/gc/serial/genMarkSweep.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -208,7 +208,7 @@
     GCTraceTime(Debug, gc, phases) tm_m("Reference Processing", gc_timer());
 
     ref_processor()->setup_policy(clear_all_softrefs);
-    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->num_queues());
+    ReferenceProcessorPhaseTimes pt(_gc_timer, ref_processor()->max_num_queues());
     const ReferenceProcessorStats& stats =
       ref_processor()->process_discovered_references(
         &is_alive, &keep_alive, &follow_stack_closure, NULL, &pt);
--- a/src/hotspot/share/gc/shared/barrierSet.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/gc/shared/barrierSet.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -213,8 +213,12 @@
     }
 
     template <typename T>
-    static void arraycopy_in_heap(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
-      Raw::arraycopy(src_obj, dst_obj, src, dst, length);
+    static void arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
+                                  arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
+                                  size_t length) {
+      Raw::arraycopy(src_obj, src_offset_in_bytes, src_raw,
+                     dst_obj, dst_offset_in_bytes, dst_raw,
+                     length);
     }
 
     // Heap oop accesses. These accessors get resolved when
@@ -257,8 +261,12 @@
     }
 
     template <typename T>
-    static bool oop_arraycopy_in_heap(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
-      return Raw::oop_arraycopy(src_obj, dst_obj, src, dst, length);
+    static bool oop_arraycopy_in_heap(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
+                                      arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
+                                      size_t length) {
+      return Raw::oop_arraycopy(src_obj, src_offset_in_bytes, src_raw,
+                                dst_obj, dst_offset_in_bytes, dst_raw,
+                                length);
     }
 
     // Off-heap oop accesses. These accessors get resolved when
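The widened arraycopy_in_heap signatures carry each operand in two forms, an (array object, byte offset) pair and a raw pointer, so a barrier set can work from whichever form the caller had; the assumption here is that exactly one form is populated per operand. A hedged sketch of resolving the effective element address under that convention (illustrative helper, not the real Access internals):

    #include <cstddef>
    #include <cstdint>

    typedef class arrayOopDescStub* arrayOop;   // stand-in for HotSpot's arrayOop

    // Convention assumed here: when raw is non-null it already names the
    // element address; otherwise the address is object base plus byte offset.
    template <typename T>
    static T* resolve_element_addr(arrayOop obj, size_t offset_in_bytes, T* raw) {
      if (raw != nullptr) {
        return raw;
      }
      return reinterpret_cast<T*>(
          reinterpret_cast<uintptr_t>(obj) + offset_in_bytes);
    }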
--- a/src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/gc/shared/cardTableBarrierSet.inline.hpp	Wed Jun 06 09:41:16 2018 -0700
@@ -27,7 +27,7 @@
 
 #include "gc/shared/cardTableBarrierSet.hpp"
 #include "gc/shared/cardTable.hpp"
-#include "runtime/orderAccess.inline.hpp"
+#include "runtime/orderAccess.hpp"
 
 template <DecoratorSet decorators, typename T>
 inline void CardTableBarrierSet::write_ref_field_post(T* field, oop newVal) {
--- a/src/hotspot/share/gc/shared/collectedHeap.cpp	Mon Jun 04 16:11:21 2018 +0200
+++ b/src/hotspot/share/gc/shared/collectedHeap.cpp	Wed Jun 06 09:41:16 2018 -0700
@@ -365,20 +365,32 @@
 }
 #endif
 
-HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, Thread* thread, size_t size) {
+HeapWord* CollectedHeap::obj_allocate_raw(Klass* klass, size_t size,
+                                          bool* gc_overhead_limit_was_exceeded, TRAPS) {
+  if (UseTLAB) {
+    HeapWord* result = allocate_from_tlab(klass, size, THREAD);
+    if (result != NULL) {
+      return result;
+    }
+  }
+  return Universe::heap()->mem_allocate(size, gc_overhead_limit_was_exceeded);
+}
+
+HeapWord* CollectedHeap::allocate_from_tlab_slow(Klass* klass, size_t size, TRAPS) {
+  ThreadLocalAllocBuffer& tlab = THREAD->tlab();
 
   // Retain tlab and allocate object in shared space if
   // the amount free in the tlab is too large to discard.
-  if (thread->tlab().free() > thread->tlab().refill_waste_limit()) {
-    thread->tlab().record_slow_allocation(size);
+  if (tlab.free() > tlab.refill_waste_limit()) {