changeset 60243:d352e5db468c

8248414: AArch64: Remove uses of long and unsigned long ints Reviewed-by: adinn, dholmes
author aph
date Thu, 09 Jul 2020 11:01:29 -0400
parents 819bfe8b8676
children aeeb545bbcdc
files src/hotspot/cpu/aarch64/aarch64-asmtest.py src/hotspot/cpu/aarch64/aarch64.ad src/hotspot/cpu/aarch64/aarch64_ad.m4 src/hotspot/cpu/aarch64/assembler_aarch64.cpp src/hotspot/cpu/aarch64/assembler_aarch64.hpp src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp src/hotspot/cpu/aarch64/frame_aarch64.cpp src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp src/hotspot/cpu/aarch64/immediate_aarch64.cpp src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp src/hotspot/cpu/aarch64/jniFastGetField_aarch64.cpp src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp src/hotspot/cpu/aarch64/macroAssembler_aarch64_log.cpp src/hotspot/cpu/aarch64/macroAssembler_aarch64_trig.cpp src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp src/hotspot/cpu/aarch64/register_aarch64.hpp src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp src/hotspot/cpu/aarch64/vm_version_aarch64.cpp
diffstat 23 files changed, 552 insertions(+), 244 deletions(-) [+]
line wrap: on
line diff
--- a/src/hotspot/cpu/aarch64/aarch64-asmtest.py	Sun Jul 19 21:34:28 2020 -0700
+++ b/src/hotspot/cpu/aarch64/aarch64-asmtest.py	Thu Jul 09 11:01:29 2020 -0400
@@ -1,8 +1,8 @@
 import random
 
-AARCH64_AS = "<PATH-TO-AS>"
-AARCH64_OBJDUMP = "<PATH-TO-OBJDUMP>"
-AARCH64_OBJCOPY = "<PATH-TO-OBJCOPY>"
+AARCH64_AS = "as"
+AARCH64_OBJDUMP = "objdump"
+AARCH64_OBJCOPY = "objcopy"
 
 class Operand(object):
 
@@ -348,7 +348,7 @@
                   + ', #0x%x' % self.immed)
 
      def cstr(self):
-          return super(AddSubImmOp, self).cstr() + "l);"
+          return super(AddSubImmOp, self).cstr() + "ll);"
     
 class MultiOp():
 
--- a/src/hotspot/cpu/aarch64/aarch64.ad	Sun Jul 19 21:34:28 2020 -0700
+++ b/src/hotspot/cpu/aarch64/aarch64.ad	Thu Jul 09 11:01:29 2020 -0400
@@ -1626,7 +1626,7 @@
   C2_MacroAssembler _masm(&cbuf);
 
   // n.b. frame size includes space for return pc and rfp
-  const long framesize = C->output()->frame_size_in_bytes();
+  const int framesize = C->output()->frame_size_in_bytes();
   assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment");
 
   // insert a nop at the start of the prolog so we can patch in a
@@ -3159,7 +3159,7 @@
         if (con < (address)(uintptr_t)os::vm_page_size()) {
           __ mov(dst_reg, con);
         } else {
-          unsigned long offset;
+          uintptr_t offset;
           __ adrp(dst_reg, con, offset);
           __ add(dst_reg, dst_reg, offset);
         }
@@ -4231,7 +4231,7 @@
 // 32 bit integer valid for add sub immediate
 operand immIAddSub()
 %{
-  predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int()));
+  predicate(Assembler::operand_valid_for_add_sub_immediate((int64_t)n->get_int()));
   match(ConI);
   op_cost(0);
   format %{ %}
@@ -4242,7 +4242,7 @@
 // TODO -- check this is right when e.g the mask is 0x80000000
 operand immILog()
 %{
-  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int()));
+  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (uint64_t)n->get_int()));
   match(ConI);
 
   op_cost(0);
@@ -4320,7 +4320,7 @@
 // 64 bit integer valid for logical immediate
 operand immLLog()
 %{
-  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long()));
+  predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (uint64_t)n->get_long()));
   match(ConL);
   op_cost(0);
   format %{ %}
@@ -10802,6 +10802,9 @@
 
 // BEGIN This section of the file is automatically generated. Do not edit --------------
 
+
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct regL_not_reg(iRegLNoSp dst,
                          iRegL src1, immL_M1 m1,
                          rFlagsReg cr) %{
@@ -10818,6 +10821,9 @@
 
   ins_pipe(ialu_reg);
 %}
+
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct regI_not_reg(iRegINoSp dst,
                          iRegIorL2I src1, immI_M1 m1,
                          rFlagsReg cr) %{
@@ -10835,6 +10841,8 @@
   ins_pipe(ialu_reg);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AndI_reg_not_reg(iRegINoSp dst,
                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                          rFlagsReg cr) %{
@@ -10852,6 +10860,8 @@
   ins_pipe(ialu_reg_reg);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AndL_reg_not_reg(iRegLNoSp dst,
                          iRegL src1, iRegL src2, immL_M1 m1,
                          rFlagsReg cr) %{
@@ -10869,6 +10879,8 @@
   ins_pipe(ialu_reg_reg);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct OrI_reg_not_reg(iRegINoSp dst,
                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                          rFlagsReg cr) %{
@@ -10886,6 +10898,8 @@
   ins_pipe(ialu_reg_reg);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct OrL_reg_not_reg(iRegLNoSp dst,
                          iRegL src1, iRegL src2, immL_M1 m1,
                          rFlagsReg cr) %{
@@ -10903,6 +10917,8 @@
   ins_pipe(ialu_reg_reg);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct XorI_reg_not_reg(iRegINoSp dst,
                          iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1,
                          rFlagsReg cr) %{
@@ -10920,6 +10936,8 @@
   ins_pipe(ialu_reg_reg);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct XorL_reg_not_reg(iRegLNoSp dst,
                          iRegL src1, iRegL src2, immL_M1 m1,
                          rFlagsReg cr) %{
@@ -10937,6 +10955,8 @@
   ins_pipe(ialu_reg_reg);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AndI_reg_URShift_not_reg(iRegINoSp dst,
                          iRegIorL2I src1, iRegIorL2I src2,
                          immI src3, immI_M1 src4, rFlagsReg cr) %{
@@ -10955,6 +10975,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AndL_reg_URShift_not_reg(iRegLNoSp dst,
                          iRegL src1, iRegL src2,
                          immI src3, immL_M1 src4, rFlagsReg cr) %{
@@ -10973,6 +10995,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AndI_reg_RShift_not_reg(iRegINoSp dst,
                          iRegIorL2I src1, iRegIorL2I src2,
                          immI src3, immI_M1 src4, rFlagsReg cr) %{
@@ -10991,6 +11015,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AndL_reg_RShift_not_reg(iRegLNoSp dst,
                          iRegL src1, iRegL src2,
                          immI src3, immL_M1 src4, rFlagsReg cr) %{
@@ -11009,6 +11035,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AndI_reg_LShift_not_reg(iRegINoSp dst,
                          iRegIorL2I src1, iRegIorL2I src2,
                          immI src3, immI_M1 src4, rFlagsReg cr) %{
@@ -11027,6 +11055,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AndL_reg_LShift_not_reg(iRegLNoSp dst,
                          iRegL src1, iRegL src2,
                          immI src3, immL_M1 src4, rFlagsReg cr) %{
@@ -11045,6 +11075,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct XorI_reg_URShift_not_reg(iRegINoSp dst,
                          iRegIorL2I src1, iRegIorL2I src2,
                          immI src3, immI_M1 src4, rFlagsReg cr) %{
@@ -11063,6 +11095,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct XorL_reg_URShift_not_reg(iRegLNoSp dst,
                          iRegL src1, iRegL src2,
                          immI src3, immL_M1 src4, rFlagsReg cr) %{
@@ -11081,6 +11115,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct XorI_reg_RShift_not_reg(iRegINoSp dst,
                          iRegIorL2I src1, iRegIorL2I src2,
                          immI src3, immI_M1 src4, rFlagsReg cr) %{
@@ -11099,6 +11135,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct XorL_reg_RShift_not_reg(iRegLNoSp dst,
                          iRegL src1, iRegL src2,
                          immI src3, immL_M1 src4, rFlagsReg cr) %{
@@ -11117,6 +11155,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct XorI_reg_LShift_not_reg(iRegINoSp dst,
                          iRegIorL2I src1, iRegIorL2I src2,
                          immI src3, immI_M1 src4, rFlagsReg cr) %{
@@ -11135,6 +11175,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct XorL_reg_LShift_not_reg(iRegLNoSp dst,
                          iRegL src1, iRegL src2,
                          immI src3, immL_M1 src4, rFlagsReg cr) %{
@@ -11153,6 +11195,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct OrI_reg_URShift_not_reg(iRegINoSp dst,
                          iRegIorL2I src1, iRegIorL2I src2,
                          immI src3, immI_M1 src4, rFlagsReg cr) %{
@@ -11171,6 +11215,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct OrL_reg_URShift_not_reg(iRegLNoSp dst,
                          iRegL src1, iRegL src2,
                          immI src3, immL_M1 src4, rFlagsReg cr) %{
@@ -11189,6 +11235,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct OrI_reg_RShift_not_reg(iRegINoSp dst,
                          iRegIorL2I src1, iRegIorL2I src2,
                          immI src3, immI_M1 src4, rFlagsReg cr) %{
@@ -11207,6 +11255,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct OrL_reg_RShift_not_reg(iRegLNoSp dst,
                          iRegL src1, iRegL src2,
                          immI src3, immL_M1 src4, rFlagsReg cr) %{
@@ -11225,6 +11275,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct OrI_reg_LShift_not_reg(iRegINoSp dst,
                          iRegIorL2I src1, iRegIorL2I src2,
                          immI src3, immI_M1 src4, rFlagsReg cr) %{
@@ -11243,6 +11295,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct OrL_reg_LShift_not_reg(iRegLNoSp dst,
                          iRegL src1, iRegL src2,
                          immI src3, immL_M1 src4, rFlagsReg cr) %{
@@ -11261,6 +11315,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AndI_reg_URShift_reg(iRegINoSp dst,
                          iRegIorL2I src1, iRegIorL2I src2,
                          immI src3, rFlagsReg cr) %{
@@ -11280,6 +11336,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AndL_reg_URShift_reg(iRegLNoSp dst,
                          iRegL src1, iRegL src2,
                          immI src3, rFlagsReg cr) %{
@@ -11299,6 +11357,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AndI_reg_RShift_reg(iRegINoSp dst,
                          iRegIorL2I src1, iRegIorL2I src2,
                          immI src3, rFlagsReg cr) %{
@@ -11318,6 +11378,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AndL_reg_RShift_reg(iRegLNoSp dst,
                          iRegL src1, iRegL src2,
                          immI src3, rFlagsReg cr) %{
@@ -11337,6 +11399,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AndI_reg_LShift_reg(iRegINoSp dst,
                          iRegIorL2I src1, iRegIorL2I src2,
                          immI src3, rFlagsReg cr) %{
@@ -11356,6 +11420,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AndL_reg_LShift_reg(iRegLNoSp dst,
                          iRegL src1, iRegL src2,
                          immI src3, rFlagsReg cr) %{
@@ -11375,6 +11441,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct XorI_reg_URShift_reg(iRegINoSp dst,
                          iRegIorL2I src1, iRegIorL2I src2,
                          immI src3, rFlagsReg cr) %{
@@ -11394,6 +11462,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct XorL_reg_URShift_reg(iRegLNoSp dst,
                          iRegL src1, iRegL src2,
                          immI src3, rFlagsReg cr) %{
@@ -11413,6 +11483,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct XorI_reg_RShift_reg(iRegINoSp dst,
                          iRegIorL2I src1, iRegIorL2I src2,
                          immI src3, rFlagsReg cr) %{
@@ -11432,6 +11504,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct XorL_reg_RShift_reg(iRegLNoSp dst,
                          iRegL src1, iRegL src2,
                          immI src3, rFlagsReg cr) %{
@@ -11451,6 +11525,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct XorI_reg_LShift_reg(iRegINoSp dst,
                          iRegIorL2I src1, iRegIorL2I src2,
                          immI src3, rFlagsReg cr) %{
@@ -11470,6 +11546,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct XorL_reg_LShift_reg(iRegLNoSp dst,
                          iRegL src1, iRegL src2,
                          immI src3, rFlagsReg cr) %{
@@ -11489,6 +11567,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct OrI_reg_URShift_reg(iRegINoSp dst,
                          iRegIorL2I src1, iRegIorL2I src2,
                          immI src3, rFlagsReg cr) %{
@@ -11508,6 +11588,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct OrL_reg_URShift_reg(iRegLNoSp dst,
                          iRegL src1, iRegL src2,
                          immI src3, rFlagsReg cr) %{
@@ -11527,6 +11609,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct OrI_reg_RShift_reg(iRegINoSp dst,
                          iRegIorL2I src1, iRegIorL2I src2,
                          immI src3, rFlagsReg cr) %{
@@ -11546,6 +11630,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct OrL_reg_RShift_reg(iRegLNoSp dst,
                          iRegL src1, iRegL src2,
                          immI src3, rFlagsReg cr) %{
@@ -11565,6 +11651,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct OrI_reg_LShift_reg(iRegINoSp dst,
                          iRegIorL2I src1, iRegIorL2I src2,
                          immI src3, rFlagsReg cr) %{
@@ -11584,6 +11672,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct OrL_reg_LShift_reg(iRegLNoSp dst,
                          iRegL src1, iRegL src2,
                          immI src3, rFlagsReg cr) %{
@@ -11603,6 +11693,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AddI_reg_URShift_reg(iRegINoSp dst,
                          iRegIorL2I src1, iRegIorL2I src2,
                          immI src3, rFlagsReg cr) %{
@@ -11622,6 +11714,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AddL_reg_URShift_reg(iRegLNoSp dst,
                          iRegL src1, iRegL src2,
                          immI src3, rFlagsReg cr) %{
@@ -11641,6 +11735,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AddI_reg_RShift_reg(iRegINoSp dst,
                          iRegIorL2I src1, iRegIorL2I src2,
                          immI src3, rFlagsReg cr) %{
@@ -11660,6 +11756,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AddL_reg_RShift_reg(iRegLNoSp dst,
                          iRegL src1, iRegL src2,
                          immI src3, rFlagsReg cr) %{
@@ -11679,6 +11777,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AddI_reg_LShift_reg(iRegINoSp dst,
                          iRegIorL2I src1, iRegIorL2I src2,
                          immI src3, rFlagsReg cr) %{
@@ -11698,6 +11798,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AddL_reg_LShift_reg(iRegLNoSp dst,
                          iRegL src1, iRegL src2,
                          immI src3, rFlagsReg cr) %{
@@ -11717,6 +11819,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct SubI_reg_URShift_reg(iRegINoSp dst,
                          iRegIorL2I src1, iRegIorL2I src2,
                          immI src3, rFlagsReg cr) %{
@@ -11736,6 +11840,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct SubL_reg_URShift_reg(iRegLNoSp dst,
                          iRegL src1, iRegL src2,
                          immI src3, rFlagsReg cr) %{
@@ -11755,6 +11861,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct SubI_reg_RShift_reg(iRegINoSp dst,
                          iRegIorL2I src1, iRegIorL2I src2,
                          immI src3, rFlagsReg cr) %{
@@ -11774,6 +11882,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct SubL_reg_RShift_reg(iRegLNoSp dst,
                          iRegL src1, iRegL src2,
                          immI src3, rFlagsReg cr) %{
@@ -11793,6 +11903,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct SubI_reg_LShift_reg(iRegINoSp dst,
                          iRegIorL2I src1, iRegIorL2I src2,
                          immI src3, rFlagsReg cr) %{
@@ -11812,6 +11924,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct SubL_reg_LShift_reg(iRegLNoSp dst,
                          iRegL src1, iRegL src2,
                          immI src3, rFlagsReg cr) %{
@@ -11831,7 +11945,9 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
-
+
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 
 // Shift Left followed by Shift Right.
 // This idiom is used by the compiler for the i2b bytecode etc.
@@ -11853,6 +11969,9 @@
   ins_pipe(ialu_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+
 // Shift Left followed by Shift Right.
 // This idiom is used by the compiler for the i2b bytecode etc.
 instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
@@ -11873,6 +11992,9 @@
   ins_pipe(ialu_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+
 // Shift Left followed by Shift Right.
 // This idiom is used by the compiler for the i2b bytecode etc.
 instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count)
@@ -11893,6 +12015,9 @@
   ins_pipe(ialu_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+
 // Shift Left followed by Shift Right.
 // This idiom is used by the compiler for the i2b bytecode etc.
 instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count)
@@ -11912,8 +12037,11 @@
 
   ins_pipe(ialu_reg_shift);
 %}
+
 // Bitfield extract with shift & mask
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
 %{
   match(Set dst (AndI (URShiftI src rshift) mask));
@@ -11924,13 +12052,16 @@
   format %{ "ubfxw $dst, $src, $rshift, $mask" %}
   ins_encode %{
     int rshift = $rshift$$constant & 31;
-    long mask = $mask$$constant;
+    intptr_t mask = $mask$$constant;
     int width = exact_log2(mask+1);
     __ ubfxw(as_Register($dst$$reg),
             as_Register($src$$reg), rshift, width);
   %}
   ins_pipe(ialu_reg_shift);
 %}
+
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask)
 %{
   match(Set dst (AndL (URShiftL src rshift) mask));
@@ -11941,7 +12072,7 @@
   format %{ "ubfx $dst, $src, $rshift, $mask" %}
   ins_encode %{
     int rshift = $rshift$$constant & 63;
-    long mask = $mask$$constant;
+    intptr_t mask = $mask$$constant;
     int width = exact_log2_long(mask+1);
     __ ubfx(as_Register($dst$$reg),
             as_Register($src$$reg), rshift, width);
@@ -11949,6 +12080,10 @@
   ins_pipe(ialu_reg_shift);
 %}
 
+
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+
 // We can use ubfx when extending an And with a mask when we know mask
 // is positive.  We know that because immI_bitmask guarantees it.
 instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
@@ -11961,7 +12096,7 @@
   format %{ "ubfx $dst, $src, $rshift, $mask" %}
   ins_encode %{
     int rshift = $rshift$$constant & 31;
-    long mask = $mask$$constant;
+    intptr_t mask = $mask$$constant;
     int width = exact_log2(mask+1);
     __ ubfx(as_Register($dst$$reg),
             as_Register($src$$reg), rshift, width);
@@ -11969,6 +12104,10 @@
   ins_pipe(ialu_reg_shift);
 %}
 
+
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+
 // We can use ubfiz when masking by a positive number and then left shifting the result.
 // We know that the mask is positive because immI_bitmask guarantees it.
 instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
@@ -11980,13 +12119,17 @@
   format %{ "ubfizw $dst, $src, $lshift, $mask" %}
   ins_encode %{
     int lshift = $lshift$$constant & 31;
-    long mask = $mask$$constant;
+    intptr_t mask = $mask$$constant;
     int width = exact_log2(mask+1);
     __ ubfizw(as_Register($dst$$reg),
           as_Register($src$$reg), lshift, width);
   %}
   ins_pipe(ialu_reg_shift);
 %}
+
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+
 // We can use ubfiz when masking by a positive number and then left shifting the result.
 // We know that the mask is positive because immL_bitmask guarantees it.
 instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask)
@@ -11998,7 +12141,7 @@
   format %{ "ubfiz $dst, $src, $lshift, $mask" %}
   ins_encode %{
     int lshift = $lshift$$constant & 63;
-    long mask = $mask$$constant;
+    intptr_t mask = $mask$$constant;
     int width = exact_log2_long(mask+1);
     __ ubfiz(as_Register($dst$$reg),
           as_Register($src$$reg), lshift, width);
@@ -12006,6 +12149,10 @@
   ins_pipe(ialu_reg_shift);
 %}
 
+
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+
 // If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
 instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
 %{
@@ -12016,7 +12163,7 @@
   format %{ "ubfiz $dst, $src, $lshift, $mask" %}
   ins_encode %{
     int lshift = $lshift$$constant & 63;
-    long mask = $mask$$constant;
+    intptr_t mask = $mask$$constant;
     int width = exact_log2(mask+1);
     __ ubfiz(as_Register($dst$$reg),
              as_Register($src$$reg), lshift, width);
@@ -12024,8 +12171,10 @@
   ins_pipe(ialu_reg_shift);
 %}
 
-// Rotations
-
+
+// Rotations
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
 %{
   match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift)));
@@ -12041,6 +12190,9 @@
   ins_pipe(ialu_reg_reg_extr);
 %}
 
+
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
 %{
   match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift)));
@@ -12056,6 +12208,9 @@
   ins_pipe(ialu_reg_reg_extr);
 %}
 
+
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr)
 %{
   match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift)));
@@ -12071,6 +12226,9 @@
   ins_pipe(ialu_reg_reg_extr);
 %}
 
+
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr)
 %{
   match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift)));
@@ -12087,8 +12245,10 @@
 %}
 
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+
 // rol expander
-
 instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
 %{
   effect(DEF dst, USE src, USE shift);
@@ -12103,8 +12263,10 @@
   ins_pipe(ialu_reg_reg_vshift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+
 // rol expander
-
 instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
 %{
   effect(DEF dst, USE src, USE shift);
@@ -12119,6 +12281,8 @@
   ins_pipe(ialu_reg_reg_vshift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
 %{
   match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift))));
@@ -12128,6 +12292,8 @@
   %}
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
 %{
   match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift))));
@@ -12137,6 +12303,8 @@
   %}
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
 %{
   match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift))));
@@ -12146,6 +12314,8 @@
   %}
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
 %{
   match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift))));
@@ -12155,8 +12325,10 @@
   %}
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+
 // ror expander
-
 instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr)
 %{
   effect(DEF dst, USE src, USE shift);
@@ -12170,8 +12342,10 @@
   ins_pipe(ialu_reg_reg_vshift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+
 // ror expander
-
 instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr)
 %{
   effect(DEF dst, USE src, USE shift);
@@ -12185,6 +12359,8 @@
   ins_pipe(ialu_reg_reg_vshift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr)
 %{
   match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift))));
@@ -12194,6 +12370,8 @@
   %}
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr)
 %{
   match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift))));
@@ -12203,6 +12381,8 @@
   %}
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr)
 %{
   match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift))));
@@ -12212,6 +12392,8 @@
   %}
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr)
 %{
   match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift))));
@@ -12221,8 +12403,11 @@
   %}
 %}
 
+
 // Add/subtract (extended)
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
 %{
   match(Set dst (AddL src1 (ConvI2L src2)));
@@ -12234,8 +12419,10 @@
             as_Register($src2$$reg), ext::sxtw);
    %}
   ins_pipe(ialu_reg_reg);
-%};
-
+%}
+
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr)
 %{
   match(Set dst (SubL src1 (ConvI2L src2)));
@@ -12247,9 +12434,10 @@
             as_Register($src2$$reg), ext::sxtw);
    %}
   ins_pipe(ialu_reg_reg);
-%};
-
-
+%}
+
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr)
 %{
   match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
@@ -12263,6 +12451,8 @@
   ins_pipe(ialu_reg_reg);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
 %{
   match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift)));
@@ -12276,6 +12466,8 @@
   ins_pipe(ialu_reg_reg);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr)
 %{
   match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift)));
@@ -12289,6 +12481,8 @@
   ins_pipe(ialu_reg_reg);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr)
 %{
   match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
@@ -12302,6 +12496,8 @@
   ins_pipe(ialu_reg_reg);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr)
 %{
   match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
@@ -12315,6 +12511,8 @@
   ins_pipe(ialu_reg_reg);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
 %{
   match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift)));
@@ -12328,6 +12526,8 @@
   ins_pipe(ialu_reg_reg);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr)
 %{
   match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift)));
@@ -12341,7 +12541,8 @@
   ins_pipe(ialu_reg_reg);
 %}
 
-
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
 %{
   match(Set dst (AddI src1 (AndI src2 mask)));
@@ -12355,6 +12556,8 @@
   ins_pipe(ialu_reg_reg);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
 %{
   match(Set dst (AddI src1 (AndI src2 mask)));
@@ -12368,6 +12571,8 @@
   ins_pipe(ialu_reg_reg);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
 %{
   match(Set dst (AddL src1 (AndL src2 mask)));
@@ -12381,6 +12586,8 @@
   ins_pipe(ialu_reg_reg);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
 %{
   match(Set dst (AddL src1 (AndL src2 mask)));
@@ -12394,6 +12601,8 @@
   ins_pipe(ialu_reg_reg);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
 %{
   match(Set dst (AddL src1 (AndL src2 mask)));
@@ -12407,6 +12616,8 @@
   ins_pipe(ialu_reg_reg);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr)
 %{
   match(Set dst (SubI src1 (AndI src2 mask)));
@@ -12420,6 +12631,8 @@
   ins_pipe(ialu_reg_reg);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr)
 %{
   match(Set dst (SubI src1 (AndI src2 mask)));
@@ -12433,6 +12646,8 @@
   ins_pipe(ialu_reg_reg);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr)
 %{
   match(Set dst (SubL src1 (AndL src2 mask)));
@@ -12446,6 +12661,8 @@
   ins_pipe(ialu_reg_reg);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr)
 %{
   match(Set dst (SubL src1 (AndL src2 mask)));
@@ -12459,6 +12676,8 @@
   ins_pipe(ialu_reg_reg);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr)
 %{
   match(Set dst (SubL src1 (AndL src2 mask)));
@@ -12473,6 +12692,8 @@
 %}
 
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
 %{
   match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
@@ -12486,6 +12707,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
 %{
   match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
@@ -12499,6 +12722,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
 %{
   match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
@@ -12512,6 +12737,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr)
 %{
   match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
@@ -12525,6 +12752,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr)
 %{
   match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
@@ -12538,6 +12767,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr)
 %{
   match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2)));
@@ -12551,6 +12782,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
 %{
   match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
@@ -12564,6 +12797,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
 %{
   match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
@@ -12577,6 +12812,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr)
 %{
   match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
@@ -12590,6 +12827,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr)
 %{
   match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2)));
@@ -12603,7 +12842,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
-
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
 %{
   match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift)));
@@ -12615,8 +12855,10 @@
             as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
    %}
   ins_pipe(ialu_reg_reg_shift);
-%};
-
+%}
+
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
 %{
   match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift)));
@@ -12628,9 +12870,10 @@
             as_Register($src2$$reg), ext::sxtw, ($lshift$$constant));
    %}
   ins_pipe(ialu_reg_reg_shift);
-%};
-
-
+%}
+
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
 %{
   match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
@@ -12644,6 +12887,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
 %{
   match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
@@ -12657,6 +12902,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
 %{
   match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift)));
@@ -12670,6 +12917,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr)
 %{
   match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
@@ -12683,6 +12932,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr)
 %{
   match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
@@ -12696,6 +12947,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr)
 %{
   match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift)));
@@ -12709,6 +12962,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
 %{
   match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
@@ -12722,6 +12977,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
 %{
   match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift)));
@@ -12735,6 +12992,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr)
 %{
   match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
@@ -12748,6 +13007,8 @@
   ins_pipe(ialu_reg_reg_shift);
 %}
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr)
 %{
   match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift)));
@@ -12760,8 +13021,12 @@
    %}
   ins_pipe(ialu_reg_reg_shift);
 %}
+
+
+
 // END This section of the file is automatically generated. Do not edit --------------
 
+
 // ============================================================================
 // Floating Point Arithmetic Instructions
 
@@ -13253,7 +13518,7 @@
   ins_encode %{
     __ andw(as_Register($dst$$reg),
             as_Register($src1$$reg),
-            (unsigned long)($src2$$constant));
+            (uint64_t)($src2$$constant));
   %}
 
   ins_pipe(ialu_reg_imm);
@@ -13285,7 +13550,7 @@
   ins_encode %{
     __ orrw(as_Register($dst$$reg),
             as_Register($src1$$reg),
-            (unsigned long)($src2$$constant));
+            (uint64_t)($src2$$constant));
   %}
 
   ins_pipe(ialu_reg_imm);
@@ -13317,7 +13582,7 @@
   ins_encode %{
     __ eorw(as_Register($dst$$reg),
             as_Register($src1$$reg),
-            (unsigned long)($src2$$constant));
+            (uint64_t)($src2$$constant));
   %}
 
   ins_pipe(ialu_reg_imm);
@@ -13350,7 +13615,7 @@
   ins_encode %{
     __ andr(as_Register($dst$$reg),
             as_Register($src1$$reg),
-            (unsigned long)($src2$$constant));
+            (uint64_t)($src2$$constant));
   %}
 
   ins_pipe(ialu_reg_imm);
@@ -13382,7 +13647,7 @@
   ins_encode %{
     __ orr(as_Register($dst$$reg),
            as_Register($src1$$reg),
-           (unsigned long)($src2$$constant));
+           (uint64_t)($src2$$constant));
   %}
 
   ins_pipe(ialu_reg_imm);
@@ -13414,7 +13679,7 @@
   ins_encode %{
     __ eor(as_Register($dst$$reg),
            as_Register($src1$$reg),
-           (unsigned long)($src2$$constant));
+           (uint64_t)($src2$$constant));
   %}
 
   ins_pipe(ialu_reg_imm);
--- a/src/hotspot/cpu/aarch64/aarch64_ad.m4	Sun Jul 19 21:34:28 2020 -0700
+++ b/src/hotspot/cpu/aarch64/aarch64_ad.m4	Thu Jul 09 11:01:29 2020 -0400
@@ -1,4 +1,4 @@
-dnl Copyright (c) 2014, Red Hat Inc. All rights reserved.
+dnl Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
 dnl DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 dnl
 dnl This code is free software; you can redistribute it and/or modify it
@@ -23,12 +23,12 @@
 dnl Process this file with m4 aarch64_ad.m4 to generate the arithmetic
 dnl and shift patterns patterns used in aarch64.ad.
 dnl
-// BEGIN This section of the file is automatically generated. Do not edit --------------
 dnl
 define(`ORL2I', `ifelse($1,I,orL2I)')
 dnl
 define(`BASE_SHIFT_INSN',
-`
+`// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct $2$1_reg_$4_reg(iReg$1NoSp dst,
                          iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2,
                          immI src3, rFlagsReg cr) %{
@@ -46,9 +46,11 @@
   %}
 
   ins_pipe(ialu_reg_reg_shift);
-%}')dnl
+%}
+')dnl
 define(`BASE_INVERTED_INSN',
-`
+`// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct $2$1_reg_not_reg(iReg$1NoSp dst,
                          iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_M1 m1,
                          rFlagsReg cr) %{
@@ -68,9 +70,11 @@
   %}
 
   ins_pipe(ialu_reg_reg);
-%}')dnl
+%}
+')dnl
 define(`INVERTED_SHIFT_INSN',
-`
+`// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct $2$1_reg_$4_not_reg(iReg$1NoSp dst,
                          iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2,
                          immI src3, imm$1_M1 src4, rFlagsReg cr) %{
@@ -91,9 +95,12 @@
   %}
 
   ins_pipe(ialu_reg_reg_shift);
-%}')dnl
+%}
+')dnl
 define(`NOT_INSN',
-`instruct reg$1_not_reg(iReg$1NoSp dst,
+`// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+instruct reg$1_not_reg(iReg$1NoSp dst,
                          iReg$1`'ORL2I($1) src1, imm$1_M1 m1,
                          rFlagsReg cr) %{
   match(Set dst (Xor$1 src1 m1));
@@ -108,7 +115,8 @@
   %}
 
   ins_pipe(ialu_reg);
-%}')dnl
+%}
+')dnl
 dnl
 define(`BOTH_SHIFT_INSNS',
 `BASE_SHIFT_INSN(I, $1, ifelse($2,andr,andw,$2w), $3, $4)
@@ -120,7 +128,7 @@
 dnl
 define(`BOTH_INVERTED_SHIFT_INSNS',
 `INVERTED_SHIFT_INSN(I, $1, $2w, $3, $4, ~0, int)
-INVERTED_SHIFT_INSN(L, $1, $2, $3, $4, ~0l, long)')dnl
+INVERTED_SHIFT_INSN(L, $1, $2, $3, $4, ~0l, jlong)')dnl
 dnl
 define(`ALL_SHIFT_KINDS',
 `BOTH_SHIFT_INSNS($1, $2, URShift, LSR)
@@ -147,8 +155,10 @@
 ALL_SHIFT_KINDS(Sub, sub)
 dnl
 dnl EXTEND mode, rshift_op, src, lshift_count, rshift_count
-define(`EXTEND', `($2$1 (LShift$1 $3 $4) $5)')
-define(`BFM_INSN',`
+define(`EXTEND', `($2$1 (LShift$1 $3 $4) $5)') dnl
+define(`BFM_INSN',`// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+
 // Shift Left followed by Shift Right.
 // This idiom is used by the compiler for the i2b bytecode etc.
 instruct $4$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI lshift_count, immI rshift_count)
@@ -167,7 +177,8 @@
   %}
 
   ins_pipe(ialu_reg_shift);
-%}')
+%}
+')
 BFM_INSN(L, 63, RShift, sbfm)
 BFM_INSN(I, 31, RShift, sbfmw)
 BFM_INSN(L, 63, URShift, ubfm)
@@ -175,7 +186,9 @@
 dnl
 // Bitfield extract with shift & mask
 define(`BFX_INSN',
-`instruct $3$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI rshift, imm$1_bitmask mask)
+`// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+instruct $3$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI rshift, imm$1_bitmask mask)
 %{
   match(Set dst (And$1 ($2$1 src rshift) mask));
   // Make sure we are not going to exceed what $3 can do.
@@ -185,16 +198,20 @@
   format %{ "$3 $dst, $src, $rshift, $mask" %}
   ins_encode %{
     int rshift = $rshift$$constant & $4;
-    long mask = $mask$$constant;
+    intptr_t mask = $mask$$constant;
     int width = exact_log2$6(mask+1);
     __ $3(as_Register($dst$$reg),
             as_Register($src$$reg), rshift, width);
   %}
   ins_pipe(ialu_reg_shift);
-%}')
+%}
+')
 BFX_INSN(I, URShift, ubfxw, 31, int)
 BFX_INSN(L, URShift, ubfx,  63, long, _long)
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+
 // We can use ubfx when extending an And with a mask when we know mask
 // is positive.  We know that because immI_bitmask guarantees it.
 instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask)
@@ -207,7 +224,7 @@
   format %{ "ubfx $dst, $src, $rshift, $mask" %}
   ins_encode %{
     int rshift = $rshift$$constant & 31;
-    long mask = $mask$$constant;
+    intptr_t mask = $mask$$constant;
     int width = exact_log2(mask+1);
     __ ubfx(as_Register($dst$$reg),
             as_Register($src$$reg), rshift, width);
@@ -215,10 +232,12 @@
   ins_pipe(ialu_reg_shift);
 %}
 
-define(`UBFIZ_INSN',
+define(`UBFIZ_INSN', `// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+
 // We can use ubfiz when masking by a positive number and then left shifting the result.
 // We know that the mask is positive because imm$1_bitmask guarantees it.
-`instruct $2$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI lshift, imm$1_bitmask mask)
+instruct $2$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src, immI lshift, imm$1_bitmask mask)
 %{
   match(Set dst (LShift$1 (And$1 src mask) lshift));
   predicate((exact_log2$5(n->in(1)->in(2)->get_$4() + 1) + (n->in(2)->get_int() & $3)) <= ($3 + 1));
@@ -227,16 +246,20 @@
   format %{ "$2 $dst, $src, $lshift, $mask" %}
   ins_encode %{
     int lshift = $lshift$$constant & $3;
-    long mask = $mask$$constant;
+    intptr_t mask = $mask$$constant;
     int width = exact_log2$5(mask+1);
     __ $2(as_Register($dst$$reg),
           as_Register($src$$reg), lshift, width);
   %}
   ins_pipe(ialu_reg_shift);
-%}')
+%}
+')
 UBFIZ_INSN(I, ubfizw, 31, int)
 UBFIZ_INSN(L, ubfiz,  63, long, _long)
 
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+
 // If there is a convert I to L block between and AndI and a LShiftL, we can also match ubfiz
 instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask)
 %{
@@ -247,7 +270,7 @@
   format %{ "ubfiz $dst, $src, $lshift, $mask" %}
   ins_encode %{
     int lshift = $lshift$$constant & 63;
-    long mask = $mask$$constant;
+    intptr_t mask = $mask$$constant;
     int width = exact_log2(mask+1);
     __ ubfiz(as_Register($dst$$reg),
              as_Register($src$$reg), lshift, width);
@@ -255,10 +278,12 @@
   ins_pipe(ialu_reg_shift);
 %}
 
-// Rotations
 
-define(`EXTRACT_INSN',
-`instruct extr$3$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immI lshift, immI rshift, rFlagsReg cr)
+// Rotations dnl
+define(`EXTRACT_INSN',`
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+instruct extr$3$1(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immI lshift, immI rshift, rFlagsReg cr)
 %{
   match(Set dst ($3$1 (LShift$1 src1 lshift) (URShift$1 src2 rshift)));
   predicate(0 == (((n->in(1)->in(2)->get_int() & $2) + (n->in(2)->in(2)->get_int() & $2)) & $2));
@@ -277,9 +302,10 @@
 EXTRACT_INSN(I, 31, Or, extrw)
 EXTRACT_INSN(L, 63, Add, extr)
 EXTRACT_INSN(I, 31, Add, extrw)
-define(`ROL_EXPAND', `
+define(`ROL_EXPAND', `// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+
 // $2 expander
-
 instruct $2$1_rReg(iReg$1NoSp dst, iReg$1 src, iRegI shift, rFlagsReg cr)
 %{
   effect(DEF dst, USE src, USE shift);
@@ -292,10 +318,12 @@
             rscratch1);
     %}
   ins_pipe(ialu_reg_reg_vshift);
-%}')dnl
-define(`ROR_EXPAND', `
+%}
+')
+define(`ROR_EXPAND', `// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
+
 // $2 expander
-
 instruct $2$1_rReg(iReg$1NoSp dst, iReg$1 src, iRegI shift, rFlagsReg cr)
 %{
   effect(DEF dst, USE src, USE shift);
@@ -307,8 +335,10 @@
             as_Register($shift$$reg));
     %}
   ins_pipe(ialu_reg_reg_vshift);
-%}')dnl
-define(ROL_INSN, `
+%}
+')dnl
+define(ROL_INSN, `// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct $3$1_rReg_Var_C$2(iReg$1NoSp dst, iReg$1 src, iRegI shift, immI$2 c$2, rFlagsReg cr)
 %{
   match(Set dst (Or$1 (LShift$1 src shift) (URShift$1 src (SubI c$2 shift))));
@@ -316,8 +346,10 @@
   expand %{
     $3$1_rReg(dst, src, shift, cr);
   %}
-%}')dnl
-define(ROR_INSN, `
+%}
+')dnl
+define(ROR_INSN, `// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct $3$1_rReg_Var_C$2(iReg$1NoSp dst, iReg$1 src, iRegI shift, immI$2 c$2, rFlagsReg cr)
 %{
   match(Set dst (Or$1 (URShift$1 src shift) (LShift$1 src (SubI c$2 shift))));
@@ -325,7 +357,8 @@
   expand %{
     $3$1_rReg(dst, src, shift, cr);
   %}
-%}')dnl
+%}
+')dnl
 ROL_EXPAND(L, rol, rorv)
 ROL_EXPAND(I, rol, rorvw)
 ROL_INSN(L, _64, rol)
@@ -342,6 +375,8 @@
 // Add/subtract (extended)
 dnl ADD_SUB_EXTENDED(mode, size, add node, shift node, insn, shift type, wordsize
 define(`ADD_SUB_CONV', `
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct $3Ext$1(iReg$2NoSp dst, iReg$2`'ORL2I($2) src1, iReg$1`'ORL2I($1) src2, rFlagsReg cr)
 %{
   match(Set dst ($3$2 src1 (ConvI2L src2)));
@@ -354,10 +389,12 @@
    %}
   ins_pipe(ialu_reg_reg);
 %}')dnl
-ADD_SUB_CONV(I,L,Add,add,sxtw);
-ADD_SUB_CONV(I,L,Sub,sub,sxtw);
+ADD_SUB_CONV(I,L,Add,add,sxtw)
+ADD_SUB_CONV(I,L,Sub,sub,sxtw)
 dnl
 define(`ADD_SUB_EXTENDED', `
+// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct $3Ext$1_$6(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immI_`'eval($7-$2) lshift, immI_`'eval($7-$2) rshift, rFlagsReg cr)
 %{
   match(Set dst ($3$1 src1 EXTEND($1, $4, src2, lshift, rshift)));
@@ -369,7 +406,7 @@
             as_Register($src2$$reg), ext::$6);
    %}
   ins_pipe(ialu_reg_reg);
-%}')
+%}')dnl
 ADD_SUB_EXTENDED(I,16,Add,RShift,add,sxth,32)
 ADD_SUB_EXTENDED(I,8,Add,RShift,add,sxtb,32)
 ADD_SUB_EXTENDED(I,8,Add,URShift,add,uxtb,32)
@@ -379,7 +416,8 @@
 ADD_SUB_EXTENDED(L,8,Add,URShift,add,uxtb,64)
 dnl
 dnl ADD_SUB_ZERO_EXTEND(mode, size, add node, insn, shift type)
-define(`ADD_SUB_ZERO_EXTEND', `
+define(`ADD_SUB_ZERO_EXTEND', `// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct $3Ext$1_$5_and(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_$2 mask, rFlagsReg cr)
 %{
   match(Set dst ($3$1 src1 (And$1 src2 mask)));
@@ -391,7 +429,8 @@
             as_Register($src2$$reg), ext::$5);
    %}
   ins_pipe(ialu_reg_reg);
-%}')
+%}
+')
 dnl
 ADD_SUB_ZERO_EXTEND(I,255,Add,addw,uxtb)
 ADD_SUB_ZERO_EXTEND(I,65535,Add,addw,uxth)
@@ -406,7 +445,8 @@
 ADD_SUB_ZERO_EXTEND(L,4294967295,Sub,sub,uxtw)
 dnl
 dnl ADD_SUB_ZERO_EXTEND_SHIFT(mode, size, add node, insn, ext type)
-define(`ADD_SUB_EXTENDED_SHIFT', `
+define(`ADD_SUB_EXTENDED_SHIFT', `// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct $3Ext$1_$6_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, immIExt lshift2, immI_`'eval($7-$2) lshift1, immI_`'eval($7-$2) rshift1, rFlagsReg cr)
 %{
   match(Set dst ($3$1 src1 (LShift$1 EXTEND($1, $4, src2, lshift1, rshift1) lshift2)));
@@ -418,7 +458,8 @@
             as_Register($src2$$reg), ext::$6, ($lshift2$$constant));
    %}
   ins_pipe(ialu_reg_reg_shift);
-%}')
+%}
+')
 dnl                   $1 $2 $3   $4   $5   $6  $7
 ADD_SUB_EXTENDED_SHIFT(L,8,Add,RShift,add,sxtb,64)
 ADD_SUB_EXTENDED_SHIFT(L,16,Add,RShift,add,sxth,64)
@@ -435,7 +476,8 @@
 ADD_SUB_EXTENDED_SHIFT(I,16,Sub,RShift,subw,sxth,32)
 dnl
 dnl ADD_SUB_CONV_SHIFT(mode, add node, insn, ext type)
-define(`ADD_SUB_CONV_SHIFT', `
+define(`ADD_SUB_CONV_SHIFT', `// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct $2ExtI_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr)
 %{
   match(Set dst ($2$1 src1 (LShiftL (ConvI2L src2) lshift)));
@@ -447,13 +489,14 @@
             as_Register($src2$$reg), ext::$4, ($lshift$$constant));
    %}
   ins_pipe(ialu_reg_reg_shift);
-%}')
-dnl
-ADD_SUB_CONV_SHIFT(L,Add,add,sxtw);
-ADD_SUB_CONV_SHIFT(L,Sub,sub,sxtw);
+%}
+')dnl
+ADD_SUB_CONV_SHIFT(L,Add,add,sxtw)
+ADD_SUB_CONV_SHIFT(L,Sub,sub,sxtw)
 dnl
 dnl ADD_SUB_ZERO_EXTEND(mode, size, add node, insn, ext type)
-define(`ADD_SUB_ZERO_EXTEND_SHIFT', `
+define(`ADD_SUB_ZERO_EXTEND_SHIFT', `// This pattern is automatically generated from aarch64_ad.m4
+// DO NOT EDIT ANYTHING IN THIS SECTION OF THE FILE
 instruct $3Ext$1_$5_and_shift(iReg$1NoSp dst, iReg$1`'ORL2I($1) src1, iReg$1`'ORL2I($1) src2, imm$1_$2 mask, immIExt lshift, rFlagsReg cr)
 %{
   match(Set dst ($3$1 src1 (LShift$1 (And$1 src2 mask) lshift)));
@@ -465,8 +508,8 @@
             as_Register($src2$$reg), ext::$5, ($lshift$$constant));
    %}
   ins_pipe(ialu_reg_reg_shift);
-%}')
-dnl
+%}
+')dnl
 dnl                       $1 $2  $3  $4  $5
 ADD_SUB_ZERO_EXTEND_SHIFT(L,255,Add,add,uxtb)
 ADD_SUB_ZERO_EXTEND_SHIFT(L,65535,Add,add,uxth)
@@ -482,4 +525,4 @@
 ADD_SUB_ZERO_EXTEND_SHIFT(I,255,Sub,subw,uxtb)
 ADD_SUB_ZERO_EXTEND_SHIFT(I,65535,Sub,subw,uxth)
 dnl
-// END This section of the file is automatically generated. Do not edit --------------
+
--- a/src/hotspot/cpu/aarch64/assembler_aarch64.cpp	Sun Jul 19 21:34:28 2020 -0700
+++ b/src/hotspot/cpu/aarch64/assembler_aarch64.cpp	Thu Jul 09 11:01:29 2020 -0400
@@ -31,7 +31,7 @@
 #include "interpreter/interpreter.hpp"
 
 #ifndef PRODUCT
-const unsigned long Assembler::asm_bp = 0x00007fffee09ac88;
+const uintptr_t Assembler::asm_bp = 0x00007fffee09ac88;
 #endif
 
 #include "compiler/disassembler.hpp"
@@ -132,14 +132,14 @@
     __ subs(r4, r1, 698u);                             //        subs        x4, x1, #698
 
 // LogicalImmOp
-    __ andw(r28, r19, 4294709247ul);                   //        and        w28, w19, #0xfffc0fff
-    __ orrw(r27, r5, 536870910ul);                     //        orr        w27, w5, #0x1ffffffe
-    __ eorw(r30, r20, 4294840319ul);                   //        eor        w30, w20, #0xfffe0fff
-    __ andsw(r22, r26, 4294959615ul);                  //        ands        w22, w26, #0xffffe1ff
-    __ andr(r5, r7, 4194300ul);                        //        and        x5, x7, #0x3ffffc
-    __ orr(r13, r7, 18014398509481728ul);              //        orr        x13, x7, #0x3fffffffffff00
-    __ eor(r7, r9, 18442240474082197503ul);            //        eor        x7, x9, #0xfff0000000003fff
-    __ ands(r3, r0, 18374686479671656447ul);           //        ands        x3, x0, #0xff00000000007fff
+    __ andw(r28, r19, 4294709247ull);                   //        and        w28, w19, #0xfffc0fff
+    __ orrw(r27, r5, 536870910ull);                     //        orr        w27, w5, #0x1ffffffe
+    __ eorw(r30, r20, 4294840319ull);                   //        eor        w30, w20, #0xfffe0fff
+    __ andsw(r22, r26, 4294959615ull);                  //        ands        w22, w26, #0xffffe1ff
+    __ andr(r5, r7, 4194300ull);                        //        and        x5, x7, #0x3ffffc
+    __ orr(r13, r7, 18014398509481728ull);              //        orr        x13, x7, #0x3fffffffffff00
+    __ eor(r7, r9, 18442240474082197503ull);            //        eor        x7, x9, #0xfff0000000003fff
+    __ ands(r3, r0, 18374686479671656447ull);           //        ands        x3, x0, #0xff00000000007fff
 
 // AbsOp
     __ b(__ pc());                                     //        b        .
@@ -1493,7 +1493,7 @@
       Disassembler::decode((address)start, (address)start + len);
   }
 
-  JNIEXPORT void das1(unsigned long insn) {
+  JNIEXPORT void das1(uintptr_t insn) {
     das(insn, 1);
   }
 }
@@ -1532,7 +1532,7 @@
   }
 }
 
-void Assembler::adrp(Register reg1, const Address &dest, unsigned long &byte_offset) {
+void Assembler::adrp(Register reg1, const Address &dest, uintptr_t &byte_offset) {
   ShouldNotReachHere();
 }
 
@@ -1541,7 +1541,7 @@
 #define starti Instruction_aarch64 do_not_use(this); set_current(&do_not_use)
 
   void Assembler::adr(Register Rd, address adr) {
-    long offset = adr - pc();
+    intptr_t offset = adr - pc();
     int offset_lo = offset & 3;
     offset >>= 2;
     starti;
@@ -1552,7 +1552,7 @@
   void Assembler::_adrp(Register Rd, address adr) {
     uint64_t pc_page = (uint64_t)pc() >> 12;
     uint64_t adr_page = (uint64_t)adr >> 12;
-    long offset = adr_page - pc_page;
+    intptr_t offset = adr_page - pc_page;
     int offset_lo = offset & 3;
     offset >>= 2;
     starti;
@@ -1701,9 +1701,9 @@
   srf(Rn, 5);
 }
 
-bool Assembler::operand_valid_for_add_sub_immediate(long imm) {
+bool Assembler::operand_valid_for_add_sub_immediate(int64_t imm) {
   bool shift = false;
-  unsigned long uimm = uabs(imm);
+  uint64_t uimm = (uint64_t)uabs(imm);
   if (uimm < (1 << 12))
     return true;
   if (uimm < (1 << 24)
--- a/src/hotspot/cpu/aarch64/assembler_aarch64.hpp	Sun Jul 19 21:34:28 2020 -0700
+++ b/src/hotspot/cpu/aarch64/assembler_aarch64.hpp	Thu Jul 09 11:01:29 2020 -0400
@@ -199,7 +199,7 @@
     return extend(uval, msb - lsb);
   }
 
-  static void patch(address a, int msb, int lsb, unsigned long val) {
+  static void patch(address a, int msb, int lsb, uint64_t val) {
     int nbits = msb - lsb + 1;
     guarantee(val < (1U << nbits), "Field too big for insn");
     assert_cond(msb >= lsb);
@@ -212,9 +212,9 @@
     *(unsigned *)a = target;
   }
 
-  static void spatch(address a, int msb, int lsb, long val) {
+  static void spatch(address a, int msb, int lsb, int64_t val) {
     int nbits = msb - lsb + 1;
-    long chk = val >> (nbits - 1);
+    int64_t chk = val >> (nbits - 1);
     guarantee (chk == -1 || chk == 0, "Field too big for insn");
     unsigned uval = val;
     unsigned mask = (1U << nbits) - 1;
@@ -245,9 +245,9 @@
     f(val, bit, bit);
   }
 
-  void sf(long val, int msb, int lsb) {
+  void sf(int64_t val, int msb, int lsb) {
     int nbits = msb - lsb + 1;
-    long chk = val >> (nbits - 1);
+    int64_t chk = val >> (nbits - 1);
     guarantee (chk == -1 || chk == 0, "Field too big for insn");
     unsigned uval = val;
     unsigned mask = (1U << nbits) - 1;
@@ -357,7 +357,7 @@
  private:
   Register _base;
   Register _index;
-  long _offset;
+  int64_t _offset;
   enum mode _mode;
   extend _ext;
 
@@ -380,9 +380,9 @@
     : _base(r), _index(noreg), _offset(0), _mode(base_plus_offset), _target(0) { }
   Address(Register r, int o)
     : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { }
-  Address(Register r, long o)
+  Address(Register r, int64_t o)
     : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { }
-  Address(Register r, unsigned long o)
+  Address(Register r, uint64_t o)
     : _base(r), _index(noreg), _offset(o), _mode(base_plus_offset), _target(0) { }
 #ifdef ASSERT
   Address(Register r, ByteSize disp)
@@ -422,7 +422,7 @@
               "wrong mode");
     return _base;
   }
-  long offset() const {
+  int64_t offset() const {
     return _offset;
   }
   Register index() const {
@@ -554,7 +554,7 @@
 
   void lea(MacroAssembler *, Register) const;
 
-  static bool offset_ok_for_immed(long offset, int shift) {
+  static bool offset_ok_for_immed(int64_t offset, int shift) {
     unsigned mask = (1 << shift) - 1;
     if (offset < 0 || offset & mask) {
       return (uabs(offset) < (1 << (20 - 12))); // Unscaled offset
@@ -616,10 +616,10 @@
 class Assembler : public AbstractAssembler {
 
 #ifndef PRODUCT
-  static const unsigned long asm_bp;
+  static const uintptr_t asm_bp;
 
   void emit_long(jint x) {
-    if ((unsigned long)pc() == asm_bp)
+    if ((uintptr_t)pc() == asm_bp)
       asm volatile ("nop");
     AbstractAssembler::emit_int32(x);
   }
@@ -670,7 +670,7 @@
   void f(unsigned val, int msb) {
     current->f(val, msb, msb);
   }
-  void sf(long val, int msb, int lsb) {
+  void sf(int64_t val, int msb, int lsb) {
     current->sf(val, msb, lsb);
   }
   void rf(Register reg, int lsb) {
@@ -720,7 +720,7 @@
     wrap_label(Rd, L, &Assembler::_adrp);
   }
 
-  void adrp(Register Rd, const Address &dest, unsigned long &offset);
+  void adrp(Register Rd, const Address &dest, uint64_t &offset);
 
 #undef INSN
 
@@ -846,7 +846,7 @@
   // architecture.  In debug mode we shrink it in order to test
   // trampolines, but not so small that branches in the interpreter
   // are out of range.
-  static const unsigned long branch_range = NOT_DEBUG(128 * M) DEBUG_ONLY(2 * M);
+  static const uint64_t branch_range = NOT_DEBUG(128 * M) DEBUG_ONLY(2 * M);
 
   static bool reachable_from_branch_at(address branch, address target) {
     return uabs(target - branch) < branch_range;
@@ -856,7 +856,7 @@
 #define INSN(NAME, opcode)                                              \
   void NAME(address dest) {                                             \
     starti;                                                             \
-    long offset = (dest - pc()) >> 2;                                   \
+    int64_t offset = (dest - pc()) >> 2;                                \
     DEBUG_ONLY(assert(reachable_from_branch_at(pc(), dest), "debug only")); \
     f(opcode, 31), f(0b00101, 30, 26), sf(offset, 25, 0);               \
   }                                                                     \
@@ -873,7 +873,7 @@
   // Compare & branch (immediate)
 #define INSN(NAME, opcode)                              \
   void NAME(Register Rt, address dest) {                \
-    long offset = (dest - pc()) >> 2;                   \
+    int64_t offset = (dest - pc()) >> 2;                \
     starti;                                             \
     f(opcode, 31, 24), sf(offset, 23, 5), rf(Rt, 0);    \
   }                                                     \
@@ -891,7 +891,7 @@
   // Test & branch (immediate)
 #define INSN(NAME, opcode)                                              \
   void NAME(Register Rt, int bitpos, address dest) {                    \
-    long offset = (dest - pc()) >> 2;                                   \
+    int64_t offset = (dest - pc()) >> 2;                                \
     int b5 = bitpos >> 5;                                               \
     bitpos &= 0x1f;                                                     \
     starti;                                                             \
@@ -912,7 +912,7 @@
     {EQ, NE, HS, CS=HS, LO, CC=LO, MI, PL, VS, VC, HI, LS, GE, LT, GT, LE, AL, NV};
 
   void br(Condition  cond, address dest) {
-    long offset = (dest - pc()) >> 2;
+    int64_t offset = (dest - pc()) >> 2;
     starti;
     f(0b0101010, 31, 25), f(0, 24), sf(offset, 23, 5), f(0, 4), f(cond, 3, 0);
   }
@@ -1292,7 +1292,7 @@
   // Load register (literal)
 #define INSN(NAME, opc, V)                                              \
   void NAME(Register Rt, address dest) {                                \
-    long offset = (dest - pc()) >> 2;                                   \
+    int64_t offset = (dest - pc()) >> 2;                                \
     starti;                                                             \
     f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24),        \
       sf(offset, 23, 5);                                                \
@@ -1317,7 +1317,7 @@
 
 #define INSN(NAME, opc, V)                                              \
   void NAME(FloatRegister Rt, address dest) {                           \
-    long offset = (dest - pc()) >> 2;                                   \
+    int64_t offset = (dest - pc()) >> 2;                                \
     starti;                                                             \
     f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24),        \
       sf(offset, 23, 5);                                                \
@@ -1332,7 +1332,7 @@
 
 #define INSN(NAME, opc, V)                                              \
   void NAME(address dest, prfop op = PLDL1KEEP) {                       \
-    long offset = (dest - pc()) >> 2;                                   \
+    int64_t offset = (dest - pc()) >> 2;                                \
     starti;                                                             \
     f(opc, 31, 30), f(0b011, 29, 27), f(V, 26), f(0b00, 25, 24),        \
       sf(offset, 23, 5);                                                \
@@ -1408,7 +1408,7 @@
       assert(size == 0b10 || size == 0b11, "bad operand size in ldr");
       assert(op == 0b01, "literal form can only be used with loads");
       f(size & 0b01, 31, 30), f(0b011, 29, 27), f(0b00, 25, 24);
-      long offset = (adr.target() - pc()) >> 2;
+      int64_t offset = (adr.target() - pc()) >> 2;
       sf(offset, 23, 5);
       code_section()->relocate(pc(), adr.rspec());
       return;
@@ -2683,7 +2683,7 @@
   virtual void bang_stack_with_offset(int offset);
 
   static bool operand_valid_for_logical_immediate(bool is32, uint64_t imm);
-  static bool operand_valid_for_add_sub_immediate(long imm);
+  static bool operand_valid_for_add_sub_immediate(int64_t imm);
   static bool operand_valid_for_float_immediate(double imm);
 
   void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
--- a/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp	Sun Jul 19 21:34:28 2020 -0700
+++ b/src/hotspot/cpu/aarch64/c1_LIRAssembler_aarch64.cpp	Thu Jul 09 11:01:29 2020 -0400
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -1352,7 +1352,7 @@
     __ load_klass(klass_RInfo, obj);
     if (k->is_loaded()) {
       // See if we get an immediate positive hit
-      __ ldr(rscratch1, Address(klass_RInfo, long(k->super_check_offset())));
+      __ ldr(rscratch1, Address(klass_RInfo, int64_t(k->super_check_offset())));
       __ cmp(k_RInfo, rscratch1);
       if ((juint)in_bytes(Klass::secondary_super_cache_offset()) != k->super_check_offset()) {
         __ br(Assembler::NE, *failure_target);
@@ -2675,7 +2675,7 @@
   Register res = op->result_opr()->as_register();
 
   assert_different_registers(val, crc, res);
-  unsigned long offset;
+  uint64_t offset;
   __ adrp(res, ExternalAddress(StubRoutines::crc_table_addr()), offset);
   if (offset) __ add(res, res, offset);
 
--- a/src/hotspot/cpu/aarch64/frame_aarch64.cpp	Sun Jul 19 21:34:28 2020 -0700
+++ b/src/hotspot/cpu/aarch64/frame_aarch64.cpp	Thu Jul 09 11:01:29 2020 -0400
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -653,14 +653,14 @@
 
 #define DESCRIBE_FP_OFFSET(name)                                        \
   {                                                                     \
-    unsigned long *p = (unsigned long *)fp;                             \
-    printf("0x%016lx 0x%016lx %s\n", (unsigned long)(p + frame::name##_offset), \
+    uintptr_t *p = (uintptr_t *)fp;                                     \
+    printf("0x%016lx 0x%016lx %s\n", (uintptr_t)(p + frame::name##_offset), \
            p[frame::name##_offset], #name);                             \
   }
 
-static __thread unsigned long nextfp;
-static __thread unsigned long nextpc;
-static __thread unsigned long nextsp;
+static __thread uintptr_t nextfp;
+static __thread uintptr_t nextpc;
+static __thread uintptr_t nextsp;
 static __thread RegisterMap *reg_map;
 
 static void printbc(Method *m, intptr_t bcx) {
@@ -679,7 +679,7 @@
   printf("%s : %s ==> %s\n", m->name_and_sig_as_C_string(), buf, name);
 }
 
-void internal_pf(unsigned long sp, unsigned long fp, unsigned long pc, unsigned long bcx) {
+void internal_pf(uintptr_t sp, uintptr_t fp, uintptr_t pc, uintptr_t bcx) {
   if (! fp)
     return;
 
@@ -693,7 +693,7 @@
   DESCRIBE_FP_OFFSET(interpreter_frame_locals);
   DESCRIBE_FP_OFFSET(interpreter_frame_bcp);
   DESCRIBE_FP_OFFSET(interpreter_frame_initial_sp);
-  unsigned long *p = (unsigned long *)fp;
+  uintptr_t *p = (uintptr_t *)fp;
 
   // We want to see all frames, native and Java.  For compiled and
   // interpreted frames we have special information that allows us to
@@ -703,16 +703,16 @@
   if (this_frame.is_compiled_frame() ||
       this_frame.is_interpreted_frame()) {
     frame sender = this_frame.sender(reg_map);
-    nextfp = (unsigned long)sender.fp();
-    nextpc = (unsigned long)sender.pc();
-    nextsp = (unsigned long)sender.unextended_sp();
+    nextfp = (uintptr_t)sender.fp();
+    nextpc = (uintptr_t)sender.pc();
+    nextsp = (uintptr_t)sender.unextended_sp();
   } else {
     nextfp = p[frame::link_offset];
     nextpc = p[frame::return_addr_offset];
-    nextsp = (unsigned long)&p[frame::sender_sp_offset];
+    nextsp = (uintptr_t)&p[frame::sender_sp_offset];
   }
 
-  if (bcx == -1ul)
+  if (bcx == -1ULL)
     bcx = p[frame::interpreter_frame_bcp_offset];
 
   if (Interpreter::contains((address)pc)) {
@@ -746,8 +746,8 @@
   internal_pf (nextsp, nextfp, nextpc, -1);
 }
 
-extern "C" void pf(unsigned long sp, unsigned long fp, unsigned long pc,
-                   unsigned long bcx, unsigned long thread) {
+extern "C" void pf(uintptr_t sp, uintptr_t fp, uintptr_t pc,
+                   uintptr_t bcx, uintptr_t thread) {
   if (!reg_map) {
     reg_map = NEW_C_HEAP_OBJ(RegisterMap, mtInternal);
     ::new (reg_map) RegisterMap((JavaThread*)thread, false);
@@ -766,9 +766,9 @@
 // support for printing out where we are in a Java method
 // needs to be passed current fp and bcp register values
 // prints method name, bc index and bytecode name
-extern "C" void pm(unsigned long fp, unsigned long bcx) {
+extern "C" void pm(uintptr_t fp, uintptr_t bcx) {
   DESCRIBE_FP_OFFSET(interpreter_frame_method);
-  unsigned long *p = (unsigned long *)fp;
+  uintptr_t *p = (uintptr_t *)fp;
   Method* m = (Method*)p[frame::interpreter_frame_method_offset];
   printbc(m, bcx);
 }
--- a/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp	Sun Jul 19 21:34:28 2020 -0700
+++ b/src/hotspot/cpu/aarch64/gc/shared/barrierSetAssembler_aarch64.cpp	Thu Jul 09 11:01:29 2020 -0400
@@ -178,7 +178,7 @@
     Label retry;
     __ bind(retry);
     {
-      unsigned long offset;
+      uint64_t offset;
       __ adrp(rscratch1, ExternalAddress((address) Universe::heap()->end_addr()), offset);
       __ ldr(heap_end, Address(rscratch1, offset));
     }
@@ -187,7 +187,7 @@
 
     // Get the current top of the heap
     {
-      unsigned long offset;
+      uint64_t offset;
       __ adrp(rscratch1, heap_top, offset);
       // Use add() here after ARDP, rather than lea().
       // lea() does not generate anything if its offset is zero.
--- a/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp	Sun Jul 19 21:34:28 2020 -0700
+++ b/src/hotspot/cpu/aarch64/gc/z/zBarrierSetAssembler_aarch64.cpp	Thu Jul 09 11:01:29 2020 -0400
@@ -206,7 +206,7 @@
   BarrierSetAssembler::try_resolve_jobject_in_native(masm, jni_env, robj, tmp, slowpath);
 
   // The Address offset is too large to direct load - -784. Our range is +127, -128.
-  __ mov(tmp, (long int)(in_bytes(ZThreadLocalData::address_bad_mask_offset()) -
+  __ mov(tmp, (int64_t)(in_bytes(ZThreadLocalData::address_bad_mask_offset()) -
               in_bytes(JavaThread::jni_environment_offset())));
 
   // Load address bad mask
--- a/src/hotspot/cpu/aarch64/immediate_aarch64.cpp	Sun Jul 19 21:34:28 2020 -0700
+++ b/src/hotspot/cpu/aarch64/immediate_aarch64.cpp	Thu Jul 09 11:01:29 2020 -0400
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -66,7 +66,7 @@
 // for i = 1, ... N result<i-1> = 1 other bits are zero
 static inline uint64_t ones(int N)
 {
-  return (N == 64 ? (uint64_t)-1UL : ((1UL << N) - 1));
+  return (N == 64 ? -1ULL : (1ULL << N) - 1);
 }
 
 /*
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp	Sun Jul 19 21:34:28 2020 -0700
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp	Thu Jul 09 11:01:29 2020 -0400
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -168,7 +168,7 @@
 }
 
 void InterpreterMacroAssembler::get_dispatch() {
-  unsigned long offset;
+  uint64_t offset;
   adrp(rdispatch, ExternalAddress((address)Interpreter::dispatch_table()), offset);
   lea(rdispatch, Address(rdispatch, offset));
 }
@@ -765,7 +765,7 @@
     // copy
     mov(rscratch1, sp);
     sub(swap_reg, swap_reg, rscratch1);
-    ands(swap_reg, swap_reg, (unsigned long)(7 - os::vm_page_size()));
+    ands(swap_reg, swap_reg, (uint64_t)(7 - os::vm_page_size()));
 
     // Save the test result, for recursive case, the result is zero
     str(swap_reg, Address(lock_reg, mark_offset));
--- a/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp	Sun Jul 19 21:34:28 2020 -0700
+++ b/src/hotspot/cpu/aarch64/interpreterRT_aarch64.cpp	Thu Jul 09 11:01:29 2020 -0400
@@ -1,6 +1,6 @@
 /*
- * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -346,7 +346,7 @@
 
     if (_num_fp_args < Argument::n_float_register_parameters_c) {
       *_fp_args++ = from_obj;
-      *_fp_identifiers |= (1 << _num_fp_args); // mark as double
+      *_fp_identifiers |= (1ull << _num_fp_args); // mark as double
       _num_fp_args++;
     } else {
       *_to++ = from_obj;
@@ -382,7 +382,7 @@
 
   // handle arguments
   SlowSignatureHandler ssh(m, (address)from, to);
-  ssh.iterate(UCONST64(-1));
+  ssh.iterate((uint64_t)CONST64(-1));
 
   // return result handler
   return Interpreter::result_handler(m->result_type());
--- a/src/hotspot/cpu/aarch64/jniFastGetField_aarch64.cpp	Sun Jul 19 21:34:28 2020 -0700
+++ b/src/hotspot/cpu/aarch64/jniFastGetField_aarch64.cpp	Thu Jul 09 11:01:29 2020 -0400
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2004, 2020, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -73,7 +73,7 @@
 
   Label slow;
 
-  unsigned long offset;
+  uint64_t offset;
   __ adrp(rcounter_addr,
           SafepointSynchronize::safepoint_counter_addr(), offset);
   Address safepoint_counter_addr(rcounter_addr, offset);
@@ -88,7 +88,7 @@
 
     // Check to see if a field access watch has been set before we
     // take the fast path.
-    unsigned long offset2;
+    uint64_t offset2;
     __ adrp(result,
             ExternalAddress((address) JvmtiExport::get_field_access_count_addr()),
             offset2);
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Sun Jul 19 21:34:28 2020 -0700
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp	Thu Jul 09 11:01:29 2020 -0400
@@ -70,8 +70,8 @@
 // Return the total length (in bytes) of the instructions.
 int MacroAssembler::pd_patch_instruction_size(address branch, address target) {
   int instructions = 1;
-  assert((uint64_t)target < (1ul << 48), "48-bit overflow in address constant");
-  long offset = (target - branch) >> 2;
+  assert((uint64_t)target < (1ull << 48), "48-bit overflow in address constant");
+  intptr_t offset = (target - branch) >> 2;
   unsigned insn = *(unsigned*)branch;
   if ((Instruction_aarch64::extract(insn, 29, 24) & 0b111011) == 0b011000) {
     // Load register (literal)
@@ -134,9 +134,9 @@
                      Instruction_aarch64::extract(insn2, 4, 0)) {
         // movk #imm16<<32
         Instruction_aarch64::patch(branch + 4, 20, 5, (uint64_t)target >> 32);
-        long dest = ((long)target & 0xffffffffL) | ((long)branch & 0xffff00000000L);
-        long pc_page = (long)branch >> 12;
-        long adr_page = (long)dest >> 12;
+        uintptr_t dest = ((uintptr_t)target & 0xffffffffULL) | ((uintptr_t)branch & 0xffff00000000ULL);
+        uintptr_t pc_page = (uintptr_t)branch >> 12;
+        uintptr_t adr_page = (uintptr_t)dest >> 12;
         offset = adr_page - pc_page;
         instructions = 2;
       }
@@ -205,7 +205,7 @@
 }
 
 address MacroAssembler::target_addr_for_insn(address insn_addr, unsigned insn) {
-  long offset = 0;
+  intptr_t offset = 0;
   if ((Instruction_aarch64::extract(insn, 29, 24) & 0b011011) == 0b00011000) {
     // Load register (literal)
     offset = Instruction_aarch64::sextract(insn, 23, 5);
@@ -389,7 +389,7 @@
   assert(CodeCache::find_blob(entry.target()) != NULL,
          "destination of far call not found in code cache");
   if (far_branches()) {
-    unsigned long offset;
+    uintptr_t offset;
     // We can use ADRP here because we know that the total size of
     // the code cache cannot exceed 2Gb.
     adrp(tmp, entry, offset);
@@ -407,7 +407,7 @@
   assert(CodeCache::find_blob(entry.target()) != NULL,
          "destination of far call not found in code cache");
   if (far_branches()) {
-    unsigned long offset;
+    uintptr_t offset;
     // We can use ADRP here because we know that the total size of
     // the code cache cannot exceed 2Gb.
     adrp(tmp, entry, offset);
@@ -824,7 +824,7 @@
 address MacroAssembler::ic_call(address entry, jint method_index) {
   RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
   // address const_ptr = long_constant((jlong)Universe::non_oop_word());
-  // unsigned long offset;
+  // uintptr_t offset;
   // ldr_constant(rscratch2, const_ptr);
   movptr(rscratch2, (uintptr_t)Universe::non_oop_word());
   return trampoline_call(Address(entry, rh));
@@ -1711,7 +1711,7 @@
 // not actually be used: you must use the Address that is returned.
 // It is up to you to ensure that the shift provided matches the size
 // of your data.
-Address MacroAssembler::form_address(Register Rd, Register base, long byte_offset, int shift) {
+Address MacroAssembler::form_address(Register Rd, Register base, int64_t byte_offset, int shift) {
   if (Address::offset_ok_for_immed(byte_offset, shift))
     // It fits; no need for any heroics
     return Address(base, byte_offset);
@@ -1726,8 +1726,8 @@
 
   // See if we can do this with two 12-bit offsets
   {
-    unsigned long word_offset = byte_offset >> shift;
-    unsigned long masked_offset = word_offset & 0xfff000;
+    uint64_t word_offset = byte_offset >> shift;
+    uint64_t masked_offset = word_offset & 0xfff000;
     if (Address::offset_ok_for_immed(word_offset - masked_offset, 0)
         && Assembler::operand_valid_for_add_sub_immediate(masked_offset << shift)) {
       add(Rd, base, masked_offset << shift);
@@ -1968,7 +1968,7 @@
   if (value < (1 << 12)) { sub(reg, reg, value); return; }
   /* else */ {
     assert(reg != rscratch2, "invalid dst for register decrement");
-    mov(rscratch2, (unsigned long)value);
+    mov(rscratch2, (uint64_t)value);
     sub(reg, reg, rscratch2);
   }
 }
@@ -2720,19 +2720,19 @@
 // Returns true if it is, else false.
 bool MacroAssembler::merge_alignment_check(Register base,
                                            size_t size,
-                                           long cur_offset,
-                                           long prev_offset) const {
+                                           int64_t cur_offset,
+                                           int64_t prev_offset) const {
   if (AvoidUnalignedAccesses) {
     if (base == sp) {
       // Checks whether low offset if aligned to pair of registers.
-      long pair_mask = size * 2 - 1;
-      long offset = prev_offset > cur_offset ? cur_offset : prev_offset;
+      int64_t pair_mask = size * 2 - 1;
+      int64_t offset = prev_offset > cur_offset ? cur_offset : prev_offset;
       return (offset & pair_mask) == 0;
     } else { // If base is not sp, we can't guarantee the access is aligned.
       return false;
     }
   } else {
-    long mask = size - 1;
+    int64_t mask = size - 1;
     // Load/store pair instruction only supports element size aligned offset.
     return (cur_offset & mask) == 0 && (prev_offset & mask) == 0;
   }
@@ -2765,8 +2765,8 @@
     return false;
   }
 
-  long max_offset = 63 * prev_size_in_bytes;
-  long min_offset = -64 * prev_size_in_bytes;
+  int64_t max_offset = 63 * prev_size_in_bytes;
+  int64_t min_offset = -64 * prev_size_in_bytes;
 
   assert(prev_ldst->is_not_pre_post_index(), "pre-index or post-index is not supported to be merged.");
 
@@ -2775,8 +2775,8 @@
     return false;
   }
 
-  long cur_offset = adr.offset();
-  long prev_offset = prev_ldst->offset();
+  int64_t cur_offset = adr.offset();
+  int64_t prev_offset = prev_ldst->offset();
   size_t diff = abs(cur_offset - prev_offset);
   if (diff != prev_size_in_bytes) {
     return false;
@@ -2793,7 +2793,7 @@
     return false;
   }
 
-  long low_offset = prev_offset > cur_offset ? cur_offset : prev_offset;
+  int64_t low_offset = prev_offset > cur_offset ? cur_offset : prev_offset;
   // Offset range must be in ldp/stp instruction's range.
   if (low_offset > max_offset || low_offset < min_offset) {
     return false;
@@ -2818,7 +2818,7 @@
   address prev = pc() - NativeInstruction::instruction_size;
   NativeLdSt* prev_ldst = NativeLdSt_at(prev);
 
-  long offset;
+  int64_t offset;
 
   if (adr.offset() < prev_ldst->offset()) {
     offset = adr.offset();
@@ -3364,7 +3364,7 @@
         Register table0, Register table1, Register table2, Register table3,
         Register tmp, Register tmp2, Register tmp3) {
   Label L_by16, L_by16_loop, L_by4, L_by4_loop, L_by1, L_by1_loop, L_exit;
-  unsigned long offset;
+  uint64_t offset;
 
   if (UseCRC32) {
       kernel_crc32_using_crc32(crc, buf, len, table0, table1, table2, table3);
@@ -3666,7 +3666,7 @@
 SkipIfEqual::SkipIfEqual(
     MacroAssembler* masm, const bool* flag_addr, bool value) {
   _masm = masm;
-  unsigned long offset;
+  uint64_t offset;
   _masm->adrp(rscratch1, ExternalAddress((address)flag_addr), offset);
   _masm->ldrb(rscratch1, Address(rscratch1, offset));
   _masm->cbzw(rscratch1, _label);
@@ -3695,7 +3695,7 @@
 }
 
 void MacroAssembler::cmpptr(Register src1, Address src2) {
-  unsigned long offset;
+  uint64_t offset;
   adrp(rscratch1, src2, offset);
   ldr(rscratch1, Address(rscratch1, offset));
   cmp(src1, rscratch1);
@@ -3951,7 +3951,7 @@
   if (operand_valid_for_logical_immediate(
         /*is32*/false, (uint64_t)CompressedKlassPointers::base())) {
     const uint64_t range_mask =
-      (1UL << log2_intptr(CompressedKlassPointers::range())) - 1;
+      (1ULL << log2_intptr(CompressedKlassPointers::range())) - 1;
     if (((uint64_t)CompressedKlassPointers::base() & range_mask) == 0) {
       return (_klass_decode_mode = KlassDecodeXor);
     }
@@ -4357,13 +4357,13 @@
   return inst_mark();
 }
 
-void MacroAssembler::adrp(Register reg1, const Address &dest, unsigned long &byte_offset) {
+void MacroAssembler::adrp(Register reg1, const Address &dest, uint64_t &byte_offset) {
   relocInfo::relocType rtype = dest.rspec().reloc()->type();
-  unsigned long low_page = (unsigned long)CodeCache::low_bound() >> 12;
-  unsigned long high_page = (unsigned long)(CodeCache::high_bound()-1) >> 12;
-  unsigned long dest_page = (unsigned long)dest.target() >> 12;
-  long offset_low = dest_page - low_page;
-  long offset_high = dest_page - high_page;
+  uint64_t low_page = (uint64_t)CodeCache::low_bound() >> 12;
+  uint64_t high_page = (uint64_t)(CodeCache::high_bound()-1) >> 12;
+  uint64_t dest_page = (uint64_t)dest.target() >> 12;
+  int64_t offset_low = dest_page - low_page;
+  int64_t offset_high = dest_page - high_page;
 
   assert(is_valid_AArch64_address(dest.target()), "bad address");
   assert(dest.getMode() == Address::literal, "ADRP must be applied to a literal address");
@@ -4375,14 +4375,14 @@
   if (offset_high >= -(1<<20) && offset_low < (1<<20)) {
     _adrp(reg1, dest.target());
   } else {
-    unsigned long target = (unsigned long)dest.target();
-    unsigned long adrp_target
-      = (target & 0xffffffffUL) | ((unsigned long)pc() & 0xffff00000000UL);
+    uint64_t target = (uint64_t)dest.target();
+    uint64_t adrp_target
+      = (target & 0xffffffffULL) | ((uint64_t)pc() & 0xffff00000000ULL);
 
     _adrp(reg1, (address)adrp_target);
     movk(reg1, target >> 32, 32);
   }
-  byte_offset = (unsigned long)dest.target() & 0xfff;
+  byte_offset = (uint64_t)dest.target() & 0xfff;
 }
 
 void MacroAssembler::load_byte_map_base(Register reg) {
@@ -4392,7 +4392,7 @@
   if (is_valid_AArch64_address((address)byte_map_base)) {
     // Strictly speaking the byte_map_base isn't an address at all,
     // and it might even be negative.
-    unsigned long offset;
+    uint64_t offset;
     adrp(reg, ExternalAddress((address)byte_map_base), offset);
     // We expect offset to be zero with most collectors.
     if (offset != 0) {
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp	Sun Jul 19 21:34:28 2020 -0700
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64.hpp	Thu Jul 09 11:01:29 2020 -0400
@@ -499,14 +499,14 @@
     mov_immediate32(dst, imm32);
   }
 
-  inline void mov(Register dst, long l)
+  inline void mov(Register dst, int64_t l)
   {
     mov(dst, (uint64_t)l);
   }
 
   inline void mov(Register dst, int i)
   {
-    mov(dst, (long)i);
+    mov(dst, (int64_t)i);
   }
 
   void mov(Register dst, RegisterOrConstant src) {
@@ -1170,7 +1170,7 @@
   void sub(Register Rd, Register Rn, RegisterOrConstant decrement);
   void subw(Register Rd, Register Rn, RegisterOrConstant decrement);
 
-  void adrp(Register reg1, const Address &dest, unsigned long &byte_offset);
+  void adrp(Register reg1, const Address &dest, uint64_t &byte_offset);
 
   void tableswitch(Register index, jint lowbound, jint highbound,
                    Label &jumptable, Label &jumptable_end, int stride = 1) {
@@ -1187,7 +1187,7 @@
   // actually be used: you must use the Address that is returned.  It
   // is up to you to ensure that the shift provided matches the size
   // of your data.
-  Address form_address(Register Rd, Register base, long byte_offset, int shift);
+  Address form_address(Register Rd, Register base, int64_t byte_offset, int shift);
 
   // Return true iff an address is within the 48-bit AArch64 address
   // space.
@@ -1212,7 +1212,7 @@
     if (NearCpool) {
       ldr(dest, const_addr);
     } else {
-      unsigned long offset;
+      uint64_t offset;
       adrp(dest, InternalAddress(const_addr.target()), offset);
       ldr(dest, Address(dest, offset));
     }
@@ -1310,7 +1310,7 @@
   // Uses rscratch2 if the address is not directly reachable
   Address spill_address(int size, int offset, Register tmp=rscratch2);
 
-  bool merge_alignment_check(Register base, size_t size, long cur_offset, long prev_offset) const;
+  bool merge_alignment_check(Register base, size_t size, int64_t cur_offset, int64_t prev_offset) const;
 
   // Check whether two loads/stores can be merged into ldp/stp.
   bool ldst_can_merge(Register rx, const Address &adr, size_t cur_size_in_bytes, bool is_store) const;
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64_log.cpp	Sun Jul 19 21:34:28 2020 -0700
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64_log.cpp	Thu Jul 09 11:01:29 2020 -0400
@@ -260,9 +260,9 @@
                               Register tmp4, Register tmp5) {
   Label DONE, CHECK_CORNER_CASES, SMALL_VALUE, MAIN,
       CHECKED_CORNER_CASES, RETURN_MINF_OR_NAN;
-  const long INF_OR_NAN_PREFIX = 0x7FF0;
-  const long MINF_OR_MNAN_PREFIX = 0xFFF0;
-  const long ONE_PREFIX = 0x3FF0;
+  const int64_t INF_OR_NAN_PREFIX = 0x7FF0;
+  const int64_t MINF_OR_MNAN_PREFIX = 0xFFF0;
+  const int64_t ONE_PREFIX = 0x3FF0;
     movz(tmp2, ONE_PREFIX, 48);
     movz(tmp4, 0x0010, 48);
     fmovd(rscratch1, v0); // rscratch1 = AS_LONG_BITS(X)
--- a/src/hotspot/cpu/aarch64/macroAssembler_aarch64_trig.cpp	Sun Jul 19 21:34:28 2020 -0700
+++ b/src/hotspot/cpu/aarch64/macroAssembler_aarch64_trig.cpp	Thu Jul 09 11:01:29 2020 -0400
@@ -201,9 +201,9 @@
 // NOTE: fpu registers are actively reused. See comments in code about their usage
 void MacroAssembler::generate__ieee754_rem_pio2(address npio2_hw,
     address two_over_pi, address pio2) {
-  const long PIO2_1t = 0x3DD0B4611A626331UL;
-  const long PIO2_2  = 0x3DD0B4611A600000UL;
-  const long PIO2_2t = 0x3BA3198A2E037073UL;
+  const int64_t PIO2_1t = 0x3DD0B4611A626331ULL;
+  const int64_t PIO2_2  = 0x3DD0B4611A600000ULL;
+  const int64_t PIO2_2t = 0x3BA3198A2E037073ULL;
   Label X_IS_NEGATIVE, X_IS_MEDIUM_OR_LARGE, X_IS_POSITIVE_LONG_PI, LARGE_ELSE,
       REDUCTION_DONE, X_IS_MEDIUM_BRANCH_DONE, X_IS_LARGE, NX_SET,
       X_IS_NEGATIVE_LONG_PI;
--- a/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp	Sun Jul 19 21:34:28 2020 -0700
+++ b/src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp	Thu Jul 09 11:01:29 2020 -0400
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, 2018, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -305,7 +305,7 @@
   unsigned insn = *(unsigned*)pc;
   if (maybe_cpool_ref(pc)) {
     address addr = MacroAssembler::target_addr_for_insn(pc);
-    *(long*)addr = x;
+    *(int64_t*)addr = x;
   } else {
     MacroAssembler::pd_patch_instruction(pc, (address)intptr_t(x));
     ICache::invalidate_range(instruction_address(), instruction_size);
--- a/src/hotspot/cpu/aarch64/register_aarch64.hpp	Sun Jul 19 21:34:28 2020 -0700
+++ b/src/hotspot/cpu/aarch64/register_aarch64.hpp	Thu Jul 09 11:01:29 2020 -0400
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -65,7 +65,7 @@
 
   // Return the bit which represents this register.  This is intended
   // to be ORed into a bitmask: for usage see class RegSet below.
-  unsigned long bit(bool should_set = true) const { return should_set ? 1 << encoding() : 0; }
+  uint64_t bit(bool should_set = true) const { return should_set ? 1 << encoding() : 0; }
 };
 
 // The integer registers of the aarch64 architecture
--- a/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp	Sun Jul 19 21:34:28 2020 -0700
+++ b/src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp	Thu Jul 09 11:01:29 2020 -0400
@@ -403,7 +403,7 @@
     // 3    8 T_BOOL
     // -    0 return address
     //
-    // However to make thing extra confusing. Because we can fit a long/double in
+    // However to make things extra confusing. Because we can fit a Java long/double in
     // a single slot on a 64 bt vm and it would be silly to break them up, the interpreter
     // leaves one slot empty and only stores to a single slot. In this case the
     // slot that is occupied is the T_VOID slot. See I said it was confusing.
@@ -436,7 +436,7 @@
           __ str(rscratch1, Address(sp, next_off));
 #ifdef ASSERT
           // Overwrite the unused slot with known junk
-          __ mov(rscratch1, 0xdeadffffdeadaaaaul);
+          __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaaaull);
           __ str(rscratch1, Address(sp, st_off));
 #endif /* ASSERT */
         } else {
@@ -453,10 +453,10 @@
         // Two VMREgs|OptoRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
         // T_DOUBLE and T_LONG use two slots in the interpreter
         if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
-          // long/double in gpr
+          // jlong/double in gpr
 #ifdef ASSERT
           // Overwrite the unused slot with known junk
-          __ mov(rscratch1, 0xdeadffffdeadaaabul);
+          __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaabull);
           __ str(rscratch1, Address(sp, st_off));
 #endif /* ASSERT */
           __ str(r, Address(sp, next_off));
@@ -472,7 +472,7 @@
       } else {
 #ifdef ASSERT
         // Overwrite the unused slot with known junk
-        __ mov(rscratch1, 0xdeadffffdeadaaacul);
+        __ mov(rscratch1, (uint64_t)0xdeadffffdeadaaacull);
         __ str(rscratch1, Address(sp, st_off));
 #endif /* ASSERT */
         __ strd(r_1->as_FloatRegister(), Address(sp, next_off));
@@ -1701,7 +1701,7 @@
 
   Label dtrace_method_entry, dtrace_method_entry_done;
   {
-    unsigned long offset;
+    uint64_t offset;
     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
     __ ldrb(rscratch1, Address(rscratch1, offset));
     __ cbnzw(rscratch1, dtrace_method_entry);
@@ -1915,7 +1915,7 @@
 
   Label dtrace_method_exit, dtrace_method_exit_done;
   {
-    unsigned long offset;
+    uint64_t offset;
     __ adrp(rscratch1, ExternalAddress((address)&DTraceMethodProbes), offset);
     __ ldrb(rscratch1, Address(rscratch1, offset));
     __ cbnzw(rscratch1, dtrace_method_exit);
--- a/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp	Sun Jul 19 21:34:28 2020 -0700
+++ b/src/hotspot/cpu/aarch64/stubGenerator_aarch64.cpp	Thu Jul 09 11:01:29 2020 -0400
@@ -3283,8 +3283,8 @@
 
     // Max number of bytes we can process before having to take the mod
     // 0x15B0 is 5552 in decimal, the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1
-    unsigned long BASE = 0xfff1;
-    unsigned long NMAX = 0x15B0;
+    uint64_t BASE = 0xfff1;
+    uint64_t NMAX = 0x15B0;
 
     __ mov(base, BASE);
     __ mov(nmax, NMAX);
@@ -5381,12 +5381,12 @@
     // In C, approximately:
 
     // void
-    // montgomery_multiply(unsigned long Pa_base[], unsigned long Pb_base[],
-    //                     unsigned long Pn_base[], unsigned long Pm_base[],
-    //                     unsigned long inv, int len) {
-    //   unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
-    //   unsigned long *Pa, *Pb, *Pn, *Pm;
-    //   unsigned long Ra, Rb, Rn, Rm;
+    // montgomery_multiply(julong Pa_base[], julong Pb_base[],
+    //                     julong Pn_base[], julong Pm_base[],
+    //                     julong inv, int len) {
+    //   julong t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
+    //   julong *Pa, *Pb, *Pn, *Pm;
+    //   julong Ra, Rb, Rn, Rm;
 
     //   int i;
 
@@ -5594,11 +5594,11 @@
     // In C, approximately:
 
     // void
-    // montgomery_square(unsigned long Pa_base[], unsigned long Pn_base[],
-    //                   unsigned long Pm_base[], unsigned long inv, int len) {
-    //   unsigned long t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
-    //   unsigned long *Pa, *Pb, *Pn, *Pm;
-    //   unsigned long Ra, Rb, Rn, Rm;
+    // montgomery_square(julong Pa_base[], julong Pn_base[],
+    //                   julong Pm_base[], julong inv, int len) {
+    //   julong t0 = 0, t1 = 0, t2 = 0; // Triple-precision accumulator
+    //   julong *Pa, *Pb, *Pn, *Pm;
+    //   julong Ra, Rb, Rn, Rm;
 
     //   int i;
 
--- a/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp	Sun Jul 19 21:34:28 2020 -0700
+++ b/src/hotspot/cpu/aarch64/templateInterpreterGenerator_aarch64.cpp	Thu Jul 09 11:01:29 2020 -0400
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2003, 2020, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2014, 2019, Red Hat Inc. All rights reserved.
+ * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -994,7 +994,7 @@
     __ ldrw(val, Address(esp, 0));              // byte value
     __ ldrw(crc, Address(esp, wordSize));       // Initial CRC
 
-    unsigned long offset;
+    uint64_t offset;
     __ adrp(tbl, ExternalAddress(StubRoutines::crc_table_addr()), offset);
     __ add(tbl, tbl, offset);
 
--- a/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp	Sun Jul 19 21:34:28 2020 -0700
+++ b/src/hotspot/cpu/aarch64/vm_version_aarch64.cpp	Thu Jul 09 11:01:29 2020 -0400
@@ -161,7 +161,7 @@
     SoftwarePrefetchHintDistance &= ~7;
   }
 
-  unsigned long auxv = getauxval(AT_HWCAP);
+  uint64_t auxv = getauxval(AT_HWCAP);
 
   char buf[512];