changeset 7694:9666d88e03b2

8151036: aarch32: code cleanup
Summary: backport of JDK9 fixes to get rid of some build warnings and a few occurrences of #if 0 / 1 clauses
Reviewed-by: enevill
Contributed-by: snazarkin@azul.com
author enevill
date Thu, 03 Mar 2016 15:11:46 +0000
parents 7c7cfa16b0e6
children 91519470238d
files src/cpu/aarch32/vm/aarch32Test.cpp src/cpu/aarch32/vm/assembler_aarch32.cpp src/cpu/aarch32/vm/macroAssembler_aarch32.cpp src/cpu/aarch32/vm/macroAssembler_aarch32.hpp src/cpu/aarch32/vm/nativeInst_aarch32.cpp src/cpu/aarch32/vm/relocInfo_aarch32.cpp src/cpu/aarch32/vm/sharedRuntime_aarch32.cpp
diffstat 7 files changed, 13 insertions(+), 108 deletions(-)
--- a/src/cpu/aarch32/vm/aarch32Test.cpp	Thu Mar 03 15:08:19 2016 +0000
+++ b/src/cpu/aarch32/vm/aarch32Test.cpp	Thu Mar 03 15:11:46 2016 +0000
@@ -29,7 +29,7 @@
 #include "code/codeBlob.hpp"
 #include "asm/macroAssembler.hpp"
 
-// hook routine called during JVM bootstrap to test AArch64 assembler
+// hook routine called during JVM bootstrap to test AArch32 assembler
 
 extern "C" void entry(CodeBuffer*);
 
--- a/src/cpu/aarch32/vm/assembler_aarch32.cpp	Thu Mar 03 15:08:19 2016 +0000
+++ b/src/cpu/aarch32/vm/assembler_aarch32.cpp	Thu Mar 03 15:11:46 2016 +0000
@@ -1403,6 +1403,7 @@
     case no_mode:
     default:
       ShouldNotReachHere();
+      return false;
   }
 }
 
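The return false; added after ShouldNotReachHere() above, like the return NULL; additions in nativeInst_aarch32.cpp and relocInfo_aarch32.cpp below, silences one of the build warnings mentioned in the summary: when ShouldNotReachHere() is not treated as non-returning by the compiler, a non-void function that ends in it appears to fall off the end, and GCC reports "control reaches end of non-void function" (-Wreturn-type). A minimal stand-alone sketch of the pattern, with hypothetical names rather than the HotSpot sources:

    #include <cstdlib>

    // Stand-in for HotSpot's ShouldNotReachHere(); assumed here to abort at
    // run time without being declared noreturn, so the compiler still sees a
    // fall-through path in the caller.
    static void should_not_reach_here_sketch() { std::abort(); }

    // Hypothetical example of the pattern used in this changeset: the default
    // branch can never be taken, but the dummy return keeps -Wreturn-type quiet.
    static bool uses_wb_mode_sketch(int mode) {
      switch (mode) {
        case 0: return true;
        case 1: return false;
        default:
          should_not_reach_here_sketch();
          return false;   // unreachable; exists only to silence the warning
      }
    }

The dummy value is never observed at run time; it only gives every control path an explicit return.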
--- a/src/cpu/aarch32/vm/macroAssembler_aarch32.cpp	Thu Mar 03 15:08:19 2016 +0000
+++ b/src/cpu/aarch32/vm/macroAssembler_aarch32.cpp	Thu Mar 03 15:11:46 2016 +0000
@@ -305,11 +305,7 @@
          "destination of far call not found in code cache");
   // TODO performance issue: if intended to patch later,
   // generate mov rX, imm; bl rX far call (to reserve space)
-#if 0
-  if (far_branches()) {
-#else
   if (entry.rspec().type() != relocInfo::none || far_branches()) {
-#endif
     lea(tmp, entry);
     if (cbuf) cbuf->set_insts_mark();
     bl(tmp);
@@ -325,11 +321,7 @@
   assert(!external_word_Relocation::is_reloc_index((intptr_t)entry.target()), "can't far jump to reloc index)");
   // TODO performance issue: if intended to patch later,
   // generate mov rX, imm; bl rX far call (to reserve space)
-#if 0
-  if (far_branches()) {
-#else
   if (entry.rspec().type() != relocInfo::none || far_branches()) {
-#endif
     lea(tmp, entry);
     if (cbuf) cbuf->set_insts_mark();
     b(tmp);
@@ -630,29 +622,9 @@
   compile_in_scratch_emit_size = Compile::current()->in_scratch_emit_size();
   #endif
 
-  // TODO review and fix this block
-#if 0
-  if (far_branches() && !compile_in_scratch_emit_size) {
-  // Replaces this
-  //if (far_branches() && !Compile::current()->in_scratch_emit_size()) {
-  //FIXME End
-    emit_trampoline_stub(offset(), entry.target());
-  }
-#endif
-
   if (cbuf) cbuf->set_insts_mark();
   relocate(entry.rspec());
 
-  // TODO perfomance issue: uncomment below, bl could be encoded in 1 instruction.
-  // However, need to add nops to allow future patching when target will not fit
-  // as immediate
-#if 0
-  if (Assembler::reachable_from_branch_at(pc(), entry.target())) {
-    bl(entry.target());
-  } else {
-    bl(pc());
-  }
-#endif
   mov(rscratch1, entry.target());
   bl(rscratch1);
 }
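The TODO removed in this hunk noted that the call could be encoded as a single bl when the target is close enough, and the removed #if 0 block used Assembler::reachable_from_branch_at(pc(), entry.target()) as the test for it. On AArch32 (ARM state) the B/BL immediate is a signed 24-bit word offset, so a direct branch only reaches targets within roughly +/-32 MB. A hedged, stand-alone sketch of such a reachability check, not HotSpot's implementation:

    #include <cstdint>

    // Illustrative only: ARM-state B/BL encodes a signed 24-bit word offset
    // (byte offset = imm24 * 4), giving a reach of +/-32 MB from the branch.
    // The 8-byte PC-read-ahead adjustment is ignored for simplicity.
    static bool reachable_from_branch_sketch(const void* branch, const void* target) {
      intptr_t offset = reinterpret_cast<intptr_t>(target) -
                        reinterpret_cast<intptr_t>(branch);
      return offset >= -(intptr_t(1) << 25) && offset < (intptr_t(1) << 25);
    }

The code that remains always materializes the target with mov(rscratch1, entry.target()) and calls through the register, which keeps the call site patchable regardless of how far the target later moves.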
@@ -3003,11 +2975,6 @@
       } else if (adr.get_wb_mode() == Address::post) {
         // current implementation does not use Address:post for indexed access
         // enable the code below and implement proper post() method if it is required
-#if 0
-        (this->*sgl)(Rt, Address(post(adr.base(), wordSize)), cond);
-        (this->*sgl)(Rt2, Address(post(adr.base(), adr.index(), adr.shift())), cond);
-        sub(adr.base(), wordSize, cond);
-#endif
         ShouldNotReachHere();
       } else if (adr.get_wb_mode() == Address::off) {
         (this->*sgl)(Rt, Address(pre(adr.base(), adr.index(), adr.shift(), adr.op())), cond);
--- a/src/cpu/aarch32/vm/macroAssembler_aarch32.hpp	Thu Mar 03 15:08:19 2016 +0000
+++ b/src/cpu/aarch32/vm/macroAssembler_aarch32.hpp	Thu Mar 03 15:11:46 2016 +0000
@@ -654,15 +654,7 @@
 
   static int far_branch_size() {
     // TODO performance issue: always generate real far jumps
-#if 0
-    if (far_branches()) {
-      return 3 * 4;  // movw, movt, br
-    } else {
-      return 4;
-    }
-#else
-      return 3 * 4;  // movw, movt, br
-#endif
+    return 3 * 4;  // movw, movt, br
   }
 
   // Emit the CompiledIC call idiom
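With the #if 0 / #else pair gone, far_branch_size() always reserves 12 bytes, matching the three-instruction sequence named in the comment: materialize the full 32-bit target with a movw/movt pair, then branch through the register. A small sketch of that split, using hypothetical helper names rather than HotSpot's encoders:

    #include <cstdint>

    // Illustrative: the 32-bit target is split into the two 16-bit immediates
    // loaded by movw (low half) and movt (high half), followed by a register
    // branch; each instruction is 4 bytes, hence 3 * 4 in far_branch_size().
    struct MovwMovtImmediates {
      uint16_t lo;   // movw rX, #lo
      uint16_t hi;   // movt rX, #hi
    };

    static MovwMovtImmediates split_far_branch_target(uint32_t target) {
      MovwMovtImmediates imm;
      imm.lo = static_cast<uint16_t>(target & 0xffffu);
      imm.hi = static_cast<uint16_t>(target >> 16);
      return imm;
    }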
--- a/src/cpu/aarch32/vm/nativeInst_aarch32.cpp	Thu Mar 03 15:08:19 2016 +0000
+++ b/src/cpu/aarch32/vm/nativeInst_aarch32.cpp	Thu Mar 03 15:11:46 2016 +0000
@@ -58,6 +58,7 @@
     return address(NativeMovConstReg::from(addr())->data());
   }
   ShouldNotReachHere();
+  return NULL;
 }
 
 void NativeCall::set_destination(address dest) {
@@ -102,6 +103,7 @@
     return NativeRegCall::from(next_instr)->next_instruction_address();
   } else {
     ShouldNotReachHere();
+    return NULL;
   }
 }
 
@@ -306,6 +308,7 @@
     return address(NativeMovConstReg::from(addr())->data());
   }
   ShouldNotReachHere();
+  return NULL;
 }
 
 void NativeJump::set_jump_destination(address dest) {
@@ -329,6 +332,7 @@
     return NativeRegJump::from(after_move)->next_instruction_address();
   }
   ShouldNotReachHere();
+  return NULL;
 }
 
 bool NativeJump::is_at(address addr) {
--- a/src/cpu/aarch32/vm/relocInfo_aarch32.cpp	Thu Mar 03 15:08:19 2016 +0000
+++ b/src/cpu/aarch32/vm/relocInfo_aarch32.cpp	Thu Mar 03 15:11:46 2016 +0000
@@ -35,7 +35,7 @@
   if (verify_only)
     return;
 
-  int bytes;
+  int bytes = 0;
 
   NativeInstruction *ni = NativeInstruction::from(addr());
   if (ni->is_mov_const_reg()) {
@@ -66,6 +66,8 @@
   }
 
   ShouldNotReachHere();
+
+  return NULL;
 }
 
 void Relocation::pd_set_call_destination(address x) {
@@ -85,10 +87,12 @@
 
 address* Relocation::pd_address_in_code() {
   ShouldNotCallThis();
+  return NULL;
 }
 
 address Relocation::pd_get_address_from_code() {
   ShouldNotCallThis();
+  return NULL;
 }
 
 void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
@@ -97,9 +101,6 @@
     address old_addr = old_addr_for(addr(), src, dest);
     NativeMovConstReg *nm2 = NativeMovConstReg::from(old_addr);
     NativeMovConstReg::from(addr())->set_data(nm2->data());
-  } else {
-#if 0
-#endif
   }
 }
 
@@ -109,9 +110,6 @@
     address old_addr = old_addr_for(addr(), src, dest);
     NativeMovConstReg *nm2 = NativeMovConstReg::from(old_addr);
     NativeMovConstReg::from(addr())->set_data(nm2->data());
-  } else {
-#if 0
-#endif
   }
 }
 
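The int bytes = 0; change in the first hunk of this file addresses the remaining warning class in the backport: bytes is assigned only inside the instruction-kind branches that follow its declaration, so GCC's -Wmaybe-uninitialized can flag a later read of it. Zero-initializing at the declaration is the usual minimal cure; a stand-alone sketch of the same pattern, with hypothetical names:

    // Illustrative only: initialize at declaration so no path can read the
    // variable before it has been assigned.
    static int patched_bytes_sketch(bool is_mov_const_reg) {
      int bytes = 0;            // was: int bytes;  (can trip -Wmaybe-uninitialized)
      if (is_mov_const_reg) {
        bytes = 2 * 4;          // e.g. a movw/movt pair was patched
      }
      return bytes;
    }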
--- a/src/cpu/aarch32/vm/sharedRuntime_aarch32.cpp	Thu Mar 03 15:08:19 2016 +0000
+++ b/src/cpu/aarch32/vm/sharedRuntime_aarch32.cpp	Thu Mar 03 15:11:46 2016 +0000
@@ -1797,41 +1797,10 @@
   if (method->is_synchronized()) {
     assert(!is_critical_native, "unhandled");
     // TODO Fast path disabled as requires at least 4 registers, which already contain arguments prepared for call
-#if 0
-    const Register swap_reg = r0;
-    const Register obj_reg  = r1;  // Will contain the oop
-    const Register lock_reg = r2;  // Address of compiler lock object (BasicLock)
-
-    const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
-#endif
+
     // Get the handle (the 2nd argument)
     __ mov(oop_handle_reg, c_rarg1);
-#if 0
-    // Get address of the box
-
-    __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
-
-    // Load the oop from the handle
-    __ ldr(obj_reg, Address(oop_handle_reg, 0));
-
-    if (UseBiasedLocking) {
-      __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch2, false, lock_done, &slow_path_lock);
-    }
-
-    // Load (object->mark() | 1) into swap_reg %r0
-    __ ldr(swap_reg, Address(obj_reg, 0));
-    __ orr(swap_reg, swap_reg, 1);
-
-    // Save (object->mark() | 1) into BasicLock's displaced header
-    __ str(swap_reg, Address(lock_reg, mark_word_offset));
-
-    // src -> dest iff dest == r0 else r0 <- dest
-    { Label here;
-      __ cmpxchgptr(swap_reg, lock_reg, obj_reg, rscratch1, lock_done, &slow_path_lock);
-    }
-#else
     __ b(slow_path_lock);
-#endif
 
     // Slow path will re-enter here
     __ bind(lock_done);
@@ -1963,33 +1932,7 @@
   Label slow_path_unlock;
   if (method->is_synchronized()) {
     // TODO fast path disabled as requires at least 4 registers, but r0,r1 contains result
-#if 0
-    const Register obj_reg  = r2;  // Will contain the oop
-    const Register lock_reg = rscratch1; // Address of compiler lock object (BasicLock)
-    const Register old_hdr  = r3;  // value of old header at unlock time
-
-    // Get locked oop from the handle we passed to jni
-    __ ldr(obj_reg, Address(oop_handle_reg, 0));
-
-    if (UseBiasedLocking) {
-      __ biased_locking_exit(obj_reg, old_hdr, unlock_done);
-    }
-
-    // Simple recursive lock?
-    // get address of the stack lock
-    __ lea(lock_reg, Address(sp, lock_slot_offset * VMRegImpl::stack_slot_size));
-
-    //  get old displaced header
-    __ ldr(old_hdr, Address(lock_reg, 0));
-    __ cbz(old_hdr, unlock_done);
-
-    // Atomic swap old header if oop still contains the stack lock
-    Label succeed;
-    __ cmpxchgptr(lock_reg, old_hdr, obj_reg, rscratch1, succeed, &slow_path_unlock);
-    __ bind(succeed);
-#else
     __ b(slow_path_unlock);
-#endif
 
     // slow path re-enters here
     __ bind(unlock_done);