changeset 2595:3eec55c8d534

Make changes to the BSD-specific files equivalent to the changes to the Linux-specific files.
author Greg Lewis <glewis@eyesbeyond.com>
date Fri, 13 May 2011 22:29:56 -0700
parents bcd9df050b17
children d5248acd8b04
files make/bsd/makefiles/gcc.make make/bsd/makefiles/vm.make src/os/bsd/vm/globals_bsd.hpp src/os/bsd/vm/os_bsd.cpp src/os_cpu/bsd_x86/vm/orderAccess_bsd_x86.inline.hpp
diffstat 5 files changed, 20 insertions(+), 20 deletions(-)
--- a/make/bsd/makefiles/gcc.make	Thu May 12 20:54:46 2011 -0700
+++ b/make/bsd/makefiles/gcc.make	Fri May 13 22:29:56 2011 -0700
@@ -215,7 +215,7 @@
   SHARED_FLAG = -dynamiclib $(VM_PICFLAG)
 
   # Keep symbols even if they are not used
-  #AOUT_FLAGS += -export-dynamic
+  #AOUT_FLAGS += -Xlinker -export-dynamic
 else
   # Enable linker optimization
   LFLAGS += -Xlinker -O1
@@ -227,7 +227,7 @@
   SHARED_FLAG = -shared $(VM_PICFLAG)
 
   # Keep symbols even if they are not used
-  AOUT_FLAGS += -export-dynamic
+  AOUT_FLAGS += -Xlinker -export-dynamic
 endif
 
 #------------------------------------------------------------------------
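The switch from "-export-dynamic" to "-Xlinker -export-dynamic" passes the option through the gcc driver to the linker, which keeps the executable's own symbols in the dynamic symbol table even when nothing references them. A minimal standalone sketch of why that matters, assuming a hypothetical demo_hook symbol and a build line that are not part of this changeset:

    // Build (illustrative): g++ export_demo.cpp -Xlinker -export-dynamic   (add -ldl on Linux)
    #include <dlfcn.h>
    #include <cstdio>

    extern "C" void demo_hook() {              // defined only in the executable
      std::puts("demo_hook called");
    }

    int main() {
      void* self = dlopen(nullptr, RTLD_NOW);  // handle for the main program itself
      if (self == nullptr) { std::fprintf(stderr, "%s\n", dlerror()); return 1; }

      typedef void (*hook_fn)();
      hook_fn fn = (hook_fn) dlsym(self, "demo_hook");
      if (fn != nullptr) fn();                 // found only if the symbol was exported
      else               std::puts("demo_hook not exported");

      dlclose(self);
      return 0;
    }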
--- a/make/bsd/makefiles/vm.make	Thu May 12 20:54:46 2011 -0700
+++ b/make/bsd/makefiles/vm.make	Fri May 13 22:29:56 2011 -0700
@@ -106,6 +106,10 @@
 CFLAGS += $(EXTRA_CFLAGS)
 LFLAGS += $(EXTRA_CFLAGS)
 
+# Don't set executable bit on stack segment
+# the same could be done with a separate execstack command
+LFLAGS += -Xlinker -z -Xlinker noexecstack
+
 LIBS += -lm -pthread
 
 # By default, link the *.o into the library, not the executable.
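The added "-Xlinker -z -Xlinker noexecstack" line asks the linker to emit a non-executable PT_GNU_STACK program header for the built object. A hedged sketch that inspects that header; it assumes a 64-bit little-endian ELF file, and the default path "libjvm.so" is only an illustrative stand-in:

    #include <elf.h>
    #include <cstdio>
    #include <vector>

    int main(int argc, char** argv) {
      const char* path = (argc > 1) ? argv[1] : "libjvm.so";   // illustrative default
      FILE* f = std::fopen(path, "rb");
      if (f == nullptr) { std::perror("fopen"); return 1; }

      Elf64_Ehdr eh;
      if (std::fread(&eh, sizeof(eh), 1, f) != 1) { std::fclose(f); return 1; }

      std::vector<Elf64_Phdr> phdrs(eh.e_phnum);
      std::fseek(f, (long) eh.e_phoff, SEEK_SET);
      size_t nread = std::fread(phdrs.data(), sizeof(Elf64_Phdr), eh.e_phnum, f);
      std::fclose(f);
      if (nread != eh.e_phnum) return 1;

      for (const Elf64_Phdr& p : phdrs) {
        if (p.p_type == PT_GNU_STACK) {
          std::printf("GNU_STACK is %sexecutable\n", (p.p_flags & PF_X) ? "" : "not ");
          return 0;
        }
      }
      std::puts("no PT_GNU_STACK header found");
      return 0;
    }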
--- a/src/os/bsd/vm/globals_bsd.hpp	Thu May 12 20:54:46 2011 -0700
+++ b/src/os/bsd/vm/globals_bsd.hpp	Fri May 13 22:29:56 2011 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
--- a/src/os/bsd/vm/os_bsd.cpp	Thu May 12 20:54:46 2011 -0700
+++ b/src/os/bsd/vm/os_bsd.cpp	Fri May 13 22:29:56 2011 -0700
@@ -3084,19 +3084,22 @@
 
 static size_t _large_page_size = 0;
 
-bool os::large_page_init() {
-#ifdef _ALLBSD_SOURCE
-  return false;
-#else
+void os::large_page_init() {
+#ifndef _ALLBSD_SOURCE
   if (!UseLargePages) {
     UseHugeTLBFS = false;
     UseSHM = false;
-    return false;
+    return;
   }
 
   if (FLAG_IS_DEFAULT(UseHugeTLBFS) && FLAG_IS_DEFAULT(UseSHM)) {
-    // Our user has not expressed a preference, so we'll try both.
-    UseHugeTLBFS = UseSHM = true;
+    // If UseLargePages is specified on the command line try both methods,
+    // if it's default, then try only HugeTLBFS.
+    if (FLAG_IS_DEFAULT(UseLargePages)) {
+      UseHugeTLBFS = true;
+    } else {
+      UseHugeTLBFS = UseSHM = true;
+    }
   }
 
   if (LargePageSizeInBytes) {
@@ -3151,7 +3154,6 @@
     _page_sizes[1] = default_page_size;
     _page_sizes[2] = 0;
   }
-
   UseHugeTLBFS = UseHugeTLBFS &&
                  Bsd::hugetlbfs_sanity_check(warn_on_failure, _large_page_size);
 
@@ -3161,12 +3163,6 @@
   UseLargePages = UseHugeTLBFS || UseSHM;
 
   set_coredump_filter();
-
-  // Large page support is available on 2.6 or newer kernel, some vendors
-  // (e.g. Redhat) have backported it to their 2.4 based distributions.
-  // We optimistically assume the support is available. If later it turns out
-  // not true, VM will automatically switch to use regular page size.
-  return true;
 #endif
 }
 
@@ -4402,7 +4398,7 @@
 #endif
   }
 
-  FLAG_SET_DEFAULT(UseLargePages, os::large_page_init());
+  os::large_page_init();
 
   // initialize suspend/resume support - must do this before signal_sets_init()
   if (SR_initialize() != 0) {
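With this change os::large_page_init() no longer returns a value for FLAG_SET_DEFAULT; it adjusts the flags itself, and when UseLargePages, UseHugeTLBFS and UseSHM are all left at their defaults only HugeTLBFS is attempted, while an explicit UseLargePages request tries both HugeTLBFS and SysV shared memory. A simplified standalone sketch of that selection logic, with plain bools standing in for the VM flags (the names below are illustrative, not HotSpot's):

    #include <cstdio>

    struct LargePageFlags {
      bool use_large_pages_is_default;  // stands in for FLAG_IS_DEFAULT(UseLargePages)
      bool use_hugetlbfs;               // stands in for UseHugeTLBFS
      bool use_shm;                     // stands in for UseSHM
    };

    static void choose_backends(LargePageFlags& f) {
      if (f.use_large_pages_is_default) {
        // UseLargePages not given on the command line: try only HugeTLBFS.
        f.use_hugetlbfs = true;
      } else {
        // UseLargePages requested explicitly: try both HugeTLBFS and SysV SHM.
        f.use_hugetlbfs = f.use_shm = true;
      }
    }

    int main() {
      LargePageFlags by_default = { true,  false, false };
      LargePageFlags requested  = { false, false, false };
      choose_backends(by_default);
      choose_backends(requested);
      std::printf("default:   hugetlbfs=%d shm=%d\n", by_default.use_hugetlbfs, by_default.use_shm);
      std::printf("requested: hugetlbfs=%d shm=%d\n", requested.use_hugetlbfs, requested.use_shm);
      return 0;
    }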
--- a/src/os_cpu/bsd_x86/vm/orderAccess_bsd_x86.inline.hpp	Thu May 12 20:54:46 2011 -0700
+++ b/src/os_cpu/bsd_x86/vm/orderAccess_bsd_x86.inline.hpp	Fri May 13 22:29:56 2011 -0700
@@ -93,7 +93,7 @@
 
 inline void     OrderAccess::store_fence(jbyte*  p, jbyte  v) {
   __asm__ volatile (  "xchgb (%2),%0"
-                    : "=r" (v)
+                    : "=q" (v)
                     : "0" (v), "r" (p)
                     : "memory");
 }
@@ -155,7 +155,7 @@
 // Must duplicate definitions instead of calling store_fence because we don't want to cast away volatile.
 inline void     OrderAccess::release_store_fence(volatile jbyte*  p, jbyte  v) {
   __asm__ volatile (  "xchgb (%2),%0"
-                    : "=r" (v)
+                    : "=q" (v)
                     : "0" (v), "r" (p)
                     : "memory");
 }
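The constraint change from "=r" to "=q" matters on 32-bit x86: xchgb needs a byte-addressable register (al, bl, cl or dl), and "q" restricts the compiler to exactly those, whereas "r" could hand it esi or edi, which have no 8-bit subregister in 32-bit mode. A minimal standalone sketch of the corrected inline assembly, outside HotSpot and for illustration only:

    #include <cstdint>

    // Byte store that also acts as a full fence: an xchg with a memory
    // operand is implicitly locked and serializing on x86.
    inline void byte_store_fence(volatile int8_t* p, int8_t v) {
      __asm__ volatile ("xchgb (%2),%0"
                        : "=q" (v)          // byte-addressable register required for xchgb
                        : "0" (v), "r" (p)
                        : "memory");
    }

    int main() {
      volatile int8_t flag = 0;
      byte_store_fence(&flag, 1);
      return (flag == 1) ? 0 : 1;
    }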