annotate src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp @ 1999:4fc084dac61e

7009756: volatile variables could be broken through reflection API Summary: Use Atomic::load() and Atomic::store() to access a volatile long. Reviewed-by: iveresov, jrose, dholmes, never
author kvn
date Fri, 07 Jan 2011 10:16:57 -0800
parents f95d63e2154a
children da880ba4edf9
rev   line source
duke@0 1 /*
kvn@1999 2 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
duke@0 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@0 4 *
duke@0 5 * This code is free software; you can redistribute it and/or modify it
duke@0 6 * under the terms of the GNU General Public License version 2 only, as
duke@0 7 * published by the Free Software Foundation.
duke@0 8 *
duke@0 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@0 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@0 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@0 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@0 13 * accompanied this code).
duke@0 14 *
duke@0 15 * You should have received a copy of the GNU General Public License version
duke@0 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@0 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@0 18 *
trims@1472 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1472 20 * or visit www.oracle.com if you need additional information or have any
trims@1472 21 * questions.
duke@0 22 *
duke@0 23 */
duke@0 24
stefank@1879 25 #ifndef OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
stefank@1879 26 #define OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
stefank@1879 27
kvn@1999 28 #include "runtime/atomic.hpp"
stefank@1879 29 #include "runtime/orderAccess.hpp"
stefank@1879 30 #include "vm_version_x86.hpp"
stefank@1879 31
duke@0 32 // Implementation of class OrderAccess.
duke@0 33
// Barrier mapping for x86 (TSO): the hardware never reorders load-load,
// load-store or store-store, so those three only need a compiler barrier
// (acquire()/release() below are compiler-only).  Store-load reordering IS
// possible on x86, so storeload() requires a real fence instruction.
inline void OrderAccess::loadload()   { acquire(); }
inline void OrderAccess::storestore() { release(); }
inline void OrderAccess::loadstore()  { acquire(); }
inline void OrderAccess::storeload()  { fence(); }
duke@0 38
// Compiler-only acquire barrier: x86 loads already have acquire semantics,
// so no fence instruction is needed.  The dummy volatile load from the
// stack pointer plus the "memory" clobber prevents the compiler from
// moving subsequent memory accesses above this point.
inline void OrderAccess::acquire() {
  volatile intptr_t local_dummy;
#ifdef AMD64
  __asm__ volatile ("movq 0(%%rsp), %0" : "=r" (local_dummy) : : "memory");
#else
  __asm__ volatile ("movl 0(%%esp),%0" : "=r" (local_dummy) : : "memory");
#endif // AMD64
}
duke@0 47
// Compiler-only release barrier: x86 stores already have release semantics.
// A volatile store to a stack-local cannot be elided or reordered with other
// volatile accesses by the compiler; using a per-call local (rather than a
// shared global) avoids hitting the same cache-line from different threads.
inline void OrderAccess::release() {
  // Avoid hitting the same cache-line from
  // different threads.
  volatile jint local_dummy = 0;
}
duke@0 53
// Full (store-load) memory barrier.  Only needed on multiprocessor systems;
// on UP the compiler "memory" barriers above are sufficient.
inline void OrderAccess::fence() {
  if (os::is_MP()) {
    // always use locked addl since mfence is sometimes expensive
#ifdef AMD64
    // A LOCK-prefixed RMW on the top-of-stack word serializes all prior
    // loads/stores without touching useful data; "cc" because addl sets flags.
    __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
    __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif
  }
}
duke@0 64
// Loads with acquire semantics.  On x86 a plain volatile load suffices
// (TSO: loads are not reordered with later loads/stores).  The 64-bit
// jlong/julong variants must go through Atomic::load() because a plain
// 64-bit load is not atomic on 32-bit x86 (fix for 7009756).
inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { return *p; }
inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { return *p; }
inline jint     OrderAccess::load_acquire(volatile jint*    p) { return *p; }
inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return Atomic::load(p); }
inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { return *p; }
inline jushort  OrderAccess::load_acquire(volatile jushort* p) { return *p; }
inline juint    OrderAccess::load_acquire(volatile juint*   p) { return *p; }
inline julong   OrderAccess::load_acquire(volatile julong*  p) { return Atomic::load((volatile jlong*)p); }
inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { return *p; }
inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { return *p; }
duke@0 75
// Pointer-sized acquire loads; a naturally-aligned pointer load is a single
// machine load on both IA32 and AMD64, so no Atomic:: indirection is needed.
inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t*   p) { return *p; }
inline void*    OrderAccess::load_ptr_acquire(volatile void*       p) { return *(void* volatile *)p; }
inline void*    OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; }
duke@0 79
// Stores with release semantics.  On x86 a plain volatile store suffices
// (TSO: stores are not reordered with earlier loads/stores).  The 64-bit
// jlong/julong variants must go through Atomic::store() because a plain
// 64-bit store is not atomic on 32-bit x86 (fix for 7009756).
inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { *p = v; }
inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { *p = v; }
inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { Atomic::store(v, p); }
inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { *p = v; }
inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { Atomic::store((jlong)v, (volatile jlong*)p); }
inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; }
duke@0 90
// Pointer-sized release stores; a naturally-aligned pointer store is a
// single machine store on both IA32 and AMD64.
inline void     OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; }
inline void     OrderAccess::release_store_ptr(volatile void*     p, void*    v) { *(void* volatile *)p = v; }
duke@0 93
// Store followed by a full fence.  xchg with a memory operand carries an
// implicit LOCK prefix, so one instruction both stores v and acts as a
// full memory barrier.
inline void     OrderAccess::store_fence(jbyte* p, jbyte v) {
  __asm__ volatile (  "xchgb (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
// 16-bit store + full fence via implicitly-locked xchgw (see jbyte variant).
inline void     OrderAccess::store_fence(jshort* p, jshort v) {
  __asm__ volatile (  "xchgw (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
// 32-bit store + full fence via implicitly-locked xchgl (see jbyte variant).
inline void     OrderAccess::store_fence(jint* p, jint v) {
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
duke@0 112
duke@0 113 inline void OrderAccess::store_fence(jlong* p, jlong v) {
duke@0 114 #ifdef AMD64
duke@0 115 __asm__ __volatile__ ("xchgq (%2), %0"
duke@0 116 : "=r" (v)
duke@0 117 : "0" (v), "r" (p)
duke@0 118 : "memory");
duke@0 119 #else
duke@0 120 *p = v; fence();
duke@0 121 #endif // AMD64
duke@0 122 }
duke@0 123
// AMD64 copied the bodies for the signed version. 32bit did this. As long as the
// compiler does the inlining this is simpler.
// Unsigned variants forward to the signed implementation of the same width;
// the float variants use a plain store followed by an explicit fence.
inline void     OrderAccess::store_fence(jubyte*  p, jubyte  v) { store_fence((jbyte*)p,  (jbyte)v);  }
inline void     OrderAccess::store_fence(jushort* p, jushort v) { store_fence((jshort*)p, (jshort)v); }
inline void     OrderAccess::store_fence(juint*   p, juint   v) { store_fence((jint*)p,   (jint)v);   }
inline void     OrderAccess::store_fence(julong*  p, julong  v) { store_fence((jlong*)p,  (jlong)v);  }
inline void     OrderAccess::store_fence(jfloat*  p, jfloat  v) { *p = v; fence(); }
inline void     OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; fence(); }
duke@0 132
// Pointer-sized store + full fence.  AMD64 uses an implicitly-locked xchgq;
// on 32-bit x86 a pointer is 32 bits, so the jint store_fence is reused.
inline void     OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  store_fence((jint*)p, (jint)v);
#endif // AMD64
}
duke@0 143
// void* overload of store_ptr_fence; identical mechanism to the intptr_t
// variant above (locked xchgq on AMD64, 32-bit store_fence on IA32).
inline void     OrderAccess::store_ptr_fence(void** p, void* v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  store_fence((jint*)p, (jint)v);
#endif // AMD64
}
duke@0 154
// Must duplicate definitions instead of calling store_fence because we don't want to cast away volatile.
// As with store_fence, the implicitly-locked xchg gives store + full fence
// in a single instruction.
inline void     OrderAccess::release_store_fence(volatile jbyte* p, jbyte v) {
  __asm__ volatile (  "xchgb (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
// 16-bit release-store + fence via implicitly-locked xchgw.
inline void     OrderAccess::release_store_fence(volatile jshort* p, jshort v) {
  __asm__ volatile (  "xchgw (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
// 32-bit release-store + fence via implicitly-locked xchgl.
inline void     OrderAccess::release_store_fence(volatile jint* p, jint v) {
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
duke@0 174
// 64-bit release-store + fence.  AMD64: single implicitly-locked xchgq.
inline void     OrderAccess::release_store_fence(volatile jlong* p, jlong v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                          : "=r" (v)
                          : "0" (v), "r" (p)
                          : "memory");
#else
  // 32-bit x86: a plain 64-bit store is not atomic, so go through
  // release_store(), which uses Atomic::store() (fix for 7009756), then
  // complete the barrier with an explicit fence.
  release_store(p, v); fence();
#endif // AMD64
}
duke@0 185
// Unsigned variants forward to the signed implementation of the same width.
inline void     OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { release_store_fence((volatile jbyte*)p,  (jbyte)v);  }
inline void     OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store_fence((volatile jshort*)p, (jshort)v); }
inline void     OrderAccess::release_store_fence(volatile juint*   p, juint   v) { release_store_fence((volatile jint*)p,   (jint)v);   }
inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { release_store_fence((volatile jlong*)p,  (jlong)v);  }
duke@0 190
// Float variants: plain volatile store then an explicit fence.
// NOTE(review): the jdouble path relies on the compiler emitting a single
// 64-bit store on IA32 -- unlike jlong it does not use Atomic::store();
// presumably aligned double stores are atomic here, but verify (cf. 7009756).
inline void     OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { *p = v; fence(); }
inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { *p = v; fence(); }
duke@0 193
// Pointer-sized release-store + fence: implicitly-locked xchgq on AMD64;
// on 32-bit x86 a pointer is 32 bits, so the jint variant is reused.
inline void     OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                          : "=r" (v)
                          : "0" (v), "r" (p)
                          : "memory");
#else
  release_store_fence((volatile jint*)p, (jint)v);
#endif // AMD64
}
// void* overload of release_store_ptr_fence; identical mechanism to the
// intptr_t variant above.
inline void     OrderAccess::release_store_ptr_fence(volatile void* p, void* v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                          : "=r" (v)
                          : "0" (v), "r" (p)
                          : "memory");
#else
  release_store_fence((volatile jint*)p, (jint)v);
#endif // AMD64
}
stefank@1879 214
stefank@1879 215 #endif // OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP