changeset 7908:38f9f453eb12

Somehow I managed to apply the ppc64 diffs and end up with double the content in all these files. Fix that by removing the second, duplicated copy of the content. Pointed out by: Curtis Hamilton <hamiltcl@verizon.net>
author Greg Lewis <glewis@eyesbeyond.com>
date Fri, 17 Jun 2016 22:08:33 -0700
parents 7d6b5e9516c9
children dd5bc996e115
files make/bsd/makefiles/ppc64.make make/bsd/platform_ppc64 src/os_cpu/bsd_ppc/vm/atomic_bsd_ppc.inline.hpp src/os_cpu/bsd_ppc/vm/bytes_bsd_ppc.inline.hpp src/os_cpu/bsd_ppc/vm/globals_bsd_ppc.hpp src/os_cpu/bsd_ppc/vm/orderAccess_bsd_ppc.inline.hpp src/os_cpu/bsd_ppc/vm/os_bsd_ppc.cpp src/os_cpu/bsd_ppc/vm/os_bsd_ppc.hpp src/os_cpu/bsd_ppc/vm/prefetch_bsd_ppc.inline.hpp src/os_cpu/bsd_ppc/vm/threadLS_bsd_ppc.cpp src/os_cpu/bsd_ppc/vm/threadLS_bsd_ppc.hpp src/os_cpu/bsd_ppc/vm/thread_bsd_ppc.cpp src/os_cpu/bsd_ppc/vm/thread_bsd_ppc.hpp src/os_cpu/bsd_ppc/vm/vmStructs_bsd_ppc.hpp
diffstat 14 files changed, 0 insertions(+), 1710 deletions(-)
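
The fix simply drops the appended second copy from each file. A minimal sketch, assuming an exact byte-for-byte doubling (which may not hold for every file touched here), of how such files could be spotted; the program and its usage are illustrative and not part of the repository:

#include <algorithm>
#include <cstddef>
#include <fstream>
#include <iostream>
#include <iterator>
#include <string>
#include <vector>

// Report whether a file's second half is an exact copy of its first half,
// the symptom this changeset fixes.
static bool is_doubled(const std::string& path) {
  std::ifstream in(path, std::ios::binary);
  std::vector<char> data((std::istreambuf_iterator<char>(in)),
                         std::istreambuf_iterator<char>());
  if (data.empty() || data.size() % 2 != 0) return false;
  const std::size_t half = data.size() / 2;
  return std::equal(data.begin(), data.begin() + half, data.begin() + half);
}

int main(int argc, char** argv) {
  for (int i = 1; i < argc; i++) {
    std::cout << argv[i] << (is_doubled(argv[i]) ? ": doubled" : ": ok") << "\n";
  }
  return 0;
}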
line diff
--- a/make/bsd/makefiles/ppc64.make	Fri Jun 17 22:06:31 2016 -0700
+++ b/make/bsd/makefiles/ppc64.make	Fri Jun 17 22:08:33 2016 -0700
@@ -49,54 +49,3 @@
   # Use Power8, this is the first CPU to support PPC64 LE with ELFv2 ABI.
   CFLAGS += -mcpu=power7 -mtune=power8 -minsert-sched-nops=regroup_exact -mno-multiple -mno-string
 endif
-#
-# Copyright (c) 2004, 2013, Oracle and/or its affiliates. All rights reserved.
-# Copyright 2012, 2013 SAP AG. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-#
-
-# make c code know it is on a 64 bit platform.
-CFLAGS += -D_LP64=1
-
-ifeq ($(origin OPENJDK_TARGET_CPU_ENDIAN),undefined)
-  # This can happen during hotspot standalone build. Set endianness from
-  # uname. We assume build and target machines are the same.
-  OPENJDK_TARGET_CPU_ENDIAN:=$(if $(filter ppc64le,$(shell uname -m)),little,big)
-endif
-
-ifeq ($(filter $(OPENJDK_TARGET_CPU_ENDIAN),big little),)
-  $(error OPENJDK_TARGET_CPU_ENDIAN value should be 'big' or 'little')
-endif
-
-ifeq ($(OPENJDK_TARGET_CPU_ENDIAN),big)
-  # fixes `relocation truncated to fit' error for gcc 4.1.
-  CFLAGS += -mminimal-toc
-
-  # finds use ppc64 instructions, but schedule for power5
-  CFLAGS += -mcpu=powerpc64 -mtune=power5 -minsert-sched-nops=regroup_exact -mno-multiple -mno-string
-else
-  # Little endian machine uses ELFv2 ABI.
-  CFLAGS += -DVM_LITTLE_ENDIAN -DABI_ELFv2
-
-  # Use Power8, this is the first CPU to support PPC64 LE with ELFv2 ABI.
-  CFLAGS += -mcpu=power7 -mtune=power8 -minsert-sched-nops=regroup_exact -mno-multiple -mno-string
-endif
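
The endianness choice above only selects compiler flags; a minimal sketch (assumed GCC/Clang, not part of the build) of the matching compile-time check on the C++ side, mirroring the VM_LITTLE_ENDIAN / ABI_ELFv2 decision:

#include <cstdio>

int main() {
  // GCC and Clang predefine __BYTE_ORDER__; big-endian PPC64 takes the
  // ELFv1 path, little-endian PPC64 the ELFv2 path selected by the makefile.
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  std::puts("little endian: VM_LITTLE_ENDIAN and ABI_ELFv2 would be defined");
#else
  std::puts("big endian: -mminimal-toc / ELFv1 path");
#endif
  return 0;
}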
--- a/make/bsd/platform_ppc64	Fri Jun 17 22:06:31 2016 -0700
+++ b/make/bsd/platform_ppc64	Fri Jun 17 22:08:33 2016 -0700
@@ -15,20 +15,3 @@
 gnu_dis_arch = ppc64
 
 sysdefs = -DBSD -D_ALLBSD_SOURCE -D_GNU_SOURCE -DPPC64
-os_family = bsd
-
-arch = ppc
-
-arch_model = ppc_64
-
-os_arch = bsd_ppc
-
-os_arch_model = bsd_ppc_64
-
-lib_arch = ppc64
-
-compiler = gcc
-
-gnu_dis_arch = ppc64
-
-sysdefs = -DBSD -D_ALLBSD_SOURCE -D_GNU_SOURCE -DPPC64
--- a/src/os_cpu/bsd_ppc/vm/atomic_bsd_ppc.inline.hpp	Fri Jun 17 22:06:31 2016 -0700
+++ b/src/os_cpu/bsd_ppc/vm/atomic_bsd_ppc.inline.hpp	Fri Jun 17 22:08:33 2016 -0700
@@ -398,403 +398,3 @@
 #undef strasm_nobarrier_clobber_memory
 
 #endif // OS_CPU_BSD_PPC_VM_ATOMIC_BSD_PPC_INLINE_HPP
-/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_BSD_PPC_VM_ATOMIC_BSD_PPC_INLINE_HPP
-#define OS_CPU_BSD_PPC_VM_ATOMIC_BSD_PPC_INLINE_HPP
-
-#include "runtime/atomic.hpp"
-#include "runtime/os.hpp"
-#include "vm_version_ppc.hpp"
-
-#ifndef PPC64
-#error "Atomic currently only implemented for PPC64"
-#endif
-
-// Implementation of class atomic
-
-inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, jlong*    dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }
-
-inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
-inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
-inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
-inline void Atomic::store    (jlong    store_value, volatile jlong*    dest) { *dest = store_value; }
-inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
-inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
-
-inline jlong Atomic::load(volatile jlong* src) { return *src; }
-
-//
-// machine barrier instructions:
-//
-// - sync            two-way memory barrier, aka fence
-// - lwsync          orders  Store|Store,
-//                            Load|Store,
-//                            Load|Load,
-//                   but not Store|Load
-// - eieio           orders memory accesses for device memory (only)
-// - isync           invalidates speculatively executed instructions
-//                   From the POWER ISA 2.06 documentation:
-//                    "[...] an isync instruction prevents the execution of
-//                   instructions following the isync until instructions
-//                   preceding the isync have completed, [...]"
-//                   From IBM's AIX assembler reference:
-//                    "The isync [...] instructions causes the processor to
-//                   refetch any instructions that might have been fetched
-//                   prior to the isync instruction. The instruction isync
-//                   causes the processor to wait for all previous instructions
-//                   to complete. Then any instructions already fetched are
-//                   discarded and instruction processing continues in the
-//                   environment established by the previous instructions."
-//
-// semantic barrier instructions:
-// (as defined in orderAccess.hpp)
-//
-// - release         orders Store|Store,       (maps to lwsync)
-//                           Load|Store
-// - acquire         orders  Load|Store,       (maps to lwsync)
-//                           Load|Load
-// - fence           orders Store|Store,       (maps to sync)
-//                           Load|Store,
-//                           Load|Load,
-//                          Store|Load
-//
-
-#define strasm_sync                       "\n  sync    \n"
-#define strasm_lwsync                     "\n  lwsync  \n"
-#define strasm_isync                      "\n  isync   \n"
-#define strasm_release                    strasm_lwsync
-#define strasm_acquire                    strasm_lwsync
-#define strasm_fence                      strasm_sync
-#define strasm_nobarrier                  ""
-#define strasm_nobarrier_clobber_memory   ""
-
-inline jint     Atomic::add    (jint     add_value, volatile jint*     dest) {
-
-  unsigned int result;
-
-  __asm__ __volatile__ (
-    strasm_lwsync
-    "1: lwarx   %0,  0, %2    \n"
-    "   add     %0, %0, %1    \n"
-    "   stwcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_isync
-    : /*%0*/"=&r" (result)
-    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
-    : "cc", "memory" );
-
-  return (jint) result;
-}
-
-
-inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
-
-  long result;
-
-  __asm__ __volatile__ (
-    strasm_lwsync
-    "1: ldarx   %0,  0, %2    \n"
-    "   add     %0, %0, %1    \n"
-    "   stdcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_isync
-    : /*%0*/"=&r" (result)
-    : /*%1*/"r" (add_value), /*%2*/"r" (dest)
-    : "cc", "memory" );
-
-  return (intptr_t) result;
-}
-
-inline void*    Atomic::add_ptr(intptr_t add_value, volatile void*     dest) {
-  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
-}
-
-
-inline void Atomic::inc    (volatile jint*     dest) {
-
-  unsigned int temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: lwarx   %0,  0, %2    \n"
-    "   addic   %0, %0,  1    \n"
-    "   stwcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::inc_ptr(volatile intptr_t* dest) {
-
-  long temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: ldarx   %0,  0, %2    \n"
-    "   addic   %0, %0,  1    \n"
-    "   stdcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::inc_ptr(volatile void*     dest) {
-  inc_ptr((volatile intptr_t*)dest);
-}
-
-
-inline void Atomic::dec    (volatile jint*     dest) {
-
-  unsigned int temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: lwarx   %0,  0, %2    \n"
-    "   addic   %0, %0, -1    \n"
-    "   stwcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::dec_ptr(volatile intptr_t* dest) {
-
-  long temp;
-
-  __asm__ __volatile__ (
-    strasm_nobarrier
-    "1: ldarx   %0,  0, %2    \n"
-    "   addic   %0, %0, -1    \n"
-    "   stdcx.  %0,  0, %2    \n"
-    "   bne-    1b            \n"
-    strasm_nobarrier
-    : /*%0*/"=&r" (temp), "=m" (*dest)
-    : /*%2*/"r" (dest), "m" (*dest)
-    : "cc" strasm_nobarrier_clobber_memory);
-
-}
-
-inline void Atomic::dec_ptr(volatile void*     dest) {
-  dec_ptr((volatile intptr_t*)dest);
-}
-
-inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
-
-  // Note that xchg_ptr doesn't necessarily do an acquire
-  // (see synchronizer.cpp).
-
-  unsigned int old_value;
-  const uint64_t zero = 0;
-
-  __asm__ __volatile__ (
-    /* lwsync */
-    strasm_lwsync
-    /* atomic loop */
-    "1:                                                 \n"
-    "   lwarx   %[old_value], %[dest], %[zero]          \n"
-    "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
-    "   bne-    1b                                      \n"
-    /* isync */
-    strasm_sync
-    /* exit */
-    "2:                                                 \n"
-    /* out */
-    : [old_value]       "=&r"   (old_value),
-                        "=m"    (*dest)
-    /* in */
-    : [dest]            "b"     (dest),
-      [zero]            "r"     (zero),
-      [exchange_value]  "r"     (exchange_value),
-                        "m"     (*dest)
-    /* clobber */
-    : "cc",
-      "memory"
-    );
-
-  return (jint) old_value;
-}
-
-inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
-
-  // Note that xchg_ptr doesn't necessarily do an acquire
-  // (see synchronizer.cpp).
-
-  long old_value;
-  const uint64_t zero = 0;
-
-  __asm__ __volatile__ (
-    /* lwsync */
-    strasm_lwsync
-    /* atomic loop */
-    "1:                                                 \n"
-    "   ldarx   %[old_value], %[dest], %[zero]          \n"
-    "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
-    "   bne-    1b                                      \n"
-    /* isync */
-    strasm_sync
-    /* exit */
-    "2:                                                 \n"
-    /* out */
-    : [old_value]       "=&r"   (old_value),
-                        "=m"    (*dest)
-    /* in */
-    : [dest]            "b"     (dest),
-      [zero]            "r"     (zero),
-      [exchange_value]  "r"     (exchange_value),
-                        "m"     (*dest)
-    /* clobber */
-    : "cc",
-      "memory"
-    );
-
-  return (intptr_t) old_value;
-}
-
-inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
-  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
-}
-
-inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) {
-
-  // Note that cmpxchg guarantees a two-way memory barrier across
-  // the cmpxchg, so it's really a a 'fence_cmpxchg_acquire'
-  // (see atomic.hpp).
-
-  unsigned int old_value;
-  const uint64_t zero = 0;
-
-  __asm__ __volatile__ (
-    /* fence */
-    strasm_sync
-    /* simple guard */
-    "   lwz     %[old_value], 0(%[dest])                \n"
-    "   cmpw    %[compare_value], %[old_value]          \n"
-    "   bne-    2f                                      \n"
-    /* atomic loop */
-    "1:                                                 \n"
-    "   lwarx   %[old_value], %[dest], %[zero]          \n"
-    "   cmpw    %[compare_value], %[old_value]          \n"
-    "   bne-    2f                                      \n"
-    "   stwcx.  %[exchange_value], %[dest], %[zero]     \n"
-    "   bne-    1b                                      \n"
-    /* acquire */
-    strasm_sync
-    /* exit */
-    "2:                                                 \n"
-    /* out */
-    : [old_value]       "=&r"   (old_value),
-                        "=m"    (*dest)
-    /* in */
-    : [dest]            "b"     (dest),
-      [zero]            "r"     (zero),
-      [compare_value]   "r"     (compare_value),
-      [exchange_value]  "r"     (exchange_value),
-                        "m"     (*dest)
-    /* clobber */
-    : "cc",
-      "memory"
-    );
-
-  return (jint) old_value;
-}
-
-inline jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value) {
-
-  // Note that cmpxchg guarantees a two-way memory barrier across
-  // the cmpxchg, so it's really a a 'fence_cmpxchg_acquire'
-  // (see atomic.hpp).
-
-  long old_value;
-  const uint64_t zero = 0;
-
-  __asm__ __volatile__ (
-    /* fence */
-    strasm_sync
-    /* simple guard */
-    "   ld      %[old_value], 0(%[dest])                \n"
-    "   cmpd    %[compare_value], %[old_value]          \n"
-    "   bne-    2f                                      \n"
-    /* atomic loop */
-    "1:                                                 \n"
-    "   ldarx   %[old_value], %[dest], %[zero]          \n"
-    "   cmpd    %[compare_value], %[old_value]          \n"
-    "   bne-    2f                                      \n"
-    "   stdcx.  %[exchange_value], %[dest], %[zero]     \n"
-    "   bne-    1b                                      \n"
-    /* acquire */
-    strasm_sync
-    /* exit */
-    "2:                                                 \n"
-    /* out */
-    : [old_value]       "=&r"   (old_value),
-                        "=m"    (*dest)
-    /* in */
-    : [dest]            "b"     (dest),
-      [zero]            "r"     (zero),
-      [compare_value]   "r"     (compare_value),
-      [exchange_value]  "r"     (exchange_value),
-                        "m"     (*dest)
-    /* clobber */
-    : "cc",
-      "memory"
-    );
-
-  return (jlong) old_value;
-}
-
-inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value) {
-  return (intptr_t)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
-}
-
-inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value) {
-  return (void*)cmpxchg((jlong)exchange_value, (volatile jlong*)dest, (jlong)compare_value);
-}
-
-#undef strasm_sync
-#undef strasm_lwsync
-#undef strasm_isync
-#undef strasm_release
-#undef strasm_acquire
-#undef strasm_fence
-#undef strasm_nobarrier
-#undef strasm_nobarrier_clobber_memory
-
-#endif // OS_CPU_BSD_PPC_VM_ATOMIC_BSD_PPC_INLINE_HPP
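
The duplicate removed above contains the lwarx/stwcx. loops; purely as a point of reference, a minimal C++11 sketch (not HotSpot code) of the semantics its comments describe for cmpxchg, i.e. a compare-and-swap that behaves like a full fence followed by an acquire:

#include <atomic>
#include <cstdio>

int main() {
  std::atomic<int> dest(0);
  int expected = 0;   // the compare value; updated to the old value on failure

  // Roughly Atomic::cmpxchg(1, &dest, 0): store 1 only if dest still holds 0.
  // memory_order_seq_cst gives the two-way barrier the removed comments call
  // 'fence_cmpxchg_acquire'.
  bool swapped = dest.compare_exchange_strong(expected, 1,
                                              std::memory_order_seq_cst);
  std::printf("swapped=%d old=%d now=%d\n", (int)swapped, expected, dest.load());
  return 0;
}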
--- a/src/os_cpu/bsd_ppc/vm/bytes_bsd_ppc.inline.hpp	Fri Jun 17 22:06:31 2016 -0700
+++ b/src/os_cpu/bsd_ppc/vm/bytes_bsd_ppc.inline.hpp	Fri Jun 17 22:08:33 2016 -0700
@@ -37,42 +37,3 @@
 #endif // VM_LITTLE_ENDIAN
 
 #endif // OS_CPU_BSD_PPC_VM_BYTES_BSD_PPC_INLINE_HPP
-/*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2014 Google Inc.  All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_BSD_PPC_VM_BYTES_BSD_PPC_INLINE_HPP
-#define OS_CPU_BSD_PPC_VM_BYTES_BSD_PPC_INLINE_HPP
-
-#if defined(VM_LITTLE_ENDIAN)
-#include <byteswap.h>
-
-// Efficient swapping of data bytes from Java byte
-// ordering to native byte ordering and vice versa.
-inline u2 Bytes::swap_u2(u2 x) { return bswap_16(x); }
-inline u4 Bytes::swap_u4(u4 x) { return bswap_32(x); }
-inline u8 Bytes::swap_u8(u8 x) { return bswap_64(x); }
-#endif // VM_LITTLE_ENDIAN
-
-#endif // OS_CPU_BSD_PPC_VM_BYTES_BSD_PPC_INLINE_HPP
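
For comparison only, a minimal sketch (not HotSpot code) of the same Java-to-native byte-order swaps using the GCC/Clang builtins, since <byteswap.h> is a glibc header that the BSDs may not provide:

#include <cstdint>
#include <cstdio>

int main() {
  // Java class-file data is big-endian; on a little-endian host each access
  // needs a swap, which is all Bytes::swap_u2/u4/u8 do in the removed copy.
  std::uint16_t u2 = 0x1234;
  std::uint32_t u4 = 0x12345678u;
  std::uint64_t u8 = 0x1122334455667788ull;
  std::printf("%04x %08x %016llx\n",
              (unsigned)__builtin_bswap16(u2),
              (unsigned)__builtin_bswap32(u4),
              (unsigned long long)__builtin_bswap64(u8));
  return 0;
}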
--- a/src/os_cpu/bsd_ppc/vm/globals_bsd_ppc.hpp	Fri Jun 17 22:06:31 2016 -0700
+++ b/src/os_cpu/bsd_ppc/vm/globals_bsd_ppc.hpp	Fri Jun 17 22:08:33 2016 -0700
@@ -52,57 +52,3 @@
 define_pd_global(bool, UseVectoredExceptions,    false);
 
 #endif // OS_CPU_BSD_PPC_VM_GLOBALS_BSD_PPC_HPP
-/*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_BSD_PPC_VM_GLOBALS_BSD_PPC_HPP
-#define OS_CPU_BSD_PPC_VM_GLOBALS_BSD_PPC_HPP
-
-// Sets the default values for platform dependent flags used by the runtime system.
-// (see globals.hpp)
-
-define_pd_global(bool, DontYieldALot,            false);
-define_pd_global(intx, ThreadStackSize,          2048); // 0 => use system default
-define_pd_global(intx, VMThreadStackSize,        2048);
-
-// if we set CompilerThreadStackSize to a value different than 0, it will
-// be used in os::create_thread(). Otherwise, due the strange logic in os::create_thread(),
-// the stack size for compiler threads will default to VMThreadStackSize, although it
-// is defined to 4M in os::Bsd::default_stack_size()!
-define_pd_global(intx, CompilerThreadStackSize,  4096);
-
-// Allow extra space in DEBUG builds for asserts.
-define_pd_global(uintx,JVMInvokeMethodSlack,     8192);
-
-define_pd_global(intx, StackYellowPages,         6);
-define_pd_global(intx, StackRedPages,            1);
-define_pd_global(intx, StackShadowPages,         6 DEBUG_ONLY(+2));
-
-// Only used on 64 bit platforms
-define_pd_global(uintx,HeapBaseMinAddress,       2*G);
-// Only used on 64 bit Windows platforms
-define_pd_global(bool, UseVectoredExceptions,    false);
-
-#endif // OS_CPU_BSD_PPC_VM_GLOBALS_BSD_PPC_HPP
--- a/src/os_cpu/bsd_ppc/vm/orderAccess_bsd_ppc.inline.hpp	Fri Jun 17 22:06:31 2016 -0700
+++ b/src/os_cpu/bsd_ppc/vm/orderAccess_bsd_ppc.inline.hpp	Fri Jun 17 22:08:33 2016 -0700
@@ -147,152 +147,3 @@
 #undef inlasm_fence
 
 #endif // OS_CPU_BSD_PPC_VM_ORDERACCESS_BSD_PPC_INLINE_HPP
-/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_BSD_PPC_VM_ORDERACCESS_BSD_PPC_INLINE_HPP
-#define OS_CPU_BSD_PPC_VM_ORDERACCESS_BSD_PPC_INLINE_HPP
-
-#include "runtime/orderAccess.hpp"
-#include "vm_version_ppc.hpp"
-
-#ifndef PPC64
-#error "OrderAccess currently only implemented for PPC64"
-#endif
-
-// Implementation of class OrderAccess.
-
-//
-// Machine barrier instructions:
-//
-// - sync            Two-way memory barrier, aka fence.
-// - lwsync          orders  Store|Store,
-//                            Load|Store,
-//                            Load|Load,
-//                   but not Store|Load
-// - eieio           orders  Store|Store
-// - isync           Invalidates speculatively executed instructions,
-//                   but isync may complete before storage accesses
-//                   associated with instructions preceding isync have
-//                   been performed.
-//
-// Semantic barrier instructions:
-// (as defined in orderAccess.hpp)
-//
-// - release         orders Store|Store,       (maps to lwsync)
-//                           Load|Store
-// - acquire         orders  Load|Store,       (maps to lwsync)
-//                           Load|Load
-// - fence           orders Store|Store,       (maps to sync)
-//                           Load|Store,
-//                           Load|Load,
-//                          Store|Load
-//
-
-#define inlasm_sync()     __asm__ __volatile__ ("sync"   : : : "memory");
-#define inlasm_lwsync()   __asm__ __volatile__ ("lwsync" : : : "memory");
-#define inlasm_eieio()    __asm__ __volatile__ ("eieio"  : : : "memory");
-#define inlasm_isync()    __asm__ __volatile__ ("isync"  : : : "memory");
-#define inlasm_release()  inlasm_lwsync();
-#define inlasm_acquire()  inlasm_lwsync();
-// Use twi-isync for load_acquire (faster than lwsync).
-#define inlasm_acquire_reg(X) __asm__ __volatile__ ("twi 0,%0,0\n isync\n" : : "r" (X) : "memory");
-#define inlasm_fence()    inlasm_sync();
-
-inline void     OrderAccess::loadload()   { inlasm_lwsync();  }
-inline void     OrderAccess::storestore() { inlasm_lwsync();  }
-inline void     OrderAccess::loadstore()  { inlasm_lwsync();  }
-inline void     OrderAccess::storeload()  { inlasm_fence();   }
-
-inline void     OrderAccess::acquire()    { inlasm_acquire(); }
-inline void     OrderAccess::release()    { inlasm_release(); }
-inline void     OrderAccess::fence()      { inlasm_fence();   }
-
-inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { register jbyte t = *p;   inlasm_acquire_reg(t); return t; }
-inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { register jshort t = *p;  inlasm_acquire_reg(t); return t; }
-inline jint     OrderAccess::load_acquire(volatile jint*    p) { register jint t = *p;    inlasm_acquire_reg(t); return t; }
-inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { register jlong t = *p;   inlasm_acquire_reg(t); return t; }
-inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { register jubyte t = *p;  inlasm_acquire_reg(t); return t; }
-inline jushort  OrderAccess::load_acquire(volatile jushort* p) { register jushort t = *p; inlasm_acquire_reg(t); return t; }
-inline juint    OrderAccess::load_acquire(volatile juint*   p) { register juint t = *p;   inlasm_acquire_reg(t); return t; }
-inline julong   OrderAccess::load_acquire(volatile julong*  p) { return (julong)load_acquire((volatile jlong*)p); }
-inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { register jfloat t = *p;  inlasm_acquire(); return t; }
-inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { register jdouble t = *p; inlasm_acquire(); return t; }
-
-inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t*   p) { return (intptr_t)load_acquire((volatile jlong*)p); }
-inline void*    OrderAccess::load_ptr_acquire(volatile void*       p) { return (void*)   load_acquire((volatile jlong*)p); }
-inline void*    OrderAccess::load_ptr_acquire(const volatile void* p) { return (void*)   load_acquire((volatile jlong*)p); }
-
-inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { inlasm_release(); *p = v; }
-inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { inlasm_release(); *p = v; }
-inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { inlasm_release(); *p = v; }
-inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { inlasm_release(); *p = v; }
-inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { inlasm_release(); *p = v; }
-inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { inlasm_release(); *p = v; }
-inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { inlasm_release(); *p = v; }
-inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { inlasm_release(); *p = v; }
-inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { inlasm_release(); *p = v; }
-inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { inlasm_release(); *p = v; }
-
-inline void     OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { inlasm_release(); *p = v; }
-inline void     OrderAccess::release_store_ptr(volatile void*     p, void*    v) { inlasm_release(); *(void* volatile *)p = v; }
-
-inline void     OrderAccess::store_fence(jbyte*   p, jbyte   v) { *p = v; inlasm_fence(); }
-inline void     OrderAccess::store_fence(jshort*  p, jshort  v) { *p = v; inlasm_fence(); }
-inline void     OrderAccess::store_fence(jint*    p, jint    v) { *p = v; inlasm_fence(); }
-inline void     OrderAccess::store_fence(jlong*   p, jlong   v) { *p = v; inlasm_fence(); }
-inline void     OrderAccess::store_fence(jubyte*  p, jubyte  v) { *p = v; inlasm_fence(); }
-inline void     OrderAccess::store_fence(jushort* p, jushort v) { *p = v; inlasm_fence(); }
-inline void     OrderAccess::store_fence(juint*   p, juint   v) { *p = v; inlasm_fence(); }
-inline void     OrderAccess::store_fence(julong*  p, julong  v) { *p = v; inlasm_fence(); }
-inline void     OrderAccess::store_fence(jfloat*  p, jfloat  v) { *p = v; inlasm_fence(); }
-inline void     OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; inlasm_fence(); }
-
-inline void     OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) { *p = v; inlasm_fence(); }
-inline void     OrderAccess::store_ptr_fence(void**    p, void*    v) { *p = v; inlasm_fence(); }
-
-inline void     OrderAccess::release_store_fence(volatile jbyte*   p, jbyte   v) { inlasm_release(); *p = v; inlasm_fence(); }
-inline void     OrderAccess::release_store_fence(volatile jshort*  p, jshort  v) { inlasm_release(); *p = v; inlasm_fence(); }
-inline void     OrderAccess::release_store_fence(volatile jint*    p, jint    v) { inlasm_release(); *p = v; inlasm_fence(); }
-inline void     OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) { inlasm_release(); *p = v; inlasm_fence(); }
-inline void     OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { inlasm_release(); *p = v; inlasm_fence(); }
-inline void     OrderAccess::release_store_fence(volatile jushort* p, jushort v) { inlasm_release(); *p = v; inlasm_fence(); }
-inline void     OrderAccess::release_store_fence(volatile juint*   p, juint   v) { inlasm_release(); *p = v; inlasm_fence(); }
-inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { inlasm_release(); *p = v; inlasm_fence(); }
-inline void     OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { inlasm_release(); *p = v; inlasm_fence(); }
-inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { inlasm_release(); *p = v; inlasm_fence(); }
-
-inline void     OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) { inlasm_release(); *p = v; inlasm_fence(); }
-inline void     OrderAccess::release_store_ptr_fence(volatile void*     p, void*    v) { inlasm_release(); *(void* volatile *)p = v; inlasm_fence(); }
-
-#undef inlasm_sync
-#undef inlasm_lwsync
-#undef inlasm_eieio
-#undef inlasm_isync
-#undef inlasm_release
-#undef inlasm_acquire
-#undef inlasm_fence
-
-#endif // OS_CPU_BSD_PPC_VM_ORDERACCESS_BSD_PPC_INLINE_HPP
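
Again for reference only, a minimal C++11 sketch (not HotSpot code) of the release_store / load_acquire pairing that the removed duplicate maps onto lwsync:

#include <atomic>
#include <cassert>
#include <thread>

std::atomic<int>  data(0);
std::atomic<bool> ready(false);

int main() {
  std::thread producer([] {
    data.store(42, std::memory_order_relaxed);
    ready.store(true, std::memory_order_release);      // release_store
  });
  std::thread consumer([] {
    while (!ready.load(std::memory_order_acquire)) {}  // load_acquire
    // The acquire/release pair orders the stores, so 42 is visible here.
    assert(data.load(std::memory_order_relaxed) == 42);
  });
  producer.join();
  consumer.join();
  return 0;
}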
--- a/src/os_cpu/bsd_ppc/vm/os_bsd_ppc.cpp	Fri Jun 17 22:06:31 2016 -0700
+++ b/src/os_cpu/bsd_ppc/vm/os_bsd_ppc.cpp	Fri Jun 17 22:08:33 2016 -0700
@@ -664,669 +664,3 @@
   assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
 }
 #endif
-/*
- * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-// no precompiled headers
-#include "assembler_ppc.inline.hpp"
-#include "classfile/classLoader.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "classfile/vmSymbols.hpp"
-#include "code/icBuffer.hpp"
-#include "code/vtableStubs.hpp"
-#include "interpreter/interpreter.hpp"
-#include "jvm_bsd.h"
-#include "memory/allocation.inline.hpp"
-#include "mutex_bsd.inline.hpp"
-#include "nativeInst_ppc.hpp"
-#include "os_share_bsd.hpp"
-#include "prims/jniFastGetField.hpp"
-#include "prims/jvm.h"
-#include "prims/jvm_misc.hpp"
-#include "runtime/arguments.hpp"
-#include "runtime/extendedPC.hpp"
-#include "runtime/frame.inline.hpp"
-#include "runtime/interfaceSupport.hpp"
-#include "runtime/java.hpp"
-#include "runtime/javaCalls.hpp"
-#include "runtime/mutexLocker.hpp"
-#include "runtime/osThread.hpp"
-#include "runtime/sharedRuntime.hpp"
-#include "runtime/stubRoutines.hpp"
-#include "runtime/thread.inline.hpp"
-#include "runtime/timer.hpp"
-#include "utilities/events.hpp"
-#include "utilities/vmError.hpp"
-#ifdef COMPILER1
-#include "c1/c1_Runtime1.hpp"
-#endif
-#ifdef COMPILER2
-#include "opto/runtime.hpp"
-#endif
-
-// put OS-includes here
-# include <sys/types.h>
-# include <sys/mman.h>
-# include <pthread.h>
-# include <signal.h>
-# include <errno.h>
-# include <dlfcn.h>
-# include <stdlib.h>
-# include <stdio.h>
-# include <unistd.h>
-# include <sys/resource.h>
-# include <pthread_np.h>
-# include <sys/stat.h>
-# include <sys/time.h>
-# include <sys/utsname.h>
-# include <sys/socket.h>
-# include <sys/wait.h>
-# include <pwd.h>
-# include <poll.h>
-# include <ucontext.h>
-
-
-address os::current_stack_pointer() {
-  intptr_t* csp;
-
-  // inline assembly `mr regno(csp), R1_SP':
-  __asm__ __volatile__ ("mr %0, 1":"=r"(csp):);
-
-  return (address) csp;
-}
-
-char* os::non_memory_address_word() {
-  // Must never look like an address returned by reserve_memory,
-  // even in its subfields (as defined by the CPU immediate fields,
-  // if the CPU splits constants across multiple instructions).
-
-  return (char*) -1;
-}
-
-void os::initialize_thread(Thread *thread) { }
-
-address os::Bsd::ucontext_get_pc(ucontext_t * uc) {
-  guarantee(uc->uc_mcontext.mc_gpr != NULL, "only use ucontext_get_pc in sigaction context");
-  return (address)uc->uc_mcontext.mc_srr0;
-}
-
-intptr_t* os::Bsd::ucontext_get_sp(ucontext_t * uc) {
-  return (intptr_t*)uc->uc_mcontext.mc_gpr[1/*REG_SP*/];
-}
-
-intptr_t* os::Bsd::ucontext_get_fp(ucontext_t * uc) {
-  return NULL;
-}
-
-ExtendedPC os::fetch_frame_from_context(void* ucVoid,
-                    intptr_t** ret_sp, intptr_t** ret_fp) {
-
-  ExtendedPC  epc;
-  ucontext_t* uc = (ucontext_t*)ucVoid;
-
-  if (uc != NULL) {
-    epc = ExtendedPC(os::Bsd::ucontext_get_pc(uc));
-    if (ret_sp) *ret_sp = os::Bsd::ucontext_get_sp(uc);
-    if (ret_fp) *ret_fp = os::Bsd::ucontext_get_fp(uc);
-  } else {
-    // construct empty ExtendedPC for return value checking
-    epc = ExtendedPC(NULL);
-    if (ret_sp) *ret_sp = (intptr_t *)NULL;
-    if (ret_fp) *ret_fp = (intptr_t *)NULL;
-  }
-
-  return epc;
-}
-
-frame os::fetch_frame_from_context(void* ucVoid) {
-  intptr_t* sp;
-  intptr_t* fp;
-  ExtendedPC epc = fetch_frame_from_context(ucVoid, &sp, &fp);
-  return frame(sp, epc.pc());
-}
-
-frame os::get_sender_for_C_frame(frame* fr) {
-  if (*fr->sp() == 0) {
-    // fr is the last C frame
-    return frame(NULL, NULL);
-  }
-  return frame(fr->sender_sp(), fr->sender_pc());
-}
-
-
-frame os::current_frame() {
-  intptr_t* csp = (intptr_t*) *((intptr_t*) os::current_stack_pointer());
-  // hack.
-  frame topframe(csp, (address)0x8);
-  // return sender of current topframe which hopefully has pc != NULL.
-  return os::get_sender_for_C_frame(&topframe);
-}
-
-// Utility functions
-
-extern "C" JNIEXPORT int
-JVM_handle_bsd_signal(int sig, siginfo_t* info, void* ucVoid, int abort_if_unrecognized) {
-
-  ucontext_t* uc = (ucontext_t*) ucVoid;
-
-  Thread* t = ThreadLocalStorage::get_thread_slow();   // slow & steady
-
-  SignalHandlerMark shm(t);
-
-  // Note: it's not uncommon that JNI code uses signal/sigset to install
-  // then restore certain signal handler (e.g. to temporarily block SIGPIPE,
-  // or have a SIGILL handler when detecting CPU type). When that happens,
-  // JVM_handle_bsd_signal() might be invoked with junk info/ucVoid. To
-  // avoid unnecessary crash when libjsig is not preloaded, try handle signals
-  // that do not require siginfo/ucontext first.
-
-  if (sig == SIGPIPE) {
-    if (os::Bsd::chained_handler(sig, info, ucVoid)) {
-      return 1;
-    } else {
-      if (PrintMiscellaneous && (WizardMode || Verbose)) {
-        warning("Ignoring SIGPIPE - see bug 4229104");
-      }
-      return 1;
-    }
-  }
-
-  JavaThread* thread = NULL;
-  VMThread* vmthread = NULL;
-  if (os::Bsd::signal_handlers_are_installed) {
-    if (t != NULL) {
-      if(t->is_Java_thread()) {
-        thread = (JavaThread*)t;
-      }
-      else if(t->is_VM_thread()) {
-        vmthread = (VMThread *)t;
-      }
-    }
-  }
-
-  // Decide if this trap can be handled by a stub.
-  address stub = NULL;
-
-  // retrieve program counter
-  address const pc = uc ? os::Bsd::ucontext_get_pc(uc) : NULL;
-
-  // retrieve crash address
-  address const addr = info ? (const address) info->si_addr : NULL;
-
-  // SafeFetch 32 handling:
-  // - make it work if _thread is null
-  // - make it use the standard os::...::ucontext_get/set_pc APIs
-  if (uc) {
-    address const pc = os::Bsd::ucontext_get_pc(uc);
-    if (pc && StubRoutines::is_safefetch_fault(pc)) {
-      uc->uc_mcontext.mc_srr0 = (unsigned long)StubRoutines::continuation_for_safefetch_fault(pc);
-      return true;
-    }
-  }
-
-  // Handle SIGDANGER right away. AIX would raise SIGDANGER whenever available swap
-  // space falls below 30%. This is only a chance for the process to gracefully abort.
-  // We can't hope to proceed after SIGDANGER since SIGKILL tailgates.
-  // if (sig == SIGDANGER) {
-  //  goto report_and_die;
-  // }
-
-  if (info == NULL || uc == NULL || thread == NULL && vmthread == NULL) {
-    goto run_chained_handler;
-  }
-
-  // If we are a java thread...
-  if (thread != NULL) {
-
-    // Handle ALL stack overflow variations here
-    if (sig == SIGSEGV && (addr < thread->stack_base() &&
-                           addr >= thread->stack_base() - thread->stack_size())) {
-      // stack overflow
-      //
-      // If we are in a yellow zone and we are inside java, we disable the yellow zone and
-      // throw a stack overflow exception.
-      // If we are in native code or VM C code, we report-and-die. The original coding tried
-      // to continue with yellow zone disabled, but that doesn't buy us much and prevents
-      // hs_err_pid files.
-      if (thread->in_stack_yellow_zone(addr)) {
-        thread->disable_stack_yellow_zone();
-        if (thread->thread_state() == _thread_in_Java) {
-          // Throw a stack overflow exception.
-          // Guard pages will be reenabled while unwinding the stack.
-          stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
-          goto run_stub;
-        } else {
-          // Thread was in the vm or native code. Return and try to finish.
-          return 1;
-        }
-      } else if (thread->in_stack_red_zone(addr)) {
-        // Fatal red zone violation. Disable the guard pages and fall through
-        // to handle_unexpected_exception way down below.
-        thread->disable_stack_red_zone();
-        tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
-        goto report_and_die;
-      } else {
-        // This means a segv happened inside our stack, but not in
-        // the guarded zone. I'd like to know when this happens,
-        tty->print_raw_cr("SIGSEGV happened inside stack but outside yellow and red zone.");
-        goto report_and_die;
-      }
-
-    } // end handle SIGSEGV inside stack boundaries
-
-    if (thread->thread_state() == _thread_in_Java) {
-      // Java thread running in Java code
-
-      // The following signals are used for communicating VM events:
-      //
-      // SIGILL: the compiler generates illegal opcodes
-      //   at places where it wishes to interrupt the VM:
-      //   Safepoints, Unreachable Code, Entry points of Zombie methods,
-      //    This results in a SIGILL with (*pc) == inserted illegal instruction.
-      //
-      //   (so, SIGILLs with a pc inside the zero page are real errors)
-      //
-      // SIGTRAP:
-      //   The ppc trap instruction raises a SIGTRAP and is very efficient if it
-      //   does not trap. It is used for conditional branches that are expected
-      //   to be never taken. These are:
-      //     - zombie methods
-      //     - IC (inline cache) misses.
-      //     - null checks leading to UncommonTraps.
-      //     - range checks leading to Uncommon Traps.
-      //   On Bsd, these are especially null checks, as the ImplicitNullCheck
-      //   optimization works only in rare cases, as the page at address 0 is only
-      //   write protected.      //
-      //   Note: !UseSIGTRAP is used to prevent SIGTRAPS altogether, to facilitate debugging.
-      //
-      // SIGSEGV:
-      //   used for safe point polling:
-      //     To notify all threads that they have to reach a safe point, safe point polling is used:
-      //     All threads poll a certain mapped memory page. Normally, this page has read access.
-      //     If the VM wants to inform the threads about impending safe points, it puts this
-      //     page to read only ("poisens" the page), and the threads then reach a safe point.
-      //   used for null checks:
-      //     If the compiler finds a store it uses it for a null check. Unfortunately this
-      //     happens rarely.  In heap based and disjoint base compressd oop modes also loads
-      //     are used for null checks.
-
-      // A VM-related SIGILL may only occur if we are not in the zero page.
-      // On AIX, we get a SIGILL if we jump to 0x0 or to somewhere else
-      // in the zero page, because it is filled with 0x0. We ignore
-      // explicit SIGILLs in the zero page.
-      if (sig == SIGILL && (pc < (address) 0x200)) {
-        if (TraceTraps) {
-          tty->print_raw_cr("SIGILL happened inside zero page.");
-        }
-        goto report_and_die;
-      }
-
-      // Handle signal from NativeJump::patch_verified_entry().
-      if (( TrapBasedNotEntrantChecks && sig == SIGTRAP && nativeInstruction_at(pc)->is_sigtrap_zombie_not_entrant()) ||
-          (!TrapBasedNotEntrantChecks && sig == SIGILL  && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant())) {
-        if (TraceTraps) {
-          tty->print_cr("trap: zombie_not_entrant (%s)", (sig == SIGTRAP) ? "SIGTRAP" : "SIGILL");
-        }
-        stub = SharedRuntime::get_handle_wrong_method_stub();
-        goto run_stub;
-      }
-
-      else if (sig == SIGSEGV && os::is_poll_address(addr)) {
-        if (TraceTraps) {
-          tty->print_cr("trap: safepoint_poll at " INTPTR_FORMAT " (SIGSEGV)", pc);
-        }
-        stub = SharedRuntime::get_poll_stub(pc);
-        goto run_stub;
-      }
-
-      // SIGTRAP-based ic miss check in compiled code.
-      else if (sig == SIGTRAP && TrapBasedICMissChecks &&
-               nativeInstruction_at(pc)->is_sigtrap_ic_miss_check()) {
-        if (TraceTraps) {
-          tty->print_cr("trap: ic_miss_check at " INTPTR_FORMAT " (SIGTRAP)", pc);
-        }
-        stub = SharedRuntime::get_ic_miss_stub();
-        goto run_stub;
-      }
-
-      // SIGTRAP-based implicit null check in compiled code.
-      else if (sig == SIGTRAP && TrapBasedNullChecks &&
-               nativeInstruction_at(pc)->is_sigtrap_null_check()) {
-        if (TraceTraps) {
-          tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGTRAP)", pc);
-        }
-        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
-        goto run_stub;
-      }
-
-      // SIGSEGV-based implicit null check in compiled code.
-      else if (sig == SIGSEGV && ImplicitNullChecks &&
-               CodeCache::contains((void*) pc) &&
-               !MacroAssembler::needs_explicit_null_check((intptr_t) info->si_addr)) {
-        if (TraceTraps) {
-          tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc);
-        }
-        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
-      }
-
-#ifdef COMPILER2
-      // SIGTRAP-based implicit range check in compiled code.
-      else if (sig == SIGTRAP && TrapBasedRangeChecks &&
-               nativeInstruction_at(pc)->is_sigtrap_range_check()) {
-        if (TraceTraps) {
-          tty->print_cr("trap: range_check at " INTPTR_FORMAT " (SIGTRAP)", pc);
-        }
-        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
-        goto run_stub;
-      }
-#endif
-
-      else if (sig == SIGFPE /* && info->si_code == FPE_INTDIV */) {
-        if (TraceTraps) {
-          tty->print_raw_cr("Fix SIGFPE handler, trying divide by zero handler.");
-        }
-        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
-        goto run_stub;
-      }
-
-      else if (sig == SIGBUS) {
-        // BugId 4454115: A read from a MappedByteBuffer can fault here if the
-        // underlying file has been truncated. Do not crash the VM in such a case.
-        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
-        nmethod* nm = cb->is_nmethod() ? (nmethod*)cb : NULL;
-        if (nm != NULL && nm->has_unsafe_access()) {
-          // We don't really need a stub here! Just set the pending exeption and
-          // continue at the next instruction after the faulting read. Returning
-          // garbage from this read is ok.
-          thread->set_pending_unsafe_access_error();
-          uc->uc_mcontext.mc_srr0 = ((unsigned long)pc) + 4;
-          return 1;
-        }
-      }
-    }
-
-    else { // thread->thread_state() != _thread_in_Java
-      // Detect CPU features. This is only done at the very start of the VM. Later, the
-      // VM_Version::is_determine_features_test_running() flag should be false.
-
-      if (sig == SIGILL && VM_Version::is_determine_features_test_running()) {
-        // SIGILL must be caused by VM_Version::determine_features().
-        *(int *)pc = 0; // patch instruction to 0 to indicate that it causes a SIGILL,
-                        // flushing of icache is not necessary.
-        stub = pc + 4;  // continue with next instruction.
-        goto run_stub;
-      }
-      else if (thread->thread_state() == _thread_in_vm &&
-               sig == SIGBUS && thread->doing_unsafe_access()) {
-        // We don't really need a stub here! Just set the pending exeption and
-        // continue at the next instruction after the faulting read. Returning
-        // garbage from this read is ok.
-        thread->set_pending_unsafe_access_error();
-        uc->uc_mcontext.mc_srr0 = ((unsigned long)pc) + 4;
-        return 1;
-      }
-    }
-
-    // Check to see if we caught the safepoint code in the
-    // process of write protecting the memory serialization page.
-    // It write enables the page immediately after protecting it
-    // so we can just return to retry the write.
-    if ((sig == SIGSEGV) &&
-        os::is_memory_serialize_page(thread, addr)) {
-      // Synchronization problem in the pseudo memory barrier code (bug id 6546278)
-      // Block current thread until the memory serialize page permission restored.
-      os::block_on_serialize_page_trap();
-      return true;
-    }
-  }
-
-run_stub:
-
-  // One of the above code blocks ininitalized the stub, so we want to
-  // delegate control to that stub.
-  if (stub != NULL) {
-    // Save all thread context in case we need to restore it.
-    if (thread != NULL) thread->set_saved_exception_pc(pc);
-    uc->uc_mcontext.mc_srr0 = (unsigned long)stub;
-    return 1;
-  }
-
-run_chained_handler:
-
-  // signal-chaining
-  if (os::Bsd::chained_handler(sig, info, ucVoid)) {
-    return 1;
-  }
-  if (!abort_if_unrecognized) {
-    // caller wants another chance, so give it to him
-    return 0;
-  }
-
-report_and_die:
-
-  // Use sigthreadmask instead of sigprocmask on AIX and unmask current signal.
-  sigset_t newset;
-  sigemptyset(&newset);
-  sigaddset(&newset, sig);
-  sigprocmask(SIG_UNBLOCK, &newset, NULL);
-
-  VMError err(t, sig, pc, info, ucVoid);
-  err.report_and_die();
-
-  ShouldNotReachHere();
-  return 0;
-}
-
-void os::Bsd::init_thread_fpu_state(void) {
-  // Disable FP exceptions.
-  __asm__ __volatile__ ("mtfsfi 6,0");
-}
-
-///////////////////////////////////////////////////////////////////////////////
-// thread stack
-
-size_t os::Bsd::min_stack_allowed = 128*K;
-
-bool os::Bsd::supports_variable_stack_size() { return true; }
-
-// return default stack size for thr_type
-size_t os::Bsd::default_stack_size(os::ThreadType thr_type) {
-  // default stack size (compiler thread needs larger stack)
-  // Notice that the setting for compiler threads here have no impact
-  // because of the strange 'fallback logic' in os::create_thread().
-  // Better set CompilerThreadStackSize in globals_<os_cpu>.hpp if you want to
-  // specify a different stack size for compiler threads!
-  size_t s = (thr_type == os::compiler_thread ? 4 * M : 1024 * K);
-  return s;
-}
-
-size_t os::Bsd::default_guard_size(os::ThreadType thr_type) {
-  return 2 * page_size();
-}
-
-// Java thread:
-//
-//   Low memory addresses
-//    +------------------------+
-//    |                        |\  JavaThread created by VM does not have glibc
-//    |    glibc guard page    | - guard, attached Java thread usually has
-//    |                        |/  1 page glibc guard.
-// P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
-//    |                        |\
-//    |  HotSpot Guard Pages   | - red and yellow pages
-//    |                        |/
-//    +------------------------+ JavaThread::stack_yellow_zone_base()
-//    |                        |\
-//    |      Normal Stack      | -
-//    |                        |/
-// P2 +------------------------+ Thread::stack_base()
-//
-// Non-Java thread:
-//
-//   Low memory addresses
-//    +------------------------+
-//    |                        |\
-//    |  glibc guard page      | - usually 1 page
-//    |                        |/
-// P1 +------------------------+ Thread::stack_base() - Thread::stack_size()
-//    |                        |\
-//    |      Normal Stack      | -
-//    |                        |/
-// P2 +------------------------+ Thread::stack_base()
-//
-// ** P1 (aka bottom) and size ( P2 = P1 - size) are the address and stack size returned from
-//    pthread_attr_getstack()
-
-static void current_stack_region(address * bottom, size_t * size) {
-#ifdef __APPLE__
-  pthread_t self = pthread_self();
-  void *stacktop = pthread_get_stackaddr_np(self);
-  *size = pthread_get_stacksize_np(self);
-  // workaround for OS X 10.9.0 (Mavericks)
-  // pthread_get_stacksize_np returns 128 pages even though the actual size is 2048 pages
-  if (pthread_main_np() == 1) {
-    if ((*size) < (DEFAULT_MAIN_THREAD_STACK_PAGES * (size_t)getpagesize())) {
-      char kern_osrelease[256];
-      size_t kern_osrelease_size = sizeof(kern_osrelease);
-      int ret = sysctlbyname("kern.osrelease", kern_osrelease, &kern_osrelease_size, NULL, 0);
-      if (ret == 0) {
-        // get the major number, atoi will ignore the minor amd micro portions of the version string
-        if (atoi(kern_osrelease) >= OS_X_10_9_0_KERNEL_MAJOR_VERSION) {
-          *size = (DEFAULT_MAIN_THREAD_STACK_PAGES*getpagesize());
-        }
-      }
-    }
-  }
-  *bottom = (address) stacktop - *size;
-#elif defined(__OpenBSD__)
-  stack_t ss;
-  int rslt = pthread_stackseg_np(pthread_self(), &ss);
-
-  if (rslt != 0)
-    fatal(err_msg("pthread_stackseg_np failed with err = %d", rslt));
-
-  *bottom = (address)((char *)ss.ss_sp - ss.ss_size);
-  *size   = ss.ss_size;
-#else
-  pthread_attr_t attr;
-
-  int rslt = pthread_attr_init(&attr);
-
-  // JVM needs to know exact stack location, abort if it fails
-  if (rslt != 0)
-    fatal(err_msg("pthread_attr_init failed with err = %d", rslt));
-
-  rslt = pthread_attr_get_np(pthread_self(), &attr);
-
-  if (rslt != 0)
-    fatal(err_msg("pthread_attr_get_np failed with err = %d", rslt));
-
-  if (pthread_attr_getstackaddr(&attr, (void **)bottom) != 0 ||
-    pthread_attr_getstacksize(&attr, size) != 0) {
-    fatal("Can not locate current stack attributes!");
-  }
-
-  pthread_attr_destroy(&attr);
-#endif
-  assert(os::current_stack_pointer() >= *bottom &&
-         os::current_stack_pointer() < *bottom + *size, "just checking");
-}
-
-address os::current_stack_base() {
-  address bottom;
-  size_t size;
-  current_stack_region(&bottom, &size);
-  return (bottom + size);
-}
-
-size_t os::current_stack_size() {
-  // stack size includes normal stack and HotSpot guard pages
-  address bottom;
-  size_t size;
-  current_stack_region(&bottom, &size);
-  return size;
-}
-
-/////////////////////////////////////////////////////////////////////////////
-// helper functions for fatal error handler
-
-void os::print_context(outputStream *st, void *context) {
-  if (context == NULL) return;
-
-  ucontext_t* uc = (ucontext_t*)context;
-
-  st->print_cr("Registers:");
-  st->print("pc =" INTPTR_FORMAT "  ", uc->uc_mcontext.mc_srr0);
-  st->print("lr =" INTPTR_FORMAT "  ", uc->uc_mcontext.mc_lr);
-  st->print("ctr=" INTPTR_FORMAT "  ", uc->uc_mcontext.mc_ctr);
-  st->cr();
-  for (int i = 0; i < 32; i++) {
-    st->print("r%-2d=" INTPTR_FORMAT "  ", i, uc->uc_mcontext.mc_gpr[i]);
-    if (i % 3 == 2) st->cr();
-  }
-  st->cr();
-  st->cr();
-
-  intptr_t *sp = (intptr_t *)os::Bsd::ucontext_get_sp(uc);
-  st->print_cr("Top of Stack: (sp=" PTR_FORMAT ")", p2i(sp));
-  print_hex_dump(st, (address)sp, (address)(sp + 128), sizeof(intptr_t));
-  st->cr();
-
-  // Note: it may be unsafe to inspect memory near pc. For example, pc may
-  // point to garbage if entry point in an nmethod is corrupted. Leave
-  // this at the end, and hope for the best.
-  address pc = os::Bsd::ucontext_get_pc(uc);
-  st->print_cr("Instructions: (pc=" PTR_FORMAT ")", p2i(pc));
-  print_hex_dump(st, pc - 64, pc + 64, /*instrsize=*/4);
-  st->cr();
-}
-
-void os::print_register_info(outputStream *st, void *context) {
-  if (context == NULL) return;
-
-  ucontext_t *uc = (ucontext_t*)context;
-
-  st->print_cr("Register to memory mapping:");
-  st->cr();
-
-  // this is only for the "general purpose" registers
-  for (int i = 0; i < 32; i++) {
-    st->print("r%-2d=", i);
-    print_location(st, uc->uc_mcontext.mc_gpr[i]);
-  }
-  st->cr();
-}
-
-extern "C" {
-  int SpinPause() {
-    return 0;
-  }
-}
-
-#ifndef PRODUCT
-void os::verify_stack_alignment() {
-  assert(((intptr_t)os::current_stack_pointer() & (StackAlignmentInBytes-1)) == 0, "incorrect stack alignment");
-}
-#endif
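
For context on the removed current_stack_region() code above: on OS X it sizes the primordial thread's stack by reading the Darwin kernel version through sysctlbyname("kern.osrelease", ...). A minimal standalone sketch of that probe follows (main() and the printf are illustrative only; the OS_X_10_9_0_KERNEL_MAJOR_VERSION comparison lives in the HotSpot source):

// Standalone sketch: read the Darwin kernel major version the same way the
// removed current_stack_region() code does (macOS only; kern.osrelease is a
// Darwin sysctl name).
#include <sys/sysctl.h>
#include <cstdlib>
#include <cstdio>

int main() {
  char kern_osrelease[64];
  size_t kern_osrelease_size = sizeof(kern_osrelease);
  if (sysctlbyname("kern.osrelease", kern_osrelease, &kern_osrelease_size,
                   NULL, 0) == 0) {
    // atoi() stops at the first '.', so only the major number is parsed.
    printf("Darwin kernel major version: %d\n", atoi(kern_osrelease));
  }
  return 0;
}
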
--- a/src/os_cpu/bsd_ppc/vm/os_bsd_ppc.hpp	Fri Jun 17 22:06:31 2016 -0700
+++ b/src/os_cpu/bsd_ppc/vm/os_bsd_ppc.hpp	Fri Jun 17 22:08:33 2016 -0700
@@ -33,38 +33,3 @@
   static bool register_code_area(char *low, char *high) { return true; }
 
 #endif // OS_CPU_BSD_PPC_VM_OS_BSD_PPC_HPP
-/*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_BSD_PPC_VM_OS_BSD_PPC_HPP
-#define OS_CPU_BSD_PPC_VM_OS_BSD_PPC_HPP
-
-  static void setup_fpu() {}
-
-  // Used to register dynamic code cache area with the OS
-  // Note: Currently only used in 64 bit Windows implementations
-  static bool register_code_area(char *low, char *high) { return true; }
-
-#endif // OS_CPU_BSD_PPC_VM_OS_BSD_PPC_HPP
--- a/src/os_cpu/bsd_ppc/vm/prefetch_bsd_ppc.inline.hpp	Fri Jun 17 22:06:31 2016 -0700
+++ b/src/os_cpu/bsd_ppc/vm/prefetch_bsd_ppc.inline.hpp	Fri Jun 17 22:08:33 2016 -0700
@@ -48,53 +48,3 @@
 }
 
 #endif // OS_CPU_BSD_PPC_VM_PREFETCH_BSD_PPC_INLINE_HPP
-/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_BSD_PPC_VM_PREFETCH_BSD_PPC_INLINE_HPP
-#define OS_CPU_BSD_PPC_VM_PREFETCH_BSD_PPC_INLINE_HPP
-
-#include "runtime/prefetch.hpp"
-
-
-inline void Prefetch::read(void *loc, intx interval) {
-  __asm__ __volatile__ (
-    "   dcbt   0, %0       \n"
-    :
-    : /*%0*/"r" ( ((address)loc) +((long)interval) )
-    //:
-    );
-}
-
-inline void Prefetch::write(void *loc, intx interval) {
-  __asm__ __volatile__ (
-    "   dcbtst 0, %0       \n"
-    :
-    : /*%0*/"r" ( ((address)loc) +((long)interval) )
-    //:
-    );
-}
-
-#endif // OS_CPU_BSD_PPC_VM_PREFETCH_BSD_PPC_INLINE_HPP
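
The removed Prefetch::read/write bodies above wrap the PPC dcbt/dcbtst cache-touch instructions in inline asm. A sketch of the same intent using GCC/Clang's portable __builtin_prefetch (the function, array, and prefetch distance below are illustrative assumptions, not HotSpot code):

// Sketch only: portable prefetch equivalent of dcbt (read) / dcbtst (write)
// using __builtin_prefetch; second argument 0 = read, 1 = write.
#include <cstddef>

long sum_with_prefetch(const long* data, std::size_t n) {
  long sum = 0;
  const std::size_t ahead = 64;   // prefetch distance in elements (tuning guess)
  for (std::size_t i = 0; i < n; ++i) {
    if (i + ahead < n) {
      __builtin_prefetch(data + i + ahead, /*rw=*/0, /*locality=*/3);
    }
    sum += data[i];
  }
  return sum;
}
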
--- a/src/os_cpu/bsd_ppc/vm/threadLS_bsd_ppc.cpp	Fri Jun 17 22:06:31 2016 -0700
+++ b/src/os_cpu/bsd_ppc/vm/threadLS_bsd_ppc.cpp	Fri Jun 17 22:08:33 2016 -0700
@@ -37,42 +37,3 @@
 void ThreadLocalStorage::pd_set_thread(Thread* thread) {
   os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
 }
-/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/threadLocalStorage.hpp"
-
-void ThreadLocalStorage::generate_code_for_get_thread() {
-    // nothing we can do here for user-level threads
-}
-
-void ThreadLocalStorage::pd_init() {
-  // Nothing to do
-}
-
-void ThreadLocalStorage::pd_set_thread(Thread* thread) {
-  os::thread_local_storage_at_put(ThreadLocalStorage::thread_index(), thread);
-}
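
The removed ThreadLocalStorage::pd_set_thread() above simply stores the Thread* in an OS-level slot via os::thread_local_storage_at_put(). On BSD that slot is typically backed by a POSIX TLS key; a sketch of that underlying mechanism (the key and helper names are made up for illustration, not HotSpot's):

// Illustrative only: the pthread key primitives a TLS slot like this is
// commonly built on.
#include <pthread.h>
#include <stddef.h>

static pthread_key_t current_thread_key;

void tls_init(void) {                    // analogous to pd_init()
  pthread_key_create(&current_thread_key, NULL);
}

void tls_set_thread(void* thread) {      // analogous to pd_set_thread()
  pthread_setspecific(current_thread_key, thread);
}

void* tls_thread(void) {                 // analogous to ThreadLocalStorage::thread()
  return pthread_getspecific(current_thread_key);
}
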
--- a/src/os_cpu/bsd_ppc/vm/threadLS_bsd_ppc.hpp	Fri Jun 17 22:06:31 2016 -0700
+++ b/src/os_cpu/bsd_ppc/vm/threadLS_bsd_ppc.hpp	Fri Jun 17 22:08:33 2016 -0700
@@ -34,39 +34,3 @@
   }
 
 #endif // OS_CPU_BSD_PPC_VM_THREADLS_BSD_PPC_HPP
-/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_BSD_PPC_VM_THREADLS_BSD_PPC_HPP
-#define OS_CPU_BSD_PPC_VM_THREADLS_BSD_PPC_HPP
-
-  // Processor dependent parts of ThreadLocalStorage
-
-public:
-  static Thread* thread() {
-    return (Thread *) os::thread_local_storage_at(thread_index());
-  }
-
-#endif // OS_CPU_BSD_PPC_VM_THREADLS_BSD_PPC_HPP
--- a/src/os_cpu/bsd_ppc/vm/thread_bsd_ppc.cpp	Fri Jun 17 22:06:31 2016 -0700
+++ b/src/os_cpu/bsd_ppc/vm/thread_bsd_ppc.cpp	Fri Jun 17 22:08:33 2016 -0700
@@ -34,39 +34,3 @@
 }
 
 void JavaThread::cache_global_variables() { }
-/*
- * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2014 SAP AG. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "runtime/frame.hpp"
-#include "runtime/thread.hpp"
-
-// Forte Analyzer AsyncGetCallTrace profiling support is not implemented on Bsd/PPC.
-bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava) {
-  Unimplemented();
-  return false;
-}
-
-void JavaThread::cache_global_variables() { }
--- a/src/os_cpu/bsd_ppc/vm/thread_bsd_ppc.hpp	Fri Jun 17 22:06:31 2016 -0700
+++ b/src/os_cpu/bsd_ppc/vm/thread_bsd_ppc.hpp	Fri Jun 17 22:08:33 2016 -0700
@@ -81,86 +81,3 @@
   intptr_t* last_interpreter_fp() { return _last_interpreter_fp; }
 
 #endif // OS_CPU_BSD_PPC_VM_THREAD_BSD_PPC_HPP
-/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_BSD_PPC_VM_THREAD_BSD_PPC_HPP
-#define OS_CPU_BSD_PPC_VM_THREAD_BSD_PPC_HPP
-
- private:
-
-  void pd_initialize() {
-    _anchor.clear();
-    _last_interpreter_fp = NULL;
-  }
-
-  // The `last' frame is the youngest Java frame on the thread's stack.
-  frame pd_last_frame() {
-    assert(has_last_Java_frame(), "must have last_Java_sp() when suspended");
-
-    intptr_t* sp = last_Java_sp();
-    address pc = _anchor.last_Java_pc();
-
-    // Last_Java_pc is not set if we come here from compiled code.
-    if (pc == NULL) {
-      pc = (address) *(sp + 2);
-    }
-
-    return frame(sp, pc);
-  }
-
- public:
-
-  void set_base_of_stack_pointer(intptr_t* base_sp) {}
-  intptr_t* base_of_stack_pointer() { return NULL; }
-  void record_base_of_stack_pointer() {}
-
-  // These routines are only used on cpu architectures that
-  // have separate register stacks (Itanium).
-  static bool register_stack_overflow() { return false; }
-  static void enable_register_stack_guard() {}
-  static void disable_register_stack_guard() {}
-
-  bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext, bool isInJava);
-
- protected:
-
-  // -Xprof support
-  //
-  // In order to find the last Java fp from an async profile
-  // tick, we store the current interpreter fp in the thread.
-  // This value is only valid while we are in the C++ interpreter
-  // and profiling.
-  intptr_t *_last_interpreter_fp;
-
- public:
-
-  static ByteSize last_interpreter_fp_offset() {
-    return byte_offset_of(JavaThread, _last_interpreter_fp);
-  }
-
-  intptr_t* last_interpreter_fp() { return _last_interpreter_fp; }
-
-#endif // OS_CPU_BSD_PPC_VM_THREAD_BSD_PPC_HPP
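
The removed last_interpreter_fp_offset() above publishes the field's byte offset (via byte_offset_of) so generated code can address it relative to the JavaThread pointer. A tiny sketch of the same idea using plain offsetof (the FakeThread struct is a stand-in, not the real JavaThread layout):

// Sketch of the byte_offset_of idea: expose a field's byte offset so
// generated code can load it as [thread_ptr + offset].
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct FakeThread {
  void*          anchor;
  std::intptr_t* last_interpreter_fp;   // field to address from generated code
};

int main() {
  std::printf("last_interpreter_fp at byte offset %zu\n",
              offsetof(FakeThread, last_interpreter_fp));
  return 0;
}
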
--- a/src/os_cpu/bsd_ppc/vm/vmStructs_bsd_ppc.hpp	Fri Jun 17 22:06:31 2016 -0700
+++ b/src/os_cpu/bsd_ppc/vm/vmStructs_bsd_ppc.hpp	Fri Jun 17 22:08:33 2016 -0700
@@ -53,58 +53,3 @@
 #define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
 
 #endif // OS_CPU_BSD_PPC_VM_VMSTRUCTS_BSD_PPC_HPP
-/*
- * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2012, 2013 SAP AG. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef OS_CPU_BSD_PPC_VM_VMSTRUCTS_BSD_PPC_HPP
-#define OS_CPU_BSD_PPC_VM_VMSTRUCTS_BSD_PPC_HPP
-
-// These are the OS and CPU-specific fields, types and integer
-// constants required by the Serviceability Agent. This file is
-// referenced by vmStructs.cpp.
-
-#define VM_STRUCTS_OS_CPU(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field, nonproduct_nonstatic_field, c2_nonstatic_field, unchecked_c1_static_field, unchecked_c2_static_field) \
-                                                                                                                                     \
-  /******************************/                                                                                                   \
-  /* Threads (NOTE: incomplete) */                                                                                                   \
-  /******************************/                                                                                                   \
-  nonstatic_field(OSThread,                      _thread_id,                                      pid_t)                             \
-  nonstatic_field(OSThread,                      _pthread_id,                                     pthread_t)
-
-
-#define VM_TYPES_OS_CPU(declare_type, declare_toplevel_type, declare_oop_type, declare_integer_type, declare_unsigned_integer_type, declare_c1_toplevel_type, declare_c2_type, declare_c2_toplevel_type) \
-                                                                          \
-  /**********************/                                                \
-  /* Posix Thread IDs   */                                                \
-  /**********************/                                                \
-                                                                          \
-  declare_integer_type(pid_t)                                             \
-  declare_unsigned_integer_type(pthread_t)
-
-#define VM_INT_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
-
-#define VM_LONG_CONSTANTS_OS_CPU(declare_constant, declare_preprocessor_constant, declare_c1_constant, declare_c2_constant, declare_c2_preprocessor_constant)
-
-#endif // OS_CPU_BSD_PPC_VM_VMSTRUCTS_BSD_PPC_HPP
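
The removed vmStructs macros above follow the usual X-macro pattern: the platform file only lists the OS/CPU-specific fields and types, and vmStructs.cpp supplies the nonstatic_field / declare_*_type callbacks that decide what each entry expands to. A simplified sketch of that pattern (the struct, field list, and PRINT_FIELD callback are stand-ins, not the Serviceability Agent's real macros):

// Simplified X-macro sketch: the platform header only lists fields, the
// consumer decides what each entry expands to (here: print name and offset).
#include <cstddef>
#include <cstdio>
#include <sys/types.h>     // pid_t
#include <pthread.h>       // pthread_t

struct OSThreadLike {
  pid_t     _thread_id;
  pthread_t _pthread_id;
};

#define FAKE_STRUCTS_OS_CPU(nonstatic_field)        \
  nonstatic_field(OSThreadLike, _thread_id)         \
  nonstatic_field(OSThreadLike, _pthread_id)

#define PRINT_FIELD(type, name)                     \
  std::printf(#type "::" #name " at offset %zu\n",  \
              offsetof(type, name));

int main() {
  FAKE_STRUCTS_OS_CPU(PRINT_FIELD)
  return 0;
}
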