Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 10 additions & 2 deletions src/hotspot/cpu/aarch64/javaFrameAnchor_aarch64.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -39,9 +39,13 @@
// 3 - restoring an old state (javaCalls)

void clear(void) {
// No hardware fence required, the members are declared volatile and a compiler barrier is
// present so the compiler will not reorder and the profiler always reads from the same
// thread and should observe the state in program order.

// clearing _last_Java_sp must be first
_last_Java_sp = nullptr;
OrderAccess::release();
compiler_barrier();
_last_Java_fp = nullptr;
_last_Java_pc = nullptr;
}
Expand All @@ -54,13 +58,17 @@
// To act like previous version (pd_cache_state) don't null _last_Java_sp
// unless the value is changing
//
// No hardware fence required, the members are declared volatile and a compiler barrier is
// present so the compiler will not reorder and the profiler always reads from the same
// thread and should observe the state in program order.
if (_last_Java_sp != src->_last_Java_sp) {
_last_Java_sp = nullptr;
OrderAccess::release();
compiler_barrier();
}
_last_Java_fp = src->_last_Java_fp;
_last_Java_pc = src->_last_Java_pc;
// Must be last so profiler will always see valid frame if has_last_frame() is true
compiler_barrier();
_last_Java_sp = src->_last_Java_sp;
}

Expand Down
5 changes: 3 additions & 2 deletions src/hotspot/cpu/aarch64/macroAssembler_aarch64.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -634,12 +634,13 @@ void MacroAssembler::set_last_Java_frame(Register last_java_sp,
last_java_sp = esp;
}

str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset()));

// last_java_fp is optional
if (last_java_fp->is_valid()) {
str(last_java_fp, Address(rthread, JavaThread::last_Java_fp_offset()));
}

// Must be last so profiler will always see valid frame if has_last_frame() is true
str(last_java_sp, Address(rthread, JavaThread::last_Java_sp_offset()));
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
Expand Down
7 changes: 2 additions & 5 deletions src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -27,14 +27,11 @@

// Included in orderAccess.hpp header file.

#include "utilities/globalDefinitions.hpp"

// Compiler version last used for testing: clang 5.1
// Please update this information when this file changes

// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
static inline void compiler_barrier() {
__asm__ volatile ("" : : : "memory");
}

// x86 is TSO and hence only needs a fence for storeload
// However, a compiler barrier is still needed to prevent reordering
// between volatile and non-volatile memory accesses.
Expand Down
3 changes: 2 additions & 1 deletion src/hotspot/os_cpu/linux_s390/orderAccess_linux_s390.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@
// Included in orderAccess.hpp header file.

#include "runtime/vm_version.hpp"
#include "utilities/globalDefinitions.hpp"

// Implementation of class OrderAccess.

Expand All @@ -55,7 +56,7 @@
// is needed.

// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions.
#define inlasm_compiler_barrier() __asm__ volatile ("" : : : "memory");
#define inlasm_compiler_barrier() compiler_barrier()
// "bcr 15, 0" is used as two way memory barrier.
#define inlasm_zarch_sync() __asm__ __volatile__ ("bcr 15, 0" : : : "memory");

Expand Down
7 changes: 2 additions & 5 deletions src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -27,16 +27,13 @@

// Included in orderAccess.hpp header file.

#include "utilities/globalDefinitions.hpp"

// Compiler version last used for testing: gcc 4.8.2
// Please update this information when this file changes

// Implementation of class OrderAccess.

// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
static inline void compiler_barrier() {
__asm__ volatile ("" : : : "memory");
}

inline void OrderAccess::loadload() { compiler_barrier(); }
inline void OrderAccess::storestore() { compiler_barrier(); }
inline void OrderAccess::loadstore() { compiler_barrier(); }
Expand Down
7 changes: 2 additions & 5 deletions src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -27,18 +27,15 @@

// Included in orderAccess.hpp header file.

#include "utilities/globalDefinitions.hpp"

#include <intrin.h>

// Compiler version last used for testing: Microsoft Visual Studio 2010
// Please update this information when this file changes

// Implementation of class OrderAccess.

// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
inline void compiler_barrier() {
_ReadWriteBarrier();
}

inline void OrderAccess::loadload() { compiler_barrier(); }
inline void OrderAccess::storestore() { compiler_barrier(); }
inline void OrderAccess::loadstore() { compiler_barrier(); }
Expand Down
7 changes: 7 additions & 0 deletions src/hotspot/share/utilities/globalDefinitions_gcc.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -96,4 +96,11 @@ inline int g_isfinite(jdouble f) { return isfinite(f); }
#define ALWAYSINLINE inline __attribute__ ((always_inline))
#define ATTRIBUTE_FLATTEN __attribute__ ((flatten))

// Compiler barrier which prevents the compiler from reordering loads and stores.
// It does not prevent the hardware from doing so. Typically you should use
// OrderAccess instead.
static inline void compiler_barrier() {
__asm__ volatile ("" : : : "memory");
}

Comment on lines +99 to +105
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

You can do this in portable C++ since C++11:

// Compiler barrier which prevents the compiler from reordering loads and stores.
static inline void compiler_barrier() {
  std::atomic_signal_fence(std::memory_order_seq_cst);
}

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

There are some rules about not calling the Standard C++ libraries in the Guidelines, but given that this one only prevents the compiler from moving things around and does not generate any code, I don't think that really applies. More legalistically-minded people might disagree, but I prefer portable code.

#endif // SHARE_UTILITIES_GLOBALDEFINITIONS_GCC_HPP
14 changes: 11 additions & 3 deletions src/hotspot/share/utilities/globalDefinitions_visCPP.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -36,13 +36,14 @@

# include <ctype.h>
# include <fcntl.h>
# include <float.h> // for _isnan
# include <float.h> // for _isnan
# include <intrin.h> // for _ReadWriteBarrier
# include <inttypes.h>
# include <io.h> // for stream.cpp
# include <io.h> // for stream.cpp
# include <limits.h>
# include <math.h>
# include <stdarg.h>
# include <stddef.h>// for offsetof
# include <stddef.h> // for offsetof
# include <stdint.h>
# include <stdio.h>
# include <stdlib.h>
Expand Down Expand Up @@ -103,4 +104,11 @@ inline int g_isfinite(jdouble f) { return _finite(f); }
#define SSIZE_MAX LLONG_MAX
#endif // SSIZE_MAX missing

// Compiler barrier which prevents the compiler from reordering loads and stores.
// It does not prevent the hardware from doing so. Typically you should use
// OrderAccess instead.
static inline void compiler_barrier() {
_ReadWriteBarrier();
}

#endif // SHARE_UTILITIES_GLOBALDEFINITIONS_VISCPP_HPP