/*
 * Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
25
25
#ifndef OS_CPU_LINUX_ARM_ATOMIC_LINUX_ARM_HPP
26
26
#define OS_CPU_LINUX_ARM_ATOMIC_LINUX_ARM_HPP
27
27
28
+ #include " memory/allStatic.hpp"
28
29
#include " runtime/os.hpp"
29
30
#include " runtime/vm_version.hpp"
30
31
31
32
// Implementation of class atomic
32
33
34
+ class ARMAtomicFuncs : AllStatic {
35
+ public:
36
+ typedef int64_t (*cmpxchg_long_func_t )(int64_t , int64_t , volatile int64_t *);
37
+ typedef int64_t (*load_long_func_t )(const volatile int64_t *);
38
+ typedef void (*store_long_func_t )(int64_t , volatile int64_t *);
39
+ typedef int32_t (*atomic_add_func_t )(int32_t add_value, volatile int32_t *dest);
40
+ typedef int32_t (*atomic_xchg_func_t )(int32_t exchange_value, volatile int32_t *dest);
41
+ typedef int32_t (*cmpxchg_func_t )(int32_t , int32_t , volatile int32_t *);
42
+
43
+ static cmpxchg_long_func_t _cmpxchg_long_func;
44
+ static load_long_func_t _load_long_func;
45
+ static store_long_func_t _store_long_func;
46
+ static atomic_add_func_t _add_func;
47
+ static atomic_xchg_func_t _xchg_func;
48
+ static cmpxchg_func_t _cmpxchg_func;
49
+
50
+ static int64_t cmpxchg_long_bootstrap (int64_t , int64_t , volatile int64_t *);
51
+
52
+ static int64_t load_long_bootstrap (const volatile int64_t *);
53
+
54
+ static void store_long_bootstrap (int64_t , volatile int64_t *);
55
+
56
+ static int32_t add_bootstrap (int32_t add_value, volatile int32_t *dest);
57
+
58
+ static int32_t xchg_bootstrap (int32_t exchange_value, volatile int32_t *dest);
59
+
60
+ static int32_t cmpxchg_bootstrap (int32_t compare_value,
61
+ int32_t exchange_value,
62
+ volatile int32_t *dest);
63
+ };
64
+
/*
 * Atomic long operations on 32-bit ARM
 * ARM v7 supports LDREXD/STREXD synchronization instructions so no problem.
@@ -49,15 +81,15 @@ template<typename T>
49
81
inline T Atomic::PlatformLoad<8 >::operator ()(T const volatile * src) const {
50
82
STATIC_ASSERT (8 == sizeof (T));
51
83
return PrimitiveConversions::cast<T>(
52
- (*os::atomic_load_long_func )(reinterpret_cast <const volatile int64_t *>(src)));
84
+ (*ARMAtomicFuncs::_load_long_func )(reinterpret_cast <const volatile int64_t *>(src)));
53
85
}
54
86
55
87
template <>
56
88
template <typename T>
57
89
inline void Atomic::PlatformStore<8 >::operator ()(T volatile * dest,
58
90
T store_value) const {
59
91
STATIC_ASSERT (8 == sizeof (T));
60
- (*os::atomic_store_long_func )(
92
+ (*ARMAtomicFuncs::_store_long_func )(
61
93
PrimitiveConversions::cast<int64_t >(store_value), reinterpret_cast <volatile int64_t *>(dest));
62
94
}
63
95
@@ -83,7 +115,7 @@ inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value,
83
115
atomic_memory_order order) const {
84
116
STATIC_ASSERT (4 == sizeof (I));
85
117
STATIC_ASSERT (4 == sizeof (D));
86
- return add_using_helper<int32_t >(os::atomic_add_func , dest, add_value);
118
+ return add_using_helper<int32_t >(ARMAtomicFuncs::_add_func , dest, add_value);
87
119
}
88
120
89
121
@@ -93,7 +125,7 @@ inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest,
93
125
T exchange_value,
94
126
atomic_memory_order order) const {
95
127
STATIC_ASSERT (4 == sizeof (T));
96
- return xchg_using_helper<int32_t >(os::atomic_xchg_func , dest, exchange_value);
128
+ return xchg_using_helper<int32_t >(ARMAtomicFuncs::_xchg_func , dest, exchange_value);
97
129
}
98
130
99
131
@@ -108,15 +140,15 @@ inline int32_t reorder_cmpxchg_func(int32_t exchange_value,
108
140
int32_t volatile * dest,
109
141
int32_t compare_value) {
110
142
// Warning: Arguments are swapped to avoid moving them for kernel call
111
- return (*os::atomic_cmpxchg_func )(compare_value, exchange_value, dest);
143
+ return (*ARMAtomicFuncs::_cmpxchg_func )(compare_value, exchange_value, dest);
112
144
}
113
145
114
146
inline int64_t reorder_cmpxchg_long_func (int64_t exchange_value,
115
147
int64_t volatile * dest,
116
148
int64_t compare_value) {
117
149
assert (VM_Version::supports_cx8 (), " Atomic compare and exchange int64_t not supported on this architecture!" );
118
150
// Warning: Arguments are swapped to avoid moving them for kernel call
119
- return (*os::atomic_cmpxchg_long_func )(compare_value, exchange_value, dest);
151
+ return (*ARMAtomicFuncs::_cmpxchg_long_func )(compare_value, exchange_value, dest);
120
152
}
121
153
122
154
0 commit comments