
Commit 519fd89

8295124: Atomic::add to pointer type may return wrong value
Backport-of: 1164258ec7d173944f48cba368d6c50a07b4c283
1 parent 5bc2302 commit 519fd89

File tree

2 files changed: +164 -15 lines changed

src/hotspot/share/runtime/atomic.hpp (+46, -15)
@@ -240,10 +240,11 @@ WINDOWS_ONLY(public:) // VS2017 warns (C2027) use of undefined type if IsPointer
 // bytes and (if different) pointer size bytes are required. The
 // class must be default constructable, with these requirements:
 //
-// - dest is of type D*, an integral or pointer type.
+// - dest is of type D*, where D is an integral or pointer type.
 // - add_value is of type I, an integral type.
 // - sizeof(I) == sizeof(D).
 // - if D is an integral type, I == D.
+// - if D is a pointer type P*, sizeof(P) == 1.
 // - order is of type atomic_memory_order.
 // - platform_add is an object of type PlatformAdd<sizeof(D)>.
 //
@@ -258,9 +259,17 @@ WINDOWS_ONLY(public:) // VS2017 warns (C2027) use of undefined type if IsPointer
 // fetch_and_add atomically adds add_value to the value of dest,
 // returning the old value.
 //
-// When D is a pointer type P*, both add_and_fetch and fetch_and_add
-// treat it as if it were an uintptr_t; they do not perform any
-// scaling of add_value, as that has already been done by the caller.
+// When the destination type D of the Atomic operation is a pointer type P*,
+// the addition must scale the add_value by sizeof(P) to add that many bytes
+// to the destination value. Rather than requiring each platform deal with
+// this, the shared part of the implementation performs some adjustments
+// before and after calling the platform operation. It ensures the pointee
+// type of the destination value passed to the platform operation has size
+// 1, casting if needed. It also scales add_value by sizeof(P). The result
+// of the platform operation is cast back to P*. This means the platform
+// operation does not need to account for the scaling. It also makes it
+// easy for the platform to implement one of add_and_fetch or fetch_and_add
+// in terms of the other (which is a common approach).
 //
 // No definition is provided; all platforms must explicitly define
 // this class and any needed specializations.
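
To make the new contract concrete, here is a small worked example of the flow this comment describes (a sketch only: it assumes P = uint with sizeof(uint) == 4, and uses the helper names introduced in the next hunk):

    uint* volatile p;        // D = P* with P = uint
    Atomic::add(&p, 5);      // caller asks for +5 elements
    // Shared layer, before invoking PlatformAdd<sizeof(P*)>:
    //   unscale_dest(&p) yields a const char* volatile*  (pointee size 1)
    //   scale_addend(5)  yields 5 * sizeof(uint) == 20   (a byte count)
    // The platform adds 20 to a char* value, so no further scaling occurs;
    // scale_result then casts the resulting char* back to uint*.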
@@ -690,21 +699,43 @@ struct Atomic::AddImpl<
 {
   STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
   STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
-  typedef typename Conditional<IsSigned<I>::value,
-                               intptr_t,
-                               uintptr_t>::type CI;
 
-  static CI scale_addend(CI add_value) {
-    return add_value * sizeof(P);
+  // Type of the scaled addend. An integral type of the same size as a
+  // pointer, and the same signedness as I.
+  using SI = typename Conditional<IsSigned<I>::value, intptr_t, uintptr_t>::type;
+
+  // Type of the unscaled destination. A pointer type with pointee size == 1.
+  using UP = const char*;
+
+  // Scale add_value by the size of the pointee.
+  static SI scale_addend(SI add_value) {
+    return add_value * SI(sizeof(P));
+  }
+
+  // Casting between P* and UP* here intentionally uses C-style casts,
+  // because reinterpret_cast can't cast away cv qualifiers. Using copy_cv
+  // would be an alternative if it existed.
+
+  // Unscale dest to a char* pointee for consistency with scaled addend.
+  static UP volatile* unscale_dest(P* volatile* dest) {
+    return (UP volatile*) dest;
+  }
+
+  // Convert the unscaled char* result to a P*.
+  static P* scale_result(UP result) {
+    return (P*) result;
   }
 
-  static P* add_and_fetch(P* volatile* dest, I add_value, atomic_memory_order order) {
-    CI addend = add_value;
-    return PlatformAdd<sizeof(P*)>().add_and_fetch(dest, scale_addend(addend), order);
+  static P* add_and_fetch(P* volatile* dest, I addend, atomic_memory_order order) {
+    return scale_result(PlatformAdd<sizeof(P*)>().add_and_fetch(unscale_dest(dest),
+                                                                scale_addend(addend),
+                                                                order));
   }
-  static P* fetch_and_add(P* volatile* dest, I add_value, atomic_memory_order order) {
-    CI addend = add_value;
-    return PlatformAdd<sizeof(P*)>().fetch_and_add(dest, scale_addend(addend), order);
+
+  static P* fetch_and_add(P* volatile* dest, I addend, atomic_memory_order order) {
+    return scale_result(PlatformAdd<sizeof(P*)>().fetch_and_add(unscale_dest(dest),
+                                                                scale_addend(addend),
+                                                                order));
   }
 };
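For illustration, the standalone sketch below (not HotSpot code; the toy_* helpers are hypothetical stand-ins) shows one plausible way the old scheme could return a wrong value: a platform that derives fetch_and_add from add_and_fetch by subtracting the addend performs that subtraction with P* pointer arithmetic, re-scaling the already-scaled byte count. Viewing the destination through a size-1 pointee, as the fix does, turns the same subtraction into byte arithmetic:

    #include <cstdint>
    #include <cstdio>

    // Toy, non-atomic stand-in for a platform add_and_fetch (hypothetical;
    // atomicity is irrelevant to the arithmetic being illustrated).  As in
    // the shared layer above, the addend arrives pre-scaled to bytes.
    template<typename D>
    D toy_add_and_fetch(D volatile* dest, intptr_t scaled_bytes) {
      D updated = reinterpret_cast<D>(
          reinterpret_cast<intptr_t>(*dest) + scaled_bytes);
      *dest = updated;
      return updated;
    }

    // The "common approach" the new comment mentions: derive fetch_and_add
    // by subtracting the addend from add_and_fetch's result.  When D is a
    // pointer type P*, this subtraction is P* pointer arithmetic.
    template<typename D>
    D toy_fetch_and_add(D volatile* dest, intptr_t scaled_bytes) {
      return toy_add_and_fetch(dest, scaled_bytes) - scaled_bytes;
    }

    int main() {
      uint32_t arr[32] = {};
      uint32_t* const start = &arr[16];
      const intptr_t five_elements = 5 * intptr_t(sizeof(uint32_t)); // 20 bytes

      // Old scheme: the platform sees a uint32_t* pointee, so the trailing
      // "- scaled_bytes" subtracts 20 elements (80 bytes), not 20 bytes.
      // The destination ends up correct, but the returned old value is wrong.
      uint32_t* volatile p = start;
      uint32_t* old_elem = toy_fetch_and_add(&p, five_elements);
      std::printf("old correct? %d\n", old_elem == start);   // prints 0

      // New scheme: the platform sees a size-1 (char) pointee, so the same
      // subtraction is byte arithmetic and the old value comes back right.
      const char* volatile cp = reinterpret_cast<const char*>(start);
      const char* old_byte = toy_fetch_and_add(&cp, five_elements);
      std::printf("old correct? %d\n",
                  old_byte == reinterpret_cast<const char*>(start)); // prints 1
      return 0;
    }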
test/hotspot/gtest/runtime/test_atomic.cpp (+118)
@@ -28,6 +28,124 @@
 
 // These tests of Atomic only verify functionality. They don't verify atomicity.
 
+template<typename T>
+struct AtomicAddTestSupport {
+  volatile T _test_value;
+
+  AtomicAddTestSupport() : _test_value{} {}
+
+  void test_add() {
+    T zero = 0;
+    T five = 5;
+    Atomic::store(&_test_value, zero);
+    T value = Atomic::add(&_test_value, five);
+    EXPECT_EQ(five, value);
+    EXPECT_EQ(five, Atomic::load(&_test_value));
+  }
+
+  void test_fetch_add() {
+    T zero = 0;
+    T five = 5;
+    Atomic::store(&_test_value, zero);
+    T value = Atomic::fetch_and_add(&_test_value, five);
+    EXPECT_EQ(zero, value);
+    EXPECT_EQ(five, Atomic::load(&_test_value));
+  }
+};
+
+TEST(AtomicAddTest, int32) {
+  using Support = AtomicAddTestSupport<int32_t>;
+  Support().test_add();
+  Support().test_fetch_add();
+}
+
+// 64bit Atomic::add is only supported on 64bit platforms.
+#ifdef _LP64
+TEST(AtomicAddTest, int64) {
+  using Support = AtomicAddTestSupport<int64_t>;
+  Support().test_add();
+  Support().test_fetch_add();
+}
+#endif // _LP64
+
+TEST(AtomicAddTest, ptr) {
+  uint _test_values[10] = {};
+  uint* volatile _test_value{};
+
+  uint* zero = &_test_values[0];
+  uint* five = &_test_values[5];
+  uint* six = &_test_values[6];
+
+  Atomic::store(&_test_value, zero);
+  uint* value = Atomic::add(&_test_value, 5);
+  EXPECT_EQ(five, value);
+  EXPECT_EQ(five, Atomic::load(&_test_value));
+
+  Atomic::store(&_test_value, zero);
+  value = Atomic::fetch_and_add(&_test_value, 6);
+  EXPECT_EQ(zero, value);
+  EXPECT_EQ(six, Atomic::load(&_test_value));
+};
+
+template<typename T>
+struct AtomicXchgTestSupport {
+  volatile T _test_value;
+
+  AtomicXchgTestSupport() : _test_value{} {}
+
+  void test() {
+    T zero = 0;
+    T five = 5;
+    Atomic::store(&_test_value, zero);
+    T res = Atomic::xchg(&_test_value, five);
+    EXPECT_EQ(zero, res);
+    EXPECT_EQ(five, Atomic::load(&_test_value));
+  }
+};
+
+TEST(AtomicXchgTest, int32) {
+  using Support = AtomicXchgTestSupport<int32_t>;
+  Support().test();
+}
+
+// 64bit Atomic::xchg is only supported on 64bit platforms.
+#ifdef _LP64
+TEST(AtomicXchgTest, int64) {
+  using Support = AtomicXchgTestSupport<int64_t>;
+  Support().test();
+}
+#endif // _LP64
+
+template<typename T>
+struct AtomicCmpxchgTestSupport {
+  volatile T _test_value;
+
+  AtomicCmpxchgTestSupport() : _test_value{} {}
+
+  void test() {
+    T zero = 0;
+    T five = 5;
+    T ten = 10;
+    Atomic::store(&_test_value, zero);
+    T res = Atomic::cmpxchg(&_test_value, five, ten);
+    EXPECT_EQ(zero, res);
+    EXPECT_EQ(zero, Atomic::load(&_test_value));
+    res = Atomic::cmpxchg(&_test_value, zero, ten);
+    EXPECT_EQ(zero, res);
+    EXPECT_EQ(ten, Atomic::load(&_test_value));
+  }
+};
+
+TEST(AtomicCmpxchgTest, int32) {
+  using Support = AtomicCmpxchgTestSupport<int32_t>;
+  Support().test();
+}
+
+TEST(AtomicCmpxchgTest, int64) {
+  using Support = AtomicCmpxchgTestSupport<int64_t>;
+  Support().test();
+}
+
 template<typename T>
 struct AtomicEnumTestSupport {
   volatile T _test_value;

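These gtests run inside the JVM via HotSpot's gtest wrapper. With a built JDK, a command along the lines of make test TEST="gtest:AtomicAddTest" (see doc/testing.md in the JDK tree) should exercise the new cases, including AtomicAddTest.ptr, which checks exactly the old-value behavior fixed above.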