
Commit 1164258

Author: Kim Barrett
1 parent: d3eba85

8295124: Atomic::add to pointer type may return wrong value

Reviewed-by: tschatzl, coleenp

2 files changed: +164, -15 lines

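Why the returned value could be wrong: the old shared AddImpl scaled the addend into bytes, but still handed the platform layer a destination of type P* volatile*. A platform that derives add_and_fetch from fetch_and_add with ordinary C++ pointer arithmetic (a common approach, as the new comment in the diff below notes) then scales the already-scaled addend by sizeof(P) a second time. The sketch below is not HotSpot code; it is a simplified, non-atomic model with hypothetical names that reproduces the arithmetic.

#include <cassert>
#include <cstdint>

// Hypothetical model of a platform fetch_and_add: like the underlying CPU
// instruction, it adds raw bytes to the pointer's integral representation.
// (Non-atomic; for illustration only.)
template<typename D, typename I>
D fetch_and_add(D volatile* dest, I add_value) {
  uintptr_t old = reinterpret_cast<uintptr_t>(*dest);
  *dest = reinterpret_cast<D>(old + add_value);
  return reinterpret_cast<D>(old);
}

// Platform shortcut: derive add_and_fetch from fetch_and_add. For D = P*,
// the trailing '+' is C++ pointer arithmetic and scales by sizeof(P) again.
template<typename D, typename I>
D add_and_fetch(D volatile* dest, I add_value) {
  return fetch_and_add(dest, add_value) + add_value;
}

int main() {
  uint32_t arr[64] = {};
  uint32_t* volatile p = &arr[0];

  // Old shared layer: pre-scale 5 elements to 20 bytes, but leave the
  // destination's pointee type as uint32_t.
  uint32_t* result = add_and_fetch(&p, 5 * sizeof(uint32_t));

  assert(p == &arr[5]);       // stored value is correct (20 bytes added)
  assert(result == &arr[20]); // returned value is wrong: scaled twice
  return 0;
}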

src/hotspot/share/runtime/atomic.hpp (+46, -15)
@@ -239,10 +239,11 @@ class Atomic : AllStatic {
   // bytes and (if different) pointer size bytes are required. The
   // class must be default constructable, with these requirements:
   //
-  // - dest is of type D*, an integral or pointer type.
+  // - dest is of type D*, where D is an integral or pointer type.
   // - add_value is of type I, an integral type.
   // - sizeof(I) == sizeof(D).
   // - if D is an integral type, I == D.
+  // - if D is a pointer type P*, sizeof(P) == 1.
   // - order is of type atomic_memory_order.
   // - platform_add is an object of type PlatformAdd<sizeof(D)>.
   //
@@ -257,9 +258,17 @@ class Atomic : AllStatic {
   // fetch_and_add atomically adds add_value to the value of dest,
   // returning the old value.
   //
-  // When D is a pointer type P*, both add_and_fetch and fetch_and_add
-  // treat it as if it were an uintptr_t; they do not perform any
-  // scaling of add_value, as that has already been done by the caller.
+  // When the destination type D of the Atomic operation is a pointer type P*,
+  // the addition must scale the add_value by sizeof(P) to add that many bytes
+  // to the destination value. Rather than requiring each platform deal with
+  // this, the shared part of the implementation performs some adjustments
+  // before and after calling the platform operation. It ensures the pointee
+  // type of the destination value passed to the platform operation has size
+  // 1, casting if needed. It also scales add_value by sizeof(P). The result
+  // of the platform operation is cast back to P*. This means the platform
+  // operation does not need to account for the scaling. It also makes it
+  // easy for the platform to implement one of add_and_fetch or fetch_and_add
+  // in terms of the other (which is a common approach).
   //
   // No definition is provided; all platforms must explicitly define
   // this class and any needed specializations.
@@ -689,21 +698,43 @@ struct Atomic::AddImpl<
 {
   STATIC_ASSERT(sizeof(intptr_t) == sizeof(P*));
   STATIC_ASSERT(sizeof(uintptr_t) == sizeof(P*));
-  typedef typename Conditional<IsSigned<I>::value,
-                               intptr_t,
-                               uintptr_t>::type CI;
 
-  static CI scale_addend(CI add_value) {
-    return add_value * sizeof(P);
+  // Type of the scaled addend. An integral type of the same size as a
+  // pointer, and the same signedness as I.
+  using SI = typename Conditional<IsSigned<I>::value, intptr_t, uintptr_t>::type;
+
+  // Type of the unscaled destination. A pointer type with pointee size == 1.
+  using UP = const char*;
+
+  // Scale add_value by the size of the pointee.
+  static SI scale_addend(SI add_value) {
+    return add_value * SI(sizeof(P));
+  }
+
+  // Casting between P* and UP* here intentionally uses C-style casts,
+  // because reinterpret_cast can't cast away cv qualifiers. Using copy_cv
+  // would be an alternative if it existed.
+
+  // Unscale dest to a char* pointee for consistency with scaled addend.
+  static UP volatile* unscale_dest(P* volatile* dest) {
+    return (UP volatile*) dest;
+  }
+
+  // Convert the unscaled char* result to a P*.
+  static P* scale_result(UP result) {
+    return (P*) result;
   }
 
-  static P* add_and_fetch(P* volatile* dest, I add_value, atomic_memory_order order) {
-    CI addend = add_value;
-    return PlatformAdd<sizeof(P*)>().add_and_fetch(dest, scale_addend(addend), order);
+  static P* add_and_fetch(P* volatile* dest, I addend, atomic_memory_order order) {
+    return scale_result(PlatformAdd<sizeof(P*)>().add_and_fetch(unscale_dest(dest),
+                                                                scale_addend(addend),
+                                                                order));
   }
-  static P* fetch_and_add(P* volatile* dest, I add_value, atomic_memory_order order) {
-    CI addend = add_value;
-    return PlatformAdd<sizeof(P*)>().fetch_and_add(dest, scale_addend(addend), order);
+
+  static P* fetch_and_add(P* volatile* dest, I addend, atomic_memory_order order) {
+    return scale_result(PlatformAdd<sizeof(P*)>().fetch_and_add(unscale_dest(dest),
+                                                                scale_addend(addend),
+                                                                order));
   }
 };
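The fix is visible in the hunk above: before invoking the platform operation, the shared layer now rewrites the destination so its pointee has size 1 (UP = const char*), pre-scales the addend into bytes, and casts the platform's result back to P*. With a byte-sized pointee, the platform's derive-one-from-the-other shortcut adds bytes in both places, so nothing is scaled twice. A minimal non-atomic sketch of the round-trip, reusing the helper names from the diff but with simplified bodies:

#include <cassert>
#include <cstdint>

// A pointer type whose pointee has size 1, as in the patched AddImpl.
using UP = const char*;

// The same platform shortcut as in the earlier sketch; with a char pointee
// the trailing '+' adds bytes, which is what the pre-scaled addend expects.
// (Non-atomic; for illustration only.)
static UP platform_add_and_fetch(UP volatile* dest, intptr_t byte_addend) {
  UP old = *dest;
  *dest = old + byte_addend;
  return old + byte_addend;
}

// Shared-layer round-trip: scale_addend, unscale_dest, scale_result.
// The C-style cast type-puns the pointer object itself, as the real code does.
template<typename P, typename I>
P* add_and_fetch(P* volatile* dest, I addend) {
  intptr_t scaled = intptr_t(addend) * intptr_t(sizeof(P)); // scale_addend
  UP volatile* unscaled = (UP volatile*) dest;              // unscale_dest
  return (P*) platform_add_and_fetch(unscaled, scaled);     // scale_result
}

int main() {
  uint32_t arr[10] = {};
  uint32_t* volatile p = &arr[0];
  uint32_t* result = add_and_fetch(&p, 5);
  assert(p == &arr[5]);      // stored value advanced by 5 elements
  assert(result == &arr[5]); // returned value now agrees with the stored one
  return 0;
}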

test/hotspot/gtest/runtime/test_atomic.cpp (+118)
@@ -28,6 +28,124 @@
 
 // These tests of Atomic only verify functionality. They don't verify atomicity.
 
+template<typename T>
+struct AtomicAddTestSupport {
+  volatile T _test_value;
+
+  AtomicAddTestSupport() : _test_value{} {}
+
+  void test_add() {
+    T zero = 0;
+    T five = 5;
+    Atomic::store(&_test_value, zero);
+    T value = Atomic::add(&_test_value, five);
+    EXPECT_EQ(five, value);
+    EXPECT_EQ(five, Atomic::load(&_test_value));
+  }
+
+  void test_fetch_add() {
+    T zero = 0;
+    T five = 5;
+    Atomic::store(&_test_value, zero);
+    T value = Atomic::fetch_and_add(&_test_value, five);
+    EXPECT_EQ(zero, value);
+    EXPECT_EQ(five, Atomic::load(&_test_value));
+  }
+};
+
+TEST(AtomicAddTest, int32) {
+  using Support = AtomicAddTestSupport<int32_t>;
+  Support().test_add();
+  Support().test_fetch_add();
+}
+
+// 64bit Atomic::add is only supported on 64bit platforms.
+#ifdef _LP64
+TEST(AtomicAddTest, int64) {
+  using Support = AtomicAddTestSupport<int64_t>;
+  Support().test_add();
+  Support().test_fetch_add();
+}
+#endif // _LP64
+
+TEST(AtomicAddTest, ptr) {
+  uint _test_values[10] = {};
+  uint* volatile _test_value{};
+
+  uint* zero = &_test_values[0];
+  uint* five = &_test_values[5];
+  uint* six = &_test_values[6];
+
+  Atomic::store(&_test_value, zero);
+  uint* value = Atomic::add(&_test_value, 5);
+  EXPECT_EQ(five, value);
+  EXPECT_EQ(five, Atomic::load(&_test_value));
+
+  Atomic::store(&_test_value, zero);
+  value = Atomic::fetch_and_add(&_test_value, 6);
+  EXPECT_EQ(zero, value);
+  EXPECT_EQ(six, Atomic::load(&_test_value));
+};
+
+template<typename T>
+struct AtomicXchgTestSupport {
+  volatile T _test_value;
+
+  AtomicXchgTestSupport() : _test_value{} {}
+
+  void test() {
+    T zero = 0;
+    T five = 5;
+    Atomic::store(&_test_value, zero);
+    T res = Atomic::xchg(&_test_value, five);
+    EXPECT_EQ(zero, res);
+    EXPECT_EQ(five, Atomic::load(&_test_value));
+  }
+};
+
+TEST(AtomicXchgTest, int32) {
+  using Support = AtomicXchgTestSupport<int32_t>;
+  Support().test();
+}
+
+// 64bit Atomic::xchg is only supported on 64bit platforms.
+#ifdef _LP64
+TEST(AtomicXchgTest, int64) {
+  using Support = AtomicXchgTestSupport<int64_t>;
+  Support().test();
+}
+#endif // _LP64
+
+template<typename T>
+struct AtomicCmpxchgTestSupport {
+  volatile T _test_value;
+
+  AtomicCmpxchgTestSupport() : _test_value{} {}
+
+  void test() {
+    T zero = 0;
+    T five = 5;
+    T ten = 10;
+    Atomic::store(&_test_value, zero);
+    T res = Atomic::cmpxchg(&_test_value, five, ten);
+    EXPECT_EQ(zero, res);
+    EXPECT_EQ(zero, Atomic::load(&_test_value));
+    res = Atomic::cmpxchg(&_test_value, zero, ten);
+    EXPECT_EQ(zero, res);
+    EXPECT_EQ(ten, Atomic::load(&_test_value));
+  }
+};
+
+TEST(AtomicCmpxchgTest, int32) {
+  using Support = AtomicCmpxchgTestSupport<int32_t>;
+  Support().test();
+}
+
+TEST(AtomicCmpxchgTest, int64) {
+  using Support = AtomicCmpxchgTestSupport<int64_t>;
+  Support().test();
+}
+
 template<typename T>
 struct AtomicEnumTestSupport {
   volatile T _test_value;