Skip to content

Commit d98a323

Browse files
committed
8301070: Replace NULL with nullptr in share/memory/
Reviewed-by: stefank, stuefe
1 parent 315398c commit d98a323

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

65 files changed

+678
-678
lines changed

src/hotspot/share/memory/allocation.cpp

+7-7
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
2+
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
33
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
44
*
55
* This code is free software; you can redistribute it and/or modify it
@@ -40,7 +40,7 @@ char* AllocateHeap(size_t size,
4040
const NativeCallStack& stack,
4141
AllocFailType alloc_failmode /* = AllocFailStrategy::EXIT_OOM*/) {
4242
char* p = (char*) os::malloc(size, flags, stack);
43-
if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
43+
if (p == nullptr && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
4444
vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "AllocateHeap");
4545
}
4646
return p;
@@ -57,19 +57,19 @@ char* ReallocateHeap(char *old,
5757
MEMFLAGS flag,
5858
AllocFailType alloc_failmode) {
5959
char* p = (char*) os::realloc(old, size, flag, CALLER_PC);
60-
if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
60+
if (p == nullptr && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
6161
vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "ReallocateHeap");
6262
}
6363
return p;
6464
}
6565

66-
// handles NULL pointers
66+
// handles null pointers
6767
void FreeHeap(void* p) {
6868
os::free(p);
6969
}
7070

71-
void* MetaspaceObj::_shared_metaspace_base = NULL;
72-
void* MetaspaceObj::_shared_metaspace_top = NULL;
71+
void* MetaspaceObj::_shared_metaspace_base = nullptr;
72+
void* MetaspaceObj::_shared_metaspace_top = nullptr;
7373

7474
void* StackObj::operator new(size_t size) throw() { ShouldNotCallThis(); return 0; }
7575
void StackObj::operator delete(void* p) { ShouldNotCallThis(); }
@@ -132,7 +132,7 @@ void* AnyObj::operator new(size_t size, const std::nothrow_t& nothrow_constant,
132132
MEMFLAGS flags) throw() {
133133
// should only call this with std::nothrow, use other operator new() otherwise
134134
address res = (address)AllocateHeap(size, flags, CALLER_PC, AllocFailStrategy::RETURN_NULL);
135-
DEBUG_ONLY(if (res != NULL) set_allocation_type(res, C_HEAP);)
135+
DEBUG_ONLY(if (res != nullptr) set_allocation_type(res, C_HEAP);)
136136
return res;
137137
}
138138

src/hotspot/share/memory/allocation.hpp

+6-6
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
2+
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
33
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
44
*
55
* This code is free software; you can redistribute it and/or modify it
@@ -174,7 +174,7 @@ char* ReallocateHeap(char *old,
174174
MEMFLAGS flag,
175175
AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
176176

177-
// handles NULL pointers
177+
// handles null pointers
178178
void FreeHeap(void* p);
179179

180180
class CHeapObjBase {
@@ -321,7 +321,7 @@ class MetaspaceObj {
321321
// into a single contiguous memory block, so we can use these
322322
// two pointers to quickly determine if something is in the
323323
// shared metaspace.
324-
// When CDS is not enabled, both pointers are set to NULL.
324+
// When CDS is not enabled, both pointers are set to null.
325325
static void* _shared_metaspace_base; // (inclusive) low address
326326
static void* _shared_metaspace_top; // (exclusive) high address
327327

@@ -335,7 +335,7 @@ class MetaspaceObj {
335335
#if INCLUDE_CDS
336336
static bool is_shared(const MetaspaceObj* p) {
337337
// If no shared metaspace regions are mapped, _shared_metaspace_{base,top} will
338-
// both be NULL and all values of p will be rejected quickly.
338+
// both be null and all values of p will be rejected quickly.
339339
return (((void*)p) < _shared_metaspace_top &&
340340
((void*)p) >= _shared_metaspace_base);
341341
}
@@ -386,7 +386,7 @@ class MetaspaceObj {
386386
METASPACE_OBJ_TYPES_DO(METASPACE_OBJ_TYPE_NAME_CASE)
387387
default:
388388
ShouldNotReachHere();
389-
return NULL;
389+
return nullptr;
390390
}
391391
}
392392

@@ -511,7 +511,7 @@ class AnyObj {
511511
}
512512
void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
513513
address res = (address)resource_allocate_bytes(size, AllocFailStrategy::RETURN_NULL);
514-
DEBUG_ONLY(if (res != NULL) set_allocation_type(res, RESOURCE_AREA);)
514+
DEBUG_ONLY(if (res != nullptr) set_allocation_type(res, RESOURCE_AREA);)
515515
return res;
516516
}
517517

src/hotspot/share/memory/allocation.inline.hpp

+9-9
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
2+
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
33
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
44
*
55
* This code is free software; you can redistribute it and/or modify it
@@ -59,15 +59,15 @@ E* MmapArrayAllocator<E>::allocate_or_null(size_t length, MEMFLAGS flags) {
5959
size_t size = size_for(length);
6060

6161
char* addr = os::reserve_memory(size, !ExecMem, flags);
62-
if (addr == NULL) {
63-
return NULL;
62+
if (addr == nullptr) {
63+
return nullptr;
6464
}
6565

6666
if (os::commit_memory(addr, size, !ExecMem)) {
6767
return (E*)addr;
6868
} else {
6969
os::release_memory(addr, size);
70-
return NULL;
70+
return nullptr;
7171
}
7272
}
7373

@@ -76,7 +76,7 @@ E* MmapArrayAllocator<E>::allocate(size_t length, MEMFLAGS flags) {
7676
size_t size = size_for(length);
7777

7878
char* addr = os::reserve_memory(size, !ExecMem, flags);
79-
if (addr == NULL) {
79+
if (addr == nullptr) {
8080
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "Allocator (reserve)");
8181
}
8282

@@ -148,13 +148,13 @@ E* ArrayAllocator<E>::reallocate(E* old_addr, size_t old_length, size_t new_leng
148148

149149
E* new_addr = (new_length > 0)
150150
? allocate(new_length, flags)
151-
: NULL;
151+
: nullptr;
152152

153-
if (new_addr != NULL && old_addr != NULL) {
153+
if (new_addr != nullptr && old_addr != nullptr) {
154154
memcpy(new_addr, old_addr, MIN2(old_length, new_length) * sizeof(E));
155155
}
156156

157-
if (old_addr != NULL) {
157+
if (old_addr != nullptr) {
158158
free(old_addr, old_length);
159159
}
160160

@@ -173,7 +173,7 @@ void ArrayAllocator<E>::free_mmap(E* addr, size_t length) {
173173

174174
template <class E>
175175
void ArrayAllocator<E>::free(E* addr, size_t length) {
176-
if (addr != NULL) {
176+
if (addr != nullptr) {
177177
if (should_use_malloc(length)) {
178178
free_malloc(addr, length);
179179
} else {

src/hotspot/share/memory/arena.cpp

+25-25
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
2+
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
33
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
44
*
55
* This code is free software; you can redistribute it and/or modify it
@@ -56,9 +56,9 @@ class ChunkPool {
5656
static ChunkPool _pools[_num_pools];
5757

5858
public:
59-
ChunkPool(size_t size) : _first(NULL), _num_chunks(0), _size(size) {}
59+
ChunkPool(size_t size) : _first(nullptr), _num_chunks(0), _size(size) {}
6060

61-
// Allocate a chunk from the pool; returns NULL if pool is empty.
61+
// Allocate a chunk from the pool; returns null if pool is empty.
6262
Chunk* allocate() {
6363
ThreadCritical tc;
6464
Chunk* c = _first;
@@ -81,26 +81,26 @@ class ChunkPool {
8181
// Prune the pool
8282
void prune() {
8383
static const int blocksToKeep = 5;
84-
Chunk* cur = NULL;
84+
Chunk* cur = nullptr;
8585
Chunk* next;
8686
// if we have more than n chunks, free all of them
8787
ThreadCritical tc;
8888
if (_num_chunks > blocksToKeep) {
8989
// free chunks at end of queue, for better locality
9090
cur = _first;
9191
for (size_t i = 0; i < (blocksToKeep - 1); i++) {
92-
assert(cur != NULL, "counter out of sync?");
92+
assert(cur != nullptr, "counter out of sync?");
9393
cur = cur->next();
9494
}
95-
assert(cur != NULL, "counter out of sync?");
95+
assert(cur != nullptr, "counter out of sync?");
9696

9797
next = cur->next();
98-
cur->set_next(NULL);
98+
cur->set_next(nullptr);
9999
cur = next;
100100

101101
// Free all remaining chunks while in ThreadCritical lock
102102
// so NMT adjustment is stable.
103-
while(cur != NULL) {
103+
while(cur != nullptr) {
104104
next = cur->next();
105105
os::free(cur);
106106
_num_chunks--;
@@ -115,14 +115,14 @@ class ChunkPool {
115115
}
116116
}
117117

118-
// Given a (inner payload) size, return the pool responsible for it, or NULL if the size is non-standard
118+
// Given a (inner payload) size, return the pool responsible for it, or null if the size is non-standard
119119
static ChunkPool* get_pool_for_size(size_t size) {
120120
for (int i = 0; i < _num_pools; i++) {
121121
if (_pools[i]._size == size) {
122122
return _pools + i;
123123
}
124124
}
125-
return NULL;
125+
return nullptr;
126126
}
127127

128128
};
@@ -170,17 +170,17 @@ void* Chunk::operator new (size_t sizeofChunk, AllocFailType alloc_failmode, siz
170170
SIZE_FORMAT ".", length);
171171
// Try to reuse a freed chunk from the pool
172172
ChunkPool* pool = ChunkPool::get_pool_for_size(length);
173-
if (pool != NULL) {
173+
if (pool != nullptr) {
174174
Chunk* c = pool->allocate();
175-
if (c != NULL) {
175+
if (c != nullptr) {
176176
assert(c->length() == length, "wrong length?");
177177
return c;
178178
}
179179
}
180180
// Either the pool was empty, or this is a non-standard length. Allocate a new Chunk from C-heap.
181181
size_t bytes = ARENA_ALIGN(sizeofChunk) + length;
182182
void* p = os::malloc(bytes, mtChunk, CALLER_PC);
183-
if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
183+
if (p == nullptr && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
184184
vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new");
185185
}
186186
// We rely on arena alignment <= malloc alignment.
@@ -192,7 +192,7 @@ void Chunk::operator delete(void* p) {
192192
// If this is a standard-sized chunk, return it to its pool; otherwise free it.
193193
Chunk* c = (Chunk*)p;
194194
ChunkPool* pool = ChunkPool::get_pool_for_size(c->length());
195-
if (pool != NULL) {
195+
if (pool != nullptr) {
196196
pool->free(c);
197197
} else {
198198
ThreadCritical tc; // Free chunks under TC lock so that NMT adjustment is stable.
@@ -201,7 +201,7 @@ void Chunk::operator delete(void* p) {
201201
}
202202

203203
Chunk::Chunk(size_t length) : _len(length) {
204-
_next = NULL; // Chain on the linked list
204+
_next = nullptr; // Chain on the linked list
205205
}
206206

207207
void Chunk::chop() {
@@ -217,7 +217,7 @@ void Chunk::chop() {
217217

218218
void Chunk::next_chop() {
219219
_next->chop();
220-
_next = NULL;
220+
_next = nullptr;
221221
}
222222

223223
void Chunk::start_chunk_pool_cleaner_task() {
@@ -276,7 +276,7 @@ void Arena::destruct_contents() {
276276
// reset size before chop to avoid a rare racing condition
277277
// that can have total arena memory exceed total chunk memory
278278
set_size_in_bytes(0);
279-
if (_first != NULL) {
279+
if (_first != nullptr) {
280280
_first->chop();
281281
}
282282
reset();
@@ -312,9 +312,9 @@ void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
312312
Chunk *k = _chunk; // Get filled-up chunk address
313313
_chunk = new (alloc_failmode, len) Chunk(len);
314314

315-
if (_chunk == NULL) {
315+
if (_chunk == nullptr) {
316316
_chunk = k; // restore the previous value of _chunk
317-
return NULL;
317+
return nullptr;
318318
}
319319
if (k) k->set_next(_chunk); // Append new chunk to end of linked list
320320
else _first = _chunk;
@@ -332,11 +332,11 @@ void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
332332
void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFailType alloc_failmode) {
333333
if (new_size == 0) {
334334
Afree(old_ptr, old_size); // like realloc(3)
335-
return NULL;
335+
return nullptr;
336336
}
337-
if (old_ptr == NULL) {
337+
if (old_ptr == nullptr) {
338338
assert(old_size == 0, "sanity");
339-
return Amalloc(new_size, alloc_failmode); // as with realloc(3), a NULL old ptr is equivalent to malloc(3)
339+
return Amalloc(new_size, alloc_failmode); // as with realloc(3), a null old ptr is equivalent to malloc(3)
340340
}
341341
char *c_old = (char*)old_ptr; // Handy name
342342
// Stupid fast special case
@@ -358,8 +358,8 @@ void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFail
358358

359359
// Oops, got to relocate guts
360360
void *new_ptr = Amalloc(new_size, alloc_failmode);
361-
if (new_ptr == NULL) {
362-
return NULL;
361+
if (new_ptr == nullptr) {
362+
return nullptr;
363363
}
364364
memcpy( new_ptr, c_old, old_size );
365365
Afree(c_old,old_size); // Mostly done to keep stats accurate
@@ -369,7 +369,7 @@ void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFail
369369

370370
// Determine if pointer belongs to this Arena or not.
371371
bool Arena::contains( const void *ptr ) const {
372-
if (_chunk == NULL) return false;
372+
if (_chunk == nullptr) return false;
373373
if( (void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm )
374374
return true; // Check for in this chunk
375375
for (Chunk *c = _first; c; c = c->next()) {

src/hotspot/share/memory/arena.hpp

+5-5
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
/*
2-
* Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
2+
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
33
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
44
*
55
* This code is free software; you can redistribute it and/or modify it
@@ -140,8 +140,8 @@ class Arena : public CHeapObjBase {
140140

141141
// Fast delete in area. Common case is: NOP (except for storage reclaimed)
142142
bool Afree(void *ptr, size_t size) {
143-
if (ptr == NULL) {
144-
return true; // as with free(3), freeing NULL is a noop.
143+
if (ptr == nullptr) {
144+
return true; // as with free(3), freeing null is a noop.
145145
}
146146
#ifdef ASSERT
147147
if (ZapResourceArea) memset(ptr, badResourceValue, size); // zap freed memory
@@ -174,8 +174,8 @@ class Arena : public CHeapObjBase {
174174
private:
175175
// Reset this Arena to empty, access will trigger grow if necessary
176176
void reset(void) {
177-
_first = _chunk = NULL;
178-
_hwm = _max = NULL;
177+
_first = _chunk = nullptr;
178+
_hwm = _max = nullptr;
179179
set_size_in_bytes(0);
180180
}
181181
};

0 commit comments

Comments (0)