@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -56,9 +56,9 @@ class ChunkPool {
   static ChunkPool _pools[_num_pools];
 
  public:
-  ChunkPool(size_t size) : _first(NULL), _num_chunks(0), _size(size) {}
+  ChunkPool(size_t size) : _first(nullptr), _num_chunks(0), _size(size) {}
 
-  // Allocate a chunk from the pool; returns NULL if pool is empty.
+  // Allocate a chunk from the pool; returns null if pool is empty.
   Chunk* allocate() {
     ThreadCritical tc;
     Chunk* c = _first;
@@ -81,26 +81,26 @@ class ChunkPool {
   // Prune the pool
   void prune() {
     static const int blocksToKeep = 5;
-    Chunk* cur = NULL;
+    Chunk* cur = nullptr;
     Chunk* next;
     // if we have more than n chunks, free all of them
     ThreadCritical tc;
     if (_num_chunks > blocksToKeep) {
       // free chunks at end of queue, for better locality
       cur = _first;
       for (size_t i = 0; i < (blocksToKeep - 1); i++) {
-        assert(cur != NULL, "counter out of sync?");
+        assert(cur != nullptr, "counter out of sync?");
         cur = cur->next();
       }
-      assert(cur != NULL, "counter out of sync?");
+      assert(cur != nullptr, "counter out of sync?");
 
       next = cur->next();
-      cur->set_next(NULL);
+      cur->set_next(nullptr);
       cur = next;
 
       // Free all remaining chunks while in ThreadCritical lock
       // so NMT adjustment is stable.
-      while (cur != NULL) {
+      while (cur != nullptr) {
         next = cur->next();
         os::free(cur);
         _num_chunks--;
@@ -115,14 +115,14 @@ class ChunkPool {
     }
   }
 
-  // Given a (inner payload) size, return the pool responsible for it, or NULL if the size is non-standard
+  // Given a (inner payload) size, return the pool responsible for it, or null if the size is non-standard
   static ChunkPool* get_pool_for_size(size_t size) {
     for (int i = 0; i < _num_pools; i++) {
       if (_pools[i]._size == size) {
         return _pools + i;
       }
     }
-    return NULL;
+    return nullptr;
   }
 
 };
@@ -170,17 +170,17 @@ void* Chunk::operator new(size_t sizeofChunk, AllocFailType alloc_failmode, siz
          SIZE_FORMAT ".", length);
   // Try to reuse a freed chunk from the pool
   ChunkPool* pool = ChunkPool::get_pool_for_size(length);
-  if (pool != NULL) {
+  if (pool != nullptr) {
     Chunk* c = pool->allocate();
-    if (c != NULL) {
+    if (c != nullptr) {
       assert(c->length() == length, "wrong length?");
       return c;
     }
   }
   // Either the pool was empty, or this is a non-standard length. Allocate a new Chunk from C-heap.
   size_t bytes = ARENA_ALIGN(sizeofChunk) + length;
   void* p = os::malloc(bytes, mtChunk, CALLER_PC);
-  if (p == NULL && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
+  if (p == nullptr && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
     vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "Chunk::new");
   }
   // We rely on arena alignment <= malloc alignment.
@@ -192,7 +192,7 @@ void Chunk::operator delete(void* p) {
   // If this is a standard-sized chunk, return it to its pool; otherwise free it.
   Chunk* c = (Chunk*)p;
   ChunkPool* pool = ChunkPool::get_pool_for_size(c->length());
-  if (pool != NULL) {
+  if (pool != nullptr) {
     pool->free(c);
   } else {
     ThreadCritical tc;  // Free chunks under TC lock so that NMT adjustment is stable.
@@ -201,7 +201,7 @@ void Chunk::operator delete(void* p) {
 }
 
 Chunk::Chunk(size_t length) : _len(length) {
-  _next = NULL;   // Chain on the linked list
+  _next = nullptr;   // Chain on the linked list
 }
 
 void Chunk::chop() {
@@ -217,7 +217,7 @@ void Chunk::chop() {
 
 void Chunk::next_chop() {
   _next->chop();
-  _next = NULL;
+  _next = nullptr;
 }
 
 void Chunk::start_chunk_pool_cleaner_task() {
@@ -276,7 +276,7 @@ void Arena::destruct_contents() {
   // reset size before chop to avoid a rare racing condition
   // that can have total arena memory exceed total chunk memory
   set_size_in_bytes(0);
-  if (_first != NULL) {
+  if (_first != nullptr) {
     _first->chop();
   }
   reset();
@@ -312,9 +312,9 @@ void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
   Chunk *k = _chunk;            // Get filled-up chunk address
   _chunk = new (alloc_failmode, len) Chunk(len);
 
-  if (_chunk == NULL) {
+  if (_chunk == nullptr) {
     _chunk = k;                 // restore the previous value of _chunk
-    return NULL;
+    return nullptr;
   }
   if (k) k->set_next(_chunk);   // Append new chunk to end of linked list
   else _first = _chunk;
@@ -332,11 +332,11 @@ void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
 void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFailType alloc_failmode) {
   if (new_size == 0) {
     Afree(old_ptr, old_size); // like realloc(3)
-    return NULL;
+    return nullptr;
   }
-  if (old_ptr == NULL) {
+  if (old_ptr == nullptr) {
     assert(old_size == 0, "sanity");
-    return Amalloc(new_size, alloc_failmode); // as with realloc(3), a NULL old ptr is equivalent to malloc(3)
+    return Amalloc(new_size, alloc_failmode); // as with realloc(3), a null old ptr is equivalent to malloc(3)
   }
   char *c_old = (char*)old_ptr; // Handy name
   // Stupid fast special case
@@ -358,8 +358,8 @@ void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFail
 
   // Oops, got to relocate guts
   void *new_ptr = Amalloc(new_size, alloc_failmode);
-  if (new_ptr == NULL) {
-    return NULL;
+  if (new_ptr == nullptr) {
+    return nullptr;
   }
   memcpy( new_ptr, c_old, old_size );
   Afree(c_old,old_size);  // Mostly done to keep stats accurate
@@ -369,7 +369,7 @@ void *Arena::Arealloc(void* old_ptr, size_t old_size, size_t new_size, AllocFail
 
 // Determine if pointer belongs to this Arena or not.
 bool Arena::contains( const void *ptr ) const {
-  if (_chunk == NULL) return false;
+  if (_chunk == nullptr) return false;
   if ((void*)_chunk->bottom() <= ptr && ptr < (void*)_hwm)
     return true;                // Check for in this chunk
   for (Chunk *c = _first; c; c = c->next()) {
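For readers skimming the conversion, the pooling pattern that ChunkPool implements can be summarized in a small standalone sketch. DemoChunk and DemoChunkPool below are invented names for illustration only; the real class additionally holds ThreadCritical in allocate/free/prune and routes memory through os::malloc/os::free so NMT accounting stays consistent, all of which is omitted here.

#include <cassert>
#include <cstddef>

// Illustrative stand-in for the real Chunk, which sits in front of the
// arena payload it describes; only the freelist linkage matters here.
struct DemoChunk {
  DemoChunk* _next = nullptr;
  size_t     _len;
  explicit DemoChunk(size_t len) : _len(len) {}
  DemoChunk* next() const     { return _next; }
  void set_next(DemoChunk* c) { _next = c; }
  size_t length() const       { return _len; }
};

class DemoChunkPool {
  DemoChunk*   _first = nullptr; // head of the freelist of cached chunks
  size_t       _num_chunks = 0;  // number of cached chunks
  const size_t _size;            // payload size this pool serves

 public:
  explicit DemoChunkPool(size_t size) : _size(size) {}

  // Pop a cached chunk, or return nullptr if the freelist is empty.
  DemoChunk* allocate() {
    DemoChunk* c = _first;
    if (c != nullptr) {
      _first = c->next();
      _num_chunks--;
    }
    return c;
  }

  // Push a returned chunk onto the freelist instead of freeing it.
  void free(DemoChunk* c) {
    assert(c->length() == _size && "wrong pool for this chunk size");
    c->set_next(_first);
    _first = c;
    _num_chunks++;
  }

  // Keep at most blocksToKeep (>= 1) cached chunks; free the tail of the
  // list first, mirroring prune() above ("for better locality").
  void prune(size_t blocksToKeep = 5) {
    if (_num_chunks <= blocksToKeep) return;
    DemoChunk* cur = _first;
    for (size_t i = 0; i < blocksToKeep - 1; i++) {
      cur = cur->next();
    }
    DemoChunk* tail = cur->next();
    cur->set_next(nullptr);       // detach everything past the keep limit
    while (tail != nullptr) {
      DemoChunk* next = tail->next();
      delete tail;                // the real pool uses os::free under a lock
      _num_chunks--;
      tail = next;
    }
  }
};

int main() {
  DemoChunkPool pool(64);
  assert(pool.allocate() == nullptr);  // empty pool yields nullptr
  pool.free(new DemoChunk(64));        // cache a chunk for reuse
  DemoChunk* c = pool.allocate();      // reuse instead of re-allocating
  assert(c != nullptr && c->length() == 64);
  delete c;
  return 0;
}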
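Similarly, the realloc(3)-style contract that Arena::Arealloc follows, which several of the nullptr changes above touch, can be sketched in isolation. demo_arealloc is a hypothetical free-standing version built on the C heap; the real method allocates from the arena and can also extend the topmost allocation of the current chunk in place, which this sketch skips.

#include <cassert>
#include <cstdlib>
#include <cstring>

// Invented analogue of the Arealloc contract: size 0 frees, a null old
// pointer behaves like malloc, and a failed allocation returns nullptr
// while leaving the old block intact.
void* demo_arealloc(void* old_ptr, size_t old_size, size_t new_size) {
  if (new_size == 0) {         // like realloc(3): new size 0 frees
    std::free(old_ptr);
    return nullptr;
  }
  if (old_ptr == nullptr) {    // like realloc(3): null old ptr acts as malloc
    assert(old_size == 0 && "sanity");
    return std::malloc(new_size);
  }
  void* new_ptr = std::malloc(new_size);
  if (new_ptr == nullptr) {
    return nullptr;            // allocation failed; old block left intact
  }
  std::memcpy(new_ptr, old_ptr, old_size < new_size ? old_size : new_size);
  std::free(old_ptr);
  return new_ptr;
}

int main() {
  void* p = demo_arealloc(nullptr, 0, 16);   // malloc path
  p = demo_arealloc(p, 16, 32);              // relocate-and-copy path
  p = demo_arealloc(p, 32, 0);               // free path, returns nullptr
  assert(p == nullptr);
  return 0;
}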