 28 |  28 | #include "runtime/globals.hpp"
 29 |  29 | #include "utilities/globalDefinitions.hpp"
 30 |  30 | #include "utilities/macros.hpp"
 31 |     | -#ifdef COMPILER1
 32 |     | -#include "c1/c1_globals.hpp"
 33 |     | -#endif
 34 |     | -#ifdef COMPILER2
 35 |     | -#include "opto/c2_globals.hpp"
 36 |     | -#endif
 37 |  31 |
 38 |  32 | #include <new>
 39 |  33 |
 40 |     | -// The byte alignment to be used by Arena::Amalloc. See bugid 4169348.
 41 |     | -// Note: this value must be a power of 2
 42 |     | -
 43 |     | -#define ARENA_AMALLOC_ALIGNMENT (2*BytesPerWord)
 44 |     | -
 45 |     | -#define ARENA_ALIGN_M1 (((size_t)(ARENA_AMALLOC_ALIGNMENT)) - 1)
 46 |     | -#define ARENA_ALIGN_MASK (~((size_t)ARENA_ALIGN_M1))
 47 |     | -#define ARENA_ALIGN(x) ((((size_t)(x)) + ARENA_ALIGN_M1) & ARENA_ALIGN_MASK)
 48 |     | -
 49 |  34 | class AllocFailStrategy {
 50 |  35 |  public:
 51 |  36 |   enum AllocFailEnum { EXIT_OOM, RETURN_NULL };
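The ARENA_ALIGN machinery deleted above is ordinary power-of-two round-up arithmetic, presumably relocated along with the Arena code this change extracts. A minimal standalone sketch of the same computation, assuming BytesPerWord == 8 (so a 16-byte alignment); the names here are illustrative, not HotSpot's:

#include <cassert>
#include <cstddef>

// Stand-in for the removed macros, assuming 2*BytesPerWord == 16.
constexpr size_t kAmallocAlignment = 16;
constexpr size_t kAlignM1   = kAmallocAlignment - 1;  // low bits set
constexpr size_t kAlignMask = ~kAlignM1;              // low bits cleared

// Round x up to the next multiple of the (power-of-two) alignment.
constexpr size_t arena_align(size_t x) { return (x + kAlignM1) & kAlignMask; }

int main() {
  assert(arena_align(1)  == 16);  // small requests round up to one unit
  assert(arena_align(16) == 16);  // already-aligned sizes are unchanged
  assert(arena_align(17) == 32);  // crossing a boundary takes the next unit
  return 0;
}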
@@ -307,226 +292,15 @@ class MetaspaceObj {
307 | 292 |
308 | 293 | // Base class for classes that constitute name spaces.
309 | 294 |
    | 295 | +class Arena;
    | 296 | +
310 | 297 | class AllStatic {
311 | 298 |  public:
312 | 299 |   AllStatic() { ShouldNotCallThis(); }
313 | 300 |   ~AllStatic() { ShouldNotCallThis(); }
314 | 301 | };
315 | 302 |
316 | 303 |
317 |     | -//------------------------------Chunk------------------------------------------
318 |     | -// Linked list of raw memory chunks
319 |     | -class Chunk: CHeapObj<mtChunk> {
320 |     | -  friend class VMStructs;
321 |     | -
322 |     | - protected:
323 |     | -  Chunk*       _next;   // Next Chunk in list
324 |     | -  const size_t _len;    // Size of this Chunk
325 |     | - public:
326 |     | -  void* operator new(size_t size, AllocFailType alloc_failmode, size_t length) throw();
327 |     | -  void operator delete(void* p);
328 |     | -  Chunk(size_t length);
329 |     | -
330 |     | -  enum {
331 |     | -    // default sizes; make them slightly smaller than 2**k to guard against
332 |     | -    // buddy-system style malloc implementations
333 |     | -#ifdef _LP64
334 |     | -    slack = 40,             // [RGV] Not sure if this is right, but make it
335 |     | -                            //       a multiple of 8.
336 |     | -#else
337 |     | -    slack = 20,             // suspected sizeof(Chunk) + internal malloc headers
338 |     | -#endif
339 |     | -
340 |     | -    tiny_size   = 256 - slack,  // Size of first chunk (tiny)
341 |     | -    init_size   = 1*K - slack,  // Size of first chunk (normal aka small)
342 |     | -    medium_size = 10*K - slack, // Size of medium-sized chunk
343 |     | -    size        = 32*K - slack, // Default size of an Arena chunk (following the first)
344 |     | -    non_pool_size = init_size + 32 // An initial size which is not one of above
345 |     | -  };
346 |     | -
347 |     | -  void chop();       // Chop this chunk
348 |     | -  void next_chop();  // Chop next chunk
349 |     | -  static size_t aligned_overhead_size(void) { return ARENA_ALIGN(sizeof(Chunk)); }
350 |     | -  static size_t aligned_overhead_size(size_t byte_size) { return ARENA_ALIGN(byte_size); }
351 |     | -
352 |     | -  size_t length() const   { return _len; }
353 |     | -  Chunk* next() const     { return _next; }
354 |     | -  void set_next(Chunk* n) { _next = n; }
355 |     | -  // Boundaries of data area (possibly unused)
356 |     | -  char* bottom() const { return ((char*) this) + aligned_overhead_size(); }
357 |     | -  char* top()    const { return bottom() + _len; }
358 |     | -  bool contains(char* p) const { return bottom() <= p && p <= top(); }
359 |     | -
360 |     | -  // Start the chunk_pool cleaner task
361 |     | -  static void start_chunk_pool_cleaner_task();
362 |     | -
363 |     | -  static void clean_chunk_pool();
364 |     | -};
365 |     | -
366 |     | -//------------------------------Arena------------------------------------------
367 |     | -// Fast allocation of memory
368 |     | -class Arena : public CHeapObj<mtNone> {
369 |     | -protected:
370 |     | -  friend class ResourceMark;
371 |     | -  friend class HandleMark;
372 |     | -  friend class NoHandleMark;
373 |     | -  friend class VMStructs;
374 |     | -
375 |     | -  MEMFLAGS _flags;        // Memory tracking flags
376 |     | -
377 |     | -  Chunk *_first;          // First chunk
378 |     | -  Chunk *_chunk;          // current chunk
379 |     | -  char *_hwm, *_max;      // High water mark and max in current chunk
380 |     | -  // Get a new Chunk of at least size x
381 |     | -  void* grow(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
382 |     | -  size_t _size_in_bytes;  // Size of arena (used for native memory tracking)
383 |     | -
384 |     | -  NOT_PRODUCT(static julong _bytes_allocated;) // total #bytes allocated since start
385 |     | -  friend class AllocStats;
386 |     | -  debug_only(void* malloc(size_t size);)
387 |     | -  debug_only(void* internal_malloc_4(size_t x);)
388 |     | -  NOT_PRODUCT(void inc_bytes_allocated(size_t x);)
389 |     | -
390 |     | -  void signal_out_of_memory(size_t request, const char* whence) const;
391 |     | -
392 |     | -  bool check_for_overflow(size_t request, const char* whence,
393 |     | -                          AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) const {
394 |     | -    if (UINTPTR_MAX - request < (uintptr_t)_hwm) {
395 |     | -      if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
396 |     | -        return false;
397 |     | -      }
398 |     | -      signal_out_of_memory(request, whence);
399 |     | -    }
400 |     | -    return true;
401 |     | -  }
402 |     | -
403 |     | - public:
404 |     | -  Arena(MEMFLAGS memflag);
405 |     | -  Arena(MEMFLAGS memflag, size_t init_size);
406 |     | -  ~Arena();
407 |     | -  void destruct_contents();
408 |     | -  char* hwm() const { return _hwm; }
409 |     | -
410 |     | -  // new operators
411 |     | -  void* operator new (size_t size) throw();
412 |     | -  void* operator new (size_t size, const std::nothrow_t& nothrow_constant) throw();
413 |     | -
414 |     | -  // dynamic memory type tagging
415 |     | -  void* operator new(size_t size, MEMFLAGS flags) throw();
416 |     | -  void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw();
417 |     | -  void operator delete(void* p);
418 |     | -
419 |     | -  // Fast allocate in the arena. Common case is: pointer test + increment.
420 |     | -  void* Amalloc(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
421 |     | -    assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT) , "should be a power of 2");
422 |     | -    x = ARENA_ALIGN(x);
423 |     | -    debug_only(if (UseMallocOnly) return malloc(x);)
424 |     | -    if (!check_for_overflow(x, "Arena::Amalloc", alloc_failmode))
425 |     | -      return NULL;
426 |     | -    NOT_PRODUCT(inc_bytes_allocated(x);)
427 |     | -    if (_hwm + x > _max) {
428 |     | -      return grow(x, alloc_failmode);
429 |     | -    } else {
430 |     | -      char *old = _hwm;
431 |     | -      _hwm += x;
432 |     | -      return old;
433 |     | -    }
434 |     | -  }
435 |     | -  // Further assume size is padded out to words
436 |     | -  void *Amalloc_4(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
437 |     | -    assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
438 |     | -    debug_only(if (UseMallocOnly) return malloc(x);)
439 |     | -    if (!check_for_overflow(x, "Arena::Amalloc_4", alloc_failmode))
440 |     | -      return NULL;
441 |     | -    NOT_PRODUCT(inc_bytes_allocated(x);)
442 |     | -    if (_hwm + x > _max) {
443 |     | -      return grow(x, alloc_failmode);
444 |     | -    } else {
445 |     | -      char *old = _hwm;
446 |     | -      _hwm += x;
447 |     | -      return old;
448 |     | -    }
449 |     | -  }
450 |     | -
451 |     | -  // Allocate with 'double' alignment. It is 8 bytes on sparc.
452 |     | -  // In other cases Amalloc_D() should be the same as Amalloc_4().
453 |     | -  void* Amalloc_D(size_t x, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM) {
454 |     | -    assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" );
455 |     | -    debug_only(if (UseMallocOnly) return malloc(x);)
456 |     | -#if defined(SPARC) && !defined(_LP64)
457 |     | -#define DALIGN_M1 7
458 |     | -    size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm;
459 |     | -    x += delta;
460 |     | -#endif
461 |     | -    if (!check_for_overflow(x, "Arena::Amalloc_D", alloc_failmode))
462 |     | -      return NULL;
463 |     | -    NOT_PRODUCT(inc_bytes_allocated(x);)
464 |     | -    if (_hwm + x > _max) {
465 |     | -      return grow(x, alloc_failmode); // grow() returns a result aligned >= 8 bytes.
466 |     | -    } else {
467 |     | -      char *old = _hwm;
468 |     | -      _hwm += x;
469 |     | -#if defined(SPARC) && !defined(_LP64)
470 |     | -      old += delta;               // align to 8-bytes
471 |     | -#endif
472 |     | -      return old;
473 |     | -    }
474 |     | -  }
475 |     | -
476 |     | -  // Fast delete in area. Common case is: NOP (except for storage reclaimed)
477 |     | -  void Afree(void *ptr, size_t size) {
478 |     | -#ifdef ASSERT
479 |     | -    if (ZapResourceArea) memset(ptr, badResourceValue, size); // zap freed memory
480 |     | -    if (UseMallocOnly) return;
481 |     | -#endif
482 |     | -    if (((char*)ptr) + size == _hwm) _hwm = (char*)ptr;
483 |     | -  }
484 |     | -
485 |     | -  void *Arealloc( void *old_ptr, size_t old_size, size_t new_size,
486 |     | -      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
487 |     | -
488 |     | -  // Move contents of this arena into an empty arena
489 |     | -  Arena *move_contents(Arena *empty_arena);
490 |     | -
491 |     | -  // Determine if pointer belongs to this Arena or not.
492 |     | -  bool contains( const void *ptr ) const;
493 |     | -
494 |     | -  // Total of all chunks in use (not thread-safe)
495 |     | -  size_t used() const;
496 |     | -
497 |     | -  // Total # of bytes used
498 |     | -  size_t size_in_bytes() const { return _size_in_bytes; };
499 |     | -  void set_size_in_bytes(size_t size);
500 |     | -
501 |     | -  static void free_malloced_objects(Chunk* chunk, char* hwm, char* max, char* hwm2) PRODUCT_RETURN;
502 |     | -  static void free_all(char** start, char** end) PRODUCT_RETURN;
503 |     | -
504 |     | -private:
505 |     | -  // Reset this Arena to empty, access will trigger grow if necessary
506 |     | -  void reset(void) {
507 |     | -    _first = _chunk = NULL;
508 |     | -    _hwm = _max = NULL;
509 |     | -    set_size_in_bytes(0);
510 |     | -  }
511 |     | -};
512 |     | -
513 |     | -// One of the following macros must be used when allocating
514 |     | -// an array or object from an arena
515 |     | -#define NEW_ARENA_ARRAY(arena, type, size) \
516 |     | -  (type*) (arena)->Amalloc((size) * sizeof(type))
517 |     | -
518 |     | -#define REALLOC_ARENA_ARRAY(arena, type, old, old_size, new_size)    \
519 |     | -  (type*) (arena)->Arealloc((char*)(old), (old_size) * sizeof(type), \
520 |     | -                            (new_size) * sizeof(type) )
521 |     | -
522 |     | -#define FREE_ARENA_ARRAY(arena, type, old, size) \
523 |     | -  (arena)->Afree((char*)(old), (size) * sizeof(type))
524 |     | -
525 |     | -#define NEW_ARENA_OBJ(arena, type) \
526 |     | -  NEW_ARENA_ARRAY(arena, type, 1)
527 |     | -
528 |     | -
529 |     | -//%note allocation_1
530 | 304 | extern char* resource_allocate_bytes(size_t size,
531 | 305 |        AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
532 | 306 | extern char* resource_allocate_bytes(Thread* thread, size_t size,
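The deleted Arena::Amalloc is a classic bump-pointer allocator: the common case is a single limit test plus a pointer increment, with check_for_overflow guarding the `_hwm + x` arithmetic against address wraparound. A standalone toy model of that fast path follows; names are illustrative, and the real class also chains chunks for wholesale freeing and honors the EXIT_OOM/RETURN_NULL failure modes, both elided here:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdlib>

struct ToyArena {
  char* _hwm = nullptr;  // high-water mark: next free byte in current chunk
  char* _max = nullptr;  // one past the end of the current chunk

  // Slow path: grab a fresh chunk from malloc (chunks leak in this toy).
  void* grow(size_t x) {
    size_t len = std::max(x, size_t(32 * 1024));
    char* chunk = static_cast<char*>(std::malloc(len));
    if (chunk == nullptr) return nullptr;
    _hwm = chunk + x;
    _max = chunk + len;
    return chunk;
  }

  void* amalloc(size_t x) {
    x = (x + 15) & ~size_t(15);                    // ARENA_ALIGN equivalent
    // Wraparound guard, as in check_for_overflow above.
    if (UINTPTR_MAX - x < (uintptr_t)_hwm) return nullptr;
    if (_hwm + x > _max) return grow(x);           // chunk exhausted: slow path
    char* old = _hwm;                              // fast path: bump the pointer
    _hwm += x;
    return old;
  }
};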
@@ -574,17 +348,9 @@ class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
574 | 348 |   void* operator new [](size_t size, const std::nothrow_t& nothrow_constant,
575 | 349 |       allocation_type type, MEMFLAGS flags) throw();
576 | 350 |
577 |     | -  void* operator new(size_t size, Arena *arena) throw() {
578 |     | -    address res = (address)arena->Amalloc(size);
579 |     | -    DEBUG_ONLY(set_allocation_type(res, ARENA);)
580 |     | -    return res;
581 |     | -  }
    | 351 | +  void* operator new(size_t size, Arena *arena) throw();
582 | 352 |
583 |     | -  void* operator new [](size_t size, Arena *arena) throw() {
584 |     | -    address res = (address)arena->Amalloc(size);
585 |     | -    DEBUG_ONLY(set_allocation_type(res, ARENA);)
586 |     | -    return res;
587 |     | -  }
    | 353 | +  void* operator new [](size_t size, Arena *arena) throw();
588 | 354 |
589 | 355 |   void* operator new(size_t size) throw() {
590 | 356 |     address res = (address)resource_allocate_bytes(size);
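Consistent with the `class Arena;` forward declaration added earlier, this hunk turns ResourceObj's arena placement-new operators into out-of-line declarations: their bodies call Arena::Amalloc, which needs the full class definition this header no longer provides. A standalone sketch of the underlying pattern with toy types (not the HotSpot API): objects are carved out of an arena by a placement operator new and reclaimed wholesale with the arena, never deleted individually.

#include <cstddef>
#include <new>

struct ToyArena {
  alignas(16) char buf[1024];
  size_t used = 0;
  void* amalloc(size_t n) {        // bump allocation; no bounds check in toy
    n = (n + 15) & ~size_t(15);
    void* p = buf + used;
    used += n;
    return p;
  }
};

struct Node {
  int value;
  explicit Node(int v) : value(v) {}
  // Mirrors the declarations in the hunk: route 'new' into the arena.
  void* operator new(size_t size, ToyArena* arena) { return arena->amalloc(size); }
};

int main() {
  ToyArena arena;
  Node* n = new (&arena) Node(42);  // allocated inside the arena's buffer
  return n->value == 42 ? 0 : 1;    // no delete: storage dies with the arena
}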