// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
#ifndef RUNTIME_VM_CLASS_TABLE_H_
#define RUNTIME_VM_CLASS_TABLE_H_
#include <memory>
#include <tuple>
#include <utility>
#include "platform/allocation.h"
#include "platform/assert.h"
#include "platform/atomic.h"
#include "platform/utils.h"
#include "vm/bitfield.h"
#include "vm/class_id.h"
#include "vm/flags.h"
#include "vm/globals.h"
#include "vm/tagged_pointer.h"
namespace dart {
class Class;
class ClassTable;
class Isolate;
class IsolateGroup;
class JSONArray;
class JSONObject;
class JSONStream;
template <typename T>
class MallocGrowableArray;
class ObjectPointerVisitor;
class PersistentHandle;
// A 64-bit bitmap describing unboxed fields in a class.
//
// There is a bit for each word in an instance of the class.
//
// Words corresponding to set bits must be ignored by the GC because they
// don't contain pointers. All words beyond the first 64 words of an object
// are expected to contain pointers.
class UnboxedFieldBitmap {
public:
UnboxedFieldBitmap() : bitmap_(0) {}
explicit UnboxedFieldBitmap(uint64_t bitmap) : bitmap_(bitmap) {}
UnboxedFieldBitmap(const UnboxedFieldBitmap&) = default;
UnboxedFieldBitmap& operator=(const UnboxedFieldBitmap&) = default;
DART_FORCE_INLINE bool Get(intptr_t position) const {
if (position >= Length()) return false;
return Utils::TestBit(bitmap_, position);
}
DART_FORCE_INLINE void Set(intptr_t position) {
ASSERT(position < Length());
bitmap_ |= Utils::Bit<decltype(bitmap_)>(position);
}
DART_FORCE_INLINE void Clear(intptr_t position) {
ASSERT(position < Length());
bitmap_ &= ~Utils::Bit<decltype(bitmap_)>(position);
}
DART_FORCE_INLINE uint64_t Value() const { return bitmap_; }
DART_FORCE_INLINE bool IsEmpty() const { return bitmap_ == 0; }
DART_FORCE_INLINE void Reset() { bitmap_ = 0; }
DART_FORCE_INLINE static constexpr intptr_t Length() {
return sizeof(decltype(bitmap_)) * kBitsPerByte;
}
private:
uint64_t bitmap_;
};
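// Usage sketch: how a heap scanner might consult the bitmap to skip words
// holding unboxed (non-pointer) data. Illustrative only, not code from this
// file; |instance_words|, |word_count| and |VisitPointer| are hypothetical.
//
//   void ScanInstance(uword* instance_words,
//                     intptr_t word_count,
//                     UnboxedFieldBitmap bitmap) {
//     for (intptr_t i = 0; i < word_count; i++) {
//       // Set bits mark unboxed words, which the GC must not treat as
//       // pointers. Get() returns false past Length(), so words beyond the
//       // first 64 are treated as pointers, matching the comment above.
//       if (bitmap.Get(i)) continue;
//       VisitPointer(instance_words[i]);
//     }
//   }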
// Allocator used to manage memory for ClassTable arrays and ClassTable
// objects themselves.
//
// This allocator provides delayed free functionality: normally class tables
// can't be freed unless all mutator and helper threads are stopped because
// some of these threads might be holding a pointer to a table which we
// want to free. Instead of stopping the world whenever we need to free
// a table (e.g. freeing the old table after growing), we delay freeing until
// an occasional GC, which needs to stop the world anyway.
class ClassTableAllocator : public ValueObject {
public:
ClassTableAllocator();
~ClassTableAllocator();
// Allocate an array of T with |len| elements.
//
// Does *not* initialize the memory.
template <class T>
inline T* Alloc(intptr_t len) {
return reinterpret_cast<T*>(dart::malloc(len * sizeof(T)));
}
// Allocate a zero initialized array of T with |len| elements.
template <class T>
inline T* AllocZeroInitialized(intptr_t len) {
return reinterpret_cast<T*>(dart::calloc(len, sizeof(T)));
}
// Clone the given |array| with |size| elements.
template <class T>
inline T* Clone(T* array, intptr_t size) {
if (array == nullptr) {
ASSERT(size == 0);
return nullptr;
}
auto result = Alloc<T>(size);
memmove(result, array, size * sizeof(T));
return result;
}
// Copy |size| elements from the given |array| into a new
// array with space for |new_size| elements. Then |Free|
// the original |array|.
//
// |new_size| is expected to be larger than |size|.
template <class T>
inline T* Realloc(T* array, intptr_t size, intptr_t new_size) {
ASSERT(size < new_size);
auto result = AllocZeroInitialized<T>(new_size);
if (size != 0) {
ASSERT(result != nullptr);
memmove(result, array, size * sizeof(T));
}
Free(array);
return result;
}
// Schedule deletion of the given ClassTable.
void Free(ClassTable* table);
// Schedule freeing of the given pointer.
void Free(void* ptr);
// Free all objects which were scheduled by |Free|. Expected to only be
// called on |IsolateGroup| shutdown or when the world is stopped and no
// thread can be using a stale class table pointer.
void FreePending();
private:
typedef void (*Deleter)(void*);
MallocGrowableArray<std::pair<void*, Deleter>>* pending_freed_;
};
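// Usage sketch of the delayed-free contract described above. Illustrative
// only; |allocator| and the sizes are hypothetical.
//
//   ClassTableAllocator* allocator = ...;
//   intptr_t* column = allocator->AllocZeroInitialized<intptr_t>(16);
//   // Grow the column. Realloc copies the old contents into a larger array
//   // and passes the old array to Free, which only *schedules* the memory
//   // for deletion, because other threads may still be reading it.
//   column = allocator->Realloc(column, /*size=*/16, /*new_size=*/32);
//   // Later, once the world is stopped (e.g. at a GC safepoint), the
//   // scheduled memory is actually released:
//   allocator->FreePending();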
// A table with the given |Columns| indexed by class id.
//
// Each column is a contiguous array of the given type. All columns have
// the same number of used elements (|num_cids()|) and the same capacity.
template <typename CidType, typename... Columns>
class CidIndexedTable {
public:
explicit CidIndexedTable(ClassTableAllocator* allocator)
: allocator_(allocator) {}
~CidIndexedTable() {
std::apply([&](auto&... column) { (allocator_->Free(column.load()), ...); },
columns_);
}
CidIndexedTable(const CidIndexedTable& other) = delete;
void SetNumCidsAndCapacity(intptr_t new_num_cids, intptr_t new_capacity) {
columns_ = std::apply(
[&](auto&... column) {
return std::make_tuple(
allocator_->Realloc(column.load(), num_cids_, new_capacity)...);
},
columns_);
capacity_ = new_capacity;
SetNumCids(new_num_cids);
}
void AllocateIndex(intptr_t index, bool* did_grow) {
*did_grow = EnsureCapacity(index);
SetNumCids(Utils::Maximum(num_cids_, index + 1));
}
intptr_t AddRow(bool* did_grow) {
*did_grow = EnsureCapacity(num_cids_);
intptr_t id = num_cids_;
SetNumCids(num_cids_ + 1);
return id;
}
void ShrinkTo(intptr_t new_num_cids) {
ASSERT(new_num_cids <= num_cids_);
num_cids_ = new_num_cids;
}
bool IsValidIndex(intptr_t index) const {
return 0 <= index && index < num_cids_;
}
void CopyFrom(const CidIndexedTable& other) {
ASSERT(allocator_ == other.allocator_);
std::apply([&](auto&... column) { (allocator_->Free(column.load()), ...); },
columns_);
columns_ = std::apply(
[&](auto&... column) {
return std::make_tuple(
allocator_->Clone(column.load(), other.num_cids_)...);
},
other.columns_);
capacity_ = num_cids_ = other.num_cids_;
}
void Remap(intptr_t* old_to_new_cid) {
CidIndexedTable clone(allocator_);
clone.CopyFrom(*this);
RemapAllColumns(clone, old_to_new_cid,
std::index_sequence_for<Columns...>{});
}
template <
intptr_t kColumnIndex,
typename T = std::tuple_element_t<kColumnIndex, std::tuple<Columns...>>>
T* GetColumn() {
return std::get<kColumnIndex>(columns_).load();
}
template <
intptr_t kColumnIndex,
typename T = std::tuple_element_t<kColumnIndex, std::tuple<Columns...>>>
const T* GetColumn() const {
return std::get<kColumnIndex>(columns_).load();
}
template <
intptr_t kColumnIndex,
typename T = std::tuple_element_t<kColumnIndex, std::tuple<Columns...>>>
T& At(intptr_t index) {
ASSERT(IsValidIndex(index));
return GetColumn<kColumnIndex>()[index];
}
template <
intptr_t kColumnIndex,
typename T = std::tuple_element_t<kColumnIndex, std::tuple<Columns...>>>
const T& At(intptr_t index) const {
ASSERT(IsValidIndex(index));
return GetColumn<kColumnIndex>()[index];
}
intptr_t num_cids() const { return num_cids_; }
intptr_t capacity() const { return capacity_; }
private:
friend class ClassTable;
// Wrapper around AcqRelAtomic<T*> which makes it assignable and copyable
// so that we can put it inside a std::tuple.
template <typename T>
struct Ptr {
Ptr() : ptr(nullptr) {}
Ptr(T* ptr) : ptr(ptr) {} // NOLINT
Ptr(const Ptr& other) { ptr.store(other.ptr.load()); }
Ptr& operator=(const Ptr& other) {
ptr.store(other.load());
return *this;
}
T* load() const { return ptr.load(); }
AcqRelAtomic<T*> ptr = {nullptr};
};
void SetNumCids(intptr_t new_num_cids) {
if (new_num_cids > kClassIdTagMax) {
FATAL("Too many classes");
}
num_cids_ = new_num_cids;
}
bool EnsureCapacity(intptr_t index) {
if (index >= capacity_) {
SetNumCidsAndCapacity(num_cids_, index + kCapacityIncrement);
return true;
}
return false;
}
template <intptr_t kColumnIndex>
void RemapColumn(const CidIndexedTable& old, intptr_t* old_to_new_cid) {
auto new_column = GetColumn<kColumnIndex>();
auto old_column = old.GetColumn<kColumnIndex>();
for (intptr_t i = 0; i < num_cids_; i++) {
new_column[old_to_new_cid[i]] = old_column[i];
}
}
template <std::size_t... Is>
void RemapAllColumns(const CidIndexedTable& old,
intptr_t* old_to_new_cid,
std::index_sequence<Is...>) {
(RemapColumn<Is>(old, old_to_new_cid), ...);
}
static constexpr intptr_t kCapacityIncrement = 256;
ClassTableAllocator* allocator_;
intptr_t num_cids_ = 0;
intptr_t capacity_ = 0;
std::tuple<Ptr<Columns>...> columns_;
};
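// Usage sketch: a table with two columns indexed by cid. Illustrative only;
// the column types and |allocator| are hypothetical.
//
//   CidIndexedTable<classid_t, intptr_t, const char*> table(&allocator);
//   bool did_grow;
//   intptr_t cid = table.AddRow(&did_grow);
//   table.At<0>(cid) = 42;         // first column: intptr_t
//   table.At<1>(cid) = "example";  // second column: const char*
//   intptr_t value = table.At<0>(cid);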
// Registry of all known classes.
//
// The GC will only use information about instance size and unboxed field maps
// to scan instances and will not access class objects themselves. This
// information is stored in separate columns of the |classes_| table.
//
// # Concurrency & atomicity
//
// This table is read concurrently without locking (e.g. by GC threads) so
// there are some invariants that need to be observed when working with it.
//
// * When the table is updated (e.g. when the table is grown or a new class
// is registered in it) there must be a release barrier after the update.
// Such a barrier ensures that stores which populate the table are not
// reordered past the store which exposes the newly grown table or the new
// class id;
// * Old versions of the table can only be freed when the world is stopped
// (no mutator or helper threads are running), to avoid freeing a table
// which some other thread is still reading from.
//
// Note that torn reads are not a concern (e.g. it is fine to use
// memmove to copy class table contents) as long as an appropriate
// barrier is issued before the copy of the table can be observed.
//
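// Sketch of the publication pattern these invariants imply, assuming the
// acquire/release semantics of the AcqRelAtomic wrapper from
// platform/atomic.h. Illustrative only; |table_| and |CopyAndGrow| are
// hypothetical.
//
//   // Writer (mutators may be running):
//   ClassPtr* grown = CopyAndGrow(table_.load(), new_capacity);
//   table_.store(grown);  // release: populating stores can't sink past this
//
//   // Reader (e.g. a GC thread):
//   ClassPtr* table = table_.load();  // acquire: sees a populated table
//   ClassPtr cls = table[cid];
//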
// # Hot reload
//
// Each IsolateGroup contains two ClassTable fields: |class_table| and
// |heap_walk_class_table|. GC visitors use the second field to get ClassTable
// instance which they will use for visiting pointers inside instances in
// the heap. Usually these two fields will be pointing to the same table,
// except when IsolateGroup is in the middle of reload.
//
// During a reload, |class_table| points to a copy of the original table.
// Kernel loading modifies this copy, while GC workers can continue using the
// original table, which remains available through |heap_walk_class_table|.
// If the hot reload succeeds, |heap_walk_class_table| is dropped and
// |class_table| becomes the source of truth. Otherwise, the original table
// is restored from |heap_walk_class_table|.
//
// See IsolateGroup methods CloneClassTableForReload, RestoreOriginalClassTable,
// DropOriginalClassTable.
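//
// Reload flow sketch (illustrative; the IsolateGroup methods named above are
// declared elsewhere):
//
//   group->CloneClassTableForReload();  // class_table = copy; heap walkers
//                                       // keep using heap_walk_class_table
//   ... kernel loading mutates group->class_table() ...
//   if (reload_succeeded) {
//     group->DropOriginalClassTable();  // the copy becomes source of truth
//   } else {
//     group->RestoreOriginalClassTable();
//   }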
class ClassTable : public MallocAllocated {
public:
explicit ClassTable(ClassTableAllocator* allocator);
~ClassTable();
ClassTable* Clone() const { return new ClassTable(*this); }
ClassPtr At(intptr_t cid) const {
if (IsTopLevelCid(cid)) {
return top_level_classes_.At<kClassIndex>(IndexFromTopLevelCid(cid));
}
return classes_.At<kClassIndex>(cid);
}
int32_t SizeAt(intptr_t index) const {
if (IsTopLevelCid(index)) {
return 0;
}
return classes_.At<kSizeIndex>(index);
}
void SetAt(intptr_t index, ClassPtr raw_cls);
void UpdateClassSize(intptr_t cid, ClassPtr raw_cls);
bool IsValidIndex(intptr_t cid) const {
if (IsTopLevelCid(cid)) {
return top_level_classes_.IsValidIndex(IndexFromTopLevelCid(cid));
}
return classes_.IsValidIndex(cid);
}
bool HasValidClassAt(intptr_t cid) const { return At(cid) != nullptr; }
UnboxedFieldBitmap GetUnboxedFieldsMapAt(intptr_t cid) const {
ASSERT(IsValidIndex(cid));
return classes_.At<kUnboxedFieldBitmapIndex>(cid);
}
void SetUnboxedFieldsMapAt(intptr_t cid, UnboxedFieldBitmap map) {
ASSERT(IsValidIndex(cid));
classes_.At<kUnboxedFieldBitmapIndex>(cid) = map;
}
#if !defined(PRODUCT)
bool ShouldTraceAllocationFor(intptr_t cid) {
return !IsTopLevelCid(cid) &&
(classes_.At<kAllocationTracingStateIndex>(cid) != kTracingDisabled);
}
void SetTraceAllocationFor(intptr_t cid, bool trace) {
classes_.At<kAllocationTracingStateIndex>(cid) =
trace ? kTraceAllocationBit : kTracingDisabled;
}
void SetCollectInstancesFor(intptr_t cid, bool trace) {
auto& slot = classes_.At<kAllocationTracingStateIndex>(cid);
if (trace) {
slot |= kCollectInstancesBit;
} else {
slot &= ~kCollectInstancesBit;
}
}
bool CollectInstancesFor(intptr_t cid) {
auto& slot = classes_.At<kAllocationTracingStateIndex>(cid);
return (slot & kCollectInstancesBit) != 0;
}
void UpdateCachedAllocationTracingStateTablePointer() {
cached_allocation_tracing_state_table_.store(
classes_.GetColumn<kAllocationTracingStateIndex>());
}
#else
void UpdateCachedAllocationTracingStateTablePointer() {}
#endif // !defined(PRODUCT)
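// Sketch of how the tracing-state byte above is used (illustrative; |table|
// is a hypothetical ClassTable*). The byte combines kTraceAllocationBit and
// kCollectInstancesBit (defined near the end of this class); generated
// allocation code can presumably locate the cached column through
// allocation_tracing_state_table_offset() below.
//
//   table->SetTraceAllocationFor(cid, true);
//   table->SetCollectInstancesFor(cid, true);
//   if (table->ShouldTraceAllocationFor(cid)) {
//     // Record the allocation; the slow path can inspect the individual
//     // bits via CollectInstancesFor(cid).
//   }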
#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
void PopulateUserVisibleNames();
const char* UserVisibleNameFor(intptr_t cid) {
if (!classes_.IsValidIndex(cid)) {
return nullptr;
}
return classes_.At<kClassNameIndex>(cid);
}
void SetUserVisibleNameFor(intptr_t cid, const char* name) {
ASSERT(classes_.At<kClassNameIndex>(cid) == nullptr);
classes_.At<kClassNameIndex>(cid) = name;
}
#endif // !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
intptr_t NumCids() const { return classes_.num_cids(); }
intptr_t Capacity() const { return classes_.capacity(); }
intptr_t NumTopLevelCids() const { return top_level_classes_.num_cids(); }
void Register(const Class& cls);
void AllocateIndex(intptr_t index);
void RegisterTopLevel(const Class& cls);
void UnregisterTopLevel(intptr_t index);
void Remap(intptr_t* old_to_new_cids);
void VisitObjectPointers(ObjectPointerVisitor* visitor);
// If a snapshot reader has populated the class table, then the sizes
// recorded in the table are not correct. Iterates through the table,
// updating the sizes.
void CopySizesFromClassObjects();
void Validate();
void Print();
#if defined(DART_PRECOMPILER)
void PrintObjectLayout(const char* filename);
#endif
#ifndef PRODUCT
// Describes layout of heap stats for code generation. See offset_extractor.cc
struct ArrayTraits {
static intptr_t elements_start_offset() { return 0; }
static constexpr intptr_t kElementSize = sizeof(uint8_t);
};
static intptr_t allocation_tracing_state_table_offset() {
static_assert(sizeof(cached_allocation_tracing_state_table_) == kWordSize);
return OFFSET_OF(ClassTable, cached_allocation_tracing_state_table_);
}
void AllocationProfilePrintJSON(JSONStream* stream, bool internal);
void PrintToJSONObject(JSONObject* object);
#endif // !PRODUCT
// Deallocates table copies. Do not call during concurrent access to table.
void FreeOldTables();
static bool IsTopLevelCid(intptr_t cid) { return cid >= kTopLevelCidOffset; }
static intptr_t IndexFromTopLevelCid(intptr_t cid) {
ASSERT(IsTopLevelCid(cid));
return cid - kTopLevelCidOffset;
}
static intptr_t CidFromTopLevelIndex(intptr_t index) {
return kTopLevelCidOffset + index;
}
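// Round-trip sketch for top-level class ids (illustrative): top-level cids
// live past kClassIdTagMax and map back to indices in |top_level_classes_|.
//
//   intptr_t cid = CidFromTopLevelIndex(index);
//   ASSERT(IsTopLevelCid(cid));
//   ASSERT(IndexFromTopLevelCid(cid) == index);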
private:
friend class ClassTableAllocator;
friend class Dart;
friend Isolate* CreateWithinExistingIsolateGroup(IsolateGroup* group,
const char* name,
char** error);
friend class IsolateGroup; // for table()
static constexpr int kInitialCapacity = 512;
static constexpr intptr_t kTopLevelCidOffset = kClassIdTagMax + 1;
ClassTable(const ClassTable& original)
: allocator_(original.allocator_),
classes_(original.allocator_),
top_level_classes_(original.allocator_) {
classes_.CopyFrom(original.classes_);
#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
// Copying classes_ doesn't perform a deep copy. Ensure we duplicate
// the class names to avoid double-free crashes at shutdown.
for (intptr_t cid = 1; cid < classes_.num_cids(); ++cid) {
if (classes_.IsValidIndex(cid)) {
const char* cls_name = classes_.At<kClassNameIndex>(cid);
if (cls_name != nullptr) {
classes_.At<kClassNameIndex>(cid) = Utils::StrDup(cls_name);
}
}
}
#endif // !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
top_level_classes_.CopyFrom(original.top_level_classes_);
UpdateCachedAllocationTracingStateTablePointer();
}
void AllocateTopLevelIndex(intptr_t index);
ClassPtr* table() { return classes_.GetColumn<kClassIndex>(); }
// Used to drop recently added classes.
void SetNumCids(intptr_t num_cids, intptr_t num_tlc_cids) {
classes_.ShrinkTo(num_cids);
top_level_classes_.ShrinkTo(num_tlc_cids);
}
ClassTableAllocator* allocator_;
// Unfortunately the std::tuple used by CidIndexedTable does not have a
// stable layout, so we can't refer to its elements from generated code.
NOT_IN_PRODUCT(AcqRelAtomic<uint8_t*> cached_allocation_tracing_state_table_ =
{nullptr});
enum {
kClassIndex = 0,
kSizeIndex,
kUnboxedFieldBitmapIndex,
#if !defined(PRODUCT)
kAllocationTracingStateIndex,
#endif
#if !defined(PRODUCT) || defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
kClassNameIndex,
#endif
};
#if !defined(PRODUCT)
CidIndexedTable<ClassIdTagType,
ClassPtr,
uint32_t,
UnboxedFieldBitmap,
uint8_t,
const char*>
classes_;
#elif defined(FORCE_INCLUDE_SAMPLING_HEAP_PROFILER)
CidIndexedTable<ClassIdTagType,
ClassPtr,
uint32_t,
UnboxedFieldBitmap,
const char*>
classes_;
#else
CidIndexedTable<ClassIdTagType, ClassPtr, uint32_t, UnboxedFieldBitmap>
classes_;
#endif
#ifndef PRODUCT
enum {
kTracingDisabled = 0,
kTraceAllocationBit = (1 << 0),
kCollectInstancesBit = (1 << 1),
};
#endif // !PRODUCT
CidIndexedTable<classid_t, ClassPtr> top_level_classes_;
};
} // namespace dart
#endif // RUNTIME_VM_CLASS_TABLE_H_