//===--- RefCount.h ---------------------------------------------*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
#ifndef SWIFT_STDLIB_SHIMS_REFCOUNT_H
#define SWIFT_STDLIB_SHIMS_REFCOUNT_H
#include "Visibility.h"
#include "SwiftStdint.h"
#if !defined(__cplusplus)
// These definitions are placeholders for importing into Swift.
// They provide size and alignment but cannot be manipulated safely there.
typedef struct {
_Alignas(__swift_uintptr_t) __swift_uint32_t refCounts1 SWIFT_ATTRIBUTE_UNAVAILABLE;
__swift_uint32_t refCounts2 SWIFT_ATTRIBUTE_UNAVAILABLE;
} InlineRefCounts;
// not __cplusplus
#else
// __cplusplus
#include <type_traits>
#include <atomic>
#include <stddef.h>
#include <stdint.h>
#include <assert.h>
#include "llvm/Support/Compiler.h"
#include "swift/Basic/type_traits.h"
#include "swift/Runtime/Config.h"
#include "swift/Runtime/Debug.h"
// FIXME: Workaround for rdar://problem/18889711. 'Consume' does not require
// a barrier on ARM64, but LLVM doesn't know that. Although 'relaxed'
// is formally UB by C++11 language rules, we should be OK because neither
// the processor model nor the optimizer can realistically reorder our uses
// of 'consume'.
#if __arm64__ || __arm__
# define SWIFT_MEMORY_ORDER_CONSUME (std::memory_order_relaxed)
#else
# define SWIFT_MEMORY_ORDER_CONSUME (std::memory_order_consume)
#endif
/*
An object conceptually has three refcounts. These refcounts
are stored either "inline" in the field following the isa
or in a "side table entry" pointed to by the field following the isa.
The strong RC counts strong references to the object. When the strong RC
reaches zero the object is deinited, unowned reference reads become errors,
and weak reference reads become nil. The strong RC is stored as an extra
count: when the physical field is 0 the logical value is 1.
The unowned RC counts unowned references to the object. The unowned RC
also has an extra +1 on behalf of the strong references; this +1 is
decremented after deinit completes. When the unowned RC reaches zero
the object's allocation is freed.
The weak RC counts weak references to the object. The weak RC also has an
extra +1 on behalf of the unowned references; this +1 is decremented
after the object's allocation is freed. When the weak RC reaches zero
the object's side table entry is freed.
Objects initially start with no side table. They can gain a side table when:
* a weak reference is formed
and pending future implementation:
* strong RC or unowned RC overflows (inline RCs will be small on 32-bit)
* associated object storage is needed on an object
* etc
Gaining a side table entry is a one-way operation; an object with a side
table entry never loses it. This prevents some thread races.
Strong and unowned variables point at the object.
Weak variables point at the object's side table.
Storage layout:
HeapObject {
isa
InlineRefCounts {
atomic<InlineRefCountBits> {
strong RC + unowned RC + flags
OR
HeapObjectSideTableEntry*
}
}
}
HeapObjectSideTableEntry {
SideTableRefCounts {
object pointer
atomic<SideTableRefCountBits> {
strong RC + unowned RC + weak RC + flags
}
}
}
InlineRefCounts and SideTableRefCounts share some implementation
via RefCounts<T>.
InlineRefCountBits and SideTableRefCountBits share some implementation
via RefCountBitsT<bool>.
In general: The InlineRefCounts implementation tries to perform the
operation inline. If the object has a side table it calls the
HeapObjectSideTableEntry implementation which in turn calls the
SideTableRefCounts implementation.
Downside: this code is a bit twisted.
Upside: this code has less duplication than it might otherwise have.
Object lifecycle state machine:
LIVE without side table
The object is alive.
Object's refcounts are initialized as 1 strong, 1 unowned, 1 weak.
No side table. No weak RC storage.
Strong variable operations work normally.
Unowned variable operations work normally.
Weak variable load can't happen.
Weak variable store adds the side table, becoming LIVE with side table.
When the strong RC reaches zero deinit() is called and the object
becomes DEINITING.
LIVE with side table
Weak variable operations work normally.
Everything else is the same as LIVE.
DEINITING without side table
deinit() is in progress on the object.
Strong variable operations have no effect.
Unowned variable load halts in swift_abortRetainUnowned().
Unowned variable store works normally.
Weak variable load can't happen.
Weak variable store stores nil.
When deinit() completes, the generated code calls swift_deallocObject.
swift_deallocObject calls canBeFreedNow() checking for the fast path
of no weak or unowned references.
If canBeFreedNow() the object is freed and it becomes DEAD.
Otherwise, it decrements the unowned RC and the object becomes DEINITED.
DEINITING with side table
Weak variable load returns nil.
Weak variable store stores nil.
canBeFreedNow() is always false, so it never transitions directly to DEAD.
Everything else is the same as DEINITING.
DEINITED without side table
deinit() has completed but there are unowned references outstanding.
Strong variable operations can't happen.
Unowned variable store can't happen.
Unowned variable load halts in swift_abortRetainUnowned().
Weak variable operations can't happen.
When the unowned RC reaches zero, the object is freed and it becomes DEAD.
DEINITED with side table
Weak variable load returns nil.
Weak variable store can't happen.
When the unowned RC reaches zero, the object is freed, the weak RC is
decremented, and the object becomes FREED.
Everything else is the same as DEINITED.
FREED without side table
This state never happens.
FREED with side table
The object is freed but there are weak refs to the side table outstanding.
Strong variable operations can't happen.
Unowned variable operations can't happen.
Weak variable load returns nil.
Weak variable store can't happen.
When the weak RC reaches zero, the side table entry is freed and
the object becomes DEAD.
DEAD
The object and its side table are gone.
*/
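// A worked example of the extra-count convention above (a sketch, not
// runtime code): a freshly initialized object is encoded as RefCountBits(0, 1)
// -- see RefCounts(Initialized) below -- i.e. StrongExtraRefCount == 0
// (logical strong count 1), UnownedRefCount == 1 (the +1 held on behalf of
// the strong references), and a weak count that starts at 1 on behalf of the
// unowned references once a side table entry is created.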
namespace swift {
struct HeapObject;
class HeapObjectSideTableEntry;
}
// FIXME: HACK: copied from HeapObject.cpp
extern "C" LLVM_LIBRARY_VISIBILITY LLVM_ATTRIBUTE_NOINLINE LLVM_ATTRIBUTE_USED
void _swift_release_dealloc(swift::HeapObject *object)
SWIFT_CC(RegisterPreservingCC_IMPL);
namespace swift {
// RefCountIsInline: refcount stored in an object
// RefCountNotInline: refcount stored in an object's side table entry
enum RefCountInlinedness { RefCountNotInline = false, RefCountIsInline = true };
enum ClearPinnedFlag { DontClearPinnedFlag = false, DoClearPinnedFlag = true };
enum PerformDeinit { DontPerformDeinit = false, DoPerformDeinit = true };
// Raw storage of refcount bits, depending on pointer size and inlinedness.
// 32-bit inline refcounts are 32 bits. All others are 64 bits.
template <RefCountInlinedness refcountIsInline, size_t sizeofPointer>
struct RefCountBitsInt;
// 64-bit inline
// 64-bit out of line
template <RefCountInlinedness refcountIsInline>
struct RefCountBitsInt<refcountIsInline, 8> {
typedef uint64_t Type;
typedef int64_t SignedType;
};
// 32-bit out of line
template <>
struct RefCountBitsInt<RefCountNotInline, 4> {
typedef uint64_t Type;
typedef int64_t SignedType;
};
// 32-bit inline
template <>
struct RefCountBitsInt<RefCountIsInline, 4> {
typedef uint32_t Type;
typedef int32_t SignedType;
};
// Layout of refcount bits.
// field value = (bits & mask) >> shift
// FIXME: redo this abstraction more cleanly
# define maskForField(name) (((uint64_t(1)<<name##BitCount)-1) << name##Shift)
# define shiftAfterField(name) (name##Shift + name##BitCount)
template <size_t sizeofPointer>
struct RefCountBitOffsets;
// 64-bit inline
// 64-bit out of line
// 32-bit out of line
template <>
struct RefCountBitOffsets<8> {
static const size_t IsPinnedShift = 0;
static const size_t IsPinnedBitCount = 1;
static const uint64_t IsPinnedMask = maskForField(IsPinned);
static const size_t UnownedRefCountShift = shiftAfterField(IsPinned);
static const size_t UnownedRefCountBitCount = 31;
static const uint64_t UnownedRefCountMask = maskForField(UnownedRefCount);
static const size_t IsDeinitingShift = shiftAfterField(UnownedRefCount);
static const size_t IsDeinitingBitCount = 1;
static const uint64_t IsDeinitingMask = maskForField(IsDeiniting);
static const size_t StrongExtraRefCountShift = shiftAfterField(IsDeiniting);
static const size_t StrongExtraRefCountBitCount = 30;
static const uint64_t StrongExtraRefCountMask = maskForField(StrongExtraRefCount);
static const size_t UseSlowRCShift = shiftAfterField(StrongExtraRefCount);
static const size_t UseSlowRCBitCount = 1;
static const uint64_t UseSlowRCMask = maskForField(UseSlowRC);
static const size_t SideTableShift = 0;
static const size_t SideTableBitCount = 62;
static const uint64_t SideTableMask = maskForField(SideTable);
static const size_t SideTableUnusedLowBits = 3;
static const size_t SideTableMarkShift = SideTableBitCount;
static const size_t SideTableMarkBitCount = 1;
static const uint64_t SideTableMarkMask = maskForField(SideTableMark);
};
// 32-bit inline
template <>
struct RefCountBitOffsets<4> {
static const size_t IsPinnedShift = 0;
static const size_t IsPinnedBitCount = 1;
static const uint32_t IsPinnedMask = maskForField(IsPinned);
static const size_t UnownedRefCountShift = shiftAfterField(IsPinned);
static const size_t UnownedRefCountBitCount = 7;
static const uint32_t UnownedRefCountMask = maskForField(UnownedRefCount);
static const size_t IsDeinitingShift = shiftAfterField(UnownedRefCount);
static const size_t IsDeinitingBitCount = 1;
static const uint32_t IsDeinitingMask = maskForField(IsDeiniting);
static const size_t StrongExtraRefCountShift = shiftAfterField(IsDeiniting);
static const size_t StrongExtraRefCountBitCount = 22;
static const uint32_t StrongExtraRefCountMask = maskForField(StrongExtraRefCount);
static const size_t UseSlowRCShift = shiftAfterField(StrongExtraRefCount);
static const size_t UseSlowRCBitCount = 1;
static const uint32_t UseSlowRCMask = maskForField(UseSlowRC);
static const size_t SideTableShift = 0;
static const size_t SideTableBitCount = 30;
static const uint32_t SideTableMask = maskForField(SideTable);
static const size_t SideTableUnusedLowBits = 2;
static const size_t SideTableMarkShift = SideTableBitCount;
static const size_t SideTableMarkBitCount = 1;
static const uint32_t SideTableMarkMask = maskForField(SideTableMark);
};
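// Worked layout derived from the constants above (a sanity sketch, not
// authoritative). The 8-byte encoding (64-bit inline/out-of-line and 32-bit
// out-of-line) lays out as:
//   bit  0       IsPinned
//   bits 1-31    UnownedRefCount (31 bits)
//   bit  32      IsDeiniting
//   bits 33-62   StrongExtraRefCount (30 bits)
//   bit  63      UseSlowRC
// When UseSlowRC is set in the inline bits, the same word instead holds:
//   bits 0-61    SideTable (pointer >> SideTableUnusedLowBits)
//   bit  62      SideTableMark
//   bit  63      UseSlowRC
#if 0
// Compile-time check of the layout spelled out above.
static_assert(RefCountBitOffsets<8>::UnownedRefCountShift == 1, "");
static_assert(RefCountBitOffsets<8>::IsDeinitingShift == 32, "");
static_assert(RefCountBitOffsets<8>::StrongExtraRefCountShift == 33, "");
static_assert(RefCountBitOffsets<8>::UseSlowRCShift == 63, "");
static_assert(RefCountBitOffsets<8>::SideTableMarkShift == 62, "");
#endif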
// FIXME: reinstate these assertions
#if 0
static_assert(StrongExtraRefCountShift == IsDeinitingShift + 1,
"IsDeiniting must be LSB-wards of StrongExtraRefCount");
static_assert(UseSlowRCShift + UseSlowRCBitCount == sizeof(bits)*8,
"UseSlowRC must be MSB");
static_assert(SideTableBitCount + SideTableMarkBitCount +
UseSlowRCBitCount == sizeof(bits)*8,
"wrong bit count for RefCountBits side table encoding");
static_assert(UnownedRefCountBitCount + IsPinnedBitCount +
IsDeinitingBitCount + StrongExtraRefCountBitCount +
UseSlowRCBitCount == sizeof(bits)*8,
"wrong bit count for RefCountBits refcount encoding");
#endif
// Basic encoding of refcount and flag data into the object's header.
template <RefCountInlinedness refcountIsInline>
class RefCountBitsT {
friend class RefCountBitsT<RefCountIsInline>;
friend class RefCountBitsT<RefCountNotInline>;
static const RefCountInlinedness Inlinedness = refcountIsInline;
typedef typename RefCountBitsInt<refcountIsInline, sizeof(void*)>::Type
BitsType;
typedef typename RefCountBitsInt<refcountIsInline, sizeof(void*)>::SignedType
SignedBitsType;
typedef RefCountBitOffsets<sizeof(BitsType)>
Offsets;
BitsType bits;
// "Bitfield" accessors.
# define getFieldIn(bits, offsets, name) \
((bits & offsets::name##Mask) >> offsets::name##Shift)
# define setFieldIn(bits, offsets, name, val) \
bits = ((bits & ~offsets::name##Mask) | \
(((BitsType(val) << offsets::name##Shift) & offsets::name##Mask)))
# define getField(name) getFieldIn(bits, Offsets, name)
# define setField(name, val) setFieldIn(bits, Offsets, name, val)
# define copyFieldFrom(src, name) \
setFieldIn(bits, Offsets, name, \
getFieldIn(src.bits, decltype(src)::Offsets, name))
// RefCountBits uses always_inline everywhere
// to improve performance of debug builds.
private:
LLVM_ATTRIBUTE_ALWAYS_INLINE
bool getUseSlowRC() const {
return bool(getField(UseSlowRC));
}
LLVM_ATTRIBUTE_ALWAYS_INLINE
void setUseSlowRC(bool value) {
setField(UseSlowRC, value);
}
// Returns true if the decrement is a fast-path result.
// Returns false if the decrement should fall back to some slow path
// (for example, because UseSlowRC is set
// or because the refcount is now zero and should deinit).
template <ClearPinnedFlag clearPinnedFlag>
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE
bool doDecrementStrongExtraRefCount(uint32_t dec) {
#ifndef NDEBUG
if (!hasSideTable()) {
// Can't check these assertions with side table present.
// clearPinnedFlag assumes the flag is already set.
if (clearPinnedFlag)
assert(getIsPinned() && "unpinning reference that was not pinned");
if (getIsDeiniting())
assert(getStrongExtraRefCount() >= dec &&
"releasing reference whose refcount is already zero");
else
assert(getStrongExtraRefCount() + 1 >= dec &&
"releasing reference whose refcount is already zero");
}
#endif
BitsType unpin = (clearPinnedFlag
? (BitsType(1) << Offsets::IsPinnedShift)
: 0);
// This deliberately underflows by borrowing from the UseSlowRC field.
bits -= unpin + (BitsType(dec) << Offsets::StrongExtraRefCountShift);
return (SignedBitsType(bits) >= 0);
}
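// Worked example of the borrow above (a sketch, 8-byte layout): with
// StrongExtraRefCount in bits 33-62 and UseSlowRC at bit 63, decrementing a
// physical count of 0 by 1 subtracts 1 << 33 and the borrow propagates into
// bit 63. SignedBitsType(bits) then reads negative, the function returns
// false, and the caller takes a slow path -- exactly the case where the
// logical strong count tried to drop below 1.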
public:
LLVM_ATTRIBUTE_ALWAYS_INLINE
RefCountBitsT() = default;
LLVM_ATTRIBUTE_ALWAYS_INLINE
constexpr
RefCountBitsT(uint32_t strongExtraCount, uint32_t unownedCount)
: bits((BitsType(strongExtraCount) << Offsets::StrongExtraRefCountShift) |
(BitsType(unownedCount) << Offsets::UnownedRefCountShift))
{ }
LLVM_ATTRIBUTE_ALWAYS_INLINE
RefCountBitsT(HeapObjectSideTableEntry* side)
: bits((reinterpret_cast<BitsType>(side) >> Offsets::SideTableUnusedLowBits)
| (BitsType(1) << Offsets::UseSlowRCShift)
| (BitsType(1) << Offsets::SideTableMarkShift))
{
assert(refcountIsInline);
}
LLVM_ATTRIBUTE_ALWAYS_INLINE
RefCountBitsT(const RefCountBitsT<RefCountIsInline> *newbitsPtr) {
bits = 0;
assert(newbitsPtr && "expected non null newbits");
RefCountBitsT<RefCountIsInline> newbits = *newbitsPtr;
if (refcountIsInline || sizeof(newbits) == sizeof(*this)) {
// this and newbits are both inline
// OR this is out-of-line but the same layout as inline.
// (FIXME: use something cleaner than sizeof for same-layout test)
// Copy the bits directly.
bits = newbits.bits;
}
else {
// this is out-of-line and not the same layout as inline newbits.
// Copy field-by-field.
copyFieldFrom(newbits, UnownedRefCount);
copyFieldFrom(newbits, IsPinned);
copyFieldFrom(newbits, IsDeiniting);
copyFieldFrom(newbits, StrongExtraRefCount);
copyFieldFrom(newbits, UseSlowRC);
}
}
LLVM_ATTRIBUTE_ALWAYS_INLINE
bool hasSideTable() const {
// FIXME: change this when introducing immutable RC objects
bool hasSide = getUseSlowRC();
// Side table refcount must not point to another side table.
assert((refcountIsInline || !hasSide) &&
"side table refcount must not have a side table entry of its own");
return hasSide;
}
LLVM_ATTRIBUTE_ALWAYS_INLINE
HeapObjectSideTableEntry *getSideTable() const {
assert(hasSideTable());
// Stored value is a shifted pointer.
return reinterpret_cast<HeapObjectSideTableEntry *>
(uintptr_t(getField(SideTable)) << Offsets::SideTableUnusedLowBits);
}
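// Worked example of the packed side table encoding (a sketch; the address is
// made up): on the 8-byte layout a side table entry at 0x100020 is stored as
// 0x100020 >> 3 == 0x20004 in the low 62 bits, with bit 62 (SideTableMark)
// and bit 63 (UseSlowRC) set. getSideTable() recovers the pointer by shifting
// left by SideTableUnusedLowBits again, so the entry must keep its low 3 bits
// zero (8-byte alignment), which setSideTable() asserts below.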
LLVM_ATTRIBUTE_ALWAYS_INLINE
uint32_t getUnownedRefCount() const {
assert(!hasSideTable());
return uint32_t(getField(UnownedRefCount));
}
LLVM_ATTRIBUTE_ALWAYS_INLINE
bool getIsPinned() const {
assert(!hasSideTable());
return bool(getField(IsPinned));
}
LLVM_ATTRIBUTE_ALWAYS_INLINE
bool getIsDeiniting() const {
assert(!hasSideTable());
return bool(getField(IsDeiniting));
}
LLVM_ATTRIBUTE_ALWAYS_INLINE
uint32_t getStrongExtraRefCount() const {
assert(!hasSideTable());
return uint32_t(getField(StrongExtraRefCount));
}
LLVM_ATTRIBUTE_ALWAYS_INLINE
void setHasSideTable(bool value) {
bits = 0;
setUseSlowRC(value);
}
LLVM_ATTRIBUTE_ALWAYS_INLINE
void setSideTable(HeapObjectSideTableEntry *side) {
assert(hasSideTable());
// Stored value is a shifted pointer.
uintptr_t value = reinterpret_cast<uintptr_t>(side);
uintptr_t storedValue = value >> Offsets::SideTableUnusedLowBits;
assert(storedValue << Offsets::SideTableUnusedLowBits == value);
setField(SideTable, storedValue);
setField(SideTableMark, 1);
}
LLVM_ATTRIBUTE_ALWAYS_INLINE
void setUnownedRefCount(uint32_t value) {
assert(!hasSideTable());
setField(UnownedRefCount, value);
}
LLVM_ATTRIBUTE_ALWAYS_INLINE
void setIsPinned(bool value) {
assert(!hasSideTable());
setField(IsPinned, value);
}
LLVM_ATTRIBUTE_ALWAYS_INLINE
void setIsDeiniting(bool value) {
assert(!hasSideTable());
setField(IsDeiniting, value);
}
LLVM_ATTRIBUTE_ALWAYS_INLINE
void setStrongExtraRefCount(uint32_t value) {
assert(!hasSideTable());
setField(StrongExtraRefCount, value);
}
// Returns true if the increment is a fast-path result.
// Returns false if the increment should fall back to some slow path
// (for example, because UseSlowRC is set or because the refcount overflowed).
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE
bool incrementStrongExtraRefCount(uint32_t inc) {
// This deliberately overflows into the UseSlowRC field.
bits += BitsType(inc) << Offsets::StrongExtraRefCountShift;
return (SignedBitsType(bits) >= 0);
}
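// Symmetric to doDecrementStrongExtraRefCount above (a sketch): if the
// addition carries out of the StrongExtraRefCount field into UseSlowRC, or
// if UseSlowRC was already set, bits ends up with its sign bit set,
// SignedBitsType(bits) reads negative, and the caller falls back to the
// slow path.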
// FIXME: I don't understand why I can't make clearPinned a template argument
// (compiler balks at calls from class RefCounts that way)
LLVM_NODISCARD LLVM_ATTRIBUTE_ALWAYS_INLINE
bool decrementStrongExtraRefCount(uint32_t dec, bool clearPinned = false) {
if (clearPinned)
return doDecrementStrongExtraRefCount<DoClearPinnedFlag>(dec);
else
return doDecrementStrongExtraRefCount<DontClearPinnedFlag>(dec);
}
// Returns the old reference count before the increment.
LLVM_ATTRIBUTE_ALWAYS_INLINE
uint32_t incrementUnownedRefCount(uint32_t inc) {
uint32_t old = getUnownedRefCount();
setUnownedRefCount(old + inc);
return old;
}
LLVM_ATTRIBUTE_ALWAYS_INLINE
void decrementUnownedRefCount(uint32_t dec) {
setUnownedRefCount(getUnownedRefCount() - dec);
}
LLVM_ATTRIBUTE_ALWAYS_INLINE
bool isUniquelyReferenced() {
static_assert(Offsets::IsPinnedBitCount +
Offsets::UnownedRefCountBitCount +
Offsets::IsDeinitingBitCount +
Offsets::StrongExtraRefCountBitCount +
Offsets::UseSlowRCBitCount == sizeof(bits)*8,
"inspect isUniquelyReferenced after adding fields");
// isPinned: don't care
// Unowned: don't care (FIXME: should care and redo initForNotFreeing)
// IsDeiniting: false
// StrongExtra: 0
// UseSlowRC: false
// Compiler is clever enough to optimize this.
return
!getUseSlowRC() && !getIsDeiniting() && getStrongExtraRefCount() == 0;
}
LLVM_ATTRIBUTE_ALWAYS_INLINE
bool isUniquelyReferencedOrPinned() {
static_assert(Offsets::IsPinnedBitCount +
Offsets::UnownedRefCountBitCount +
Offsets::IsDeinitingBitCount +
Offsets::StrongExtraRefCountBitCount +
Offsets::UseSlowRCBitCount == sizeof(bits)*8,
"inspect isUniquelyReferencedOrPinned after adding fields");
// isPinned: don't care
// Unowned: don't care (FIXME: should care and redo initForNotFreeing)
// IsDeiniting: false
// isPinned/StrongExtra: true/any OR false/0
// UseSlowRC: false
// Compiler is not clever enough to optimize this.
// return (isUniquelyReferenced() ||
// (!getUseSlowRC() && !getIsDeiniting() && getIsPinned()));
// Bit twiddling solution:
// 1. Define the fields in this order:
// bits that must be zero when not pinned | bits to ignore | IsPinned
// 2. Rotate IsPinned into the sign bit:
// IsPinned | bits that must be zero when not pinned | bits to ignore
// 3. Perform a signed comparison against X = (1 << count of ignored bits).
// IsPinned makes the value negative and thus less than X.
// Zero in the must-be-zero bits makes the value less than X.
// Non-zero and not pinned makes the value greater or equal to X.
// Count the ignored fields.
constexpr auto ignoredBitsCount =
Offsets::UnownedRefCountBitCount + Offsets::IsDeinitingBitCount;
// Make sure all fields are positioned as expected.
// -1 compensates for the rotation.
static_assert(Offsets::IsPinnedShift == 0, "IsPinned must be the LSB bit");
static_assert(
shiftAfterField(Offsets::UnownedRefCount)-1 <= ignoredBitsCount &&
shiftAfterField(Offsets::IsDeiniting)-1 <= ignoredBitsCount &&
Offsets::StrongExtraRefCountShift-1 >= ignoredBitsCount &&
Offsets::UseSlowRCShift-1 >= ignoredBitsCount,
"refcount bit layout incorrect for isUniquelyReferencedOrPinned");
BitsType X = BitsType(1) << ignoredBitsCount;
BitsType rotatedBits = ((bits >> 1) | (bits << (8*sizeof(bits) - 1)));
return SignedBitsType(rotatedBits) < SignedBitsType(X);
}
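// Worked example of the rotation trick above (a sketch with made-up values,
// 8-byte layout): for bits == 1 << UnownedRefCountShift -- unowned count 1,
// everything else zero, i.e. uniquely referenced and not pinned -- rotating
// right by one yields 1, which is below X == 1 << 32, so the check passes.
// Any nonzero StrongExtraRefCount or UseSlowRC bit lands at or above bit 32
// after the rotation and the check fails; a set IsPinned bit lands in the
// sign bit, making the signed comparison succeed regardless of the rest.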
LLVM_ATTRIBUTE_ALWAYS_INLINE
BitsType getBitsValue() {
return bits;
}
# undef getFieldIn
# undef setFieldIn
# undef getField
# undef setField
# undef copyFieldFrom
};
# undef maskForField
# undef shiftAfterField
typedef RefCountBitsT<RefCountIsInline> InlineRefCountBits;
class SideTableRefCountBits : public RefCountBitsT<RefCountNotInline>
{
uint32_t weakBits;
public:
LLVM_ATTRIBUTE_ALWAYS_INLINE
SideTableRefCountBits() = default;
LLVM_ATTRIBUTE_ALWAYS_INLINE
constexpr
SideTableRefCountBits(uint32_t strongExtraCount, uint32_t unownedCount)
: RefCountBitsT<RefCountNotInline>(strongExtraCount, unownedCount)
// weak refcount starts at 1 on behalf of the unowned count
, weakBits(1)
{ }
LLVM_ATTRIBUTE_ALWAYS_INLINE
SideTableRefCountBits(HeapObjectSideTableEntry* side) = delete;
LLVM_ATTRIBUTE_ALWAYS_INLINE
SideTableRefCountBits(InlineRefCountBits newbits)
: RefCountBitsT<RefCountNotInline>(&newbits), weakBits(1)
{ }
LLVM_ATTRIBUTE_ALWAYS_INLINE
void incrementWeakRefCount() {
weakBits++;
}
LLVM_ATTRIBUTE_ALWAYS_INLINE
bool decrementWeakRefCount() {
assert(weakBits > 0);
weakBits--;
return weakBits == 0;
}
LLVM_ATTRIBUTE_ALWAYS_INLINE
uint32_t getWeakRefCount() {
return weakBits;
}
// Side table ref count never has a side table of its own.
LLVM_ATTRIBUTE_ALWAYS_INLINE
bool hasSideTable() {
return false;
}
};
// Barriers
//
// Strong refcount increment is unordered with respect to other memory locations
//
// Strong refcount decrement is a release operation with respect to other
// memory locations. When an object's reference count becomes zero,
// an acquire fence is performed before beginning Swift deinit or ObjC
// -dealloc code. This ensures that the deinit code sees all modifications
// of the object's contents that were made before the object was released.
//
// Unowned and weak increment and decrement are all unordered.
// There is no deinit equivalent for these counts so no fence is needed.
//
// Accessing the side table requires that refCounts be accessed with
// a load-consume. Only code that is guaranteed not to try dereferencing
// the side table may perform a load-relaxed of refCounts.
// Similarly, storing the new side table pointer into refCounts is a
// store-release, but most other stores into refCounts are store-relaxed.
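// A minimal usage sketch of the ordering protocol above (hypothetical helper,
// not part of the runtime): a load of refCounts that may be followed by
// dereferencing the side table pointer uses SWIFT_MEMORY_ORDER_CONSUME so the
// pointed-to HeapObjectSideTableEntry is visible; a load that will never
// dereference it may be relaxed.
#if 0
template <typename RefCountBits>
static bool sketch_isDeiniting(std::atomic<RefCountBits> &refCounts) {
  // Dependent load of the side table pointer: needs consume ordering.
  auto bits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME);
  if (bits.hasSideTable())
    return bits.getSideTable()->isDeiniting();
  return bits.getIsDeiniting();
}
#endif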
template <typename RefCountBits>
class RefCounts {
std::atomic<RefCountBits> refCounts;
#if !__LP64__
// FIXME: hack - something somewhere is assuming a 3-word header on 32-bit
// See also other fixmes marked "small header for 32-bit"
uintptr_t unused SWIFT_ATTRIBUTE_UNAVAILABLE;
#endif
// Out-of-line slow paths.
LLVM_ATTRIBUTE_NOINLINE
void incrementSlow(RefCountBits oldbits, uint32_t inc);
LLVM_ATTRIBUTE_NOINLINE
void incrementNonAtomicSlow(RefCountBits oldbits, uint32_t inc);
LLVM_ATTRIBUTE_NOINLINE
bool tryIncrementAndPinSlow(RefCountBits oldbits);
LLVM_ATTRIBUTE_NOINLINE
bool tryIncrementAndPinNonAtomicSlow(RefCountBits);
LLVM_ATTRIBUTE_NOINLINE
bool tryIncrementSlow(RefCountBits oldbits);
LLVM_ATTRIBUTE_NOINLINE
bool tryIncrementNonAtomicSlow(RefCountBits oldbits);
LLVM_ATTRIBUTE_NOINLINE
void incrementUnownedSlow(uint32_t inc);
public:
enum Initialized_t { Initialized };
// RefCounts must be trivially constructible to avoid ObjC++
// destruction overhead at runtime. Use RefCounts(Initialized)
// to produce an initialized instance.
RefCounts() = default;
// Refcount of a new object is 1.
constexpr RefCounts(Initialized_t)
: refCounts(RefCountBits(0, 1))
#if !__LP64__ && !__has_attribute(unavailable)
, unused(0)
#endif
{ }
void init() {
refCounts.store(RefCountBits(0, 1), std::memory_order_relaxed);
}
// Initialize for a stack-promoted object. This prevents the final release
// from freeing the object's memory.
// FIXME: need to mark these and assert they never get a side table,
// because the extra unowned ref will keep the side table alive forever
void initForNotFreeing() {
refCounts.store(RefCountBits(0, 2), std::memory_order_relaxed);
}
// Initialize from another refcount bits.
// Only inline -> out-of-line is allowed (used for new side table entries).
void init(InlineRefCountBits newBits) {
refCounts.store(newBits, std::memory_order_relaxed);
}
// Increment the reference count.
void increment(uint32_t inc = 1) {
auto oldbits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME);
RefCountBits newbits;
do {
newbits = oldbits;
bool fast = newbits.incrementStrongExtraRefCount(inc);
if (!fast)
return incrementSlow(oldbits, inc);
} while (!refCounts.compare_exchange_weak(oldbits, newbits,
std::memory_order_relaxed));
}
void incrementNonAtomic(uint32_t inc = 1) {
auto oldbits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME);
auto newbits = oldbits;
bool fast = newbits.incrementStrongExtraRefCount(inc);
if (!fast)
return incrementNonAtomicSlow(oldbits, inc);
refCounts.store(newbits, std::memory_order_relaxed);
}
// Try to simultaneously set the pinned flag and increment the
// reference count. If the flag is already set, don't increment the
// reference count.
//
// This is only a sensible protocol for strictly-nested modifications.
//
// Returns true if the flag was set by this operation.
//
// Postcondition: the flag is set.
bool tryIncrementAndPin() {
auto oldbits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME);
RefCountBits newbits;
do {
// If the flag is already set, just fail.
if (!oldbits.hasSideTable() && oldbits.getIsPinned())
return false;
// Try to simultaneously set the flag and increment the reference count.
newbits = oldbits;
newbits.setIsPinned(true);
bool fast = newbits.incrementStrongExtraRefCount(1);
if (!fast)
return tryIncrementAndPinSlow(oldbits);
} while (!refCounts.compare_exchange_weak(oldbits, newbits,
std::memory_order_relaxed));
return true;
}
bool tryIncrementAndPinNonAtomic() {
auto oldbits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME);
// If the flag is already set, just fail.
if (!oldbits.hasSideTable() && oldbits.getIsPinned())
return false;
// Try to simultaneously set the flag and increment the reference count.
auto newbits = oldbits;
newbits.setIsPinned(true);
bool fast = newbits.incrementStrongExtraRefCount(1);
if (!fast)
return tryIncrementAndPinNonAtomicSlow(oldbits);
refCounts.store(newbits, std::memory_order_relaxed);
return true;
}
// Increment the reference count, unless the object is deiniting.
bool tryIncrement() {
auto oldbits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME);
RefCountBits newbits;
do {
if (!oldbits.hasSideTable() && oldbits.getIsDeiniting())
return false;
newbits = oldbits;
bool fast = newbits.incrementStrongExtraRefCount(1);
if (!fast)
return tryIncrementSlow(oldbits);
} while (!refCounts.compare_exchange_weak(oldbits, newbits,
std::memory_order_relaxed));
return true;
}
bool tryIncrementNonAtomic() {
auto oldbits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME);
if (!oldbits.hasSideTable() && oldbits.getIsDeiniting())
return false;
auto newbits = oldbits;
bool fast = newbits.incrementStrongExtraRefCount(1);
if (!fast)
return tryIncrementNonAtomicSlow(oldbits);
refCounts.store(newbits, std::memory_order_relaxed);
return true;
}
// Simultaneously clear the pinned flag and decrement the reference
// count. Call _swift_release_dealloc() if the reference count goes to zero.
//
// Precondition: the pinned flag is set.
LLVM_ATTRIBUTE_ALWAYS_INLINE
void decrementAndUnpinAndMaybeDeinit() {
doDecrement<DoClearPinnedFlag, DoPerformDeinit>(1);
}
LLVM_ATTRIBUTE_ALWAYS_INLINE
void decrementAndUnpinAndMaybeDeinitNonAtomic() {
doDecrementNonAtomic<DoClearPinnedFlag, DoPerformDeinit>(1);
}
// Decrement the reference count.
// Return true if the caller should now deinit the object.
LLVM_ATTRIBUTE_ALWAYS_INLINE
bool decrementShouldDeinit(uint32_t dec) {
return doDecrement<DontClearPinnedFlag, DontPerformDeinit>(dec);
}
LLVM_ATTRIBUTE_ALWAYS_INLINE
bool decrementShouldDeinitNonAtomic(uint32_t dec) {
return doDecrementNonAtomic<DontClearPinnedFlag, DontPerformDeinit>(dec);
}
LLVM_ATTRIBUTE_ALWAYS_INLINE
void decrementAndMaybeDeinit(uint32_t dec) {
doDecrement<DontClearPinnedFlag, DoPerformDeinit>(dec);
}
LLVM_ATTRIBUTE_ALWAYS_INLINE
void decrementAndMaybeDeinitNonAtomic(uint32_t dec) {
doDecrementNonAtomic<DontClearPinnedFlag, DoPerformDeinit>(dec);
}
// Non-atomically release the last strong reference and mark the
// object as deiniting.
//
// Precondition: the reference count must be 1
void decrementFromOneNonAtomic() {
auto bits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME);
if (bits.hasSideTable())
return bits.getSideTable()->decrementFromOneNonAtomic();
assert(!bits.getIsDeiniting());
assert(bits.getStrongExtraRefCount() == 0 && "Expect a refcount of 1");
bits.setStrongExtraRefCount(0);
bits.setIsDeiniting(true);
refCounts.store(bits, std::memory_order_relaxed);
}
// Return the reference count.
// Once deinit begins the reference count is undefined.
uint32_t getCount() const {
auto bits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME);
if (bits.hasSideTable())
return bits.getSideTable()->getCount();
assert(!bits.getIsDeiniting()); // FIXME: can we assert this?
return bits.getStrongExtraRefCount() + 1;
}
// Return whether the reference count is exactly 1.
// Once deinit begins the reference count is undefined.
bool isUniquelyReferenced() const {
auto bits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME);
if (bits.hasSideTable())
return bits.getSideTable()->isUniquelyReferenced();
assert(!bits.getIsDeiniting());
return bits.isUniquelyReferenced();
}
// Return whether the reference count is exactly 1 or the pin flag
// is set. Once deinit begins the reference count is undefined.
bool isUniquelyReferencedOrPinned() const {
auto bits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME);
assert(!bits.getIsDeiniting());
// bits.isUniquelyReferencedOrPinned() also checks the side table bit,
// and this path optimizes better if we don't check hasSideTable() here first.
if (bits.isUniquelyReferencedOrPinned()) return true;
if (!bits.hasSideTable())
return false;
return bits.getSideTable()->isUniquelyReferencedOrPinned();
}
// Return true if the object has started deiniting.
bool isDeiniting() const {
auto bits = refCounts.load(SWIFT_MEMORY_ORDER_CONSUME);
if (bits.hasSideTable())
return bits.getSideTable()->isDeiniting();
else
return bits.getIsDeiniting();
}
/// Return true if the object can be freed directly right now.