/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "MemorySnapshot.h"
#include "ipc/ChildInternal.h"
#include "mozilla/Maybe.h"
#include "DirtyMemoryHandler.h"
#include "InfallibleVector.h"
#include "ProcessRecordReplay.h"
#include "ProcessRewind.h"
#include "SpinLock.h"
#include "SplayTree.h"
#include "Thread.h"
#include <algorithm>
#include <mach/mach.h>
#include <mach/mach_vm.h>
// Define to enable the countdown debugging thread. See StartCountdown().
//#define WANT_COUNTDOWN_THREAD 1
namespace mozilla {
namespace recordreplay {
///////////////////////////////////////////////////////////////////////////////
// Memory Snapshots Overview.
//
// Checkpoints are periodically saved, storing in memory enough information
// for the process to restore the contents of all tracked memory as it
// rewinds to earlier checkpoints. There are two components to a saved
// checkpoint:
//
// - Stack contents for each thread are completely saved on disk at each saved
// checkpoint. This is handled by ThreadSnapshot.cpp.
//
// - Heap and static memory contents (tracked memory) are saved in memory as
// the contents of pages modified before either the next saved checkpoint
// or the current execution point (if this is the last saved checkpoint).
// This is handled here.
//
// Heap memory is only tracked when allocated with MemoryKind::Tracked.
//
// Snapshots of heap/static memory are modeled on the copy-on-write semantics
// used by fork. Instead of actually forking, we use write-protected memory and
// a fault handler to perform the copy-on-write, which both gives more control
// of the snapshot process and allows snapshots to be taken on platforms
// without fork (e.g. Windows). The following example shows how snapshots are
// generated:
//
// #1 Save Checkpoint A. The initial snapshot tabulates all allocated tracked
// memory in the process, and write-protects all of it.
//
// #2 Write pages P0 and P1. Writing to the pages trips the fault handler. The
// handler creates copies of the initial contents of P0 and P1 (P0a and P1a)
// and unprotects the pages.
//
// #3 Save Checkpoint B. P0a and P1a, along with any other pages modified
// between A and B, become associated with checkpoint A. All modified pages
// are reprotected.
//
// #4 Write pages P1 and P2. Again, writing to the pages trips the fault
// handler and copies P1b and P2b are created and the pages are unprotected.
//
// #5 Save Checkpoint C. P1b and P2b become associated with checkpoint B, and the
// modified pages are reprotected.
//
// If we were to then rewind from C to A, we would read and restore P1b/P2b,
// followed by P0a/P1a. All data associated with checkpoints A and later is
// discarded (we can only rewind; we cannot jump forward in time).
///////////////////////////////////////////////////////////////////////////////
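// The core copy-on-write step described above can be modeled with plain POSIX
// primitives. This is a simplified sketch for illustration only; the handler
// actually used by this process is installed via DirtyMemoryHandler.cpp, and
// RememberDirtyPage below is a hypothetical stand-in for the bookkeeping done
// in HandleDirtyMemoryFault.
#if 0
static void CopyOnWriteFaultHandler(int aSignal, siginfo_t* aInfo, void*) {
  uint8_t* page = PageBase((uint8_t*)aInfo->si_addr);
  uint8_t* original = (uint8_t*)malloc(PageSize);
  memcpy(original, page, PageSize);                  // save pre-write contents
  RememberDirtyPage(page, original);                 // hypothetical bookkeeping
  mprotect(page, PageSize, PROT_READ | PROT_WRITE);  // let the write retry
}
#endif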
///////////////////////////////////////////////////////////////////////////////
// Snapshot Threads Overview.
//
// After step #3 above, the main thread has created a diff snapshot with the
// copies of the original contents of pages modified between two saved
// checkpoints. These page copies are initially all in memory. It is the
// responsibility of the snapshot threads to restore the original contents
// of pages, using their in-memory copies, when the process rewinds to the
// last saved checkpoint.
//
// There are a fixed number of snapshot threads that are spawned when the
// first checkpoint is saved. Threads are each responsible for distinct sets of
// heap memory pages (see AddDirtyPageToWorklist), avoiding synchronization
// issues between different snapshot threads.
///////////////////////////////////////////////////////////////////////////////
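// Because the partitioning of pages among snapshot threads is address-based,
// the same page always maps to the same thread. A minimal sketch of such a
// policy (see AddDirtyPageToWorklist for the real one):
#if 0
static size_t SnapshotThreadIndexForPage(uint8_t* aPageBase) {
  return ((size_t)aPageBase / PageSize) % NumSnapshotThreads;
}
#endif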
///////////////////////////////////////////////////////////////////////////////
// Memory Snapshot Structures
///////////////////////////////////////////////////////////////////////////////
// A region of allocated memory which should be tracked by MemoryInfo.
struct AllocatedMemoryRegion {
uint8_t* mBase;
size_t mSize;
bool mExecutable;
AllocatedMemoryRegion() : mBase(nullptr), mSize(0), mExecutable(false) {}
AllocatedMemoryRegion(uint8_t* aBase, size_t aSize, bool aExecutable)
: mBase(aBase), mSize(aSize), mExecutable(aExecutable) {}
// For sorting regions by base address.
struct AddressSort {
typedef void* Lookup;
static void* getLookup(const AllocatedMemoryRegion& aRegion) {
return aRegion.mBase;
}
static ssize_t compare(void* aAddress,
const AllocatedMemoryRegion& aRegion) {
return (uint8_t*)aAddress - aRegion.mBase;
}
};
// For sorting regions by size, from largest to smallest.
struct SizeReverseSort {
typedef size_t Lookup;
static size_t getLookup(const AllocatedMemoryRegion& aRegion) {
return aRegion.mSize;
}
static ssize_t compare(size_t aSize, const AllocatedMemoryRegion& aRegion) {
return aRegion.mSize - aSize;
}
};
};
// Information about a page which was modified between two saved checkpoints.
struct DirtyPage {
// Base address of the page.
uint8_t* mBase;
// Copy of the page at the first checkpoint. Written by the dirty memory
// handler via HandleDirtyMemoryFault if this is in the active page set,
// otherwise accessed by snapshot threads.
uint8_t* mOriginal;
bool mExecutable;
DirtyPage(uint8_t* aBase, uint8_t* aOriginal, bool aExecutable)
: mBase(aBase), mOriginal(aOriginal), mExecutable(aExecutable) {}
struct AddressSort {
typedef uint8_t* Lookup;
static uint8_t* getLookup(const DirtyPage& aPage) { return aPage.mBase; }
static ssize_t compare(uint8_t* aBase, const DirtyPage& aPage) {
return aBase - aPage.mBase;
}
};
};
// A set of dirty pages that can be searched quickly.
typedef SplayTree<DirtyPage, DirtyPage::AddressSort,
AllocPolicy<MemoryKind::SortedDirtyPageSet>, 4>
SortedDirtyPageSet;
// A set of dirty pages associated with some checkpoint.
struct DirtyPageSet {
// Checkpoint associated with this set.
CheckpointId mCheckpoint;
// All dirty pages in the set. Pages may be added or destroyed by the main
// thread when all other threads are idle, by the dirty memory handler when
// it is active and this is the active page set, and by the snapshot thread
// which owns this set.
InfallibleVector<DirtyPage, 256, AllocPolicy<MemoryKind::DirtyPageSet>>
mPages;
explicit DirtyPageSet(const CheckpointId& aCheckpoint)
: mCheckpoint(aCheckpoint) {}
};
// Worklist used by each snapshot thread.
struct SnapshotThreadWorklist {
// Index into gMemoryInfo->mSnapshotWorklists of the thread.
size_t mThreadIndex;
// Record/replay ID of the thread.
size_t mThreadId;
// Sets of pages in the thread's worklist. Each set is for a different diff,
// with the oldest checkpoints first.
InfallibleVector<DirtyPageSet, 256, AllocPolicy<MemoryKind::Generic>> mSets;
};
// Structure used to coordinate activity between the main thread and all
// snapshot threads. The workflow with this structure is as follows:
//
// 1. The main thread calls ActivateBegin(), marking the condition as active
// and notifying each snapshot thread. The main thread blocks in this call.
//
// 2. Each snapshot thread, maybe after waking up, checks the condition, does
// any processing it needs to (knowing the main thread is blocked) and
// then calls WaitUntilNoLongerActive(), blocking in the call.
//
// 3. Once all snapshot threads are blocked in WaitUntilNoLongerActive(), the
// main thread is unblocked from ActivateBegin(). It can then do whatever
// processing it needs to (knowing all snapshot threads are blocked) and
// then calls ActivateEnd(), blocking in the call.
//
// 4. Snapshot threads are now unblocked from WaitUntilNoLongerActive(). The
// main thread does not unblock from ActivateEnd() until all snapshot
// threads have left WaitUntilNoLongerActive().
//
// The intent with this class is to ensure that the main thread knows exactly
// when the snapshot threads are operating and that there is no potential for
// races between them.
class SnapshotThreadCondition {
Atomic<bool, SequentiallyConsistent, Behavior::DontPreserve> mActive;
Atomic<int32_t, SequentiallyConsistent, Behavior::DontPreserve> mCount;
public:
void ActivateBegin();
void ActivateEnd();
bool IsActive();
void WaitUntilNoLongerActive();
};
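// Sketch of how the two sides of this handshake fit together (hypothetical
// call sites; the real ones are the checkpoint logic on the main thread and
// the snapshot thread main loop):
#if 0
void MainThreadSide(SnapshotThreadCondition& aCondition) {
  aCondition.ActivateBegin();  // returns once every snapshot thread is parked
  // ... exclusive processing while all snapshot threads are blocked ...
  aCondition.ActivateEnd();    // returns once every snapshot thread resumes
}
void SnapshotThreadSide(SnapshotThreadCondition& aCondition) {
  if (aCondition.IsActive()) {
    // ... per-thread processing while the main thread is blocked ...
    aCondition.WaitUntilNoLongerActive();
  }
}
#endif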
static const size_t NumSnapshotThreads = 8;
// A set of free regions in the process. There are two of these, for the
// free regions in tracked and untracked memory.
class FreeRegionSet {
// Kind of memory being managed. This also describes the memory used by the
// set itself.
MemoryKind mKind;
// Lock protecting contents of the structure.
SpinLock mLock;
// To avoid reentrancy issues when growing the set, a chunk of pages for
// the splay tree is preallocated for use the next time the tree needs to
// expand its size.
static const size_t ChunkPages = 4;
void* mNextChunk;
// Ensure there is a chunk available for the splay tree.
void MaybeRefillNextChunk(AutoSpinLock& aLockHeld);
// Get the next chunk from the free region set for this memory kind.
void* TakeNextChunk();
struct MyAllocPolicy {
FreeRegionSet& mSet;
template <typename T>
void free_(T* aPtr, size_t aSize) {
MOZ_CRASH();
}
template <typename T>
T* pod_malloc(size_t aNumElems) {
MOZ_RELEASE_ASSERT(sizeof(T) * aNumElems <= ChunkPages * PageSize);
return (T*)mSet.TakeNextChunk();
}
explicit MyAllocPolicy(FreeRegionSet& aSet) : mSet(aSet) {}
};
// All memory in gMemoryInfo->mTrackedRegions that is not in use at the
// current point in execution.
typedef SplayTree<AllocatedMemoryRegion,
AllocatedMemoryRegion::SizeReverseSort, MyAllocPolicy,
ChunkPages>
Tree;
Tree mRegions;
void InsertLockHeld(void* aAddress, size_t aSize, AutoSpinLock& aLockHeld);
void* ExtractLockHeld(size_t aSize, AutoSpinLock& aLockHeld);
public:
explicit FreeRegionSet(MemoryKind aKind)
: mKind(aKind), mRegions(MyAllocPolicy(*this)) {}
// Get the single region set for a given memory kind.
static FreeRegionSet& Get(MemoryKind aKind);
// Add a free region to the set.
void Insert(void* aAddress, size_t aSize);
// Remove a free region of the specified size. If aAddress is specified then
// this address will be prioritized, but a different pointer may be returned.
// The resulting memory will be zeroed.
void* Extract(void* aAddress, size_t aSize);
// Return whether a memory range intersects this set at all.
bool Intersects(void* aAddress, size_t aSize);
};
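// Example use of the set (illustrative only; sizes must be page aligned, and
// Extract may return a different address than the one requested):
#if 0
FreeRegionSet& set = FreeRegionSet::Get(MemoryKind::Tracked);
void* memory = set.Extract(nullptr, 16 * PageSize);  // zeroed on return
// ... use the memory ...
set.Insert(memory, 16 * PageSize);  // make it available for reuse
#endif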
// Information about the current memory state. The contents of this structure
// are in untracked memory.
struct MemoryInfo {
// Whether new dirty pages or allocated regions are allowed.
bool mMemoryChangesAllowed;
// Untracked memory regions allocated before the first checkpoint. This is
// only accessed on the main thread, and is not a vector because of reentrancy
// issues.
static const size_t MaxInitialUntrackedRegions = 512;
AllocatedMemoryRegion mInitialUntrackedRegions[MaxInitialUntrackedRegions];
SpinLock mInitialUntrackedRegionsLock;
// All tracked memory in the process. This may be updated by any thread while
// holding mTrackedRegionsLock.
SplayTree<AllocatedMemoryRegion, AllocatedMemoryRegion::AddressSort,
AllocPolicy<MemoryKind::TrackedRegions>, 4>
mTrackedRegions;
InfallibleVector<AllocatedMemoryRegion, 512,
AllocPolicy<MemoryKind::TrackedRegions>>
mTrackedRegionsByAllocationOrder;
SpinLock mTrackedRegionsLock;
// Pages from |mTrackedRegions| modified since the last saved checkpoint.
// Accessed by any thread (usually the dirty memory handler) when memory
// changes are allowed, and by the main thread when memory changes are not
// allowed.
SortedDirtyPageSet mActiveDirty;
SpinLock mActiveDirtyLock;
// All untracked memory which is available for new allocations.
FreeRegionSet mFreeUntrackedRegions;
// Worklists for each snapshot thread.
SnapshotThreadWorklist mSnapshotWorklists[NumSnapshotThreads];
// Whether snapshot threads should restore memory to its state when the last
// saved diff was started.
SnapshotThreadCondition mSnapshotThreadsShouldRestore;
// Whether snapshot threads should idle.
SnapshotThreadCondition mSnapshotThreadsShouldIdle;
// Counter used by the countdown thread.
Atomic<size_t, SequentiallyConsistent, Behavior::DontPreserve> mCountdown;
// Information for timers.
double mStartTime;
uint32_t mTimeHits[(size_t)TimerKind::Count];
double mTimeTotals[(size_t)TimerKind::Count];
// Information for memory allocation.
Atomic<ssize_t, Relaxed, Behavior::DontPreserve>
mMemoryBalance[(size_t)MemoryKind::Count];
// Recent dirty memory faults.
void* mDirtyMemoryFaults[50];
// Whether RecordReplayDirective may crash this process.
bool mIntentionalCrashesAllowed;
// Whether the CrashSoon directive has been given to this process.
bool mCrashSoon;
MemoryInfo()
: mMemoryChangesAllowed(true),
mFreeUntrackedRegions(MemoryKind::FreeRegions),
mStartTime(CurrentTime()),
mIntentionalCrashesAllowed(true) {
// The singleton MemoryInfo is allocated with zeroed memory, so other
// fields do not need explicit initialization.
}
};
static MemoryInfo* gMemoryInfo = nullptr;
void SetMemoryChangesAllowed(bool aAllowed) {
MOZ_RELEASE_ASSERT(gMemoryInfo->mMemoryChangesAllowed == !aAllowed);
gMemoryInfo->mMemoryChangesAllowed = aAllowed;
}
static void EnsureMemoryChangesAllowed() {
while (!gMemoryInfo->mMemoryChangesAllowed) {
ThreadYield();
}
}
void StartCountdown(size_t aCount) { gMemoryInfo->mCountdown = aCount; }
AutoCountdown::AutoCountdown(size_t aCount) { StartCountdown(aCount); }
AutoCountdown::~AutoCountdown() { StartCountdown(0); }
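// With WANT_COUNTDOWN_THREAD defined, AutoCountdown can be used to diagnose
// hangs: if the scope does not finish before the countdown expires, the
// countdown thread deliberately crashes the process so it can be inspected.
// Illustrative use, with an arbitrary tick budget and a hypothetical callee:
#if 0
{
  AutoCountdown countdown(1000000);
  OperationSuspectedOfHanging();  // hypothetical operation being debugged
}  // destructor resets the countdown if the operation completes in time
#endif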
#ifdef WANT_COUNTDOWN_THREAD
static void CountdownThreadMain(void*) {
while (true) {
if (gMemoryInfo->mCountdown && --gMemoryInfo->mCountdown == 0) {
// When debugging hangs in the child process, we can break here in lldb
// to inspect what the process is doing.
child::ReportFatalError(Nothing(), "CountdownThread activated");
}
ThreadYield();
}
}
#endif // WANT_COUNTDOWN_THREAD
///////////////////////////////////////////////////////////////////////////////
// Profiling
///////////////////////////////////////////////////////////////////////////////
AutoTimer::AutoTimer(TimerKind aKind) : mKind(aKind), mStart(CurrentTime()) {}
AutoTimer::~AutoTimer() {
if (gMemoryInfo) {
gMemoryInfo->mTimeHits[(size_t)mKind]++;
gMemoryInfo->mTimeTotals[(size_t)mKind] += CurrentTime() - mStart;
}
}
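// Timers are used in RAII style: construction records the start time, and
// destruction adds the elapsed time to the per-kind totals reported by
// DumpTimers(). Illustrative use, for any TimerKind value aKind:
#if 0
{
  AutoTimer timer(aKind);
  // ... the work being measured ...
}  // ~AutoTimer() bumps mTimeHits[aKind] and mTimeTotals[aKind]
#endif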
static const char* gTimerKindNames[] = {
#define DefineTimerKindName(aKind) #aKind,
ForEachTimerKind(DefineTimerKindName)
#undef DefineTimerKindName
};
void DumpTimers() {
if (!gMemoryInfo) {
return;
}
Print("Times %.2fs\n", (CurrentTime() - gMemoryInfo->mStartTime) / 1000000.0);
for (size_t i = 0; i < (size_t)TimerKind::Count; i++) {
uint32_t hits = gMemoryInfo->mTimeHits[i];
double time = gMemoryInfo->mTimeTotals[i];
Print("%s: %d hits, %.2fs\n", gTimerKindNames[i], (int)hits,
time / 1000000.0);
}
}
///////////////////////////////////////////////////////////////////////////////
// Directives
///////////////////////////////////////////////////////////////////////////////
void SetAllowIntentionalCrashes(bool aAllowed) {
gMemoryInfo->mIntentionalCrashesAllowed = aAllowed;
}
extern "C" {
MOZ_EXPORT void RecordReplayInterface_InternalRecordReplayDirective(
long aDirective) {
switch ((Directive)aDirective) {
case Directive::CrashSoon:
gMemoryInfo->mCrashSoon = true;
break;
case Directive::MaybeCrash:
if (gMemoryInfo->mIntentionalCrashesAllowed && gMemoryInfo->mCrashSoon) {
PrintSpew("Intentionally Crashing!\n");
MOZ_CRASH("RecordReplayDirective intentional crash");
}
gMemoryInfo->mCrashSoon = false;
break;
case Directive::AlwaysSaveTemporaryCheckpoints:
navigation::AlwaysSaveTemporaryCheckpoints();
break;
case Directive::AlwaysMarkMajorCheckpoints:
child::NotifyAlwaysMarkMajorCheckpoints();
break;
default:
MOZ_CRASH("Unknown directive");
}
}
} // extern "C"
///////////////////////////////////////////////////////////////////////////////
// Snapshot Thread Conditions
///////////////////////////////////////////////////////////////////////////////
void SnapshotThreadCondition::ActivateBegin() {
MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
MOZ_RELEASE_ASSERT(!mActive);
mActive = true;
for (size_t i = 0; i < NumSnapshotThreads; i++) {
Thread::Notify(gMemoryInfo->mSnapshotWorklists[i].mThreadId);
}
while (mCount != NumSnapshotThreads) {
Thread::WaitNoIdle();
}
}
void SnapshotThreadCondition::ActivateEnd() {
MOZ_RELEASE_ASSERT(Thread::CurrentIsMainThread());
MOZ_RELEASE_ASSERT(mActive);
mActive = false;
for (size_t i = 0; i < NumSnapshotThreads; i++) {
Thread::Notify(gMemoryInfo->mSnapshotWorklists[i].mThreadId);
}
while (mCount) {
Thread::WaitNoIdle();
}
}
bool SnapshotThreadCondition::IsActive() {
MOZ_RELEASE_ASSERT(!Thread::CurrentIsMainThread());
return mActive;
}
void SnapshotThreadCondition::WaitUntilNoLongerActive() {
MOZ_RELEASE_ASSERT(!Thread::CurrentIsMainThread());
MOZ_RELEASE_ASSERT(mActive);
if (NumSnapshotThreads == ++mCount) {
Thread::Notify(MainThreadId);
}
while (mActive) {
Thread::WaitNoIdle();
}
if (0 == --mCount) {
Thread::Notify(MainThreadId);
}
}
///////////////////////////////////////////////////////////////////////////////
// Snapshot Page Allocation
///////////////////////////////////////////////////////////////////////////////
// Get a page in untracked memory that can be used as a copy of a tracked page.
static uint8_t* AllocatePageCopy() {
return (uint8_t*)AllocateMemory(PageSize, MemoryKind::PageCopy);
}
// Free a page allocated by AllocatePageCopy.
static void FreePageCopy(uint8_t* aPage) {
DeallocateMemory(aPage, PageSize, MemoryKind::PageCopy);
}
///////////////////////////////////////////////////////////////////////////////
// Page Fault Handling
///////////////////////////////////////////////////////////////////////////////
void MemoryMove(void* aDst, const void* aSrc, size_t aSize) {
MOZ_RELEASE_ASSERT((size_t)aDst % sizeof(uint32_t) == 0);
MOZ_RELEASE_ASSERT((size_t)aSrc % sizeof(uint32_t) == 0);
MOZ_RELEASE_ASSERT(aSize % sizeof(uint32_t) == 0);
MOZ_RELEASE_ASSERT((size_t)aDst <= (size_t)aSrc ||
(size_t)aDst >= (size_t)aSrc + aSize);
uint32_t* ndst = (uint32_t*)aDst;
const uint32_t* nsrc = (const uint32_t*)aSrc;
for (size_t i = 0; i < aSize / sizeof(uint32_t); i++) {
ndst[i] = nsrc[i];
}
}
void MemoryZero(void* aDst, size_t aSize) {
MOZ_RELEASE_ASSERT((size_t)aDst % sizeof(uint32_t) == 0);
MOZ_RELEASE_ASSERT(aSize % sizeof(uint32_t) == 0);
// Use volatile here to avoid annoying clang optimizations.
volatile uint32_t* ndst = (uint32_t*)aDst;
for (size_t i = 0; i < aSize / sizeof(uint32_t); i++) {
ndst[i] = 0;
}
}
// Return whether an address is in a tracked region. This excludes memory that
// is in an active new region and is not write protected.
static bool IsTrackedAddress(void* aAddress, bool* aExecutable) {
AutoSpinLock lock(gMemoryInfo->mTrackedRegionsLock);
Maybe<AllocatedMemoryRegion> region =
gMemoryInfo->mTrackedRegions.lookupClosestLessOrEqual(aAddress);
if (region.isSome() &&
MemoryContains(region.ref().mBase, region.ref().mSize, aAddress)) {
if (aExecutable) {
*aExecutable = region.ref().mExecutable;
}
return true;
}
return false;
}
bool HandleDirtyMemoryFault(uint8_t* aAddress) {
EnsureMemoryChangesAllowed();
bool different = false;
for (size_t i = ArrayLength(gMemoryInfo->mDirtyMemoryFaults) - 1; i; i--) {
gMemoryInfo->mDirtyMemoryFaults[i] = gMemoryInfo->mDirtyMemoryFaults[i - 1];
if (gMemoryInfo->mDirtyMemoryFaults[i] != aAddress) {
different = true;
}
}
gMemoryInfo->mDirtyMemoryFaults[0] = aAddress;
if (!different) {
Print("WARNING: Repeated accesses to the same dirty address %p\n",
aAddress);
}
// Round down to the base of the page.
aAddress = PageBase(aAddress);
AutoSpinLock lock(gMemoryInfo->mActiveDirtyLock);
// Check to see if this is already an active dirty page. Once a page has been
// marked as dirty it will be accessible until the next checkpoint is saved,
// but it's possible for multiple threads to access the same protected memory
// before we have a chance to unprotect it, in which case we'll end up here
// multiple times for the page.
if (gMemoryInfo->mActiveDirty.maybeLookup(aAddress)) {
return true;
}
// Crash if this address is not in a tracked region.
bool executable;
if (!IsTrackedAddress(aAddress, &executable)) {
return false;
}
// Copy the page's original contents into the active dirty set, and unprotect
// it so that execution can proceed.
uint8_t* original = AllocatePageCopy();
MemoryMove(original, aAddress, PageSize);
gMemoryInfo->mActiveDirty.insert(aAddress,
DirtyPage(aAddress, original, executable));
DirectUnprotectMemory(aAddress, PageSize, executable);
return true;
}
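// A sketch of the expected contract with the fault handler machinery in
// DirtyMemoryHandler.cpp (hypothetical wiring, shown for illustration):
#if 0
void OnWriteFault(uint8_t* aFaultingAddress) {
  if (!HandleDirtyMemoryFault(aFaultingAddress)) {
    // The address is not in a tracked region: treat this as a real crash.
    MOZ_CRASH("Write fault on untracked memory");
  }
  // Otherwise the page was copied into the active dirty set and unprotected,
  // and the faulting write can be retried.
}
#endif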
bool MemoryRangeIsTracked(void* aAddress, size_t aSize) {
for (uint8_t* ptr = PageBase(aAddress); ptr < (uint8_t*)aAddress + aSize;
ptr += PageSize) {
if (!IsTrackedAddress(ptr, nullptr)) {
return false;
}
}
return true;
}
void UnrecoverableSnapshotFailure() {
if (gMemoryInfo) {
AutoSpinLock lock(gMemoryInfo->mTrackedRegionsLock);
DirectUnprotectMemory(PageBase(&errno), PageSize, false);
for (auto region : gMemoryInfo->mTrackedRegionsByAllocationOrder) {
DirectUnprotectMemory(region.mBase, region.mSize, region.mExecutable,
/* aIgnoreFailures = */ true);
}
}
}
///////////////////////////////////////////////////////////////////////////////
// Initial Memory Region Processing
///////////////////////////////////////////////////////////////////////////////
void AddInitialUntrackedMemoryRegion(uint8_t* aBase, size_t aSize) {
MOZ_RELEASE_ASSERT(!HasSavedCheckpoint());
if (gInitializationFailureMessage) {
return;
}
static void* gSkippedRegion;
if (!gSkippedRegion) {
// We are allocating gMemoryInfo itself, and will directly call this
// function again shortly.
gSkippedRegion = aBase;
return;
}
MOZ_RELEASE_ASSERT(gSkippedRegion == gMemoryInfo);
AutoSpinLock lock(gMemoryInfo->mInitialUntrackedRegionsLock);
for (AllocatedMemoryRegion& region : gMemoryInfo->mInitialUntrackedRegions) {
if (!region.mBase) {
region.mBase = aBase;
region.mSize = aSize;
return;
}
}
// If we end up here then MaxInitialUntrackedRegions should be larger.
MOZ_CRASH();
}
static void RemoveInitialUntrackedRegion(uint8_t* aBase, size_t aSize) {
MOZ_RELEASE_ASSERT(!HasSavedCheckpoint());
AutoSpinLock lock(gMemoryInfo->mInitialUntrackedRegionsLock);
for (AllocatedMemoryRegion& region : gMemoryInfo->mInitialUntrackedRegions) {
if (region.mBase == aBase) {
MOZ_RELEASE_ASSERT(region.mSize == aSize);
region.mBase = nullptr;
region.mSize = 0;
return;
}
}
MOZ_CRASH();
}
// Get information about the mapped region containing *aAddress, or the next
// mapped region afterwards if *aAddress is not mapped. *aAddress is updated to
// the start of that region, and aSize, aProtection, and aMaxProtection are
// updated with the size and protection status of the region. Returns false if
// there are no more mapped regions after *aAddress.
static bool QueryRegion(uint8_t** aAddress, size_t* aSize,
int* aProtection = nullptr,
int* aMaxProtection = nullptr) {
mach_vm_address_t addr = (mach_vm_address_t)*aAddress;
mach_vm_size_t nbytes;
vm_region_basic_info_64 info;
mach_msg_type_number_t info_count = sizeof(vm_region_basic_info_64);
mach_port_t some_port;
kern_return_t rv =
mach_vm_region(mach_task_self(), &addr, &nbytes, VM_REGION_BASIC_INFO,
(vm_region_info_t)&info, &info_count, &some_port);
if (rv == KERN_INVALID_ADDRESS) {
return false;
}
MOZ_RELEASE_ASSERT(rv == KERN_SUCCESS);
*aAddress = (uint8_t*)addr;
*aSize = nbytes;
if (aProtection) {
*aProtection = info.protection;
}
if (aMaxProtection) {
*aMaxProtection = info.max_protection;
}
return true;
}
static void MarkThreadStacksAsUntracked() {
AutoPassThroughThreadEvents pt;
// Thread stacks are excluded from the tracked regions.
for (size_t i = MainThreadId; i <= MaxThreadId; i++) {
Thread* thread = Thread::GetById(i);
if (!thread->StackBase()) {
continue;
}
AddInitialUntrackedMemoryRegion(thread->StackBase(), thread->StackSize());
// Look for a mapped region with no access permissions immediately after
// the thread stack's allocated region, and include this in the untracked
// memory if found. This is done to avoid confusing breakpad, which will
// scan the allocated memory in this process and will not correctly
// determine stack boundaries if we track these trailing regions and end up
// marking them as readable.
// Find the mapped region containing the end of the thread's stack.
uint8_t* base = thread->StackBase() + thread->StackSize() - 1;
size_t size;
if (!QueryRegion(&base, &size)) {
MOZ_CRASH("Could not find memory region information for thread stack");
}
// Sanity check the region size. Note that we don't mark this entire region
// as untracked, since it may contain TLS data which should be tracked.
MOZ_RELEASE_ASSERT(base + size >=
thread->StackBase() + thread->StackSize());
uint8_t* trailing = base + size;
size_t trailingSize;
int protection;
if (QueryRegion(&trailing, &trailingSize, &protection)) {
if (trailing == base + size && protection == 0) {
AddInitialUntrackedMemoryRegion(trailing, trailingSize);
}
}
}
}
// Given an address region [aAddress, aAddress + *aSize), return true if
// there is any intersection with an excluded region
// [aExclude, aExclude + aExcludeSize). If so, set *aSize to the size of the
// subregion starting at aAddress which is not excluded, and *aRemaining and
// *aRemainingSize to any additional subregion which is not excluded.
static bool MaybeExtractMemoryRegion(uint8_t* aAddress, size_t* aSize,
uint8_t** aRemaining,
size_t* aRemainingSize, uint8_t* aExclude,
size_t aExcludeSize) {
uint8_t* addrLimit = aAddress + *aSize;
// Expand the excluded region out to the containing page boundaries.
MOZ_RELEASE_ASSERT((size_t)aExclude % PageSize == 0);
aExcludeSize = RoundupSizeToPageBoundary(aExcludeSize);
uint8_t* excludeLimit = aExclude + aExcludeSize;
if (excludeLimit <= aAddress || addrLimit <= aExclude) {
// No intersection.
return false;
}
*aSize = std::max<ssize_t>(aExclude - aAddress, 0);
if (aRemaining) {
*aRemaining = excludeLimit;
*aRemainingSize = std::max<ssize_t>(addrLimit - *aRemaining, 0);
}
return true;
}
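// Worked example: with 0x1000-byte pages, an input region [0x10000, 0x15000)
// and an excluded region [0x11000, 0x12000), the function returns true, sets
// *aSize to 0x1000 (the prefix [0x10000, 0x11000)), and sets *aRemaining to
// 0x12000 and *aRemainingSize to 0x3000 (the suffix [0x12000, 0x15000)).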
// Set *aSize to describe the number of bytes starting at aAddress that should
// be considered tracked memory. *aRemaining and *aRemainingSize are set to any
// remaining portion of the initial region after the first excluded portion
// that is found.
static void ExtractTrackedInitialMemoryRegion(uint8_t* aAddress, size_t* aSize,
uint8_t** aRemaining,
size_t* aRemainingSize) {
// Look for the earliest untracked region which intersects the given region.
const AllocatedMemoryRegion* earliestIntersect = nullptr;
for (const AllocatedMemoryRegion& region :
gMemoryInfo->mInitialUntrackedRegions) {
size_t size = *aSize;
if (MaybeExtractMemoryRegion(aAddress, &size, nullptr, nullptr,
region.mBase, region.mSize)) {
// There was an intersection.
if (!earliestIntersect || region.mBase < earliestIntersect->mBase) {
earliestIntersect = &region;
}
}
}
if (earliestIntersect) {
if (!MaybeExtractMemoryRegion(aAddress, aSize, aRemaining, aRemainingSize,
earliestIntersect->mBase,
earliestIntersect->mSize)) {
MOZ_CRASH();
}
} else {
// If there is no intersection then the entire region is tracked.
*aRemaining = aAddress + *aSize;
*aRemainingSize = 0;
}
}
static void AddTrackedRegion(uint8_t* aAddress, size_t aSize,
bool aExecutable) {
if (aSize) {
AutoSpinLock lock(gMemoryInfo->mTrackedRegionsLock);
gMemoryInfo->mTrackedRegions.insert(
aAddress, AllocatedMemoryRegion(aAddress, aSize, aExecutable));
gMemoryInfo->mTrackedRegionsByAllocationOrder.emplaceBack(aAddress, aSize,
aExecutable);
}
}
// Add any tracked subregions of [aAddress, aAddress + aSize].
void AddInitialTrackedMemoryRegions(uint8_t* aAddress, size_t aSize,
bool aExecutable) {
while (aSize) {
uint8_t* remaining;
size_t remainingSize;
ExtractTrackedInitialMemoryRegion(aAddress, &aSize, &remaining,
&remainingSize);
AddTrackedRegion(aAddress, aSize, aExecutable);
aAddress = remaining;
aSize = remainingSize;
}
}
static void UpdateNumTrackedRegionsForSnapshot();
// Fill in the set of tracked memory regions that are currently mapped within
// this process.
static void ProcessAllInitialMemoryRegions() {
MOZ_ASSERT(!AreThreadEventsPassedThrough());
{
AutoPassThroughThreadEvents pt;
for (uint8_t* addr = nullptr;;) {
size_t size;
int maxProtection;
if (!QueryRegion(&addr, &size, nullptr, &maxProtection)) {
break;
}
// Consider all memory regions that can possibly be written to, even if
// they aren't currently writable.
if (maxProtection & VM_PROT_WRITE) {
MOZ_RELEASE_ASSERT(maxProtection & VM_PROT_READ);
AddInitialTrackedMemoryRegions(addr, size,
maxProtection & VM_PROT_EXECUTE);
}
addr += size;
}
}
UpdateNumTrackedRegionsForSnapshot();
// Write protect all tracked memory.
AutoDisallowMemoryChanges disallow;
for (const AllocatedMemoryRegion& region :
gMemoryInfo->mTrackedRegionsByAllocationOrder) {
DirectWriteProtectMemory(region.mBase, region.mSize, region.mExecutable);
}
}
///////////////////////////////////////////////////////////////////////////////
// Free Region Management
///////////////////////////////////////////////////////////////////////////////
// All memory in gMemoryInfo->mTrackedRegions that is not in use at the current
// point in execution.
static FreeRegionSet gFreeRegions(MemoryKind::Tracked);
// The size of gMemoryInfo->mTrackedRegionsByAllocationOrder we expect to see
// at the point of the last saved checkpoint.
static size_t gNumTrackedRegions;
static void UpdateNumTrackedRegionsForSnapshot() {
MOZ_ASSERT(Thread::CurrentIsMainThread());
gNumTrackedRegions = gMemoryInfo->mTrackedRegionsByAllocationOrder.length();
}
void FixupFreeRegionsAfterRewind() {
// All memory that has been allocated since the associated checkpoint was
// reached is now free, and may be reused for new allocations.
size_t newTrackedRegions =
gMemoryInfo->mTrackedRegionsByAllocationOrder.length();
for (size_t i = gNumTrackedRegions; i < newTrackedRegions; i++) {
const AllocatedMemoryRegion& region =
gMemoryInfo->mTrackedRegionsByAllocationOrder[i];
gFreeRegions.Insert(region.mBase, region.mSize);
}
gNumTrackedRegions = newTrackedRegions;
}
/* static */ FreeRegionSet& FreeRegionSet::Get(MemoryKind aKind) {
return (aKind == MemoryKind::Tracked) ? gFreeRegions
: gMemoryInfo->mFreeUntrackedRegions;
}
void* FreeRegionSet::TakeNextChunk() {
MOZ_RELEASE_ASSERT(mNextChunk);
void* res = mNextChunk;
mNextChunk = nullptr;
return res;
}
void FreeRegionSet::InsertLockHeld(void* aAddress, size_t aSize,
AutoSpinLock& aLockHeld) {
mRegions.insert(aSize,
AllocatedMemoryRegion((uint8_t*)aAddress, aSize, true));
}
void FreeRegionSet::MaybeRefillNextChunk(AutoSpinLock& aLockHeld) {
if (mNextChunk) {
return;
}
// Look for a free region we can take the next chunk from.
size_t size = ChunkPages * PageSize;
gMemoryInfo->mMemoryBalance[(size_t)mKind] += size;
mNextChunk = ExtractLockHeld(size, aLockHeld);
if (!mNextChunk) {
// Allocate memory from the system.
mNextChunk = DirectAllocateMemory(nullptr, size);
RegisterAllocatedMemory(mNextChunk, size, mKind);
}
}
void FreeRegionSet::Insert(void* aAddress, size_t aSize) {
MOZ_RELEASE_ASSERT(aAddress && aAddress == PageBase(aAddress));
MOZ_RELEASE_ASSERT(aSize && aSize == RoundupSizeToPageBoundary(aSize));
AutoSpinLock lock(mLock);
MaybeRefillNextChunk(lock);
InsertLockHeld(aAddress, aSize, lock);
}
void* FreeRegionSet::ExtractLockHeld(size_t aSize, AutoSpinLock& aLockHeld) {
Maybe<AllocatedMemoryRegion> best =
mRegions.lookupClosestLessOrEqual(aSize, /* aRemove = */ true);
if (best.isSome()) {
MOZ_RELEASE_ASSERT(best.ref().mSize >= aSize);
uint8_t* res = best.ref().mBase;
if (best.ref().mSize > aSize) {
InsertLockHeld(res + aSize, best.ref().mSize - aSize, aLockHeld);
}
MemoryZero(res, aSize);
return res;
}
return nullptr;
}
void* FreeRegionSet::Extract(void* aAddress, size_t aSize) {
MOZ_RELEASE_ASSERT(aAddress == PageBase(aAddress));
MOZ_RELEASE_ASSERT(aSize && aSize == RoundupSizeToPageBoundary(aSize));
AutoSpinLock lock(mLock);
if (aAddress) {
MaybeRefillNextChunk(lock);
// We were given a point at which to try to place the allocation. Look for
// a free region which contains [aAddress, aAddress + aSize] entirely.
for (typename Tree::Iter iter = mRegions.begin(); !iter.done(); ++iter) {
uint8_t* regionBase = iter.ref().mBase;
uint8_t* regionExtent = regionBase + iter.ref().mSize;