forked from illumos/illumos-gate
-
Notifications
You must be signed in to change notification settings - Fork 109
/
umem.c
3548 lines (3087 loc) · 100 KB
/
umem.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2014 Joyent, Inc. All rights reserved.
*/
/*
* based on usr/src/uts/common/os/kmem.c r1.64 from 2001/12/18
*
* The slab allocator, as described in the following two papers:
*
* Jeff Bonwick,
* The Slab Allocator: An Object-Caching Kernel Memory Allocator.
* Proceedings of the Summer 1994 Usenix Conference.
* Available as /shared/sac/PSARC/1994/028/materials/kmem.pdf.
*
* Jeff Bonwick and Jonathan Adams,
* Magazines and vmem: Extending the Slab Allocator to Many CPUs and
* Arbitrary Resources.
* Proceedings of the 2001 Usenix Conference.
* Available as /shared/sac/PSARC/2000/550/materials/vmem.pdf.
*
* 1. Overview
* -----------
* umem is very close to kmem in implementation. There are seven major
* areas of divergence:
*
* * Initialization
*
* * CPU handling
*
* * umem_update()
*
* * KM_SLEEP v.s. UMEM_NOFAIL
*
* * lock ordering
*
* * changing UMEM_MAXBUF
*
* * Per-thread caching for malloc/free
*
* 2. Initialization
* -----------------
* kmem is initialized early on in boot, and knows that no one will call
* into it before it is ready. umem does not have these luxuries. Instead,
* initialization is divided into two phases:
*
* * library initialization, and
*
* * first use
*
* umem's full initialization happens at the time of the first allocation
* request (via malloc() and friends, umem_alloc(), or umem_zalloc()),
* or the first call to umem_cache_create().
*
* umem_free(), and umem_cache_alloc() do not require special handling,
* since the only way to get valid arguments for them is to successfully
* call a function from the first group.
*
* 2.1. Library Initialization: umem_startup()
* -------------------------------------------
* umem_startup() is libumem.so's .init section. It calls pthread_atfork()
* to install the handlers necessary for umem's Fork1-Safety. Because of
* race condition issues, all other pre-umem_init() initialization is done
* statically (i.e. by the dynamic linker).
*
* For standalone use, umem_startup() returns everything to its initial
* state.
*
* 2.2. First use: umem_init()
* ------------------------------
* The first time any memory allocation function is used, we have to
* create the backing caches and vmem arenas which are needed for it.
* umem_init() is the central point for that task. When it completes,
* umem_ready is either UMEM_READY (all set) or UMEM_READY_INIT_FAILED (unable
* to initialize, probably due to lack of memory).
*
* There are four different paths from which umem_init() is called:
*
* * from umem_alloc() or umem_zalloc(), with 0 < size < UMEM_MAXBUF,
*
* * from umem_alloc() or umem_zalloc(), with size > UMEM_MAXBUF,
*
* * from umem_cache_create(), and
*
* * from memalign(), with align > UMEM_ALIGN.
*
* The last three just check if umem is initialized, and call umem_init()
* if it is not. For performance reasons, the first case is more complicated.
*
* 2.2.1. umem_alloc()/umem_zalloc(), with 0 < size < UMEM_MAXBUF
* -----------------------------------------------------------------
* In this case, umem_cache_alloc(&umem_null_cache, ...) is called.
* There is special case code which causes any allocation on
* &umem_null_cache to fail by returning (NULL), regardless of the
* flags argument.
*
* So umem_cache_alloc() returns NULL, and umem_alloc()/umem_zalloc() call
* umem_alloc_retry(). umem_alloc_retry() sees that the allocation
* was against &umem_null_cache, and calls umem_init().
*
* If initialization is successful, umem_alloc_retry() returns 1, which
* causes umem_alloc()/umem_zalloc() to start over, which causes it to load
* the (now valid) cache pointer from umem_alloc_table.
*
* 2.2.2. Dealing with race conditions
* -----------------------------------
* There are a couple race conditions resulting from the initialization
* code that we have to guard against:
*
* * In umem_cache_create(), there is a special UMC_INTERNAL cflag
* that is passed for caches created during initialization. It
* is illegal for a user to try to create a UMC_INTERNAL cache.
* This allows initialization to proceed, but any other
* umem_cache_create()s will block by calling umem_init().
*
* Since umem_null_cache has a 1-element cache_cpu, its cache_cpu_mask
* is always zero. umem_cache_alloc uses cp->cache_cpu_mask to
* mask the cpu number. This prevents a race between grabbing a
* cache pointer out of umem_alloc_table and growing the cpu array.
*
*
* 3. CPU handling
* ---------------
* kmem uses the CPU's sequence number to determine which "cpu cache" to
* use for an allocation. Currently, there is no way to get the sequence
* number in userspace.
*
* umem keeps track of cpu information in umem_cpus, an array of umem_max_ncpus
* umem_cpu_t structures. CURCPU() is a "hint" function, which we then mask
* with either umem_cpu_mask or cp->cache_cpu_mask to find the actual "cpu" id.
* The mechanics of this is all in the CPU(mask) macro.
*
* Currently, umem uses _lwp_self() as its hint.
*
*
* 4. The update thread
* --------------------
* kmem uses a task queue, kmem_taskq, to do periodic maintenance on
* every kmem cache. vmem has a periodic timeout for hash table resizing.
* The kmem_taskq also provides a separate context for kmem_cache_reap()'s
* to be done in, avoiding issues of the context of kmem_reap() callers.
*
* Instead, umem has the concept of "updates", which are asynchronous requests
* for work attached to single caches. All caches with pending work are
* on a doubly linked list rooted at the umem_null_cache. All update state
* is protected by the umem_update_lock mutex, and the umem_update_cv is used
* for notification between threads.
*
* 4.1. Cache states with regards to updates
* -----------------------------------------
* A given cache is in one of three states:
*
* Inactive cache_uflags is zero, cache_u{next,prev} are NULL
*
* Work Requested cache_uflags is non-zero (but UMU_ACTIVE is not set),
* cache_u{next,prev} link the cache onto the global
* update list
*
* Active cache_uflags has UMU_ACTIVE set, cache_u{next,prev}
* are NULL, and either umem_update_thr or
* umem_st_update_thr are actively doing work on the
* cache.
*
* An update can be added to any cache in any state -- if the cache is
* Inactive, it transitions to being Work Requested. If the cache is
* Active, the worker will notice the new update and act on it before
* transitioning the cache to the Inactive state.
*
* If a cache is in the Active state, UMU_NOTIFY can be set, which asks
* the worker to broadcast the umem_update_cv when it has finished.
*
* 4.2. Update interface
* ---------------------
* umem_add_update() adds an update to a particular cache.
* umem_updateall() adds an update to all caches.
* umem_remove_updates() returns a cache to the Inactive state.
*
* umem_process_updates() process all caches in the Work Requested state.
*
* 4.3. Reaping
* ------------
* When umem_reap() is called (at the time of heap growth), it schedules
* UMU_REAP updates on every cache. It then checks to see if the update
* thread exists (umem_update_thr != 0). If it is, it broadcasts
* the umem_update_cv to wake the update thread up, and returns.
*
* If the update thread does not exist (umem_update_thr == 0), and the
* program currently has multiple threads, umem_reap() attempts to create
* a new update thread.
*
* If the process is not multithreaded, or the creation fails, umem_reap()
* calls umem_st_update() to do an inline update.
*
* 4.4. The update thread
* ----------------------
* The update thread spends most of its time in cond_timedwait() on the
* umem_update_cv. It wakes up under two conditions:
*
* * The timedwait times out, in which case it needs to run a global
* update, or
*
* * someone cond_broadcast(3THR)s the umem_update_cv, in which case
* it needs to check if there are any caches in the Work Requested
* state.
*
* When it is time for another global update, umem calls umem_cache_update()
* on every cache, then calls vmem_update(), which tunes the vmem structures.
* umem_cache_update() can request further work using umem_add_update().
*
* After any work from the global update completes, the update timer is
* reset to umem_reap_interval seconds in the future. This makes the
* updates self-throttling.
*
* Reaps are similarly self-throttling. After a UMU_REAP update has
* been scheduled on all caches, umem_reap() sets a flag and wakes up the
* update thread. The update thread notices the flag, and resets the
* reap state.
*
* 4.5. Inline updates
* -------------------
* If the update thread is not running, umem_st_update() is used instead. It
* immediately does a global update (as above), then calls
* umem_process_updates() to process both the reaps that umem_reap() added and
* any work generated by the global update. Afterwards, it resets the reap
* state.
*
* While the umem_st_update() is running, umem_st_update_thr holds the thread
* id of the thread performing the update.
*
* 4.6. Updates and fork1()
* ------------------------
* umem has fork1() pre- and post-handlers which lock up (and release) every
* mutex in every cache. They also lock up the umem_update_lock. Since
* fork1() only copies over a single lwp, other threads (including the update
* thread) could have been actively using a cache in the parent. This
* can lead to inconsistencies in the child process.
*
* Because we locked all of the mutexes, the only possible inconsistencies are:
*
* * a umem_cache_alloc() could leak its buffer.
*
* * a caller of umem_depot_alloc() could leak a magazine, and all the
* buffers contained in it.
*
* * a cache could be in the Active update state. In the child, there
* would be no thread actually working on it.
*
* * a umem_hash_rescale() could leak the new hash table.
*
* * a umem_magazine_resize() could be in progress.
*
* * a umem_reap() could be in progress.
*
* The memory leaks we can't do anything about. umem_release_child() resets
* the update state, moves any caches in the Active state to the Work Requested
* state. This might cause some updates to be re-run, but UMU_REAP and
* UMU_HASH_RESCALE are effectively idempotent, and the worst that can
* happen from umem_magazine_resize() is resizing the magazine twice in close
* succession.
*
* Much of the cleanup in umem_release_child() is skipped if
* umem_st_update_thr == thr_self(). This is so that applications which call
* fork1() from a cache callback does not break. Needless to say, any such
* application is tremendously broken.
*
*
* 5. KM_SLEEP v.s. UMEM_NOFAIL
* ----------------------------
* Allocations against kmem and vmem have two basic modes: SLEEP and
* NOSLEEP. A sleeping allocation will go to sleep (waiting for
* more memory) instead of failing (returning NULL).
*
* SLEEP allocations presume an extremely multithreaded model, with
* a lot of allocation and deallocation activity. umem cannot presume
* that its clients have any particular type of behavior. Instead,
* it provides two types of allocations:
*
* * UMEM_DEFAULT, equivalent to KM_NOSLEEP (i.e. return NULL on
* failure)
*
* * UMEM_NOFAIL, which, on failure, calls an optional callback
* (registered with umem_nofail_callback()).
*
* The callback is invoked with no locks held, and can do an arbitrary
* amount of work. It then has a choice between:
*
* * Returning UMEM_CALLBACK_RETRY, which will cause the allocation
* to be restarted.
*
* * Returning UMEM_CALLBACK_EXIT(status), which will cause exit(2)
* to be invoked with status. If multiple threads attempt to do
* this simultaneously, only one will call exit(2).
*
* * Doing some kind of non-local exit (thr_exit(3thr), longjmp(3C),
* etc.)
*
* The default callback returns UMEM_CALLBACK_EXIT(255).
*
* To have these callbacks without risk of state corruption (in the case of
* a non-local exit), we have to ensure that the callbacks get invoked
* close to the original allocation, with no inconsistent state or held
* locks. The following steps are taken:
*
* * All invocations of vmem are VM_NOSLEEP.
*
* All constructor callbacks (which can themselves do allocations)
* are passed UMEM_DEFAULT as their required allocation argument. This
* way, the constructor will fail, allowing the highest-level allocation
* invoke the nofail callback.
*
* If a constructor callback _does_ do a UMEM_NOFAIL allocation, and
* the nofail callback does a non-local exit, we will leak the
* partially-constructed buffer.
*
*
* 6. Lock Ordering
* ----------------
* umem has a few more locks than kmem does, mostly in the update path. The
* overall lock ordering (earlier locks must be acquired first) is:
*
* umem_init_lock
*
* vmem_list_lock
* vmem_nosleep_lock.vmpl_mutex
* vmem_t's:
* vm_lock
* sbrk_lock
*
* umem_cache_lock
* umem_update_lock
* umem_flags_lock
* umem_cache_t's:
* cache_cpu[*].cc_lock
* cache_depot_lock
* cache_lock
* umem_log_header_t's:
* lh_cpu[*].clh_lock
* lh_lock
*
* 7. Changing UMEM_MAXBUF
* -----------------------
*
* When changing UMEM_MAXBUF extra care has to be taken. It is not sufficient to
* simply increase this number. First, one must update the umem_alloc_table to
* have the appropriate number of entries based upon the new size. If this is
* not done, this will lead to libumem blowing an assertion.
*
* The second place to update, which is not required, is the umem_alloc_sizes.
* These determine the default cache sizes that we're going to support.
*
* 8. Per-thread caching for malloc/free
* -------------------------------------
*
* "Time is an illusion. Lunchtime doubly so." -- Douglas Adams
*
* Time may be an illusion, but CPU cycles aren't. While libumem is designed
* to be a highly scalable allocator, that scalability comes with a fixed cycle
* penalty even in the absence of contention: libumem must acquire (and release)
* a per-CPU lock for each allocation. When contention is low and malloc(3C)
* frequency is high, this overhead can dominate execution time. To alleviate
* this, we allow for per-thread caching, a lock-free means of caching recent
* deallocations on a per-thread basis for use in satisfying subsequent calls.
*
* In addition to improving performance, we also want to:
* * Minimize fragmentation
* * Not add additional memory overhead (no larger malloc tags)
*
* In the ulwp_t of each thread there is a private data structure called a
* tmem_t that looks like:
*
* typedef struct {
* size_t tm_size;
* void *tm_roots[NTMEMBASE]; (Currently 16)
* } tmem_t;
*
* Each of the roots is treated as the head of a linked list. Each entry in the
* list can be thought of as a void ** which points to the next entry, until one
* of them points to NULL. If the head points to NULL, the list is empty.
*
* Each head corresponds to a umem_cache. Currently there is a linear mapping
* where the first root corresponds to the first cache, second root to the
* second cache, etc. This works because every allocation that malloc makes to
* umem_alloc that can be satisfied by a umem_cache will actually return a
* number of bytes equal to the size of that cache. Because of this property and
* a one to one mapping between caches and roots we can guarantee that every
* entry in a given root's list will be able to satisfy the same requests as the
* corresponding cache.
*
* The choice of sixteen roots is based on where we believe we get the biggest
* bang for our buck. The per-thread caches will cache up to 256 byte and 448
* byte allocations on ILP32 and LP64 respectively. Generally applications plan
* more carefully how they do larger allocations than smaller ones. Therefore
* sixteen roots is a reasonable compromise between the amount of additional
* overhead per thread, and the likelihood of a program to benefit from it.
*
* The maximum amount of memory that can be cached in each thread is determined
* by the perthread_cache UMEM_OPTION. It corresponds to the umem_ptc_size
* value. The default value for this is currently 1 MB. Once umem_init() has
* finished this cannot be directly tuned without directly modifying the
* instruction text. If, upon calling free(3C), the amount cached would exceed
* this maximum, we instead actually return the buffer to the umem_cache instead
* of holding onto it in the thread.
*
* When a thread calls malloc(3C) it first determines which umem_cache it
* would be serviced by. If the allocation is not covered by ptcumem it goes to
* the normal malloc instead. Next, it checks if the tmem_root's list is empty
* or not. If it is empty, we instead go and allocate the memory from
* umem_alloc. If it is not empty, we remove the head of the list, set the
* appropriate malloc tags, and return that buffer.
*
* When a thread calls free(3C) it first looks at the malloc tag and if it is
* invalid or the allocation exceeds the largest cache in ptcumem and sends it
* off to the original free() to handle and clean up appropriately. Next, it
* checks if the allocation size is covered by one of the per-thread roots and
* if it isn't, it passes it off to the original free() to be released. Finally,
* before it inserts this buffer as the head, it checks if adding this buffer
* would put the thread over its maximum cache size. If it would, it frees the
* buffer back to the umem_cache. Otherwise it increments the threads total
* cached amount and makes the buffer the new head of the appropriate tm_root.
*
* When a thread exits, all of the buffers that it has in its per-thread cache
* will be passed to umem_free() and returned to the appropriate umem_cache.
*
* 8.1 Handling addition and removal of umem_caches
* ------------------------------------------------
*
* The set of umem_caches that are used to back calls to umem_alloc() and
* ultimately malloc() are determined at program execution time. The default set
* of caches is defined below in umem_alloc_sizes[]. Various umem_options exist
* that modify the set of caches: size_add, size_clear, and size_remove. Because
* the set of caches can only be determined once umem_init() has been called and
* we have the additional goals of minimizing additional fragmentation and
* metadata space overhead in the malloc tags, this forces our hand to go down a
* slightly different path: the one tread by fasttrap and trapstat.
*
* During umem_init we're going to dynamically construct a new version of
* malloc(3C) and free(3C) that utilizes the known cache sizes and then ensure
* that ptcmalloc and ptcfree replace malloc and free as entries in the plt. If
* ptcmalloc and ptcfree cannot handle a request, they simply jump to the
* original libumem implementations.
*
* After creating all of the umem_caches, but before making them visible,
* umem_cache_init checks that umem_genasm_supported is non-zero. This value is
* set by each architecture in $ARCH/umem_genasm.c to indicate whether or not
* they support this. If the value is zero, then this process is skipped.
* Similarly, if the cache size has been tuned to zero by UMEM_OPTIONS, then
* this is also skipped.
*
* In umem_genasm.c, each architecture's implementation implements a single
* function called umem_genasm() that is responsible for generating the
* appropriate versions of ptcmalloc() and ptcfree(), placing them in the
* appropriate memory location, and finally doing the switch from malloc() and
* free() to ptcmalloc() and ptcfree(). Once the change has been made, there is
* no way to switch back, short of restarting the program or modifying program
* text with mdb.
*
* 8.2 Modifying the Procedure Linkage Table (PLT)
* -----------------------------------------------
*
* The last piece of this puzzle is how we actually jam ptcmalloc() into the
* PLT. To handle this, we have defined two functions, _malloc and _free and
* used a special mapfile directive to place them into the a readable,
* writeable, and executable segment. Next we use a standard #pragma weak for
* malloc and free and direct them to those symbols. By default, those symbols
* have text defined as nops for our generated functions and when they're
* invoked, they jump to the default malloc and free functions.
*
* When umem_genasm() is called, it goes through and generates new malloc() and
* free() functions in the text provided for by _malloc and _free just after the
* jump. Once both have been successfully generated, umem_genasm() nops over the
* original jump so that we now call into the genasm versions of these
* functions.
*
* 8.3 umem_genasm()
* -----------------
*
* umem_genasm() is currently implemented for i386 and amd64. This section
* describes the theory behind the construction. For specific byte code to
* assembly instructions and niceish C and asm versions of ptcmalloc and
* ptcfree, see the individual umem_genasm.c files. The layout consists of the
* following sections:
*
* o. function-specific prologue
* o. function-generic cache-selecting elements
* o. function-specific epilogue
*
* There are three different generic cache elements that exist:
*
* o. the last or only cache
* o. the intermediary caches if more than two
* o. the first one if more than one cache
*
* The malloc and free prologues and epilogues mimic the necessary portions of
* libumem's malloc and free. This includes things like checking for size
* overflow, setting and verifying the malloc tags.
*
* It is an important constraint that these functions do not make use of the
* call instruction. The only jmp outside of the individual functions is to the
* original libumem malloc and free respectively. Because doing things like
* setting errno or raising an internal umem error on improper malloc tags would
* require using calls into the PLT, whenever we encounter one of those cases we
* just jump to the original malloc and free functions reusing the same stack
* frame.
*
* Each of the above sections, the three caches, and the malloc and free
* prologue and epilogue are implemented as blocks of machine code with the
* corresponding assembly in comments. There are known offsets into each block
* that corresponds to locations of data and addresses that we only know at run
* time. These blocks are copied as necessary and the blanks filled in
* appropriately.
*
* As mentioned in section 8.2, the trampoline library uses specifically named
* variables to communicate the buffers and size to use. These variables are:
*
* o. umem_genasm_mptr: The buffer for ptcmalloc
* o. umem_genasm_msize: The size in bytes of the above buffer
* o. umem_genasm_fptr: The buffer for ptcfree
* o. umem_genasm_fsize: The size in bytes of the above buffer
*
* Finally, to enable the generated assembly we need to remove the previous jump
* to the actual malloc that exists at the start of these buffers. On x86, this
* is a five byte region. We could zero out the jump offset to be a jmp +0, but
* using nops can be faster. We specifically use a single five byte nop on x86
* as it is faster. When porting ptcumem to other architectures, the various
* opcode changes and options should be analyzed.
*
* 8.4 Interface with libc.so
* --------------------------
*
* The tmem_t structure as described in the beginning of section 8, is part of a
* private interface with libc. There are three functions that exist to cover
* this. They are not documented in man pages or header files. They are in the
* SUNWprivate part of libc's mapfile.
*
* o. _tmem_get_base(void)
*
* Returns the offset from the ulwp_t (curthread) to the tmem_t structure.
* This is a constant for all threads and is effectively a way to do
* ::offsetof ulwp_t ul_tmem without having to know the specifics of the
* structure outside of libc.
*
* o. _tmem_get_nentries(void)
*
* Returns the number of roots that exist in the tmem_t. This is one part
* of the cap on the number of umem_caches that we can back with tmem.
*
* o. _tmem_set_cleanup(void (*)(void *, int))
*
* This sets a clean up handler that gets called back when a thread exits.
* There is one call per buffer, the void * is a pointer to the buffer on
* the list, the int is the index into the roots array for this buffer.
*
* 8.5 Tuning and disabling per-thread caching
* -------------------------------------------
*
* There is only one tunable for per-thread caching: the amount of memory each
* thread should be able to cache. This is specified via the perthread_cache
* UMEM_OPTION option. No attempt is made to sanity check the specified
* value; the limit is simply the maximum value of a size_t.
*
* If the perthread_cache UMEM_OPTION is set to zero, nomagazines was requested,
* or UMEM_DEBUG has been turned on then we will never call into umem_genasm;
* however, the trampoline audit library and jump will still be in place.
*
* 8.6 Observing efficacy of per-thread caching
* --------------------------------------------
*
* To understand the efficacy of per-thread caching, use the ::umastat dcmd
* to see the percentage of capacity consumed on a per-thread basis, the
* degree to which each umem cache contributes to per-thread cache consumption,
* and the number of buffers in per-thread caches on a per-umem cache basis.
* If more detail is required, the specific buffers in a per-thread cache can
* be iterated over with the umem_ptc_* walkers. (These walkers allow an
* optional ulwp_t to be specified to iterate only over a particular thread's
* cache.)
*/
#include <umem_impl.h>
#include <sys/vmem_impl_user.h>
#include "umem_base.h"
#include "vmem_base.h"
#include <sys/processor.h>
#include <sys/sysmacros.h>
#include <alloca.h>
#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <signal.h>
#include <unistd.h>
#include <atomic.h>
#include "misc.h"
/*
 * umem only ever issues non-sleeping vmem allocations, regardless of the
 * caller's umflag: a failed vmem allocation must return NULL so that the
 * UMEM_NOFAIL callback machinery (section 5 of the big theory statement)
 * can run with no locks held.
 */
#define UMEM_VMFLAGS(umflag) (VM_NOSLEEP)
/* system page size; presumably filled in during startup/init — not visible in this chunk */
size_t pagesize;
/*
* The default set of caches to back umem_alloc().
* These sizes should be reevaluated periodically.
*
* We want allocations that are multiples of the coherency granularity
* (64 bytes) to be satisfied from a cache which is a multiple of 64
* bytes, so that it will be 64-byte aligned. For all multiples of 64,
* the next kmem_cache_size greater than or equal to it must be a
* multiple of 64.
*
* This table must be in sorted order, from smallest to highest. The
* highest slot must be UMEM_MAXBUF, and every slot afterwards must be
* zero.
*/
/*
 * Cache sizes backing umem_alloc(); constraints (sorted order, 64-byte
 * alignment of multiples of 64, UMEM_MAXBUF as the last non-zero slot)
 * are described in the comment above.
 */
static int umem_alloc_sizes[] = {
#ifdef _LP64
/* LP64: fewer tiny sizes — the minimum buffer must hold a 64-bit pointer */
1 * 8,
1 * 16,
2 * 16,
3 * 16,
#else
/* ILP32: a denser set of small sizes */
1 * 8,
2 * 8,
3 * 8,
4 * 8, 5 * 8, 6 * 8, 7 * 8,
#endif
4 * 16, 5 * 16, 6 * 16, 7 * 16,
4 * 32, 5 * 32, 6 * 32, 7 * 32,
4 * 64, 5 * 64, 6 * 64, 7 * 64,
4 * 128, 5 * 128, 6 * 128, 7 * 128,
/* even subdivisions of an 8K page, rounded down to 64-byte alignment */
P2ALIGN(8192 / 7, 64),
P2ALIGN(8192 / 6, 64),
P2ALIGN(8192 / 5, 64),
P2ALIGN(8192 / 4, 64), 2304,
P2ALIGN(8192 / 3, 64),
P2ALIGN(8192 / 2, 64), 4544,
P2ALIGN(8192 / 1, 64), 9216,
/* large sizes up to the 128K limit; odd entries (2304, 4544, 9216) presumably
 * fill gaps to reduce internal fragmentation — TODO confirm */
4096 * 3,
8192 * 2, /* = 8192 * 2 */
24576, 32768, 40960, 49152, 57344, 65536, 73728, 81920,
90112, 98304, 106496, 114688, 122880, UMEM_MAXBUF, /* 128k */
/* 24 slots for user expansion */
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
};
/* total slot count of umem_alloc_sizes[], including the zero expansion slots */
#define NUM_ALLOC_SIZES (sizeof (umem_alloc_sizes) / sizeof (*umem_alloc_sizes))
/*
 * Magazine size classes. Columns appear to be
 * { magsize, align, minbuf, maxbuf } per umem_magtype_t — confirm against
 * umem_impl.h. Smaller magazines serve larger buffers (the maxbuf column
 * shrinks as magsize grows), limiting per-CPU cached memory.
 */
static umem_magtype_t umem_magtype[] = {
{ 1, 8, 3200, 65536 },
{ 3, 16, 256, 32768 },
{ 7, 32, 64, 16384 },
{ 15, 64, 0, 8192 },
{ 31, 64, 0, 4096 },
{ 47, 64, 0, 2048 },
{ 63, 64, 0, 1024 },
{ 95, 64, 0, 512 },
{ 143, 64, 0, 0 },
};
/*
 * umem tunables
 */
/* sizes the per-cpu cache array; NOTE(review): presumably set at startup */
uint32_t umem_max_ncpus; /* # of CPU caches. */
uint32_t umem_stack_depth = 15; /* # stack frames in a bufctl_audit */
uint32_t umem_reap_interval = 10; /* max reaping rate (seconds) */
uint_t umem_depot_contention = 2; /* max failed trylocks per real interval */
uint_t umem_abort = 1; /* whether to abort on error */
uint_t umem_output = 0; /* whether to write to standard error */
uint_t umem_logging = 0; /* umem_log_enter() override */
uint32_t umem_mtbf = 0; /* mean time between failures [default: off] */
size_t umem_transaction_log_size; /* size of transaction log */
size_t umem_content_log_size; /* size of content log */
size_t umem_failure_log_size; /* failure log [4 pages per CPU] */
size_t umem_slab_log_size; /* slab create log [4 pages per CPU] */
size_t umem_content_maxsave = 256; /* UMF_CONTENTS max bytes to log */
size_t umem_lite_minsize = 0; /* minimum buffer size for UMF_LITE */
size_t umem_lite_maxalign = 1024; /* maximum buffer alignment for UMF_LITE */
size_t umem_maxverify; /* maximum bytes to inspect in debug routines */
size_t umem_minfirewall; /* hardware-enforced redzone threshold */
size_t umem_ptc_size = 1048576; /* size of per-thread cache (in bytes) */
/* global debugging flags (UMF_*); guarded by umem_flags_lock below */
uint_t umem_flags = 0;
uintptr_t umem_tmem_off;
mutex_t umem_init_lock; /* locks initialization */
cond_t umem_init_cv; /* initialization CV */
thread_t umem_init_thr; /* thread initializing */
int umem_init_env_ready; /* environ pre-initted */
int umem_ready = UMEM_READY_STARTUP;
int umem_ptc_enabled; /* per-thread caching enabled */
/* UMEM_NOFAIL handling: user callback plus exit serialization state */
static umem_nofail_callback_t *nofail_callback;
static mutex_t umem_nofail_exit_lock;
static thread_t umem_nofail_exit_thr;
/* caches backing umem's own metadata (slabs and bufctls) */
static umem_cache_t *umem_slab_cache;
static umem_cache_t *umem_bufctl_cache;
static umem_cache_t *umem_bufctl_audit_cache;
mutex_t umem_flags_lock;
/* the heap arena and the alloc/free entry points layered on it */
static vmem_t *heap_arena;
static vmem_alloc_t *heap_alloc;
static vmem_free_t *heap_free;
/* vmem arenas carving up the heap for specific internal purposes */
static vmem_t *umem_internal_arena;
static vmem_t *umem_cache_arena;
static vmem_t *umem_hash_arena;
static vmem_t *umem_log_arena;
static vmem_t *umem_oversize_arena;
static vmem_t *umem_va_arena;
static vmem_t *umem_default_arena;
static vmem_t *umem_firewall_va_arena;
static vmem_t *umem_firewall_arena;
vmem_t *umem_memalign_arena;
/* headers for the audit logs sized by the umem_*_log_size tunables above */
umem_log_header_t *umem_transaction_log;
umem_log_header_t *umem_content_log;
umem_log_header_t *umem_failure_log;
umem_log_header_t *umem_slab_log;
/*
 * The thread id serves as the "cpu" hint: CPU(mask) spreads threads
 * across the per-cpu cache array by masking thr_self().
 */
#define CPUHINT() (thr_self())
#define CPUHINT_MAX() INT_MAX
#define CPU(mask) (umem_cpus + (CPUHINT() & (mask)))
static umem_cpu_t umem_startup_cpu = { /* initial, single, cpu */
	UMEM_CACHE_SIZE(0),
	0
};
static uint32_t umem_cpu_mask = 0; /* global cpu mask */
static umem_cpu_t *umem_cpus = &umem_startup_cpu; /* cpu list */
volatile uint32_t umem_reaping;
thread_t umem_update_thr;
struct timeval umem_update_next; /* timeofday of next update */
volatile thread_t umem_st_update_thr; /* only used when single-thd */
/* true iff the calling thread is the (threaded or single-thread) updater */
#define IN_UPDATE() (thr_self() == umem_update_thr || \
    thr_self() == umem_st_update_thr)
#define IN_REAP() IN_UPDATE()
mutex_t umem_update_lock; /* cache_u{next,prev,flags} */
cond_t umem_update_cv;
volatile hrtime_t umem_reap_next; /* min hrtime of next reap */
mutex_t umem_cache_lock; /* inter-cache linkage only */
/*
 * umem_null_cache: sentinel cache.  It heads the circular cache list
 * (cache_next/cache_prev point back at itself below) and the circular
 * update list (cache_unext/cache_uprev likewise).  The initializer is
 * positional and must track the field order of umem_cache_t exactly;
 * the "start of ... layer" comments mark the major sections.
 */
#ifdef UMEM_STANDALONE
umem_cache_t umem_null_cache;
static const umem_cache_t umem_null_cache_template = {
#else
umem_cache_t umem_null_cache = {
#endif
	0, 0, 0, 0, 0,
	0, 0,
	0, 0,
	0, 0,
	"invalid_cache",
	0, 0,
	NULL, NULL, NULL, NULL,
	NULL,
	0, 0, 0, 0,
	/* circular cache list and update list: self-referential sentinel */
	&umem_null_cache, &umem_null_cache,
	&umem_null_cache, &umem_null_cache,
	0,
	DEFAULTMUTEX, /* start of slab layer */
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	&umem_null_cache.cache_nullslab,
	/* the embedded null slab, pointing back at this cache */
	{
		&umem_null_cache,
		NULL,
		&umem_null_cache.cache_nullslab,
		&umem_null_cache.cache_nullslab,
		NULL,
		-1,
		0
	},
	NULL,
	NULL,
	DEFAULTMUTEX, /* start of depot layer */
	NULL, {
		NULL, 0, 0, 0, 0
	}, {
		NULL, 0, 0, 0, 0
	}, {
		{
			DEFAULTMUTEX, /* start of CPU cache */
			0, 0, NULL, NULL, -1, -1, 0
		}
	}
};
/* helper macros to stamp out runs of umem_null_cache pointers */
#define ALLOC_TABLE_4 \
	&umem_null_cache, &umem_null_cache, &umem_null_cache, &umem_null_cache
#define ALLOC_TABLE_64 \
	ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, \
	ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, \
	ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, \
	ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4, ALLOC_TABLE_4
#define ALLOC_TABLE_1024 \
	ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, \
	ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, \
	ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, \
	ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64, ALLOC_TABLE_64
/*
 * Size-to-cache lookup table: one slot per UMEM_ALIGN-sized step up to
 * UMEM_MAXBUF (16 x 1024 slots).  Every slot starts out pointing at
 * umem_null_cache; NOTE(review): presumably rewritten with the real
 * caches when the caches are created -- not visible in this chunk.
 */
static umem_cache_t *umem_alloc_table[UMEM_MAXBUF >> UMEM_ALIGN_SHIFT] = {
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024,
	ALLOC_TABLE_1024
};
/* Used to constrain audit-log stack traces */
caddr_t umem_min_stack;
caddr_t umem_max_stack;
/* error codes recorded in umem_abort_info.ump_error on a detected fault */
#define UMERR_MODIFIED 0 /* buffer modified while on freelist */
#define UMERR_REDZONE 1 /* redzone violation (write past end of buf) */
#define UMERR_DUPFREE 2 /* freed a buffer twice */
#define UMERR_BADADDR 3 /* freed a bad (unallocated) address */
#define UMERR_BADBUFTAG 4 /* buftag corrupted */
#define UMERR_BADBUFCTL 5 /* bufctl corrupted */
#define UMERR_BADCACHE 6 /* freed a buffer to the wrong cache */
#define UMERR_BADSIZE 7 /* alloc size != free size */
#define UMERR_BADBASE 8 /* buffer base address wrong */
/* snapshot of the most recent fatal error, for post-mortem inspection */
struct {
	hrtime_t ump_timestamp; /* timestamp of error */
	int ump_error; /* type of umem error (UMERR_*) */
	void *ump_buffer; /* buffer that induced abort */
	void *ump_realbuf; /* real start address for buffer */
	umem_cache_t *ump_cache; /* buffer's cache according to client */
	umem_cache_t *ump_realcache; /* actual cache containing buffer */
	umem_slab_t *ump_slab; /* slab according to umem_findslab() */
	umem_bufctl_t *ump_bufctl; /* bufctl */
} umem_abort_info;
/*
 * Fill the size bytes at buf_arg with the 64-bit pattern.
 * size is assumed to be a multiple of sizeof (uint64_t).
 */
static void
copy_pattern(uint64_t pattern, void *buf_arg, size_t size)
{
	uint64_t *cur = buf_arg;
	uint64_t *end = (uint64_t *)((char *)buf_arg + size);

	for (; cur < end; cur++)
		*cur = pattern;
}
/*
 * Check that every 64-bit word in [buf_arg, buf_arg + size) equals
 * pattern.  Returns a pointer to the first mismatching word, or NULL
 * if the whole buffer matches.
 */
static void *
verify_pattern(uint64_t pattern, void *buf_arg, size_t size)
{
	uint64_t *end = (uint64_t *)((char *)buf_arg + size);
	uint64_t *cur = buf_arg;

	while (cur < end) {
		if (*cur != pattern)
			return (cur);
		cur++;
	}
	return (NULL);
}
/*
 * Verify that the buffer contains the `old' pattern, replacing each
 * matching word with `new' as we go.  On a mismatch, restore the words
 * already overwritten back to `old' and return a pointer to the bad
 * word; return NULL if the whole buffer matched (it now holds `new').
 */
static void *
verify_and_copy_pattern(uint64_t old, uint64_t new, void *buf_arg, size_t size)
{
	uint64_t *end = (uint64_t *)((char *)buf_arg + size);
	uint64_t *cur;

	for (cur = buf_arg; cur < end; cur++) {
		if (*cur != old) {
			/* undo the partial rewrite before reporting */
			copy_pattern(old, buf_arg,
			    (char *)cur - (char *)buf_arg);
			return (cur);
		}
		*cur = new;
	}

	return (NULL);
}
/*
 * Invoke func on every cache in the global cache list.  The list is
 * circular and headed by the umem_null_cache sentinel; umem_cache_lock
 * protects the linkage for the duration of the walk.
 */
void
umem_cache_applyall(void (*func)(umem_cache_t *))
{
	umem_cache_t *cache;

	(void) mutex_lock(&umem_cache_lock);
	cache = umem_null_cache.cache_next;
	while (cache != &umem_null_cache) {
		func(cache);
		cache = cache->cache_next;
	}
	(void) mutex_unlock(&umem_cache_lock);
}
/*
 * Request update work (flags) for cache cp; caller is expected to hold
 * umem_update_lock (see umem_add_update()).  If the cache is currently
 * being processed (UMU_ACTIVE) or already queued, the new flags are
 * simply OR-ed in; otherwise the cache is appended to the circular
 * update list headed by umem_null_cache.
 */
static void
umem_add_update_unlocked(umem_cache_t *cp, int flags)
{
	umem_cache_t *cnext, *cprev;

	/* UMU_ACTIVE is owned by the update thread; callers may not set it */
	flags &= ~UMU_ACTIVE;

	if (!flags)
		return;

	if (cp->cache_uflags & UMU_ACTIVE) {
		/* update in progress: accumulate the request for later */
		cp->cache_uflags |= flags;
		return;
	}

	if (cp->cache_unext != NULL) {
		/* already on the update list: just merge the flags */
		ASSERT(cp->cache_uflags != 0);
		cp->cache_uflags |= flags;
		return;
	}

	/* not queued: link cp in just before the sentinel */
	ASSERT(cp->cache_uflags == 0);
	cp->cache_uflags = flags;
	cp->cache_unext = cnext = &umem_null_cache;
	cp->cache_uprev = cprev = umem_null_cache.cache_uprev;
	cnext->cache_uprev = cp;
	cprev->cache_unext = cp;
}
/*
 * Queue update work for cp (see umem_add_update_unlocked()) and wake
 * the update thread.  No wakeup is needed when the caller *is* the
 * update thread, since it is already running.
 */
static void
umem_add_update(umem_cache_t *cp, int flags)
{
	(void) mutex_lock(&umem_update_lock);

	umem_add_update_unlocked(cp, flags);

	if (!IN_UPDATE())
		(void) cond_broadcast(&umem_update_cv);

	(void) mutex_unlock(&umem_update_lock);
}
/*
* Remove a cache from the update list, waiting for any in-progress work to
* complete first.
*/
static void
umem_remove_updates(umem_cache_t *cp)
{
(void) mutex_lock(&umem_update_lock);
/*
* Get it out of the active state
*/
while (cp->cache_uflags & UMU_ACTIVE) {
int cancel_state;
ASSERT(cp->cache_unext == NULL);
cp->cache_uflags |= UMU_NOTIFY;
/*
* Make sure the update state is sane, before we wait