forked from torvalds/linux
-
Notifications
You must be signed in to change notification settings - Fork 1
/
dept.h
573 lines (487 loc) · 12.2 KB
/
dept.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
/* SPDX-License-Identifier: GPL-2.0 */
/*
* DEPT(DEPendency Tracker) - runtime dependency tracker
*
* Started by Byungchul Park <max.byungchul.park@gmail.com>:
*
* Copyright (c) 2020 LG Electronics, Inc., Byungchul Park
*/
#ifndef __LINUX_DEPT_H
#define __LINUX_DEPT_H

#ifdef CONFIG_DEPT

#include <linux/types.h>

struct task_struct;

/* Max backtrace entries stored in one dept_stack (see struct dept_stack). */
#define DEPT_MAX_STACK_ENTRY 16
/* Size of the per-task ring buffer of waits (see struct dept_task). */
#define DEPT_MAX_WAIT_HIST 64
/* Max event contexts a task may hold at once (see struct dept_task). */
#define DEPT_MAX_ECXT_HELD 48
/* Number of key bytes in a dept_key; each byte address acts as a key. */
#define DEPT_MAX_SUBCLASSES 16
/* Subclasses reserved per event; USR is what remains for user subclasses. */
#define DEPT_MAX_SUBCLASSES_EVT 2
#define DEPT_MAX_SUBCLASSES_USR (DEPT_MAX_SUBCLASSES / DEPT_MAX_SUBCLASSES_EVT)
/* Number of class pointers cached in a dept_key (see struct dept_key). */
#define DEPT_MAX_SUBCLASSES_CACHE 2

/* IRQ context indices: softirq and hardirq. */
#define DEPT_SIRQ 0
#define DEPT_HIRQ 1
#define DEPT_IRQS_NR 2
/* Bit flags corresponding to the IRQ context indices above. */
#define DEPT_SIRQF (1UL << DEPT_SIRQ)
#define DEPT_HIRQF (1UL << DEPT_HIRQ)
struct dept_ecxt;

/*
 * Per-IRQ-context slot embedded in a dept_class (indexed by
 * DEPT_SIRQ/DEPT_HIRQ) recording an event context observed in that
 * IRQ context, for tracking the class's IRQ dependencies.
 */
struct dept_iecxt {
	struct dept_ecxt *ecxt;
	/* IRQ index the ecxt was enabled/observed in — presumably; confirm in dept.c */
	int enirq;
	/*
	 * for preventing a new ecxt from being added
	 */
	bool staled;
};
struct dept_wait;

/*
 * Per-IRQ-context slot embedded in a dept_class (indexed by
 * DEPT_SIRQ/DEPT_HIRQ) recording a wait observed in that IRQ context.
 */
struct dept_iwait {
	struct dept_wait *wait;
	/* IRQ index the wait was placed in — presumably; confirm in dept.c */
	int irq;
	/*
	 * for preventing a new wait from being added
	 */
	bool staled;
	/* whether this slot has ever been used — presumably; confirm in dept.c */
	bool touched;
};
/*
 * A dependency class: the node type of DEPT's dependency graph.
 * While unallocated the object sits in a pool free-list through
 * pool_node; once allocated, the anonymous struct members are live
 * (the two share storage via the union).
 */
struct dept_class {
	union {
		struct llist_node pool_node;
		struct {
			/*
			 * reference counter for object management
			 */
			atomic_t ref;

			/*
			 * unique information about the class
			 */
			const char *name;
			unsigned long key;
			int sub_id;

			/*
			 * for BFS traversal over the dependency graph
			 */
			unsigned int bfs_gen;
			int bfs_dist;
			struct dept_class *bfs_parent;

			/*
			 * for hashing this object
			 */
			struct hlist_node hash_node;

			/*
			 * for linking all classes
			 */
			struct list_head all_node;

			/*
			 * for associating its dependencies
			 * (forward and reverse direction)
			 */
			struct list_head dep_head;
			struct list_head dep_rev_head;

			/*
			 * for tracking IRQ dependencies, one slot
			 * per IRQ context (softirq/hardirq)
			 */
			struct dept_iecxt iecxt[DEPT_IRQS_NR];
			struct dept_iwait iwait[DEPT_IRQS_NR];

			/*
			 * classified by a map embedded in task_struct,
			 * not an explicit map
			 */
			bool sched_map;
		};
	};
};
/*
 * Key object identifying a class (one byte address per subclass).
 * The same storage doubles as a small cache of resolved class
 * pointers for the fastpath.
 */
struct dept_key {
	union {
		/*
		 * Each byte-wise address will be used as its key.
		 */
		char base[DEPT_MAX_SUBCLASSES];

		/*
		 * for caching the main class pointers
		 */
		struct dept_class *classes[DEPT_MAX_SUBCLASSES_CACHE];
	};
};
/*
 * Per-instance map embedded in (or associated with) a synchronization
 * object; connects the instance to its dept_class via a dept_key.
 */
struct dept_map {
	const char *name;
	struct dept_key *keys;

	/*
	 * subclass that can be set from user
	 */
	int sub_u;

	/*
	 * It's local copy for fast access to the associated classes.
	 * Also used as the dept_key for static maps.
	 */
	struct dept_key map_key;

	/*
	 * wait timestamp (generation number) associated to this map
	 */
	unsigned int wgen;

	/*
	 * whether this map should be going to be checked or not
	 */
	bool nocheck;
};
/*
 * Static initializer for a struct dept_map: name stringified from @n,
 * external key pointer from @k, user subclass 0, empty class cache,
 * zero wait generation, and checking enabled (nocheck = false).
 */
#define DEPT_MAP_INITIALIZER(n, k) \
{ \
	.name = #n, \
	.keys = (struct dept_key *)(k), \
	.sub_u = 0, \
	.map_key = { .classes = { NULL, } }, \
	.wgen = 0U, \
	.nocheck = false, \
}
/*
 * A saved backtrace. Pool-managed like the other DEPT objects:
 * pool_node while free, the anonymous struct while in use.
 */
struct dept_stack {
	union {
		struct llist_node pool_node;
		struct {
			/*
			 * reference counter for object management
			 */
			atomic_t ref;

			/*
			 * backtrace entries (raw return addresses)
			 * and how many of them are valid
			 */
			unsigned long raw[DEPT_MAX_STACK_ENTRY];
			int nr;
		};
	};
};
/*
 * An event context: the span between entering a context that may
 * trigger an event and the event actually firing. Pool-managed:
 * pool_node while free, the anonymous struct while in use.
 */
struct dept_ecxt {
	union {
		struct llist_node pool_node;
		struct {
			/*
			 * reference counter for object management
			 */
			atomic_t ref;

			/*
			 * function that entered to this ecxt
			 */
			const char *ecxt_fn;

			/*
			 * event function
			 */
			const char *event_fn;

			/*
			 * associated class
			 */
			struct dept_class *class;

			/*
			 * flag indicating which IRQ has been
			 * enabled within the event context
			 */
			unsigned long enirqf;

			/*
			 * where the IRQ-enabled happened
			 * (IP and backtrace, per IRQ context)
			 */
			unsigned long enirq_ip[DEPT_IRQS_NR];
			struct dept_stack *enirq_stack[DEPT_IRQS_NR];

			/*
			 * where the event context started
			 */
			unsigned long ecxt_ip;
			struct dept_stack *ecxt_stack;

			/*
			 * where the event triggered
			 */
			unsigned long event_ip;
			struct dept_stack *event_stack;
		};
	};
};
/*
 * A recorded wait on a class. Pool-managed: pool_node while free,
 * the anonymous struct while in use.
 */
struct dept_wait {
	union {
		struct llist_node pool_node;
		struct {
			/*
			 * reference counter for object management
			 */
			atomic_t ref;

			/*
			 * function causing this wait
			 */
			const char *wait_fn;

			/*
			 * the associated class
			 */
			struct dept_class *class;

			/*
			 * which IRQ the wait was placed in
			 * (DEPT_SIRQF/DEPT_HIRQF bit flags)
			 */
			unsigned long irqf;

			/*
			 * where the IRQ wait happened
			 * (IP and backtrace, per IRQ context)
			 */
			unsigned long irq_ip[DEPT_IRQS_NR];
			struct dept_stack *irq_stack[DEPT_IRQS_NR];

			/*
			 * where the wait happened
			 */
			unsigned long wait_ip;
			struct dept_stack *wait_stack;

			/*
			 * whether this wait is for commit in scheduler
			 */
			bool sched_sleep;
		};
	};
};
/*
 * A dependency edge: an (ecxt, wait) pair, i.e. "this wait occurred
 * within this event context". Pool-managed: pool_node while free,
 * the anonymous struct while in use.
 */
struct dept_dep {
	union {
		struct llist_node pool_node;
		struct {
			/*
			 * reference counter for object management
			 */
			atomic_t ref;

			/*
			 * key data of dependency
			 */
			struct dept_ecxt *ecxt;
			struct dept_wait *wait;

			/*
			 * This object can be referred without dept_lock
			 * held but with IRQ disabled, e.g. for hash
			 * lookup. So deferred deletion is needed.
			 */
			struct rcu_head rh;

			/*
			 * for BFS traversal over the dependency graph
			 */
			struct list_head bfs_node;

			/*
			 * for hashing this object
			 */
			struct hlist_node hash_node;

			/*
			 * for linking to a class object
			 * (forward and reverse direction)
			 */
			struct list_head dep_node;
			struct list_head dep_rev_node;
		};
	};
};
/*
 * A simple power-of-two-sized hash table descriptor.
 */
struct dept_hash {
	/*
	 * hash table buckets
	 */
	struct hlist_head *table;

	/*
	 * size of the table, i.e. 2^bits buckets
	 */
	int bits;
};
/*
 * Object pool descriptor backing the pool-managed DEPT objects
 * (class, stack, ecxt, wait, dep).
 */
struct dept_pool {
	const char *name;

	/*
	 * object size
	 */
	size_t obj_sz;

	/*
	 * the number of objects in the static array
	 * (presumably the remaining count — confirm in dept.c)
	 */
	atomic_t obj_nr;

	/*
	 * offset of ->pool_node within the object
	 */
	size_t node_off;

	/*
	 * pointer to the static pool storage, plus free-lists:
	 * one boot-time list and one per-CPU list
	 */
	void *spool;
	struct llist_head boot_pool;
	struct llist_head __percpu *lpool;
};
/*
 * An entry in the per-task stack of currently-held event contexts
 * (see struct dept_task::ecxt_held).
 */
struct dept_ecxt_held {
	/*
	 * associated event context
	 */
	struct dept_ecxt *ecxt;

	/*
	 * unique key for this dept_ecxt_held
	 */
	struct dept_map *map;

	/*
	 * class of the ecxt of this dept_ecxt_held
	 */
	struct dept_class *class;

	/*
	 * the wgen when the event context started
	 */
	unsigned int wgen;

	/*
	 * subclass that only works in the local context
	 */
	int sub_l;
};
/*
 * An entry in the per-task ring buffer of past waits
 * (see struct dept_task::wait_hist).
 */
struct dept_wait_hist {
	/*
	 * associated wait
	 */
	struct dept_wait *wait;

	/*
	 * unique id of all waits system-wise until wrapped
	 */
	unsigned int wgen;

	/*
	 * local context id to identify IRQ context
	 */
	unsigned int ctxt_id;
};
/*
 * Per-task DEPT state (associated with a task_struct — see
 * dept_task_init()/dept_task_exit() below).
 */
struct dept_task {
	/*
	 * all event contexts that have entered and before exiting
	 */
	struct dept_ecxt_held ecxt_held[DEPT_MAX_ECXT_HELD];
	int ecxt_held_pos;

	/*
	 * ring buffer holding all waits that have happened
	 */
	struct dept_wait_hist wait_hist[DEPT_MAX_WAIT_HIST];
	int wait_hist_pos;

	/*
	 * sequential id to identify each IRQ context
	 */
	unsigned int irq_id[DEPT_IRQS_NR];

	/*
	 * for tracking IRQ-enabled points with cross-event
	 */
	unsigned int wgen_enirq[DEPT_IRQS_NR];

	/*
	 * for keeping up-to-date IRQ-enabled points
	 */
	unsigned long enirq_ip[DEPT_IRQS_NR];

	/*
	 * current effective IRQ-enabled flag
	 */
	unsigned long eff_enirqf;

	/*
	 * for reserving a current stack instance at each operation
	 */
	struct dept_stack *stack;

	/*
	 * for preventing recursive call into DEPT engine
	 */
	int recursive;

	/*
	 * for staging data to commit a wait later
	 */
	struct dept_map stage_m;
	bool stage_sched_map;
	const char *stage_w_fn;
	unsigned long stage_ip;

	/*
	 * the number of missing ecxts
	 */
	int missing_ecxt;

	/*
	 * for tracking IRQ-enable state
	 */
	bool hardirqs_enabled;
	bool softirqs_enabled;

	/*
	 * whether the current task is in do_exit()
	 */
	bool task_exit;

	/*
	 * whether the current task is running __schedule()
	 */
	bool in_sched;
};
/*
 * Static initializer for a struct dept_task. Members not listed
 * (e.g. ecxt_held[]) are zero-initialized by C's designated
 * initializer semantics.
 */
#define DEPT_TASK_INITIALIZER(t)				\
{								\
	.wait_hist = { { .wait = NULL, } },			\
	.ecxt_held_pos = 0,					\
	.wait_hist_pos = 0,					\
	.irq_id = { 0U },					\
	.wgen_enirq = { 0U },					\
	.enirq_ip = { 0UL },					\
	.eff_enirqf = 0UL,					\
	.stack = NULL,						\
	.recursive = 0,						\
	.stage_m = DEPT_MAP_INITIALIZER((t)->stage_m, NULL),	\
	.stage_sched_map = false,				\
	.stage_w_fn = NULL,					\
	.stage_ip = 0UL,					\
	.missing_ecxt = 0,					\
	.hardirqs_enabled = false,				\
	.softirqs_enabled = false,				\
	.task_exit = false,					\
	.in_sched = false,					\
}
/* Global on/off switch and one-time engine initialization. */
extern void dept_on(void);
extern void dept_off(void);
extern void dept_init(void);

/* Per-task setup/teardown hooks. */
extern void dept_task_init(struct task_struct *t);
extern void dept_task_exit(struct task_struct *t);

/*
 * Notify DEPT that [start, start + sz) is being freed — presumably so
 * objects keyed into that range can be invalidated; confirm in dept.c.
 */
extern void dept_free_range(void *start, unsigned int sz);

/* Map (instance) initialization, re-initialization and copying. */
extern void dept_map_init(struct dept_map *m, struct dept_key *k, int sub_u, const char *n);
extern void dept_map_reinit(struct dept_map *m, struct dept_key *k, int sub_u, const char *n);
extern void dept_map_copy(struct dept_map *to, struct dept_map *from);

/* Wait-side API: report a wait, or stage one to commit later. */
extern void dept_wait(struct dept_map *m, unsigned long w_f, unsigned long ip, const char *w_fn, int sub_l);
extern void dept_stage_wait(struct dept_map *m, struct dept_key *k, unsigned long ip, const char *w_fn, bool strong);
extern void dept_request_event_wait_commit(void);
extern void dept_clean_stage(void);
extern void dept_stage_event(struct task_struct *t, unsigned long ip);

/* Event-context-side API: enter/exit an ecxt and fire events. */
extern void dept_ecxt_enter(struct dept_map *m, unsigned long e_f, unsigned long ip, const char *c_fn, const char *e_fn, int sub_l);
extern bool dept_ecxt_holding(struct dept_map *m, unsigned long e_f);
extern void dept_request_event(struct dept_map *m);
extern void dept_event(struct dept_map *m, unsigned long e_f, unsigned long ip, const char *e_fn);
extern void dept_ecxt_exit(struct dept_map *m, unsigned long e_f, unsigned long ip);

/* __schedule() entry/exit hooks. */
extern void dept_sched_enter(void);
extern void dept_sched_exit(void);
/*
 * Enter an event context on @m passing zeroed event flags/IP and NULL
 * function names — i.e. without keeping any identifying information
 * for the context.
 */
static inline void dept_ecxt_enter_nokeep(struct dept_map *m)
{
	dept_ecxt_enter(m, 0UL, 0UL, NULL, NULL, 0);
}
/*
 * for users who want to manage external keys
 */
extern void dept_key_init(struct dept_key *k);
extern void dept_key_destroy(struct dept_key *k);
extern void dept_map_ecxt_modify(struct dept_map *m, unsigned long e_f, struct dept_key *new_k, unsigned long new_e_f, unsigned long new_ip, const char *new_c_fn, const char *new_e_fn, int new_sub_l);

/* IRQ entry hooks and IRQ-enable/disable tracking hooks. */
extern void dept_softirq_enter(void);
extern void dept_hardirq_enter(void);
extern void dept_softirqs_on(unsigned long ip);
extern void dept_hardirqs_on(unsigned long ip);
extern void dept_softirqs_off(unsigned long ip);
extern void dept_hardirqs_off(unsigned long ip);
#else /* !CONFIG_DEPT */
/*
 * CONFIG_DEPT=n: empty types and no-op stubs. Some stubs (void)-cast
 * selected arguments so that caller-supplied expressions are still
 * evaluated and type-checked even when DEPT is disabled.
 */
struct dept_key  { };
struct dept_map  { };
struct dept_task { };

#define DEPT_MAP_INITIALIZER(n, k) { }
#define DEPT_TASK_INITIALIZER(t)   { }

#define dept_on()					do { } while (0)
#define dept_off()					do { } while (0)
#define dept_init()					do { } while (0)
#define dept_task_init(t)				do { } while (0)
#define dept_task_exit(t)				do { } while (0)
#define dept_free_range(s, sz)				do { } while (0)
#define dept_map_init(m, k, su, n)			do { (void)(n); (void)(k); } while (0)
#define dept_map_reinit(m, k, su, n)			do { (void)(n); (void)(k); } while (0)
#define dept_map_copy(t, f)				do { } while (0)

#define dept_wait(m, w_f, ip, w_fn, sl)			do { (void)(w_fn); } while (0)
#define dept_stage_wait(m, k, ip, w_fn, s)		do { (void)(k); (void)(w_fn); } while (0)
#define dept_request_event_wait_commit()		do { } while (0)
#define dept_clean_stage()				do { } while (0)
#define dept_stage_event(t, ip)				do { } while (0)
#define dept_ecxt_enter(m, e_f, ip, c_fn, e_fn, sl)	do { (void)(c_fn); (void)(e_fn); } while (0)
#define dept_ecxt_holding(m, e_f)			false
#define dept_request_event(m)				do { } while (0)
#define dept_event(m, e_f, ip, e_fn)			do { (void)(e_fn); } while (0)
#define dept_ecxt_exit(m, e_f, ip)			do { } while (0)
#define dept_sched_enter()				do { } while (0)
#define dept_sched_exit()				do { } while (0)
#define dept_ecxt_enter_nokeep(m)			do { } while (0)
#define dept_key_init(k)				do { (void)(k); } while (0)
#define dept_key_destroy(k)				do { (void)(k); } while (0)
#define dept_map_ecxt_modify(m, e_f, n_k, n_e_f, n_ip, n_c_fn, n_e_fn, n_sl) do { (void)(n_k); (void)(n_c_fn); (void)(n_e_fn); } while (0)

#define dept_softirq_enter()				do { } while (0)
#define dept_hardirq_enter()				do { } while (0)
#define dept_softirqs_on(ip)				do { } while (0)
#define dept_hardirqs_on(ip)				do { } while (0)
#define dept_softirqs_off(ip)				do { } while (0)
#define dept_hardirqs_off(ip)				do { } while (0)
#endif /* CONFIG_DEPT */
#endif /* __LINUX_DEPT_H */