mirrored from git://git.rockbox.org/rockbox.git
-
Notifications
You must be signed in to change notification settings - Fork 159
/
thread.c
1577 lines (1343 loc) · 50.3 KB
/
thread.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2002 by Ulf Ralberg
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#include "config.h"
#ifdef HAVE_SIGALTSTACK_THREADS
/*
* The sp check in glibc __longjmp_chk() will cause
* a fatal error when switching threads via longjmp().
*/
#undef _FORTIFY_SOURCE
#endif
#include "thread-internal.h"
#include "kernel.h"
#include "cpu.h"
#include "string.h"
#ifdef RB_PROFILE
#include <profile.h>
#endif
#include "core_alloc.h"
/* Define THREAD_EXTRA_CHECKS as 1 to enable additional state checks */
#ifdef DEBUG
#define THREAD_EXTRA_CHECKS 1 /* Always 1 for DEBUG */
#else
#define THREAD_EXTRA_CHECKS 0
#endif
/****************************************************************************
* ATTENTION!! *
* See notes below on implementing processor-specific portions! *
****************************************************************************
*
 * General locking order to guarantee progress. Order must be observed but
 * all stages are not necessarily obligatory. Going from 1) to 3) is
 * perfectly legal.
*
* 1) IRQ
 * This is first because of the likelihood of having an interrupt occur that
* also accesses one of the objects farther down the list. Any non-blocking
* synchronization done may already have a lock on something during normal
* execution and if an interrupt handler running on the same processor as
* the one that has the resource locked were to attempt to access the
* resource, the interrupt handler would wait forever waiting for an unlock
* that will never happen. There is no danger if the interrupt occurs on
* a different processor because the one that has the lock will eventually
 * unlock and the other processor's handler may proceed at that time. Not
 * necessary when the resource in question is definitely not available to
 * interrupt handlers.
*
* 2) Kernel Object
* 1) May be needed beforehand if the kernel object allows dual-use such as
* event queues. The kernel object must have a scheme to protect itself from
* access by another processor and is responsible for serializing the calls
* to block_thread and wakeup_thread both to themselves and to each other.
* Objects' queues are also protected here.
*
* 3) Thread Slot
* This locks access to the thread's slot such that its state cannot be
* altered by another processor when a state change is in progress such as
* when it is in the process of going on a blocked list. An attempt to wake
* a thread while it is still blocking will likely desync its state with
* the other resources used for that state.
*
* 4) Core Lists
* These lists are specific to a particular processor core and are accessible
* by all processor cores and interrupt handlers. The running (rtr) list is
* the prime example where a thread may be added by any means.
*/
/*---------------------------------------------------------------------------
* Processor specific: core_sleep/core_wake/misc. notes
*
* ARM notes:
* FIQ is not dealt with by the scheduler code and is simply restored if it
* must by masked for some reason - because threading modifies a register
* that FIQ may also modify and there's no way to accomplish it atomically.
* s3c2440 is such a case.
*
 * Audio interrupts are generally treated at a higher priority than others.
 * Usage of scheduler code with interrupts higher than HIGHEST_IRQ_LEVEL
 * is not in general safe. Special cases may be constructed on a per-
 * source basis and blocking operations are not available.
*
 * core_sleep procedure to implement for any CPU to ensure an asynchronous
 * wakeup never results in requiring a wait until the next tick (up to
 * 10000uS!). May require assembly and careful instruction ordering.
*
* 1) On multicore, stay awake if directed to do so by another. If so, goto
* step 4.
* 2) If processor requires, atomically reenable interrupts and perform step
* 3.
* 3) Sleep the CPU core. If wakeup itself enables interrupts (stop #0x2000
* on Coldfire) goto step 5.
* 4) Enable interrupts.
* 5) Exit procedure.
*
 * core_wake and multiprocessor notes for sleep/wake coordination:
* If possible, to wake up another processor, the forcing of an interrupt on
* the woken core by the waker core is the easiest way to ensure a non-
* delayed wake and immediate execution of any woken threads. If that isn't
 * available then some careful non-blocking synchronization is needed (as on
* PP targets at the moment).
*---------------------------------------------------------------------------
*
*
*---------------------------------------------------------------------------
* Priority distribution structure (one category for each possible priority):
*
* +----+----+----+ ... +------+
* hist: | F0 | F1 | F2 | | Fn-1 |
* +----+----+----+ ... +------+
* mask: | b0 | b1 | b2 | | bn-1 |
* +----+----+----+ ... +------+
*
* F = count of threads at priority category n (frequency)
* b = bitmask of non-zero priority categories (occupancy)
*
 *        / if F[n] != 0 : 1
 * b[n] = |
 *        \ else         : 0
*
*---------------------------------------------------------------------------
 * Basic priority inheritance protocol (PIP):
*
* Mn = mutex n, Tn = thread n
*
* A lower priority thread inherits the priority of the highest priority
* thread blocked waiting for it to complete an action (such as release a
* mutex or respond to a message via queue_send):
*
* 1) T2->M1->T1
*
 * T1 owns M1, T2 is waiting for T1 to release M1. If T2 has a higher
 * priority than T1 then T1 inherits the priority of T2.
*
* 2) T3
* \/
* T2->M1->T1
*
* Situation is like 1) but T2 and T3 are both queued waiting for M1 and so
* T1 inherits the higher of T2 and T3.
*
* 3) T3->M2->T2->M1->T1
*
* T1 owns M1, T2 owns M2. If T3 has a higher priority than both T1 and T2,
* then T1 inherits the priority of T3 through T2.
*
* Blocking chains can grow arbitrarily complex (though it's best that they
* not form at all very often :) and build-up from these units.
*---------------------------------------------------------------------------
*/
static FORCE_INLINE void core_sleep(IF_COP_VOID(unsigned int core));
static FORCE_INLINE void store_context(void* addr);
static FORCE_INLINE void load_context(const void* addr);
/****************************************************************************
* Processor/OS-specific section - include necessary core support
*/
#include "asm/thread.c"
#if defined (CPU_PP)
#include "thread-pp.c"
#endif /* CPU_PP */
/*
* End Processor-specific section
***************************************************************************/
/* Panic with a message naming the offending thread (and, on multicore
 * builds, its core number).  Never returns. */
static NO_INLINE NORETURN_ATTR
void thread_panicf(const char *msg, struct thread_entry *thread)
{
    IF_COP( const unsigned int core = thread->core; )
    /* static buffer: avoids using stack space for the formatted name --
       this path is also reached on stack overflow (see thread_stkov) */
    static char name[sizeof (((struct thread_debug_info *)0)->name)];
    format_thread_name(name, sizeof (name), thread);
    panicf ("%s %s" IF_COP(" (%d)"), msg, name IF_COP(, core));
}
/* Report a stack overflow on 'thread' through the panic handler. */
static NO_INLINE void thread_stkov(struct thread_entry *thread)
{
    thread_panicf("Stkov", thread);
}
#if THREAD_EXTRA_CHECKS
/* Checked builds: both macros forward to the real panic handler. */
#define THREAD_PANICF(msg, thread) \
    thread_panicf(msg, thread)
#define THREAD_ASSERT(exp, msg, thread) \
    ({ if (!({ exp; })) thread_panicf((msg), (thread)); })
#else
/* Unchecked builds: THREAD_ASSERT compiles away entirely.
 * NOTE(review): THREAD_PANICF expands to an infinite loop (while (1)),
 * i.e. a reached panic site hangs silently -- looks deliberate (there
 * is no handler to call) but confirm it is not a typo for while (0). */
#define THREAD_PANICF(msg, thread) \
    do {} while (1)
#define THREAD_ASSERT(exp, msg, thread) \
    do {} while (0)
#endif /* THREAD_EXTRA_CHECKS */

/* Thread slot locking - serializes changes to a single thread's state;
 * compiles to nothing on single-core builds. */
#if NUM_CORES > 1
#define LOCK_THREAD(thread) \
    ({ corelock_lock(&(thread)->slot_cl); })
#define TRY_LOCK_THREAD(thread) \
    ({ corelock_try_lock(&(thread)->slot_cl); })
#define UNLOCK_THREAD(thread) \
    ({ corelock_unlock(&(thread)->slot_cl); })
#else /* NUM_CORES == 1*/
#define LOCK_THREAD(thread) \
    ({ (void)(thread); })
#define TRY_LOCK_THREAD(thread) \
    ({ (void)(thread); })
#define UNLOCK_THREAD(thread) \
    ({ (void)(thread); })
#endif /* NUM_CORES */

/* Per-core running (rtr) list lock */
#define RTR_LOCK(corep) \
    corelock_lock(&(corep)->rtr_cl)
#define RTR_UNLOCK(corep) \
    corelock_unlock(&(corep)->rtr_cl)

/* Keep the run queue's priority distribution in step with the queue
 * itself; no-ops when priority scheduling is disabled. */
#ifdef HAVE_PRIORITY_SCHEDULING
#define rtr_add_entry(corep, priority) \
    prio_add_entry(&(corep)->rtr_dist, (priority))
#define rtr_subtract_entry(corep, priority) \
    prio_subtract_entry(&(corep)->rtr_dist, (priority))
#define rtr_move_entry(corep, from, to) \
    prio_move_entry(&(corep)->rtr_dist, (from), (to))
#else /* !HAVE_PRIORITY_SCHEDULING */
#define rtr_add_entry(corep, priority) \
    do {} while (0)
#define rtr_subtract_entry(corep, priority) \
    do {} while (0)
#define rtr_move_entry(corep, from, to) \
    do {} while (0)
#endif /* HAVE_PRIORITY_SCHEDULING */
/* Save 'thread's CPU context so it can be resumed later; on hosted
 * platforms the thread's private errno is captured as well. */
static FORCE_INLINE void thread_store_context(struct thread_entry *thread)
{
    store_context(&thread->context);
#if (CONFIG_PLATFORM & PLATFORM_HOSTED)
    thread->__errno = errno;
#endif
}
/* Restore 'thread's saved CPU context (and, on hosted platforms, its
 * private errno) to resume its execution. */
static FORCE_INLINE void thread_load_context(struct thread_entry *thread)
{
#if (CONFIG_PLATFORM & PLATFORM_HOSTED)
    errno = thread->__errno;
#endif
    load_context(&thread->context);
}
/* After waking 'thread', tell the caller whether it should also yield:
 * returns THREAD_OK, OR'ed with THREAD_SWITCH when the woken thread has
 * a better priority (numerically lower value) than the thread heading
 * this core's run queue.  Without priority scheduling a switch is never
 * recommended. */
static FORCE_INLINE unsigned int
should_switch_tasks(struct thread_entry *thread)
{
#ifdef HAVE_PRIORITY_SCHEDULING
    const unsigned int core = CURRENT_CORE;
#if NUM_CORES > 1
    /* Forget about it if different CPU */
    if (thread->core != core)
        return THREAD_OK;
#endif

    /* Just woke something therefore a thread is on the run queue */
    struct thread_entry *current =
        RTR_THREAD_FIRST(&__core_id_entry(core)->rtr);
    if (LIKELY(thread->priority >= current->priority))
        return THREAD_OK;

    /* There is a thread ready to run of higher priority on the same
     * core as the current one; recommend a task switch. */
    return THREAD_OK | THREAD_SWITCH;
#else
    return THREAD_OK;
    (void)thread;
#endif /* HAVE_PRIORITY_SCHEDULING */
}
#ifdef HAVE_PRIORITY_SCHEDULING
/*---------------------------------------------------------------------------
* Increment frequency at category "priority"
*---------------------------------------------------------------------------
*/
/* Bump the thread count of priority category 'priority' and return the
 * new count; the occupancy bit is raised when the category goes from
 * empty to non-empty. */
static inline unsigned int prio_add_entry(
    struct priority_distribution *pd, int priority)
{
    unsigned int *slot = &pd->hist[priority];

    *slot += 1;
    if (*slot == 1)
        priobit_set_bit(&pd->mask, priority);

    return *slot;
}
/*---------------------------------------------------------------------------
* Decrement frequency at category "priority"
*---------------------------------------------------------------------------
*/
/* Drop the thread count of priority category 'priority' and return the
 * new count; the occupancy bit is cleared when the category empties. */
static inline unsigned int prio_subtract_entry(
    struct priority_distribution *pd, int priority)
{
    unsigned int *slot = &pd->hist[priority];

    *slot -= 1;
    if (*slot == 0)
        priobit_clear_bit(&pd->mask, priority);

    return *slot;
}
/*---------------------------------------------------------------------------
* Remove from one category and add to another
*---------------------------------------------------------------------------
*/
/* Transfer one count from category 'from' to category 'to', keeping
 * the occupancy mask consistent with the histogram. */
static inline void prio_move_entry(
    struct priority_distribution *pd, int from, int to)
{
    unsigned int remaining = --pd->hist[from];
    unsigned int occupants = ++pd->hist[to];

    if (remaining == 0)
        priobit_clear_bit(&pd->mask, from);

    if (occupants == 1)
        priobit_set_bit(&pd->mask, to);
}
#endif /* HAVE_PRIORITY_SCHEDULING */
/*---------------------------------------------------------------------------
* Common init for new thread basic info
*---------------------------------------------------------------------------
*/
/* Initialize the basic bookkeeping of a freshly created thread.  The
 * caller's stack pointer and size are aligned in place (hence the
 * in/out pointer parameters) before being recorded in the slot. */
static void new_thread_base_init(struct thread_entry *thread,
                                 void **stackp, size_t *stack_sizep,
                                 const char *name IF_PRIO(, int priority)
                                 IF_COP(, unsigned int core))
{
    /* Round the stack region to the port's minimum alignment */
    ALIGN_BUFFER(*stackp, *stack_sizep, MIN_STACK_ALIGN);

    thread->stack = *stackp;
    thread->stack_size = *stack_sizep;
    thread->name = name;

    /* Not waiting on anything and not on a timeout list yet */
    wait_queue_init(&thread->queue);
    thread->wqp = NULL;
    tmo_set_dequeued(thread);
#ifdef HAVE_PRIORITY_SCHEDULING
    thread->skip_count = 0;
    thread->blocker = NULL;
    thread->base_priority = priority;
    thread->priority = priority;
    /* The thread's own base priority is the first pdist entry */
    memset(&thread->pdist, 0, sizeof(thread->pdist));
    prio_add_entry(&thread->pdist, priority);
#endif
#if NUM_CORES > 1
    thread->core = core;
#endif
#ifdef HAVE_SCHEDULER_BOOSTCTRL
    thread->cpu_boost = 0;
#endif
}
/*---------------------------------------------------------------------------
* Move a thread onto the core's run queue and promote it
*---------------------------------------------------------------------------
*/
/* Add 'thread' to the core's run queue (taking the RTR lock here) and
 * mark it STATE_RUNNING. */
static inline void core_rtr_add(struct core_entry *corep,
                                struct thread_entry *thread)
{
    RTR_LOCK(corep);
    rtr_queue_add(&corep->rtr, thread);
    rtr_add_entry(corep, thread->priority);
#ifdef HAVE_PRIORITY_SCHEDULING
    /* skip_count is reloaded from the base priority on each requeue --
       presumably the scheduler's skip budget; confirm against switcher */
    thread->skip_count = thread->base_priority;
#endif
    thread->state = STATE_RUNNING;
    RTR_UNLOCK(corep);
}
/*---------------------------------------------------------------------------
* Remove a thread from the core's run queue
*---------------------------------------------------------------------------
*/
/* Remove 'thread' from the core's run queue; the thread's 'state'
 * field is deliberately left untouched for the caller to set. */
static inline void core_rtr_remove(struct core_entry *corep,
                                   struct thread_entry *thread)
{
    RTR_LOCK(corep);
    rtr_queue_remove(&corep->rtr, thread);
    rtr_subtract_entry(corep, thread->priority);
    /* Does not demote state */
    RTR_UNLOCK(corep);
}
/*---------------------------------------------------------------------------
* Move a thread back to a running state on its core
*---------------------------------------------------------------------------
*/
/* Put 'thread' back on its own core's run queue; if that core is not
 * the one executing here, poke it so the wakeup is not delayed until
 * its next tick. */
static NO_INLINE void core_schedule_wakeup(struct thread_entry *thread)
{
    const unsigned int core = IF_COP_CORE(thread->core);
    struct core_entry *corep = __core_id_entry(core);
    core_rtr_add(corep, thread);
#if NUM_CORES > 1
    if (core != CURRENT_CORE)
        core_wake(core);
#endif
}
#ifdef HAVE_PRIORITY_SCHEDULING
/*---------------------------------------------------------------------------
* Locks the thread registered as the owner of the block and makes sure it
* didn't change in the meantime
*---------------------------------------------------------------------------
*/
#if NUM_CORES == 1
/* Single-core build: bl->thread cannot change underneath us (the slot
 * locks are no-ops anyway), so simply return the owner. */
static inline struct thread_entry * lock_blocker_thread(struct blocker *bl)
{
    return bl->thread;
}
#else /* NUM_CORES > 1 */
/* SMP build: pin down the blocker's owner.  Returns the owner thread
 * with its slot lock held, or NULL with the splay corelock held when
 * ownership is shared ("multi").  Spins until a consistent capture. */
static struct thread_entry * lock_blocker_thread(struct blocker *bl)
{
    /* The blocker thread may change during the process of trying to
       capture it */
    while (1)
    {
        struct thread_entry *t = bl->thread;

        /* TRY, or else deadlocks are possible */
        if (!t)
        {
            /* NULL owner: multi-owned; the splay corelock guards it
               instead of a thread slot lock */
            struct blocker_splay *blsplay = (struct blocker_splay *)bl;
            if (corelock_try_lock(&blsplay->cl))
            {
                if (!bl->thread)
                    return NULL; /* Still multi */
                /* A single owner appeared meanwhile - retry via slot lock */
                corelock_unlock(&blsplay->cl);
            }
        }
        else
        {
            if (TRY_LOCK_THREAD(t))
            {
                if (bl->thread == t)
                    return t;
                /* Owner changed between read and lock - retry */
                UNLOCK_THREAD(t);
            }
        }
    }
}
#endif /* NUM_CORES */
/* Undo lock_blocker_thread(): release the owner's slot lock, or the
 * splay corelock when ownership is shared (bl->thread == NULL).
 * No-op on single-core builds. */
static inline void unlock_blocker_thread(struct blocker *bl)
{
#if NUM_CORES > 1
    struct thread_entry *blt = bl->thread;
    if (blt)
        UNLOCK_THREAD(blt);
    else
        corelock_unlock(&((struct blocker_splay *)bl)->cl);
#endif /* NUM_CORES > 1*/
    (void)bl;
}
/*---------------------------------------------------------------------------
* Change the priority and rtr entry for a running thread
*---------------------------------------------------------------------------
*/
/* Change a running thread's effective priority, moving its run-queue
 * distribution entry under the RTR lock so the two stay consistent. */
static inline void set_rtr_thread_priority(
    struct thread_entry *thread, int priority)
{
    const unsigned int core = IF_COP_CORE(thread->core);
    struct core_entry *corep = __core_id_entry(core);
    RTR_LOCK(corep);
    rtr_move_entry(corep, thread->priority, priority);
    thread->priority = priority;
    RTR_UNLOCK(corep);
}
/*---------------------------------------------------------------------------
* Finds the highest priority thread in a list of threads. If the list is
* empty, the PRIORITY_IDLE is returned.
*
* It is possible to use the struct priority_distribution within an object
* instead of scanning the remaining threads in the list but as a compromise,
* the resulting per-object memory overhead is saved at a slight speed
* penalty under high contention.
*---------------------------------------------------------------------------
*/
static int wait_queue_find_priority(struct __wait_queue *wqp)
{
int highest_priority = PRIORITY_IDLE;
struct thread_entry *thread = WQ_THREAD_FIRST(wqp);
while (thread != NULL)
{
int priority = thread->priority;
if (priority < highest_priority)
highest_priority = priority;
thread = WQ_THREAD_NEXT(thread);
}
return highest_priority;
}
/*---------------------------------------------------------------------------
* Register priority with blocking system and bubble it down the chain if
* any until we reach the end or something is already equal or higher.
*
* NOTE: A simultaneous circular wait could spin deadlock on multiprocessor
* targets but that same action also guarantees a circular block anyway and
* those are prevented, right? :-)
*---------------------------------------------------------------------------
*/
/* Propagate blocked-queue priority 'newblpr' down the blocking chain
 * starting at blocker 'bl' (owned by 'blt', whose slot lock the caller
 * already holds).  Walking stops when a priority stops changing, a
 * running thread ends the chain, or the chain circles back to
 * 'blocker0' (deadlock).  'blt' (the last one held) is unlocked on
 * exit. */
static void inherit_priority(
    struct blocker * const blocker0, struct blocker *bl,
    struct thread_entry *blt, int newblpr)
{
    int oldblpr = bl->priority;

    while (1)
    {
        if (blt == NULL)
        {
            /* Multiple owners */
            struct blocker_splay *blsplay = (struct blocker_splay *)bl;

            /* Recurse down the all the branches of this; it's the only way.
               We might meet the same queue several times if more than one of
               these threads is waiting the same queue. That isn't a problem
               for us since we early-terminate, just notable. */
            FOR_EACH_BITARRAY_SET_BIT(&blsplay->mask, slotnum)
            {
                bl->priority = oldblpr; /* To see the change each time */
                blt = __thread_slot_entry(slotnum);
                LOCK_THREAD(blt);
                inherit_priority(blocker0, bl, blt, newblpr);
            }

            corelock_unlock(&blsplay->cl);
            return;
        }

        bl->priority = newblpr;

        /* Update blocker thread inheritance record */
        if (newblpr < PRIORITY_IDLE)
            prio_add_entry(&blt->pdist, newblpr);

        if (oldblpr < PRIORITY_IDLE)
            prio_subtract_entry(&blt->pdist, oldblpr);

        /* Effective priority is the best remaining pdist entry */
        int oldpr = blt->priority;
        int newpr = priobit_ffs(&blt->pdist.mask);
        if (newpr == oldpr)
            break; /* No blocker thread priority change */

        if (blt->state == STATE_RUNNING)
        {
            set_rtr_thread_priority(blt, newpr);
            break; /* Running: last in chain */
        }

        /* Blocker is blocked */
        blt->priority = newpr;

        bl = blt->blocker;
        if (LIKELY(bl == NULL))
            break; /* Block doesn't support PIP */

        if (UNLIKELY(bl == blocker0))
            break; /* Full circle - deadlock! */

        /* Blocker becomes current thread and the process repeats */
        struct __wait_queue *wqp = wait_queue_ptr(blt);
        struct thread_entry *t = blt;
        blt = lock_blocker_thread(bl);

        UNLOCK_THREAD(t);

        /* Adjust this wait queue */
        oldblpr = bl->priority;
        if (newpr <= oldblpr)
            newblpr = newpr;
        else if (oldpr <= oldblpr)
            newblpr = wait_queue_find_priority(wqp);

        if (newblpr == oldblpr)
            break; /* Queue priority not changing */
    }

    UNLOCK_THREAD(blt);
}
/*---------------------------------------------------------------------------
 * Quick-inherit of priority elevation. 'thread' must not be runnable
*---------------------------------------------------------------------------
*/
/* Record one 'blpr' boost in the thread's inheritance distribution;
 * when this is the first entry at that level and it beats the current
 * effective priority, the thread is promoted directly (it is not
 * runnable, so no run-queue fixup is needed). */
static void priority_inherit_internal_inner(struct thread_entry *thread,
                                            int blpr)
{
    unsigned int entries = prio_add_entry(&thread->pdist, blpr);

    if (entries != 1)
        return; /* level already occupied - effective priority unchanged */

    if (blpr < thread->priority)
        thread->priority = blpr;
}
/* Inherit 'blpr' into the thread unless it is the no-boost sentinel
 * PRIORITY_IDLE. */
static inline void priority_inherit_internal(struct thread_entry *thread,
                                             int blpr)
{
    if (blpr >= PRIORITY_IDLE)
        return; /* nothing to inherit */

    priority_inherit_internal_inner(thread, blpr);
}
/*---------------------------------------------------------------------------
 * Quick-disinherit of priority elevation. 'thread' must be the current thread
*---------------------------------------------------------------------------
*/
/* Drop one 'blpr' entry from the thread's inheritance distribution;
 * if that empties the level and the thread's effective priority rested
 * on it, recompute the priority from what remains.  Uses
 * set_rtr_thread_priority, so the thread must be the running one (per
 * the header comment above). */
static void priority_disinherit_internal_inner(struct thread_entry *thread,
                                               int blpr)
{
    if (prio_subtract_entry(&thread->pdist, blpr) == 0 &&
        blpr <= thread->priority)
    {
        int priority = priobit_ffs(&thread->pdist.mask);
        if (priority != thread->priority)
            set_rtr_thread_priority(thread, priority);
    }
}
/* Disinherit 'blpr' from the thread unless it is the no-boost sentinel
 * PRIORITY_IDLE. */
static inline void priority_disinherit_internal(struct thread_entry *thread,
                                                int blpr)
{
    if (blpr >= PRIORITY_IDLE)
        return; /* nothing was inherited */

    priority_disinherit_internal_inner(thread, blpr);
}
/* Public entry: remove the blocker's priority boost from 'thread'
 * under the thread's slot lock. */
void priority_disinherit(struct thread_entry *thread, struct blocker *bl)
{
    LOCK_THREAD(thread);
    priority_disinherit_internal(thread, bl->priority);
    UNLOCK_THREAD(thread);
}
/*---------------------------------------------------------------------------
* Transfer ownership from a single owner to a multi-owner splay from a wait
* queue
*---------------------------------------------------------------------------
*/
/* Wake the locked 'thread' plus every consecutive waiter on the same
 * queue tagged "multi" (retval != 0), transferring the blocker either
 * to the whole group as a splay (owner thread becomes NULL) or, when
 * only one thread was taken, directly to that thread.  The previous
 * owner's retval receives the number of threads woken. */
static void wakeup_thread_queue_multi_transfer(struct thread_entry *thread)
{
    /* All threads will have the same blocker and queue; only we are changing
       it now */
    struct __wait_queue *wqp = wait_queue_ptr(thread);
    struct blocker *bl = thread->blocker;
    struct blocker_splay *blsplay = (struct blocker_splay *)bl;
    struct thread_entry *blt = bl->thread;

    /* The first thread is already locked and is assumed tagged "multi" */
    int count = 1;

    /* Multiple versions of the wait queue may be seen if doing more than
       one thread; queue removal isn't destructive to the pointers of the node
       being removed; this may lead to the blocker priority being wrong for a
       time but it gets fixed up below after getting exclusive access to the
       queue */
    while (1)
    {
        thread->blocker = NULL;
        wait_queue_remove(thread);

        /* Record this thread in the splay's ownership mask */
        unsigned int slotnum = THREAD_ID_SLOT(thread->id);
        threadbit_set_bit(&blsplay->mask, slotnum);

        struct thread_entry *tnext = WQ_THREAD_NEXT(thread);
        if (tnext == NULL || tnext->retval == 0)
            break;

        UNLOCK_THREAD(thread);

        count++;
        thread = tnext;

        LOCK_THREAD(thread);
    }

    /* Locking order reverses here since the threads are no longer on the
       queued side */
    if (count > 1)
        corelock_lock(&blsplay->cl);

    LOCK_THREAD(blt);

    int blpr = bl->priority;
    priority_disinherit_internal(blt, blpr);

    if (count > 1)
    {
        /* Group wakeup: ownership becomes shared and each taken thread
           inherits the queue's recomputed top priority */
        blsplay->blocker.thread = NULL;

        blpr = wait_queue_find_priority(wqp);

        FOR_EACH_BITARRAY_SET_BIT(&blsplay->mask, slotnum)
        {
            UNLOCK_THREAD(thread);
            thread = __thread_slot_entry(slotnum);
            LOCK_THREAD(thread);
            priority_inherit_internal(thread, blpr);
            core_schedule_wakeup(thread);
        }
    }
    else
    {
        /* Becomes a simple, direct transfer */
        blsplay->blocker.thread = thread;

        /* If this thread was at the blocker priority, its removal may
           lower the queue's priority */
        if (thread->priority <= blpr)
            blpr = wait_queue_find_priority(wqp);

        priority_inherit_internal(thread, blpr);
        core_schedule_wakeup(thread);
    }

    UNLOCK_THREAD(thread);

    bl->priority = blpr;

    UNLOCK_THREAD(blt);

    if (count > 1)
        corelock_unlock(&blsplay->cl);

    blt->retval = count;
}
/*---------------------------------------------------------------------------
 * Transfer ownership to a thread waiting for an object and transfer
 * inherited priority boost from other waiters. This algorithm knows that
* blocking chains may only unblock from the very end.
*
* Only the owning thread itself may call this and so the assumption that
* it is the running thread is made.
*---------------------------------------------------------------------------
*/
/* Wake 'thread' and hand it ownership of the blocking object.  Only
 * the current owner may call this (asserted in checked builds): the
 * owner sheds the object's inherited boost while the woken thread
 * picks up any remaining waiters' boost. */
static void wakeup_thread_transfer(struct thread_entry *thread)
{
    /* Waking thread inherits priority boost from object owner (blt) */
    struct blocker *bl = thread->blocker;
    struct thread_entry *blt = bl->thread;

    THREAD_ASSERT(__running_self_entry() == blt,
                  "UPPT->wrong thread", __running_self_entry());

    LOCK_THREAD(blt);

    thread->blocker = NULL;
    struct __wait_queue *wqp = wait_queue_remove(thread);

    int blpr = bl->priority;

    /* Remove the object's boost from the owning thread */
    priority_disinherit_internal_inner(blt, blpr);

    struct thread_entry *tnext = WQ_THREAD_FIRST(wqp);
    if (LIKELY(tnext == NULL))
    {
        /* Expected shortcut - no more waiters */
        blpr = PRIORITY_IDLE;
    }
    else
    {
        /* If thread is at the blocker priority, its removal may drop it */
        if (thread->priority <= blpr)
            blpr = wait_queue_find_priority(wqp);

        /* New owner inherits the remaining waiters' best priority */
        priority_inherit_internal_inner(thread, blpr);
    }

    bl->thread = thread; /* This thread pwns */

    core_schedule_wakeup(thread);
    UNLOCK_THREAD(thread);

    bl->priority = blpr; /* Save highest blocked priority */
    UNLOCK_THREAD(blt);
}
/*---------------------------------------------------------------------------
* Readjust priorities when waking a thread blocked waiting for another
* in essence "releasing" the thread's effect on the object owner. Can be
* performed from any context.
*---------------------------------------------------------------------------
*/
/* Wake 'thread' without transferring ownership: remove it from the
 * wait queue and, if its departure changes the queue's best priority,
 * re-run inheritance down the owner's blocking chain.  Per the header
 * comment above, callable from any context. */
static void wakeup_thread_release(struct thread_entry *thread)
{
    struct blocker *bl = thread->blocker;
    struct thread_entry *blt = lock_blocker_thread(bl);

    thread->blocker = NULL;
    struct __wait_queue *wqp = wait_queue_remove(thread);

    /* Off to see the wizard... */
    core_schedule_wakeup(thread);

    if (thread->priority > bl->priority)
    {
        /* Queue priority won't change */
        UNLOCK_THREAD(thread);
        unlock_blocker_thread(bl);
        return;
    }

    UNLOCK_THREAD(thread);

    int newblpr = wait_queue_find_priority(wqp);
    if (newblpr == bl->priority)
    {
        /* Blocker priority won't change */
        unlock_blocker_thread(bl);
        return;
    }

    /* inherit_priority() unlocks 'blt' when done */
    inherit_priority(bl, bl, blt, newblpr);
}
#endif /* HAVE_PRIORITY_SCHEDULING */
/*---------------------------------------------------------------------------
* Explicitly wakeup a thread on a blocking queue. Only effects threads of
* STATE_BLOCKED and STATE_BLOCKED_W_TMO.
*
* INTERNAL: Intended for use by kernel and not programs.
*---------------------------------------------------------------------------
*/
/* Wake a thread blocked on a queue (see header comment above).
 * Returns THREAD_OK (possibly with THREAD_SWITCH) on success,
 * THREAD_NONE when the thread was not in a wakeable state. */
unsigned int wakeup_thread_(struct thread_entry *thread
                            IF_PRIO(, enum wakeup_thread_protocol proto))
{
    LOCK_THREAD(thread);

    /* Determine thread's current state. */
    switch (thread->state)
    {
    case STATE_BLOCKED:
    case STATE_BLOCKED_W_TMO:
#ifdef HAVE_PRIORITY_SCHEDULING
        /* Threads with PIP blockers cannot specify "WAKEUP_DEFAULT" */
        if (thread->blocker != NULL)
        {
            /* Dispatch table indexed by the wakeup protocol */
            static void (* const funcs[])(struct thread_entry *thread)
                ICONST_ATTR =
            {
                [WAKEUP_DEFAULT]        = NULL,
                [WAKEUP_TRANSFER]       = wakeup_thread_transfer,
                [WAKEUP_RELEASE]        = wakeup_thread_release,
                [WAKEUP_TRANSFER_MULTI] = wakeup_thread_queue_multi_transfer,
            };

            /* Call the specified unblocking PIP (does the rest) */
            funcs[proto](thread);
        }
        else
#endif /* HAVE_PRIORITY_SCHEDULING */
        {
            /* No PIP blocker - plain dequeue and requeue on the run list */
            wait_queue_remove(thread);
            core_schedule_wakeup(thread);
            UNLOCK_THREAD(thread);
        }

        return should_switch_tasks(thread);

    case STATE_RUNNING:
        if (wait_queue_try_remove(thread))
        {
            UNLOCK_THREAD(thread);
            return THREAD_OK; /* timed out */
        }
        /* fall through - running and not on a wait queue: nothing to wake */

    default:
        UNLOCK_THREAD(thread);
        return THREAD_NONE;
    }
}
/*---------------------------------------------------------------------------
* Check the core's timeout list when at least one thread is due to wake.
* Filtering for the condition is done before making the call. Resets the
* tick when the next check will occur.
*---------------------------------------------------------------------------
*/
/* Walk this core's timeout list: wake every thread whose timeout has
 * expired, garbage-collect entries already woken by other means, and
 * compute the tick of the next required check.  The enable/disable
 * pattern implies the caller holds IRQs disabled; they are briefly
 * re-enabled between list entries. */
static NO_INLINE void check_tmo_expired_inner(struct core_entry *corep)
{
    const long tick = current_tick; /* snapshot the current tick */
    long next_tmo_check = tick + 60*HZ; /* minimum duration: once/minute */
    struct thread_entry *prev = NULL;
    struct thread_entry *thread = TMO_THREAD_FIRST(&corep->tmo);

    /* If there are no processes waiting for a timeout, just keep the check
       tick from falling into the past. */

    /* Break the loop once we have walked through the list of all
     * sleeping processes or have removed them all. */
    while (thread != NULL)
    {
        /* Check sleeping threads. Allow interrupts between checks. */
        enable_irq();

        struct thread_entry *next = TMO_THREAD_NEXT(thread);

        /* Lock thread slot against explicit wakeup */
        disable_irq();
        LOCK_THREAD(thread);

        unsigned int state = thread->state;
        if (LIKELY(state >= TIMEOUT_STATE_FIRST &&
                   TIME_BEFORE(tick, thread->tmo_tick)))
        {
            /* Timeout still pending - this will be the usual case */
            if (TIME_BEFORE(thread->tmo_tick, next_tmo_check))
            {
                /* Move the next check up to its time */
                next_tmo_check = thread->tmo_tick;
            }

            prev = thread;
        }
        else
        {
            /* TODO: there are no priority-inheriting timeout blocks
               right now but the procedure should be established */

            /* Sleep timeout has been reached / garbage collect stale list
               items */
            tmo_queue_expire(&corep->tmo, prev, thread);

            /* Only still-blocked threads get requeued; stale entries of
               already-running threads are just dropped */
            if (state >= TIMEOUT_STATE_FIRST)
                core_rtr_add(corep, thread);

            /* removed this one - prev doesn't change */
        }

        UNLOCK_THREAD(thread);
        thread = next;
    }

    corep->next_tmo_check = next_tmo_check;
}
/* Fast-path gate for timeout processing: skip the list walk entirely
 * while no registered timeout can be due yet. */
static FORCE_INLINE void check_tmo_expired(struct core_entry *corep)
{
    if (TIME_BEFORE(current_tick, corep->next_tmo_check))
        return; /* nothing can have expired yet */

    check_tmo_expired_inner(corep);
}
/*---------------------------------------------------------------------------
 * Prepares the current thread to sleep forever or for the given duration.
*---------------------------------------------------------------------------
*/
/* Take 'current' off its core's run queue and set its blocked/sleeping
 * state.  A non-negative 'timeout' (in ticks) also registers it on the
 * timeout list, upgrading STATE_BLOCKED to STATE_BLOCKED_W_TMO; a
 * negative timeout means wait indefinitely. */
static FORCE_INLINE void prepare_block(struct thread_entry *current,
                                       unsigned int state, int timeout)
{
    const unsigned int core = IF_COP_CORE(current->core);

    /* Remove the thread from the list of running threads. */
    struct core_entry *corep = __core_id_entry(core);
    core_rtr_remove(corep, current);

    if (timeout >= 0)
    {
        /* Sleep may expire. */
        long tmo_tick = current_tick + timeout;
        current->tmo_tick = tmo_tick;

        /* Pull the core's next timeout check forward if this deadline
           is sooner */
        if (TIME_BEFORE(tmo_tick, corep->next_tmo_check))
            corep->next_tmo_check = tmo_tick;

        tmo_queue_register(&corep->tmo, current);

        if (state == STATE_BLOCKED)
            state = STATE_BLOCKED_W_TMO;
    }

    /* Report new state. */
    current->state = state;
}
/*---------------------------------------------------------------------------