Skip to content

Commit 2e10e71

Browse files
fweisbec authored and Ingo Molnar committed
sched/preempt: Rearrange a few symbols after headers merge
Adjust a few comments, and further integrate a few definitions after the dumb headers copy. Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/r/1431441711-29753-3-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent 92cf211 commit 2e10e71

File tree

1 file changed

+15
-19
lines changed

1 file changed

+15
-19
lines changed

include/linux/preempt.h

Lines changed: 15 additions & 19 deletions
Original file line number | Diff line number | Diff line change
@@ -9,14 +9,6 @@
99
#include <linux/linkage.h>
1010
#include <linux/list.h>
1111

12-
/*
13-
* We use the MSB mostly because its available; see <linux/preempt_mask.h> for
14-
* the other bits -- can't include that header due to inclusion hell.
15-
*/
16-
#define PREEMPT_NEED_RESCHED 0x80000000
17-
18-
#include <asm/preempt.h>
19-
2012
/*
2113
* We put the hardirq and softirq counter into the preemption
2214
* counter. The bitmask has the following meaning:
@@ -30,11 +22,12 @@
3022
* there are a few palaeontologic drivers which reenable interrupts in
3123
* the handler, so we need more than one bit here.
3224
*
33-
* PREEMPT_MASK: 0x000000ff
34-
* SOFTIRQ_MASK: 0x0000ff00
35-
* HARDIRQ_MASK: 0x000f0000
36-
* NMI_MASK: 0x00100000
37-
* PREEMPT_ACTIVE: 0x00200000
25+
* PREEMPT_MASK: 0x000000ff
26+
* SOFTIRQ_MASK: 0x0000ff00
27+
* HARDIRQ_MASK: 0x000f0000
28+
* NMI_MASK: 0x00100000
29+
* PREEMPT_ACTIVE: 0x00200000
30+
* PREEMPT_NEED_RESCHED: 0x80000000
3831
*/
3932
#define PREEMPT_BITS 8
4033
#define SOFTIRQ_BITS 8
@@ -64,6 +57,12 @@
6457
#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
6558
#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
6659

60+
/* We use the MSB mostly because its available */
61+
#define PREEMPT_NEED_RESCHED 0x80000000
62+
63+
/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
64+
#include <asm/preempt.h>
65+
6766
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
6867
#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
6968
#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
@@ -122,12 +121,6 @@
122121
#define in_atomic_preempt_off() \
123122
((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
124123

125-
#ifdef CONFIG_PREEMPT_COUNT
126-
# define preemptible() (preempt_count() == 0 && !irqs_disabled())
127-
#else
128-
# define preemptible() 0
129-
#endif
130-
131124
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
132125
extern void preempt_count_add(int val);
133126
extern void preempt_count_sub(int val);
@@ -160,6 +153,8 @@ do { \
160153

161154
#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
162155

156+
#define preemptible() (preempt_count() == 0 && !irqs_disabled())
157+
163158
#ifdef CONFIG_PREEMPT
164159
#define preempt_enable() \
165160
do { \
@@ -232,6 +227,7 @@ do { \
232227
#define preempt_disable_notrace() barrier()
233228
#define preempt_enable_no_resched_notrace() barrier()
234229
#define preempt_enable_notrace() barrier()
230+
#define preemptible() 0
235231

236232
#endif /* CONFIG_PREEMPT_COUNT */
237233

0 commit comments

Comments (0)