|
9 | 9 | #include <linux/linkage.h> |
10 | 10 | #include <linux/list.h> |
11 | 11 |
|
12 | | -/* |
13 | | - * We use the MSB mostly because its available; see <linux/preempt_mask.h> for |
14 | | - * the other bits -- can't include that header due to inclusion hell. |
15 | | - */ |
16 | | -#define PREEMPT_NEED_RESCHED 0x80000000 |
17 | | - |
18 | | -#include <asm/preempt.h> |
19 | | - |
20 | 12 | /* |
21 | 13 | * We put the hardirq and softirq counter into the preemption |
22 | 14 | * counter. The bitmask has the following meaning: |
|
30 | 22 | * there are a few palaeontologic drivers which reenable interrupts in |
31 | 23 | * the handler, so we need more than one bit here. |
32 | 24 | * |
33 | | - * PREEMPT_MASK: 0x000000ff |
34 | | - * SOFTIRQ_MASK: 0x0000ff00 |
35 | | - * HARDIRQ_MASK: 0x000f0000 |
36 | | - * NMI_MASK: 0x00100000 |
37 | | - * PREEMPT_ACTIVE: 0x00200000 |
| 25 | + * PREEMPT_MASK: 0x000000ff |
| 26 | + * SOFTIRQ_MASK: 0x0000ff00 |
| 27 | + * HARDIRQ_MASK: 0x000f0000 |
| 28 | + * NMI_MASK: 0x00100000 |
| 29 | + * PREEMPT_ACTIVE: 0x00200000 |
| 30 | + * PREEMPT_NEED_RESCHED: 0x80000000 |
38 | 31 | */ |
39 | 32 | #define PREEMPT_BITS 8 |
40 | 33 | #define SOFTIRQ_BITS 8 |
|
64 | 57 | #define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS) |
65 | 58 | #define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT) |
66 | 59 |
|
| 60 | +/* We use the MSB mostly because its available */ |
| 61 | +#define PREEMPT_NEED_RESCHED 0x80000000 |
| 62 | + |
| 63 | +/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */ |
| 64 | +#include <asm/preempt.h> |
| 65 | + |
67 | 66 | #define hardirq_count() (preempt_count() & HARDIRQ_MASK) |
68 | 67 | #define softirq_count() (preempt_count() & SOFTIRQ_MASK) |
69 | 68 | #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \ |
|
122 | 121 | #define in_atomic_preempt_off() \ |
123 | 122 | ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET) |
124 | 123 |
|
125 | | -#ifdef CONFIG_PREEMPT_COUNT |
126 | | -# define preemptible() (preempt_count() == 0 && !irqs_disabled()) |
127 | | -#else |
128 | | -# define preemptible() 0 |
129 | | -#endif |
130 | | - |
131 | 124 | #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER) |
132 | 125 | extern void preempt_count_add(int val); |
133 | 126 | extern void preempt_count_sub(int val); |
@@ -160,6 +153,8 @@ do { \ |
160 | 153 |
|
161 | 154 | #define preempt_enable_no_resched() sched_preempt_enable_no_resched() |
162 | 155 |
|
| 156 | +#define preemptible() (preempt_count() == 0 && !irqs_disabled()) |
| 157 | + |
163 | 158 | #ifdef CONFIG_PREEMPT |
164 | 159 | #define preempt_enable() \ |
165 | 160 | do { \ |
@@ -232,6 +227,7 @@ do { \ |
232 | 227 | #define preempt_disable_notrace() barrier() |
233 | 228 | #define preempt_enable_no_resched_notrace() barrier() |
234 | 229 | #define preempt_enable_notrace() barrier() |
| 230 | +#define preemptible() 0 |
235 | 231 |
|
236 | 232 | #endif /* CONFIG_PREEMPT_COUNT */ |
237 | 233 |
|
|
0 commit comments