 CREDITS                           |    2
 Documentation/Configure.help      |   11 ++++
 Documentation/preempt-locking.txt |  104 ++++++++++++++++++++++++++++++++++++++
 MAINTAINERS                       |    8 ++
 arch/arm/config.in                |    2
 arch/arm/kernel/entry-armv.S      |   40 ++++++++++++++
 arch/arm/tools/getconstants.c     |    6 ++
 arch/i386/config.in               |    8 ++
 arch/i386/kernel/entry.S          |   49 +++++++++++++++++
 arch/i386/kernel/i387.c           |    3 +
 arch/i386/kernel/smp.c            |   24 ++++++--
 arch/i386/kernel/traps.c          |    2
 arch/i386/lib/dec_and_lock.c      |    1
 arch/mips/config-shared.in        |    1
 arch/mips/kernel/i8259.c          |    1
 arch/mips/kernel/irq.c            |   29 ++++++++++
 arch/mips/mm/extable.c            |    1
 arch/ppc/config.in                |    2
 arch/ppc/kernel/entry.S           |   35 ++++++++++++
 arch/ppc/kernel/irq.c             |   28 ++++++++++
 arch/ppc/kernel/mk_defs.c         |    3 +
 arch/ppc/kernel/setup.c           |   14 +++++
 arch/ppc/lib/dec_and_lock.c       |    1
 arch/sh/config.in                 |    1
 arch/sh/kernel/entry.S            |  104 +++++++++++++++++++++++++++++++++++---
 arch/sh/kernel/irq.c              |   17 ++++++
 drivers/ieee1394/csr.c            |    1
 drivers/sound/sound_core.c        |    1
 fs/adfs/map.c                     |    1
 fs/exec.c                         |    2
 fs/fat/cache.c                    |    1
 fs/nls/nls_base.c                 |    1
 include/asm-arm/dma.h             |    1
 include/asm-arm/hardirq.h         |    1
 include/asm-arm/pgalloc.h         |    8 ++
 include/asm-arm/smplock.h         |   11 ++++
 include/asm-arm/softirq.h         |    8 +-
 include/asm-arm/system.h          |    7 ++
 include/asm-i386/hardirq.h        |   14 +++--
 include/asm-i386/highmem.h        |    7 ++
 include/asm-i386/hw_irq.h         |   19 +++++-
 include/asm-i386/i387.h           |    3 -
 include/asm-i386/pgalloc.h        |   12 ++++
 include/asm-i386/smplock.h        |   14 +++++
 include/asm-i386/softirq.h        |    8 +-
 include/asm-i386/spinlock.h       |   18 +++---
 include/asm-i386/system.h         |    7 ++
 include/asm-mips/smplock.h        |   15 +++++
 include/asm-mips/softirq.h        |    3 +
 include/asm-mips/system.h         |   14 +++++
 include/asm-ppc/dma.h             |    1
 include/asm-ppc/hardirq.h         |    1
 include/asm-ppc/highmem.h         |    6 +-
 include/asm-ppc/hw_irq.h          |    6 ++
 include/asm-ppc/mmu_context.h     |    4 +
 include/asm-ppc/pgalloc.h         |    9 +++
 include/asm-ppc/smplock.h         |   14 +++++
 include/asm-ppc/softirq.h         |   10 +++
 include/asm-sh/hardirq.h          |    2
 include/asm-sh/smplock.h          |   85 ++++++++++++++++++++++++++++---
 include/asm-sh/softirq.h          |    3 +
 include/asm-sh/system.h           |   13 ++++
 include/linux/brlock.h            |   10 +--
 include/linux/dcache.h            |   56 +++++++++++---------
 include/linux/fs_struct.h         |   13 +++-
 include/linux/sched.h             |   12 ++++
 include/linux/smp_lock.h          |    2
 include/linux/spinlock.h          |   82 +++++++++++++++++++++++++++--
 include/linux/tqueue.h            |   31 ++++++-----
 kernel/exit.c                     |    7 ++
 kernel/fork.c                     |    7 ++
 kernel/ksyms.c                    |    3 +
 kernel/sched.c                    |   48 ++++++++++++++++-
 lib/dec_and_lock.c                |    1
 mm/slab.c                         |    3 -
 net/core/dev.c                    |    6 ++
 net/core/skbuff.c                 |   30 ++++++----
 net/socket.c                      |    2
 net/sunrpc/pmap_clnt.c            |    1
 79 files changed, 1011 insertions(+), 131 deletions(-)

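Before the per-architecture hunks, it helps to keep the counting model they
all rely on in mind.  Each task carries a preempt_count; preemption is legal
only while it is zero.  The core macros live in the include/linux/spinlock.h
hunk (listed in the diffstat above but past the end of this excerpt); the
following is a simplified sketch of that model, not the literal hunk:

	#define preempt_get_count()	(current->preempt_count)
	#define preempt_is_disabled()	(preempt_get_count() != 0)

	#define preempt_disable() \
	do { \
		++current->preempt_count; \
		barrier(); \
	} while (0)

	#define preempt_enable_no_resched() \
	do { \
		--current->preempt_count; \
		barrier(); \
	} while (0)

	#define preempt_enable() \
	do { \
		preempt_enable_no_resched(); \
		if (!current->preempt_count && current->need_resched) \
			preempt_schedule(); \
	} while (0)

Everything below is an application of this: interrupt entry/exit bumps the
count, spinlocks bump it, and the entry-code hunks call preempt_schedule()
when an interrupt return finds the count at zero with need_resched set.
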
diff -urN linux-2.4.20/arch/arm/config.in linux/arch/arm/config.in
--- linux-2.4.20/arch/arm/config.in	2002-11-28 18:53:09.000000000 -0500
+++ linux/arch/arm/config.in	2002-12-11 02:34:47.000000000 -0500
@@ -372,7 +372,7 @@
 else
    define_bool CONFIG_DISCONTIGMEM n
 fi
-
+dep_bool 'Preemptible Kernel' CONFIG_PREEMPT $CONFIG_CPU_32
 endmenu
 
 mainmenu_option next_comment
diff -urN linux-2.4.20/arch/arm/kernel/entry-armv.S linux/arch/arm/kernel/entry-armv.S
--- linux-2.4.20/arch/arm/kernel/entry-armv.S	2002-08-02 20:39:42.000000000 -0400
+++ linux/arch/arm/kernel/entry-armv.S	2002-12-11 02:34:47.000000000 -0500
@@ -697,6 +697,12 @@
 	add	r4, sp, #S_SP
 	mov	r6, lr
 	stmia	r4, {r5, r6, r7, r8, r9}	@ save sp_SVC, lr_SVC, pc, cpsr, old_r0
+#ifdef CONFIG_PREEMPT
+	get_current_task r9
+	ldr	r8, [r9, #TSK_PREEMPT]
+	add	r8, r8, #1
+	str	r8, [r9, #TSK_PREEMPT]
+#endif
 1:	get_irqnr_and_base r0, r6, r5, lr
 	movne	r1, sp
 	@
@@ -704,6 +710,25 @@
 	@
 	adrsvc	ne, lr, 1b
 	bne	do_IRQ
+#ifdef CONFIG_PREEMPT
+2:	ldr	r8, [r9, #TSK_PREEMPT]
+	subs	r8, r8, #1
+	bne	3f
+	ldr	r7, [r9, #TSK_NEED_RESCHED]
+	teq	r7, #0
+	beq	3f
+	ldr	r6, .LCirqstat
+	ldr	r0, [r6, #IRQSTAT_BH_COUNT]
+	teq	r0, #0
+	bne	3f
+	mov	r0, #MODE_SVC
+	msr	cpsr_c, r0		@ enable interrupts
+	bl	SYMBOL_NAME(preempt_schedule)
+	mov	r0, #I_BIT | MODE_SVC
+	msr	cpsr_c, r0		@ disable interrupts
+	b	2b
+3:	str	r8, [r9, #TSK_PREEMPT]
+#endif
 	ldr	r0, [sp, #S_PSR]	@ irqs are already disabled
 	msr	spsr, r0
 	ldmia	sp, {r0 - pc}^		@ load r0 - pc, cpsr
@@ -761,6 +786,9 @@
 .LCprocfns:	.word	SYMBOL_NAME(processor)
 #endif
 .LCfp:	.word	SYMBOL_NAME(fp_enter)
+#ifdef CONFIG_PREEMPT
+.LCirqstat:	.word	SYMBOL_NAME(irq_stat)
+#endif
 
 	irq_prio_table
 
@@ -801,6 +829,12 @@
 	stmdb	r8, {sp, lr}^
 	alignment_trap r4, r7, __temp_irq
 	zero_fp
+	get_current_task tsk
+#ifdef CONFIG_PREEMPT
+	ldr	r0, [tsk, #TSK_PREEMPT]
+	add	r0, r0, #1
+	str	r0, [tsk, #TSK_PREEMPT]
+#endif
 1:	get_irqnr_and_base r0, r6, r5, lr
 	movne	r1, sp
 	adrsvc	ne, lr, 1b
@@ -808,8 +842,12 @@
 	@ routine called with r0 = irq number, r1 = struct pt_regs *
 	@
 	bne	do_IRQ
+#ifdef CONFIG_PREEMPT
+	ldr	r0, [tsk, #TSK_PREEMPT]
+	sub	r0, r0, #1
+	str	r0, [tsk, #TSK_PREEMPT]
+#endif
 	mov	why, #0
-	get_current_task tsk
 	b	ret_to_user
 
 	.align	5
diff -urN linux-2.4.20/arch/arm/tools/getconstants.c linux/arch/arm/tools/getconstants.c
--- linux-2.4.20/arch/arm/tools/getconstants.c	2001-10-11 12:04:57.000000000 -0400
+++ linux/arch/arm/tools/getconstants.c	2002-12-11 02:34:47.000000000 -0500
@@ -13,6 +13,7 @@
 
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
+#include <asm/hardirq.h>
 
 /*
  * Make sure that the compiler and target are compatible.
@@ -39,6 +40,11 @@
 DEFN("TSS_SAVE",	OFF_TSK(thread.save));
 DEFN("TSS_FPESAVE",	OFF_TSK(thread.fpstate.soft.save));
 
+#ifdef CONFIG_PREEMPT
+DEFN("TSK_PREEMPT",	OFF_TSK(preempt_count));
+DEFN("IRQSTAT_BH_COUNT", (unsigned long)&(((irq_cpustat_t *)0)->__local_bh_count));
+#endif
+
 #ifdef CONFIG_CPU_32
 DEFN("TSS_DOMAIN",	OFF_TSK(thread.domain));
 
diff -urN linux-2.4.20/arch/i386/config.in linux/arch/i386/config.in
--- linux-2.4.20/arch/i386/config.in	2002-11-28 18:53:09.000000000 -0500
+++ linux/arch/i386/config.in	2002-12-11 02:34:47.000000000 -0500
@@ -206,6 +206,7 @@
 bool 'Math emulation' CONFIG_MATH_EMULATION
 bool 'MTRR (Memory Type Range Register) support' CONFIG_MTRR
 bool 'Symmetric multi-processing support' CONFIG_SMP
+bool 'Preemptible Kernel' CONFIG_PREEMPT
 if [ "$CONFIG_SMP" != "y" ]; then
    bool 'Local APIC support on uniprocessors' CONFIG_X86_UP_APIC
    dep_bool 'IO-APIC support on uniprocessors' CONFIG_X86_UP_IOAPIC $CONFIG_X86_UP_APIC
@@ -224,9 +225,12 @@
    define_bool CONFIG_X86_TSC y
 fi
 
-if [ "$CONFIG_SMP" = "y" -a "$CONFIG_X86_CMPXCHG" = "y" ]; then
-   define_bool CONFIG_HAVE_DEC_LOCK y
+if [ "$CONFIG_SMP" = "y" -o "$CONFIG_PREEMPT" = "y" ]; then
+   if [ "$CONFIG_X86_CMPXCHG" = "y" ]; then
+      define_bool CONFIG_HAVE_DEC_LOCK y
+   fi
 fi
+
 endmenu
 
 mainmenu_option next_comment
diff -urN linux-2.4.20/arch/i386/kernel/entry.S linux/arch/i386/kernel/entry.S
--- linux-2.4.20/arch/i386/kernel/entry.S	2002-11-28 18:53:09.000000000 -0500
+++ linux/arch/i386/kernel/entry.S	2002-12-11 02:34:47.000000000 -0500
@@ -73,7 +73,7 @@
  * these are offsets into the task-struct.
  */
 state		=  0
-flags		=  4
+preempt_count	=  4
 sigpending	=  8
 addr_limit	= 12
 exec_domain	= 16
@@ -81,8 +81,28 @@
 tsk_ptrace	= 24
 processor	= 52
 
+/* These are offsets into the irq_stat structure.
+ * There is one per CPU and it is aligned to a 32
+ * byte boundary (we put that here as a shift count).
+ */
+irq_array_shift			= CONFIG_X86_L1_CACHE_SHIFT
+
+irq_stat_local_irq_count	= 4
+irq_stat_local_bh_count		= 8
+
 ENOSYS = 38
 
+#ifdef CONFIG_SMP
+#define GET_CPU_INDX	movl processor(%ebx),%eax;  \
+			shll $irq_array_shift,%eax
+#define GET_CURRENT_CPU_INDX GET_CURRENT(%ebx); \
+			     GET_CPU_INDX
+#define CPU_INDX (,%eax)
+#else
+#define GET_CPU_INDX
+#define GET_CURRENT_CPU_INDX GET_CURRENT(%ebx)
+#define CPU_INDX
+#endif
 
 #define SAVE_ALL \
 	cld; \
@@ -255,12 +275,30 @@
 	ALIGN
 ENTRY(ret_from_intr)
 	GET_CURRENT(%ebx)
+#ifdef CONFIG_PREEMPT
+	cli
+	decl preempt_count(%ebx)
+#endif
 ret_from_exception:
 	movl EFLAGS(%esp),%eax		# mix EFLAGS and CS
 	movb CS(%esp),%al
 	testl $(VM_MASK | 3),%eax	# return to VM86 mode or non-supervisor?
 	jne ret_from_sys_call
+#ifdef CONFIG_PREEMPT
+	cmpl $0,preempt_count(%ebx)
+	jnz restore_all
+	cmpl $0,need_resched(%ebx)
+	jz restore_all
+	movl SYMBOL_NAME(irq_stat)+irq_stat_local_bh_count CPU_INDX,%ecx
+	addl SYMBOL_NAME(irq_stat)+irq_stat_local_irq_count CPU_INDX,%ecx
+	jnz restore_all
+	incl preempt_count(%ebx)
+	sti
+	call SYMBOL_NAME(preempt_schedule)
+	jmp ret_from_intr
+#else
 	jmp restore_all
+#endif
 
 	ALIGN
 reschedule:
@@ -297,6 +335,9 @@
 	GET_CURRENT(%ebx)
 	call *%edi
 	addl $8,%esp
+#ifdef CONFIG_PREEMPT
+	cli
+#endif
 	jmp ret_from_exception
 
 ENTRY(coprocessor_error)
@@ -316,12 +357,18 @@
 	movl %cr0,%eax
 	testl $0x4,%eax			# EM (math emulation bit)
 	jne device_not_available_emulate
+#ifdef CONFIG_PREEMPT
+	cli
+#endif
 	call SYMBOL_NAME(math_state_restore)
 	jmp ret_from_exception
 device_not_available_emulate:
 	pushl $0			# temporary storage for ORIG_EIP
 	call SYMBOL_NAME(math_emulate)
 	addl $4,%esp
+#ifdef CONFIG_PREEMPT
+	cli
+#endif
 	jmp ret_from_exception
 
 ENTRY(debug)
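In C terms, the ret_from_intr/ret_from_exception path added above behaves
roughly as follows.  This is a readability paraphrase, not code from the
patch; irq_count and bh_count stand in for the two irq_stat loads:

	/* interrupts are disabled (cli) on the way in */
	current->preempt_count--;
	while (current->preempt_count == 0 &&
	       current->need_resched &&
	       irq_count + bh_count == 0) {
		current->preempt_count++;	/* bar nested preemption */
		sti();				/* preempt_schedule() can block */
		preempt_schedule();
		cli();				/* re-run the full test */
		current->preempt_count--;
	}

The jmp back to ret_from_intr is what closes the loop: an interrupt arriving
during preempt_schedule() gets its own chance to set need_resched, and the
test is simply repeated until it fails.
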
diff -urN linux-2.4.20/arch/i386/kernel/i387.c linux/arch/i386/kernel/i387.c
--- linux-2.4.20/arch/i386/kernel/i387.c	2002-08-02 20:39:42.000000000 -0400
+++ linux/arch/i386/kernel/i387.c	2002-12-11 02:34:47.000000000 -0500
@@ -10,6 +10,7 @@
 
 #include <linux/config.h>
 #include <linux/sched.h>
+#include <linux/spinlock.h>
 #include <linux/init.h>
 #include <asm/processor.h>
 #include <asm/i387.h>
@@ -89,6 +90,8 @@
 {
 	struct task_struct *tsk = current;
 
+	preempt_disable();
+
 	if (tsk->flags & PF_USEDFPU) {
 		__save_init_fpu(tsk);
 		return;
diff -urN linux-2.4.20/arch/i386/kernel/smp.c linux/arch/i386/kernel/smp.c
--- linux-2.4.20/arch/i386/kernel/smp.c	2002-11-28 18:53:09.000000000 -0500
+++ linux/arch/i386/kernel/smp.c	2002-12-11 02:34:47.000000000 -0500
@@ -357,10 +357,13 @@
 
 asmlinkage void smp_invalidate_interrupt (void)
 {
-	unsigned long cpu = smp_processor_id();
+	unsigned long cpu;
+
+	preempt_disable();
 
+	cpu = smp_processor_id();
 	if (!test_bit(cpu, &flush_cpumask))
-		return;
+		goto out;
 	/*
 	 * This was a BUG() but until someone can quote me the
 	 * line from the intel manual that guarantees an IPI to
@@ -381,6 +384,8 @@
 	}
 	ack_APIC_irq();
 	clear_bit(cpu, &flush_cpumask);
+out:
+	preempt_enable();
 }
 
 static void flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
@@ -430,17 +435,22 @@
 void flush_tlb_current_task(void)
 {
 	struct mm_struct *mm = current->mm;
-	unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
+	unsigned long cpu_mask;
 
+	preempt_disable();
+	cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
 	local_flush_tlb();
 	if (cpu_mask)
 		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+	preempt_enable();
 }
 
 void flush_tlb_mm (struct mm_struct * mm)
 {
-	unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
+	unsigned long cpu_mask;
 
+	preempt_disable();
+	cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
 	if (current->active_mm == mm) {
 		if (current->mm)
 			local_flush_tlb();
@@ -449,13 +459,16 @@
 	}
 	if (cpu_mask)
 		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+	preempt_enable();
 }
 
 void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
+	unsigned long cpu_mask;
 
+	preempt_disable();
+	cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
 	if (current->active_mm == mm) {
 		if (current->mm)
 			__flush_tlb_one(va);
@@ -465,6 +478,7 @@
 
 	if (cpu_mask)
 		flush_tlb_others(cpu_mask, mm, va);
+	preempt_enable();
 }
 
 static inline void do_flush_tlb_all_local(void)
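The smp.c hunks above are all instances of the same per-CPU rule: a value
derived from smp_processor_id() must not be carried across a possible
preemption point, or the task may resume on another CPU.  Side by side
(an illustrative sketch, not a hunk from the patch):

	/* Unsafe under CONFIG_PREEMPT: the task can migrate between
	 * computing cpu_mask and flushing the local TLB. */
	cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
	local_flush_tlb();

	/* Safe: preemption is held off across every use of the id. */
	preempt_disable();
	cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
	local_flush_tlb();
	if (cpu_mask)
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
	preempt_enable();
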
diff -urN linux-2.4.20/arch/i386/kernel/traps.c linux/arch/i386/kernel/traps.c
--- linux-2.4.20/arch/i386/kernel/traps.c	2002-11-28 18:53:09.000000000 -0500
+++ linux/arch/i386/kernel/traps.c	2002-12-11 02:34:47.000000000 -0500
@@ -751,6 +751,8 @@
  *
  * Careful.. There are problems with IBM-designed IRQ13 behaviour.
  * Don't touch unless you *really* know how it works.
+ *
+ * Must be called with kernel preemption disabled.
  */
 asmlinkage void math_state_restore(struct pt_regs regs)
 {
diff -urN linux-2.4.20/arch/i386/lib/dec_and_lock.c linux/arch/i386/lib/dec_and_lock.c
--- linux-2.4.20/arch/i386/lib/dec_and_lock.c	2000-07-07 21:20:16.000000000 -0400
+++ linux/arch/i386/lib/dec_and_lock.c	2002-12-11 02:34:47.000000000 -0500
@@ -8,6 +8,7 @@
  */
 
 #include <linux/spinlock.h>
+#include <linux/sched.h>
 #include <asm/atomic.h>
 
 int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
diff -urN linux-2.4.20/arch/mips/config-shared.in linux/arch/mips/config-shared.in
--- linux-2.4.20/arch/mips/config-shared.in	2002-11-28 18:53:09.000000000 -0500
+++ linux/arch/mips/config-shared.in	2002-12-11 02:34:47.000000000 -0500
@@ -615,6 +615,7 @@
    define_bool CONFIG_HOTPLUG_PCI n
 fi
 
+dep_bool 'Preemptible Kernel' CONFIG_PREEMPT $CONFIG_NEW_IRQ
 bool 'System V IPC' CONFIG_SYSVIPC
 bool 'BSD Process Accounting' CONFIG_BSD_PROCESS_ACCT
 bool 'Sysctl support' CONFIG_SYSCTL
diff -urN linux-2.4.20/arch/mips/kernel/i8259.c linux/arch/mips/kernel/i8259.c
--- linux-2.4.20/arch/mips/kernel/i8259.c	2002-11-28 18:53:10.000000000 -0500
+++ linux/arch/mips/kernel/i8259.c	2002-12-11 02:34:47.000000000 -0500
@@ -8,6 +8,7 @@
  * Copyright (C) 1992 Linus Torvalds
  * Copyright (C) 1994 - 2000 Ralf Baechle
  */
+#include <linux/sched.h>
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/ioport.h>
diff -urN linux-2.4.20/arch/mips/kernel/irq.c linux/arch/mips/kernel/irq.c
--- linux-2.4.20/arch/mips/kernel/irq.c	2002-11-28 18:53:10.000000000 -0500
+++ linux/arch/mips/kernel/irq.c	2002-12-11 02:34:47.000000000 -0500
@@ -8,6 +8,8 @@
  * Copyright (C) 1992 Linus Torvalds
  * Copyright (C) 1994 - 2000 Ralf Baechle
  */
+
+#include <linux/sched.h>
 #include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/delay.h>
@@ -19,11 +21,13 @@
 #include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/random.h>
-#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/ptrace.h>
 
 #include <asm/atomic.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
+#include <asm/debug.h>
 
 /*
  * Controller mappings for all interrupt sources:
@@ -429,6 +433,8 @@
 	struct irqaction * action;
 	unsigned int status;
 
+	preempt_disable();
+
 	kstat.irqs[cpu][irq]++;
 	spin_lock(&desc->lock);
 	desc->handler->ack(irq);
@@ -490,6 +496,27 @@
 
 	if (softirq_pending(cpu))
 		do_softirq();
+
+#if defined(CONFIG_PREEMPT)
+	while (--current->preempt_count == 0) {
+		db_assert(intr_off());
+		db_assert(!in_interrupt());
+
+		if (current->need_resched == 0) {
+			break;
+		}
+
+		current->preempt_count++;
+		sti();
+		if (user_mode(regs)) {
+			schedule();
+		} else {
+			preempt_schedule();
+		}
+		cli();
+	}
+#endif
+
 	return 1;
 }
 
diff -urN linux-2.4.20/arch/mips/mm/extable.c linux/arch/mips/mm/extable.c
--- linux-2.4.20/arch/mips/mm/extable.c	2002-11-28 18:53:10.000000000 -0500
+++ linux/arch/mips/mm/extable.c	2002-12-11 02:34:47.000000000 -0500
@@ -3,6 +3,7 @@
  */
 #include <linux/config.h>
 #include <linux/module.h>
+#include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <asm/uaccess.h>
 
diff -urN linux-2.4.20/arch/ppc/config.in linux/arch/ppc/config.in
--- linux-2.4.20/arch/ppc/config.in	2002-11-28 18:53:11.000000000 -0500
+++ linux/arch/ppc/config.in	2002-12-11 02:34:47.000000000 -0500
@@ -112,6 +112,8 @@
   bool '  Distribute interrupts on all CPUs by default' CONFIG_IRQ_ALL_CPUS
 fi
 
+bool 'Preemptible kernel support' CONFIG_PREEMPT
+
 if [ "$CONFIG_6xx" = "y" -a "$CONFIG_8260" = "n" ];then
   bool 'AltiVec Support' CONFIG_ALTIVEC
   bool 'Thermal Management Support' CONFIG_TAU
diff -urN linux-2.4.20/arch/ppc/kernel/entry.S linux/arch/ppc/kernel/entry.S
--- linux-2.4.20/arch/ppc/kernel/entry.S	2002-11-28 18:53:11.000000000 -0500
+++ linux/arch/ppc/kernel/entry.S	2002-12-11 02:34:47.000000000 -0500
@@ -278,6 +278,41 @@
 	 */
 	cmpi	0,r3,0
 	beq	restore
+#ifdef CONFIG_PREEMPT
+	lwz	r3,PREEMPT_COUNT(r2)
+	cmpi	0,r3,1
+	bge	ret_from_except
+	lwz	r5,_MSR(r1)
+	andi.	r5,r5,MSR_PR
+	bne	do_signal_ret
+	lwz	r5,NEED_RESCHED(r2)
+	cmpi	0,r5,0
+	beq	ret_from_except
+	lis	r3,irq_stat@h
+	ori	r3,r3,irq_stat@l
+	lwz	r5,4(r3)
+	lwz	r3,8(r3)
+	add	r3,r3,r5
+	cmpi	0,r3,0
+	bne	ret_from_except
+	lwz	r3,PREEMPT_COUNT(r2)
+	addi	r3,r3,1
+	stw	r3,PREEMPT_COUNT(r2)
+	mfmsr	r0
+	ori	r0,r0,MSR_EE
+	mtmsr	r0
+	sync
+	bl	preempt_schedule
+	mfmsr	r0
+	rlwinm	r0,r0,0,17,15
+	mtmsr	r0
+	sync
+	lwz	r3,PREEMPT_COUNT(r2)
+	subi	r3,r3,1
+	stw	r3,PREEMPT_COUNT(r2)
+	li	r3,1
+	b	ret_from_intercept
+#endif /* CONFIG_PREEMPT */
 	.globl	ret_from_except
 ret_from_except:
 	lwz	r3,_MSR(r1)	/* Returning to user mode? */
diff -urN linux-2.4.20/arch/ppc/kernel/irq.c linux/arch/ppc/kernel/irq.c
--- linux-2.4.20/arch/ppc/kernel/irq.c	2002-11-28 18:53:11.000000000 -0500
+++ linux/arch/ppc/kernel/irq.c	2002-12-11 02:34:47.000000000 -0500
@@ -556,6 +556,34 @@
 	return 1; /* lets ret_from_int know we can do checks */
 }
 
+#ifdef CONFIG_PREEMPT
+int
+preempt_intercept(struct pt_regs *regs)
+{
+	int ret;
+
+	preempt_disable();
+
+	switch (regs->trap) {
+	case 0x500:
+		ret = do_IRQ(regs);
+		break;
+#ifndef CONFIG_4xx
+	case 0x900:
+#else
+	case 0x1000:
+#endif
+		ret = timer_interrupt(regs);
+		break;
+	default:
+		BUG();
+	}
+
+	preempt_enable();
+	return ret;
+}
+#endif /* CONFIG_PREEMPT */
+
 unsigned long probe_irq_on (void)
 {
 	return 0;
diff -urN linux-2.4.20/arch/ppc/kernel/mk_defs.c linux/arch/ppc/kernel/mk_defs.c
--- linux-2.4.20/arch/ppc/kernel/mk_defs.c	2001-08-28 09:58:33.000000000 -0400
+++ linux/arch/ppc/kernel/mk_defs.c	2002-12-11 02:34:47.000000000 -0500
@@ -42,6 +42,9 @@
 	DEFINE(SIGPENDING, offsetof(struct task_struct, sigpending));
 	DEFINE(THREAD, offsetof(struct task_struct, thread));
 	DEFINE(MM, offsetof(struct task_struct, mm));
+#ifdef CONFIG_PREEMPT
+	DEFINE(PREEMPT_COUNT, offsetof(struct task_struct, preempt_count));
+#endif
 	DEFINE(ACTIVE_MM, offsetof(struct task_struct, active_mm));
 	DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
 	DEFINE(KSP, offsetof(struct thread_struct, ksp));
diff -urN linux-2.4.20/arch/ppc/kernel/setup.c linux/arch/ppc/kernel/setup.c
--- linux-2.4.20/arch/ppc/kernel/setup.c	2002-11-28 18:53:11.000000000 -0500
+++ linux/arch/ppc/kernel/setup.c	2002-12-11 02:34:47.000000000 -0500
@@ -498,6 +498,20 @@
 	strcpy(cmd_line, CONFIG_CMDLINE);
 #endif /* CONFIG_CMDLINE */
 
+#ifdef CONFIG_PREEMPT
+	/* Override the irq routines for external & timer interrupts here,
+	 * as the MMU has only been minimally set up at this point and
+	 * there are no protections on page zero.
+	 */
+	{
+		extern int preempt_intercept(struct pt_regs *);
+
+		do_IRQ_intercept = (unsigned long) &preempt_intercept;
+		timer_interrupt_intercept = (unsigned long) &preempt_intercept;
+	}
+#endif /* CONFIG_PREEMPT */
+
 	platform_init(r3, r4, r5, r6, r7);
 
 	if (ppc_md.progress)
diff -urN linux-2.4.20/arch/ppc/lib/dec_and_lock.c linux/arch/ppc/lib/dec_and_lock.c
--- linux-2.4.20/arch/ppc/lib/dec_and_lock.c	2001-11-16 13:10:08.000000000 -0500
+++ linux/arch/ppc/lib/dec_and_lock.c	2002-12-11 02:34:47.000000000 -0500
@@ -1,4 +1,5 @@
 #include <linux/module.h>
+#include <linux/sched.h>
 #include <linux/spinlock.h>
 #include <asm/atomic.h>
 #include <asm/system.h>
diff -urN linux-2.4.20/arch/sh/config.in linux/arch/sh/config.in
--- linux-2.4.20/arch/sh/config.in	2002-11-28 18:53:11.000000000 -0500
+++ linux/arch/sh/config.in	2002-12-11 02:34:47.000000000 -0500
@@ -124,6 +124,7 @@
    hex 'Physical memory start address' CONFIG_MEMORY_START 08000000
    hex 'Physical memory size' CONFIG_MEMORY_SIZE 00400000
 fi
+bool 'Preemptible Kernel' CONFIG_PREEMPT
 endmenu
 
 if [ "$CONFIG_SH_HP690" = "y" ]; then
diff -urN linux-2.4.20/arch/sh/kernel/entry.S linux/arch/sh/kernel/entry.S
--- linux-2.4.20/arch/sh/kernel/entry.S	2002-08-02 20:39:43.000000000 -0400
+++ linux/arch/sh/kernel/entry.S	2002-12-11 02:34:47.000000000 -0500
@@ -60,10 +60,18 @@
 /*
  * These are offsets into the task-struct.
  */
-flags		=  4
+preempt_count	=  4
 sigpending	=  8
 need_resched	= 20
 tsk_ptrace	= 24
+flags		= 84
+
+/*
+ * These offsets are into irq_stat.
+ * (Find irq_cpustat_t in asm-sh/hardirq.h)
+ */
+local_irq_count	=  8
+local_bh_count	= 12
 
 PT_TRACESYS  = 0x00000002
 PF_USEDFPU   = 0x00100000
@@ -143,7 +151,7 @@
 	mov.l	__INV_IMASK, r11;	\
 	stc	sr, r10;		\
 	and	r11, r10;		\
-	stc	k_g_imask, r11;	\
+	stc	k_g_imask, r11;		\
 	or	r11, r10;		\
 	ldc	r10, sr
 
@@ -304,8 +312,8 @@
 	mov.l	@(tsk_ptrace,r0), r0	! Is current PTRACE_SYSCALL'd?
 	mov	#PT_TRACESYS, r1
 	tst	r1, r0
-	bt	ret_from_syscall
-	bra	syscall_ret_trace
+	bf	syscall_ret_trace
+	bra	ret_from_syscall
 	 nop
 
 	.align	2
@@ -505,8 +513,6 @@
 	.long	syscall_ret_trace
 __syscall_ret:
 	.long	syscall_ret
-__INV_IMASK:
-	.long	0xffffff0f	! ~(IMASK)
 
 
 	.align	2
@@ -518,7 +524,84 @@
 	.align	2
 1:	.long	SYMBOL_NAME(schedule)
 
+#ifdef CONFIG_PREEMPT
+	!
+	! Returning from interrupt during kernel mode: check if
+	! preempt_schedule should be called. If need_resched flag
+	! is set, preempt_count is zero, and we're not currently
+	! in an interrupt handler (local irq or bottom half) then
+	! call preempt_schedule.
+	!
+	! Increment preempt_count to prevent a nested interrupt
+	! from reentering preempt_schedule, then decrement after
+	! and drop through to regular interrupt return which will
+	! jump back and check again in case such an interrupt did
+	! come in (and didn't preempt due to preempt_count).
+	!
+	! NOTE: because we just checked that preempt_count was
+	! zero before getting to the call, can't we use immediate
+	! values (1 and 0) rather than inc/dec? Also, rather than
+	! drop through to ret_from_irq, we already know this thread
+	! is kernel mode, can't we go direct to ret_from_kirq? In
+	! fact, with proper interrupt nesting and so forth could
+	! the loop simply be on the need_resched w/o checking the
+	! other stuff again? Optimize later...
+	!
+	.align	2
+ret_from_kirq:
+	! Nonzero preempt_count prevents scheduling
+	stc	k_current, r1
+	mov.l	@(preempt_count,r1), r0
+	cmp/eq	#0, r0
+	bf	restore_all
+	! Zero need_resched prevents scheduling
+	mov.l	@(need_resched,r1), r0
+	cmp/eq	#0, r0
+	bt	restore_all
+	! If in_interrupt(), don't schedule
+	mov.l	__irq_stat, r1
+	mov.l	@(local_irq_count,r1), r0
+	mov.l	@(local_bh_count,r1), r1
+	or	r1, r0
+	cmp/eq	#0, r0
+	bf	restore_all
+	! Allow scheduling using preempt_schedule
+	! Adjust preempt_count and SR as needed.
+	stc	k_current, r1
+	mov.l	@(preempt_count,r1), r0	! Could replace this ...
+	add	#1, r0			! ... and this w/mov #1?
+	mov.l	r0, @(preempt_count,r1)
+	STI()
+	mov.l	__preempt_schedule, r0
+	jsr	@r0
+	 nop
+	/* CLI */
+	stc	sr, r0
+	or	#0xf0, r0
+	ldc	r0, sr
+	!
+	stc	k_current, r1
+	mov.l	@(preempt_count,r1), r0	! Could replace this ...
+	add	#-1, r0			! ... and this w/mov #0?
+	mov.l	r0, @(preempt_count,r1)
+	! Maybe should bra ret_from_kirq, or loop over need_resched?
+	! For now, fall through to ret_from_irq again...
+#endif /* CONFIG_PREEMPT */
+
 ret_from_irq:
+	mov	#OFF_SR, r0
+	mov.l	@(r0,r15), r0	! get status register
+	shll	r0
+	shll	r0		! kernel space?
+#ifndef CONFIG_PREEMPT
+	bt	restore_all	! Yes, it's from kernel, go back soon
+#else /* CONFIG_PREEMPT */
+	bt	ret_from_kirq	! From kernel: maybe preempt_schedule
+#endif /* CONFIG_PREEMPT */
+	!
+	bra	ret_from_syscall
+	 nop
+
 ret_from_exception:
 	mov	#OFF_SR, r0
 	mov.l	@(r0,r15), r0	! get status register
@@ -564,6 +647,13 @@
 	.long	SYMBOL_NAME(do_signal)
 __irq_stat:
 	.long	SYMBOL_NAME(irq_stat)
+#ifdef CONFIG_PREEMPT
+__preempt_schedule:
+	.long	SYMBOL_NAME(preempt_schedule)
+#endif /* CONFIG_PREEMPT */
+__INV_IMASK:
+	.long	0xffffff0f	! ~(IMASK)
+
 
 	.align	2
 restore_all:
@@ -679,7 +769,7 @@
 __fpu_prepare_fd:
 	.long	SYMBOL_NAME(fpu_prepare_fd)
 __init_task_flags:
-	.long	SYMBOL_NAME(init_task_union)+4
+	.long	SYMBOL_NAME(init_task_union)+flags
 __PF_USEDFPU:
 	.long	PF_USEDFPU
 #endif
diff -urN linux-2.4.20/arch/sh/kernel/irq.c linux/arch/sh/kernel/irq.c
--- linux-2.4.20/arch/sh/kernel/irq.c	2001-09-08 15:29:09.000000000 -0400
+++ linux/arch/sh/kernel/irq.c	2002-12-11 02:34:47.000000000 -0500
@@ -229,6 +229,14 @@
 	struct irqaction * action;
 	unsigned int status;
 
+	/*
+	 * We are about to actually call the handlers, and
+	 * interrupts may get reenabled while they run... bump
+	 * preempt_count to prevent any preemption while a
+	 * handler is pending...
+	 */
+	preempt_disable();
+
 	/* Get IRQ number */
 	asm volatile("stc	r2_bank, %0\n\t"
 		     "shlr2	%0\n\t"
@@ -298,8 +306,17 @@
 	desc->handler->end(irq);
 	spin_unlock(&desc->lock);
 
+
 	if (softirq_pending(cpu))
 		do_softirq();
+
+	/*
+	 * We're done with the handlers and interrupts should be
+	 * disabled again by now; decrement preempt_count so that
+	 * preemption may be allowed again once we return.
+	 */
+	preempt_enable_no_resched();
+
 	return 1;
 }
 
diff -urN linux-2.4.20/CREDITS linux/CREDITS
--- linux-2.4.20/CREDITS	2002-11-28 18:53:08.000000000 -0500
+++ linux/CREDITS	2002-12-11 02:34:47.000000000 -0500
@@ -1001,8 +1001,8 @@
 
 N: Nigel Gamble
 E: nigel@nrg.org
-E: nigel@sgi.com
 D: Interrupt-driven printer driver
+D: Preemptible kernel
 S: 120 Alley Way
 S: Mountain View, California 94040
 S: USA
diff -urN linux-2.4.20/Documentation/Configure.help linux/Documentation/Configure.help
--- linux-2.4.20/Documentation/Configure.help	2002-11-28 18:53:08.000000000 -0500
+++ linux/Documentation/Configure.help	2002-12-11 02:34:47.000000000 -0500
@@ -279,6 +279,17 @@
   If you have a system with several CPUs, you do not need to say Y
   here: the local APIC will be used automatically.
 
+Preemptible Kernel
+CONFIG_PREEMPT
+  This option reduces the latency of the kernel when reacting to
+  real-time or interactive events by allowing a low priority process to
+  be preempted even if it is in kernel mode executing a system call.
+  This allows applications to run more reliably even when the system is
+  under load.
+
+  Say Y here if you are building a kernel for a desktop, embedded or
+  real-time system.  Say N if you are unsure.
+
 Kernel math emulation
 CONFIG_MATH_EMULATION
   Linux can emulate a math coprocessor (used for floating point
diff -urN linux-2.4.20/Documentation/preempt-locking.txt linux/Documentation/preempt-locking.txt
--- linux-2.4.20/Documentation/preempt-locking.txt	1969-12-31 19:00:00.000000000 -0500
+++ linux/Documentation/preempt-locking.txt	2002-12-11 02:34:47.000000000 -0500
@@ -0,0 +1,104 @@
+		  Proper Locking Under a Preemptible Kernel:
+		       Keeping Kernel Code Preempt-Safe
+			  Robert Love <rml@tech9.net>
+			   Last Updated: 22 Jan 2002
+
+
+INTRODUCTION
+
+
+A preemptible kernel creates new locking issues.  The issues are the same as
+those under SMP: concurrency and reentrancy.  Thankfully, the Linux preemptible
+kernel model leverages existing SMP locking mechanisms.  Thus, the kernel
+requires explicit additional locking for very few additional situations.
+
+This document is for all kernel hackers.  Developing code in the kernel
+requires protecting these situations.
+
+
+RULE #1: Per-CPU data structures need explicit protection
+
+
+Two similar problems arise.  An example code snippet:
+
+	struct this_needs_locking tux[NR_CPUS];
+	tux[smp_processor_id()] = some_value;
+	/* task is preempted here... */
+	something = tux[smp_processor_id()];
+
+First, since the data is per-CPU, it may not have explicit SMP locking, yet
+it now requires protection.  Second, when a preempted task is finally
+rescheduled, the previous value of smp_processor_id may no longer equal the
+current one.  You must protect these situations by disabling preemption
+around them.
+
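+A sketch of the fixed snippet (illustrative only, not code from the
+kernel):
+
+	struct this_needs_locking tux[NR_CPUS];
+
+	preempt_disable();
+	tux[smp_processor_id()] = some_value;
+	/* preemption can no longer occur here... */
+	something = tux[smp_processor_id()];
+	preempt_enable();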
+
+RULE #2: CPU state must be protected.
+
+
+Under preemption, the state of the CPU must be protected.  This is arch-
+dependent, but includes CPU structures and state not preserved over a context
+switch.  For example, on x86, entering and exiting FPU mode is now a critical
+section that must occur while preemption is disabled.  Think what would happen
+if the kernel is executing a floating-point instruction and is then preempted.
+Remember, the kernel does not save FPU state except for user tasks.  Therefore,
+upon preemption, the FPU registers will be sold to the lowest bidder.  Thus,
+preemption must be disabled around such regions.
+
+Note, some FPU functions are already explicitly preempt safe.  For example,
+kernel_fpu_begin and kernel_fpu_end will disable and enable preemption.
+However, math_state_restore must be called with preemption disabled.
+
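+For example, kernel-mode FPU use is bracketed like this (a sketch of the
+intended usage; under this patch kernel_fpu_begin() disables preemption
+before touching the FPU and kernel_fpu_end() re-enables it):
+
+	kernel_fpu_begin();
+	/* ... FPU/MMX/SSE instructions ... */
+	kernel_fpu_end();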
+
+RULE #3: Lock acquire and release must be performed by same task
+
+
+A lock acquired in one task must be released by the same task.  This
+means you can't do oddball things like acquire a lock and go off to
+play while another task releases it.  If you want to do something like
+this, acquire and release the lock in the same code path and have the
+caller wait on an event signaled by the other task.
+
+
+SOLUTION
+
+
+Data protection under preemption is achieved by disabling preemption for the
+duration of the critical region.
+
+preempt_enable()		decrement the preempt counter
+preempt_disable()		increment the preempt counter
+preempt_enable_no_resched()	decrement, but do not immediately preempt
+preempt_get_count()		return the preempt counter
+
+The functions are nestable.  In other words, you can call preempt_disable
+n times in a code path, and preemption will not be reenabled until the n-th
+call to preempt_enable.  The preempt statements compile away to nothing if
+preemption is not enabled.
+
+Note that you do not need to explicitly prevent preemption if you are holding
+any locks or interrupts are disabled, since preemption is implicitly disabled
+in those cases.
+
+Example:
+
+	cpucache_t *cc; /* this is per-CPU */
+	preempt_disable();
+	cc = cc_data(searchp);
+	if (cc && cc->avail) {
+		__free_block(searchp, cc_entry(cc), cc->avail);
+		cc->avail = 0;
+	}
+	preempt_enable();
+	return 0;
+
+Notice how the preemption statements must encompass every reference to the
+critical variables.  Another example:
+
+	int buf[NR_CPUS];
+	set_cpu_val(buf);
+	if (buf[smp_processor_id()] == -1) printk(KERN_INFO "wee!\n");
+	spin_lock(&buf_lock);
+	/* ... */
+
+This code is not preempt-safe, but see how easily we can fix it by simply
+moving the spin_lock up two lines.
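+That is (illustrative):
+
+	int buf[NR_CPUS];
+	spin_lock(&buf_lock);
+	set_cpu_val(buf);
+	if (buf[smp_processor_id()] == -1) printk(KERN_INFO "wee!\n");
+	/* ... */
+
+Under this patch spin_lock() itself disables preemption, so the per-CPU
+access is covered for as long as the lock is held.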
diff -urN linux-2.4.20/drivers/ieee1394/csr.c linux/drivers/ieee1394/csr.c
--- linux-2.4.20/drivers/ieee1394/csr.c	2002-11-28 18:53:13.000000000 -0500
+++ linux/drivers/ieee1394/csr.c	2002-12-11 02:34:47.000000000 -0500
@@ -10,6 +10,7 @@
  */
 
 #include <linux/string.h>
+#include <linux/sched.h>
 
 #include "ieee1394_types.h"
 #include "hosts.h"
diff -urN linux-2.4.20/drivers/sound/sound_core.c linux/drivers/sound/sound_core.c
--- linux-2.4.20/drivers/sound/sound_core.c	2001-09-30 15:26:08.000000000 -0400
+++ linux/drivers/sound/sound_core.c	2002-12-11 02:34:47.000000000 -0500
@@ -37,6 +37,7 @@
 #include <linux/config.h>
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
diff -urN linux-2.4.20/fs/adfs/map.c linux/fs/adfs/map.c
--- linux-2.4.20/fs/adfs/map.c	2001-10-25 16:53:53.000000000 -0400
+++ linux/fs/adfs/map.c	2002-12-11 02:34:47.000000000 -0500
@@ -12,6 +12,7 @@
 #include <linux/fs.h>
 #include <linux/adfs_fs.h>
 #include <linux/spinlock.h>
+#include <linux/sched.h>
 
 #include "adfs.h"
 
diff -urN linux-2.4.20/fs/exec.c linux/fs/exec.c
--- linux-2.4.20/fs/exec.c	2002-11-28 18:53:15.000000000 -0500
+++ linux/fs/exec.c	2002-12-11 02:34:47.000000000 -0500
@@ -440,8 +440,8 @@
 	active_mm = current->active_mm;
 	current->mm = mm;
 	current->active_mm = mm;
-	task_unlock(current);
 	activate_mm(active_mm, mm);
+	task_unlock(current);
 	mm_release();
 	if (old_mm) {
 		if (active_mm != old_mm) BUG();
diff -urN linux-2.4.20/fs/fat/cache.c linux/fs/fat/cache.c
--- linux-2.4.20/fs/fat/cache.c	2001-10-12 16:48:42.000000000 -0400
+++ linux/fs/fat/cache.c	2002-12-11 02:34:47.000000000 -0500
@@ -14,6 +14,7 @@
 #include <linux/string.h>
 #include <linux/stat.h>
 #include <linux/fat_cvf.h>
+#include <linux/sched.h>
 
 #if 0
 #  define PRINTK(x) printk x
diff -urN linux-2.4.20/fs/nls/nls_base.c linux/fs/nls/nls_base.c
--- linux-2.4.20/fs/nls/nls_base.c	2002-08-02 20:39:45.000000000 -0400
+++ linux/fs/nls/nls_base.c	2002-12-11 02:34:47.000000000 -0500
@@ -18,6 +18,7 @@
 #ifdef CONFIG_KMOD
 #include <linux/kmod.h>
 #endif
+#include <linux/sched.h>
 #include <linux/spinlock.h>
 
 static struct nls_table *tables;
diff -urN linux-2.4.20/include/asm-arm/dma.h linux/include/asm-arm/dma.h
--- linux-2.4.20/include/asm-arm/dma.h	2001-08-12 14:14:00.000000000 -0400
+++ linux/include/asm-arm/dma.h	2002-12-11 02:34:47.000000000 -0500
@@ -5,6 +5,7 @@
 
 #include <linux/config.h>
 #include <linux/spinlock.h>
+#include <linux/sched.h>
 #include <asm/system.h>
 #include <asm/memory.h>
 #include <asm/scatterlist.h>
diff -urN linux-2.4.20/include/asm-arm/hardirq.h linux/include/asm-arm/hardirq.h
--- linux-2.4.20/include/asm-arm/hardirq.h	2001-10-11 12:04:57.000000000 -0400
+++ linux/include/asm-arm/hardirq.h	2002-12-11 02:34:47.000000000 -0500
@@ -34,6 +34,7 @@
 #define irq_exit(cpu,irq)	(local_irq_count(cpu)--)
 
 #define synchronize_irq()	do { } while (0)
+#define release_irqlock(cpu)	do { } while (0)
 
 #else
 #error SMP not supported
diff -urN linux-2.4.20/include/asm-arm/pgalloc.h linux/include/asm-arm/pgalloc.h
--- linux-2.4.20/include/asm-arm/pgalloc.h	2001-08-12 14:14:00.000000000 -0400
+++ linux/include/asm-arm/pgalloc.h	2002-12-11 02:34:47.000000000 -0500
@@ -57,40 +57,48 @@
 {
 	unsigned long *ret;
 
+	preempt_disable();
 	if ((ret = pgd_quicklist) != NULL) {
 		pgd_quicklist = (unsigned long *)__pgd_next(ret);
 		ret[1] = ret[2];
 		clean_dcache_entry(ret + 1);
 		pgtable_cache_size--;
 	}
+	preempt_enable();
 	return (pgd_t *)ret;
 }
 
 static inline void free_pgd_fast(pgd_t *pgd)
 {
+	preempt_disable();
 	__pgd_next(pgd) = (unsigned long) pgd_quicklist;
 	pgd_quicklist = (unsigned long *) pgd;
 	pgtable_cache_size++;
+	preempt_enable();
 }
 
 static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
 {
 	unsigned long *ret;
 
+	preempt_disable();
 	if ((ret = pte_quicklist) != NULL) {
 		pte_quicklist = (unsigned long *)__pte_next(ret);
 		ret[0] = 0;
 		clean_dcache_entry(ret);
 		pgtable_cache_size--;
 	}
+	preempt_enable();
 	return (pte_t *)ret;
 }
 
 static inline void free_pte_fast(pte_t *pte)
 {
+	preempt_disable();
 	__pte_next(pte) = (unsigned long) pte_quicklist;
 	pte_quicklist = (unsigned long *) pte;
 	pgtable_cache_size++;
+	preempt_enable();
 }
 
 #else	/* CONFIG_NO_PGT_CACHE */
diff -urN linux-2.4.20/include/asm-arm/smplock.h linux/include/asm-arm/smplock.h
--- linux-2.4.20/include/asm-arm/smplock.h	2001-08-12 14:14:00.000000000 -0400
+++ linux/include/asm-arm/smplock.h	2002-12-11 02:34:47.000000000 -0500
@@ -3,12 +3,17 @@
  *
  * Default SMP lock implementation
  */
+#include <linux/config.h>
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
 
 extern spinlock_t kernel_flag;
 
+#ifdef CONFIG_PREEMPT
+#define kernel_locked()		preempt_get_count()
+#else
 #define kernel_locked()		spin_is_locked(&kernel_flag)
+#endif
 
 /*
  * Release global kernel lock and global interrupt lock
@@ -40,8 +45,14 @@
  */
 static inline void lock_kernel(void)
 {
+#ifdef CONFIG_PREEMPT
+	if (current->lock_depth == -1)
+		spin_lock(&kernel_flag);
+	++current->lock_depth;
+#else
 	if (!++current->lock_depth)
 		spin_lock(&kernel_flag);
+#endif
 }
 
 static inline void unlock_kernel(void)
diff -urN linux-2.4.20/include/asm-arm/softirq.h linux/include/asm-arm/softirq.h
--- linux-2.4.20/include/asm-arm/softirq.h	2001-09-08 15:02:31.000000000 -0400
+++ linux/include/asm-arm/softirq.h	2002-12-11 02:34:47.000000000 -0500
@@ -5,20 +5,22 @@
 #include <asm/hardirq.h>
 
 #define __cpu_bh_enable(cpu) \
-		do { barrier(); local_bh_count(cpu)--; } while (0)
+		do { barrier(); local_bh_count(cpu)--; preempt_enable(); } while (0)
 #define cpu_bh_disable(cpu) \
-		do { local_bh_count(cpu)++; barrier(); } while (0)
+		do { preempt_disable(); local_bh_count(cpu)++; barrier(); } while (0)
 
 #define local_bh_disable()	cpu_bh_disable(smp_processor_id())
 #define __local_bh_enable()	__cpu_bh_enable(smp_processor_id())
 
 #define in_softirq()		(local_bh_count(smp_processor_id()) != 0)
 
-#define local_bh_enable() \
+#define _local_bh_enable() \
 do { \
	unsigned int *ptr = &local_bh_count(smp_processor_id()); \
	if (!--*ptr && ptr[-2]) \
		__asm__("bl%? __do_softirq": : : "lr");	/* out of line */ \
 } while (0)
 
+#define local_bh_enable() do { _local_bh_enable(); preempt_enable(); } while (0)
+
 #endif	/* __ASM_SOFTIRQ_H */
diff -urN linux-2.4.20/include/asm-arm/system.h linux/include/asm-arm/system.h
--- linux-2.4.20/include/asm-arm/system.h	2000-11-27 20:07:59.000000000 -0500
+++ linux/include/asm-arm/system.h	2002-12-11 02:34:47.000000000 -0500
@@ -62,6 +62,13 @@
 #define local_irq_disable()	__cli()
 #define local_irq_enable()	__sti()
 
+#define irqs_disabled()				\
+({						\
+	unsigned long cpsr_val;			\
+	asm ("mrs %0, cpsr" : "=r" (cpsr_val));	\
+	cpsr_val & 128;				\
+})
+
 #ifdef CONFIG_SMP
 #error SMP not supported
 
diff -urN linux-2.4.20/include/asm-i386/hardirq.h linux/include/asm-i386/hardirq.h
--- linux-2.4.20/include/asm-i386/hardirq.h	2001-11-22 14:46:19.000000000 -0500
+++ linux/include/asm-i386/hardirq.h	2002-12-11 02:34:47.000000000 -0500
@@ -19,12 +19,16 @@
 
 /*
  * Are we in an interrupt context? Either doing bottom half
- * or hardware interrupt processing?
+ * or hardware interrupt processing?  Note the preempt check:
+ * this is both a bugfix and an optimization.  If we are
+ * preemptible, we cannot be in an interrupt.
  */
-#define in_interrupt() ({ int __cpu = smp_processor_id(); \
-	(local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
+#define in_interrupt() (preempt_is_disabled() && \
+	({ unsigned long __cpu = smp_processor_id(); \
+	(local_irq_count(__cpu) + local_bh_count(__cpu) != 0); }))
 
-#define in_irq() (local_irq_count(smp_processor_id()) != 0)
+#define in_irq() (preempt_is_disabled() && \
+	(local_irq_count(smp_processor_id()) != 0))
 
 #ifndef CONFIG_SMP
 
@@ -36,6 +40,8 @@
 
 #define synchronize_irq()	barrier()
 
+#define release_irqlock(cpu)	do { } while (0)
+
 #else
 
 #include <asm/atomic.h>
diff -urN linux-2.4.20/include/asm-i386/highmem.h linux/include/asm-i386/highmem.h
--- linux-2.4.20/include/asm-i386/highmem.h	2002-08-02 20:39:45.000000000 -0400
+++ linux/include/asm-i386/highmem.h	2002-12-11 02:34:47.000000000 -0500
@@ -88,6 +88,7 @@
 	enum fixed_addresses idx;
 	unsigned long vaddr;
 
+	preempt_disable();
 	if (page < highmem_start_page)
 		return page_address(page);
 
@@ -109,8 +110,10 @@
 	unsigned long vaddr = (unsigned long) kvaddr;
 	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
 
-	if (vaddr < FIXADDR_START) // FIXME
+	if (vaddr < FIXADDR_START) { // FIXME
+		preempt_enable();
 		return;
+	}
 
 	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
 		out_of_line_bug();
@@ -122,6 +125,8 @@
 	pte_clear(kmap_pte-idx);
 	__flush_tlb_one(vaddr);
 #endif
+
+	preempt_enable();
 }
 
 #endif /* __KERNEL__ */
diff -urN linux-2.4.20/include/asm-i386/hw_irq.h linux/include/asm-i386/hw_irq.h
--- linux-2.4.20/include/asm-i386/hw_irq.h	2001-11-22 14:46:18.000000000 -0500
+++ linux/include/asm-i386/hw_irq.h	2002-12-11 02:34:47.000000000 -0500
@@ -95,6 +95,18 @@
 #define __STR(x) #x
 #define STR(x) __STR(x)
 
+#define GET_CURRENT \
+	"movl %esp, %ebx\n\t" \
+	"andl $-8192, %ebx\n\t"
+
+#ifdef CONFIG_PREEMPT
+#define BUMP_LOCK_COUNT \
+	GET_CURRENT \
+	"incl 4(%ebx)\n\t"
+#else
+#define BUMP_LOCK_COUNT
+#endif
+
 #define SAVE_ALL \
	"cld\n\t" \
	"pushl %es\n\t" \
@@ -108,15 +120,12 @@
	"pushl %ebx\n\t" \
	"movl $" STR(__KERNEL_DS) ",%edx\n\t" \
	"movl %edx,%ds\n\t" \
-	"movl %edx,%es\n\t"
+	"movl %edx,%es\n\t" \
+	BUMP_LOCK_COUNT
 
 #define IRQ_NAME2(nr) nr##_interrupt(void)
 #define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
 
-#define GET_CURRENT \
-	"movl %esp, %ebx\n\t" \
-	"andl $-8192, %ebx\n\t"
-
 /*
  * SMP has a few special interrupts for IPI messages
  */
diff -urN linux-2.4.20/include/asm-i386/i387.h linux/include/asm-i386/i387.h
--- linux-2.4.20/include/asm-i386/i387.h	2002-08-02 20:39:45.000000000 -0400
+++ linux/include/asm-i386/i387.h	2002-12-11 02:34:47.000000000 -0500
@@ -12,6 +12,7 @@
 #define __ASM_I386_I387_H
 
 #include <linux/sched.h>
+#include <linux/spinlock.h>
 #include <asm/processor.h>
 #include <asm/sigcontext.h>
 #include <asm/user.h>
@@ -24,7 +25,7 @@
 extern void restore_fpu( struct task_struct *tsk );
 
 extern void kernel_fpu_begin(void);
-#define kernel_fpu_end() stts()
+#define kernel_fpu_end() do { stts(); preempt_enable(); } while (0)
 
 
 #define unlazy_fpu( tsk ) do { \
diff -urN linux-2.4.20/include/asm-i386/pgalloc.h linux/include/asm-i386/pgalloc.h
--- linux-2.4.20/include/asm-i386/pgalloc.h	2002-08-02 20:39:45.000000000 -0400
+++ linux/include/asm-i386/pgalloc.h	2002-12-11 02:34:47.000000000 -0500
@@ -75,20 +75,26 @@
 {
 	unsigned long *ret;
 
+	preempt_disable();
 	if ((ret = pgd_quicklist) != NULL) {
 		pgd_quicklist = (unsigned long *)(*ret);
 		ret[0] = 0;
 		pgtable_cache_size--;
-	} else
+		preempt_enable();
+	} else {
+		preempt_enable();
 		ret = (unsigned long *)get_pgd_slow();
+	}
 	return (pgd_t *)ret;
 }
 
 static inline void free_pgd_fast(pgd_t *pgd)
 {
+	preempt_disable();
 	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
 	pgd_quicklist = (unsigned long *) pgd;
 	pgtable_cache_size++;
+	preempt_enable();
 }
 
 static inline void free_pgd_slow(pgd_t *pgd)
@@ -119,19 +125,23 @@
 {
 	unsigned long *ret;
 
+	preempt_disable();
 	if ((ret = (unsigned long *)pte_quicklist) != NULL) {
 		pte_quicklist = (unsigned long *)(*ret);
 		ret[0] = ret[1];
 		pgtable_cache_size--;
 	}
+	preempt_enable();
 	return (pte_t *)ret;
 }
 
 static inline void pte_free_fast(pte_t *pte)
 {
+	preempt_disable();
 	*(unsigned long *)pte = (unsigned long) pte_quicklist;
 	pte_quicklist = (unsigned long *) pte;
 	pgtable_cache_size++;
+	preempt_enable();
 }
 
 static __inline__ void pte_free_slow(pte_t *pte)
1437 | diff -urN linux-2.4.20/include/asm-i386/smplock.h linux/include/asm-i386/smplock.h | |
1438 | --- linux-2.4.20/include/asm-i386/smplock.h 2002-08-02 20:39:45.000000000 -0400 | |
1439 | +++ linux/include/asm-i386/smplock.h 2002-12-11 02:34:47.000000000 -0500 | |
1440 | @@ -11,7 +11,15 @@ | |
1441 | extern spinlock_cacheline_t kernel_flag_cacheline; | |
1442 | #define kernel_flag kernel_flag_cacheline.lock | |
1443 | ||
1444 | +#ifdef CONFIG_SMP | |
1445 | #define kernel_locked() spin_is_locked(&kernel_flag) | |
1446 | +#else | |
1447 | +#ifdef CONFIG_PREEMPT | |
1448 | +#define kernel_locked() preempt_get_count() | |
1449 | +#else | |
1450 | +#define kernel_locked() 1 | |
1451 | +#endif | |
1452 | +#endif | |
1453 | ||
1454 | /* | |
1455 | * Release global kernel lock and global interrupt lock | |
1456 | @@ -43,6 +51,11 @@ | |
1457 | */ | |
1458 | static __inline__ void lock_kernel(void) | |
1459 | { | |
1460 | +#ifdef CONFIG_PREEMPT | |
1461 | + if (current->lock_depth == -1) | |
1462 | + spin_lock(&kernel_flag); | |
1463 | + ++current->lock_depth; | |
1464 | +#else | |
1465 | #if 1 | |
1466 | if (!++current->lock_depth) | |
1467 | spin_lock(&kernel_flag); | |
1468 | @@ -55,6 +68,7 @@ | |
1469 | :"=m" (__dummy_lock(&kernel_flag)), | |
1470 | "=m" (current->lock_depth)); | |
1471 | #endif | |
1472 | +#endif | |
1473 | } | |
1474 | ||
1475 | static __inline__ void unlock_kernel(void) | |
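Under CONFIG_PREEMPT the old "!++current->lock_depth" trick is unsafe: preempted between the increment and the spin_lock, the task would show lock_depth >= 0 while not actually holding the lock, and release_kernel_lock() in the scheduler would unlock a lock nobody took. The preemptible variant therefore acquires first, while depth is still -1, and only then publishes the increment. A self-contained model of the depth counting (the lock itself is a user-space stand-in):

#include <assert.h>
#include <stdio.h>

static int lock_depth = -1;  /* -1: BKL not held; >= 0: recursion depth */
static int bkl_held;

static void spin_lock(void)   { bkl_held = 1; }
static void spin_unlock(void) { bkl_held = 0; }

/* PREEMPT-safe ordering: acquire first, then publish the depth, so a
 * preempting scheduler never sees depth >= 0 without the lock held. */
static void lock_kernel(void)
{
    if (lock_depth == -1)
        spin_lock();
    ++lock_depth;
}

static void unlock_kernel(void)
{
    assert(lock_depth >= 0);  /* models the BUG() check */
    if (--lock_depth < 0)
        spin_unlock();
}

int main(void)
{
    lock_kernel();
    lock_kernel();             /* recursive acquisition is legal */
    unlock_kernel();
    assert(bkl_held);          /* still held at depth 0 */
    unlock_kernel();
    assert(!bkl_held && lock_depth == -1);
    puts("ok");
    return 0;
}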
1476 | diff -urN linux-2.4.20/include/asm-i386/softirq.h linux/include/asm-i386/softirq.h | |
1477 | --- linux-2.4.20/include/asm-i386/softirq.h 2002-08-02 20:39:45.000000000 -0400 | |
1478 | +++ linux/include/asm-i386/softirq.h 2002-12-11 02:34:48.000000000 -0500 | |
1479 | @@ -5,9 +5,9 @@ | |
1480 | #include <asm/hardirq.h> | |
1481 | ||
1482 | #define __cpu_bh_enable(cpu) \ | |
1483 | - do { barrier(); local_bh_count(cpu)--; } while (0) | |
1484 | + do { barrier(); local_bh_count(cpu)--; preempt_enable(); } while (0) | |
1485 | #define cpu_bh_disable(cpu) \ | |
1486 | - do { local_bh_count(cpu)++; barrier(); } while (0) | |
1487 | + do { preempt_disable(); local_bh_count(cpu)++; barrier(); } while (0) | |
1488 | ||
1489 | #define local_bh_disable() cpu_bh_disable(smp_processor_id()) | |
1490 | #define __local_bh_enable() __cpu_bh_enable(smp_processor_id()) | |
1491 | @@ -22,7 +22,7 @@ | |
1492 | * If you change the offsets in irq_stat then you have to | |
1493 | * update this code as well. | |
1494 | */ | |
1495 | -#define local_bh_enable() \ | |
1496 | +#define _local_bh_enable() \ | |
1497 | do { \ | |
1498 | unsigned int *ptr = &local_bh_count(smp_processor_id()); \ | |
1499 | \ | |
1500 | @@ -45,4 +45,6 @@ | |
1501 | /* no registers clobbered */ ); \ | |
1502 | } while (0) | |
1503 | ||
1504 | +#define local_bh_enable() do { _local_bh_enable(); preempt_enable(); } while (0) | |
1505 | + | |
1506 | #endif /* __ASM_SOFTIRQ_H */ | |
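Splitting local_bh_enable() keeps the hand-tuned assembly path as _local_bh_enable() and layers the preemption bookkeeping on top, so every softirq-off region is also a preempt-off region. A compact user-space model of the resulting nesting (plain counters stand in for local_bh_count and preempt_count):

#include <assert.h>
#include <stdio.h>

static int bh_count, preempt_count;

static void local_bh_disable(void) { ++preempt_count; ++bh_count; }
static void _local_bh_enable(void) { --bh_count; /* run softirqs if it hit 0 */ }
static void local_bh_enable(void)  { _local_bh_enable(); --preempt_count; }

int main(void)
{
    local_bh_disable();
    assert(bh_count == 1 && preempt_count == 1);
    local_bh_enable();
    assert(bh_count == 0 && preempt_count == 0);
    puts("softirq-off region doubles as a preempt-off region");
    return 0;
}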
1507 | diff -urN linux-2.4.20/include/asm-i386/spinlock.h linux/include/asm-i386/spinlock.h | |
1508 | --- linux-2.4.20/include/asm-i386/spinlock.h 2002-11-28 18:53:15.000000000 -0500 | |
1509 | +++ linux/include/asm-i386/spinlock.h 2002-12-11 02:34:48.000000000 -0500 | |
1510 | @@ -77,7 +77,7 @@ | |
1511 | :"=m" (lock->lock) : : "memory" | |
1512 | ||
1513 | ||
1514 | -static inline void spin_unlock(spinlock_t *lock) | |
1515 | +static inline void _raw_spin_unlock(spinlock_t *lock) | |
1516 | { | |
1517 | #if SPINLOCK_DEBUG | |
1518 | if (lock->magic != SPINLOCK_MAGIC) | |
1519 | @@ -97,7 +97,7 @@ | |
1520 | :"=q" (oldval), "=m" (lock->lock) \ | |
1521 | :"0" (oldval) : "memory" | |
1522 | ||
1523 | -static inline void spin_unlock(spinlock_t *lock) | |
1524 | +static inline void _raw_spin_unlock(spinlock_t *lock) | |
1525 | { | |
1526 | char oldval = 1; | |
1527 | #if SPINLOCK_DEBUG | |
1528 | @@ -113,7 +113,7 @@ | |
1529 | ||
1530 | #endif | |
1531 | ||
1532 | -static inline int spin_trylock(spinlock_t *lock) | |
1533 | +static inline int _raw_spin_trylock(spinlock_t *lock) | |
1534 | { | |
1535 | char oldval; | |
1536 | __asm__ __volatile__( | |
1537 | @@ -123,7 +123,7 @@ | |
1538 | return oldval > 0; | |
1539 | } | |
1540 | ||
1541 | -static inline void spin_lock(spinlock_t *lock) | |
1542 | +static inline void _raw_spin_lock(spinlock_t *lock) | |
1543 | { | |
1544 | #if SPINLOCK_DEBUG | |
1545 | __label__ here; | |
1546 | @@ -179,7 +179,7 @@ | |
1547 | */ | |
1548 | /* the spinlock helpers are in arch/i386/kernel/semaphore.c */ | |
1549 | ||
1550 | -static inline void read_lock(rwlock_t *rw) | |
1551 | +static inline void _raw_read_lock(rwlock_t *rw) | |
1552 | { | |
1553 | #if SPINLOCK_DEBUG | |
1554 | if (rw->magic != RWLOCK_MAGIC) | |
1555 | @@ -188,7 +188,7 @@ | |
1556 | __build_read_lock(rw, "__read_lock_failed"); | |
1557 | } | |
1558 | ||
1559 | -static inline void write_lock(rwlock_t *rw) | |
1560 | +static inline void _raw_write_lock(rwlock_t *rw) | |
1561 | { | |
1562 | #if SPINLOCK_DEBUG | |
1563 | if (rw->magic != RWLOCK_MAGIC) | |
1564 | @@ -197,10 +197,10 @@ | |
1565 | __build_write_lock(rw, "__write_lock_failed"); | |
1566 | } | |
1567 | ||
1568 | -#define read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory") | |
1569 | -#define write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory") | |
1570 | +#define _raw_read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory") | |
1571 | +#define _raw_write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory") | |
1572 | ||
1573 | -static inline int write_trylock(rwlock_t *lock) | |
1574 | +static inline int _raw_write_trylock(rwlock_t *lock) | |
1575 | { | |
1576 | atomic_t *count = (atomic_t *)lock; | |
1577 | if (atomic_sub_and_test(RW_LOCK_BIAS, count)) | |
1578 | diff -urN linux-2.4.20/include/asm-i386/system.h linux/include/asm-i386/system.h | |
1579 | --- linux-2.4.20/include/asm-i386/system.h 2002-11-28 18:53:15.000000000 -0500 | |
1580 | +++ linux/include/asm-i386/system.h 2002-12-11 02:34:48.000000000 -0500 | |
1581 | @@ -322,6 +322,13 @@ | |
1582 | /* used in the idle loop; sti takes one instruction cycle to complete */ | |
1583 | #define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory") | |
1584 | ||
1585 | +#define irqs_disabled() \ | |
1586 | +({ \ | |
1587 | + unsigned long flags; \ | |
1588 | + __save_flags(flags); \ | |
1589 | + !(flags & (1<<9)); \ | |
1590 | +}) | |
1591 | + | |
1592 | /* For spinlocks etc */ | |
1593 | #define local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory") | |
1594 | #define local_irq_restore(x) __restore_flags(x) | |
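preempt_schedule() (added in kernel/sched.c below) bails out when interrupts are off, so each architecture gains an irqs_disabled() predicate; on i386 it tests IF, bit 9 of EFLAGS. The same bit test on a saved flags value, as a runnable illustration (the constant comes from the IA-32 manuals, not from this patch):

#include <stdio.h>

#define X86_EFLAGS_IF (1UL << 9)   /* interrupt-enable flag */

static int irqs_disabled(unsigned long eflags)
{
    return !(eflags & X86_EFLAGS_IF);
}

int main(void)
{
    printf("IF=0 -> disabled: %d\n", irqs_disabled(0x0002));  /* prints 1 */
    printf("IF=1 -> disabled: %d\n", irqs_disabled(0x0202));  /* prints 0 */
    return 0;
}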
1595 | diff -urN linux-2.4.20/include/asm-mips/smplock.h linux/include/asm-mips/smplock.h | |
1596 | --- linux-2.4.20/include/asm-mips/smplock.h 2002-08-02 20:39:45.000000000 -0400 | |
1597 | +++ linux/include/asm-mips/smplock.h 2002-12-11 02:34:48.000000000 -0500 | |
1598 | @@ -5,12 +5,21 @@ | |
1599 | * | |
1600 | * Default SMP lock implementation | |
1601 | */ | |
1602 | +#include <linux/config.h> | |
1603 | #include <linux/interrupt.h> | |
1604 | #include <linux/spinlock.h> | |
1605 | ||
1606 | extern spinlock_t kernel_flag; | |
1607 | ||
1608 | +#ifdef CONFIG_SMP | |
1609 | #define kernel_locked() spin_is_locked(&kernel_flag) | |
1610 | +#else | |
1611 | +#ifdef CONFIG_PREEMPT | |
1612 | +#define kernel_locked() preempt_get_count() | |
1613 | +#else | |
1614 | +#define kernel_locked() 1 | |
1615 | +#endif | |
1616 | +#endif | |
1617 | ||
1618 | /* | |
1619 | * Release global kernel lock and global interrupt lock | |
1620 | @@ -42,8 +51,14 @@ | |
1621 | */ | |
1622 | extern __inline__ void lock_kernel(void) | |
1623 | { | |
1624 | +#ifdef CONFIG_PREEMPT | |
1625 | + if (current->lock_depth == -1) | |
1626 | + spin_lock(&kernel_flag); | |
1627 | + ++current->lock_depth; | |
1628 | +#else | |
1629 | if (!++current->lock_depth) | |
1630 | spin_lock(&kernel_flag); | |
1631 | +#endif | |
1632 | } | |
1633 | ||
1634 | extern __inline__ void unlock_kernel(void) | |
1635 | diff -urN linux-2.4.20/include/asm-mips/softirq.h linux/include/asm-mips/softirq.h | |
1636 | --- linux-2.4.20/include/asm-mips/softirq.h 2002-11-28 18:53:15.000000000 -0500 | |
1637 | +++ linux/include/asm-mips/softirq.h 2002-12-11 02:34:48.000000000 -0500 | |
1638 | @@ -15,6 +15,7 @@ | |
1639 | ||
1640 | static inline void cpu_bh_disable(int cpu) | |
1641 | { | |
1642 | + preempt_disable(); | |
1643 | local_bh_count(cpu)++; | |
1644 | barrier(); | |
1645 | } | |
1646 | @@ -23,6 +24,7 @@ | |
1647 | { | |
1648 | barrier(); | |
1649 | local_bh_count(cpu)--; | |
1650 | + preempt_enable(); | |
1651 | } | |
1652 | ||
1653 | ||
1654 | @@ -36,6 +38,7 @@ | |
1655 | cpu = smp_processor_id(); \ | |
1656 | if (!--local_bh_count(cpu) && softirq_pending(cpu)) \ | |
1657 | do_softirq(); \ | |
1658 | + preempt_enable(); \ | |
1659 | } while (0) | |
1660 | ||
1661 | #define in_softirq() (local_bh_count(smp_processor_id()) != 0) | |
1662 | diff -urN linux-2.4.20/include/asm-mips/system.h linux/include/asm-mips/system.h | |
1663 | --- linux-2.4.20/include/asm-mips/system.h 2002-11-28 18:53:15.000000000 -0500 | |
1664 | +++ linux/include/asm-mips/system.h 2002-12-11 02:34:48.000000000 -0500 | |
1665 | @@ -322,4 +322,18 @@ | |
1666 | #define die_if_kernel(msg, regs) \ | |
1667 | __die_if_kernel(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__) | |
1668 | ||
1669 | +extern __inline__ int intr_on(void) | |
1670 | +{ | |
1671 | + unsigned long flags; | |
1672 | + save_flags(flags); | |
1673 | + return flags & 1; | |
1674 | +} | |
1675 | + | |
1676 | +extern __inline__ int intr_off(void) | |
1677 | +{ | |
1678 | + return ! intr_on(); | |
1679 | +} | |
1680 | + | |
1681 | +#define irqs_disabled() intr_off() | |
1682 | + | |
1683 | #endif /* _ASM_SYSTEM_H */ | |
1684 | diff -urN linux-2.4.20/include/asm-ppc/dma.h linux/include/asm-ppc/dma.h | |
1685 | --- linux-2.4.20/include/asm-ppc/dma.h 2001-05-21 18:02:06.000000000 -0400 | |
1686 | +++ linux/include/asm-ppc/dma.h 2002-12-11 02:34:48.000000000 -0500 | |
1687 | @@ -14,6 +14,7 @@ | |
1688 | #include <linux/config.h> | |
1689 | #include <asm/io.h> | |
1690 | #include <linux/spinlock.h> | |
1691 | +#include <linux/sched.h> | |
1692 | #include <asm/system.h> | |
1693 | ||
1694 | /* | |
1695 | diff -urN linux-2.4.20/include/asm-ppc/hardirq.h linux/include/asm-ppc/hardirq.h | |
1696 | --- linux-2.4.20/include/asm-ppc/hardirq.h 2002-11-28 18:53:15.000000000 -0500 | |
1697 | +++ linux/include/asm-ppc/hardirq.h 2002-12-11 02:34:48.000000000 -0500 | |
1698 | @@ -48,6 +48,7 @@ | |
1699 | #define hardirq_exit(cpu) (local_irq_count(cpu)--) | |
1700 | ||
1701 | #define synchronize_irq() do { } while (0) | |
1702 | +#define release_irqlock(cpu) do { } while (0) | |
1703 | ||
1704 | #else /* CONFIG_SMP */ | |
1705 | ||
1706 | diff -urN linux-2.4.20/include/asm-ppc/highmem.h linux/include/asm-ppc/highmem.h | |
1707 | --- linux-2.4.20/include/asm-ppc/highmem.h 2001-07-02 17:34:57.000000000 -0400 | |
1708 | +++ linux/include/asm-ppc/highmem.h 2002-12-11 02:34:48.000000000 -0500 | |
1709 | @@ -84,6 +84,7 @@ | |
1710 | unsigned int idx; | |
1711 | unsigned long vaddr; | |
1712 | ||
1713 | + preempt_disable(); | |
1714 | if (page < highmem_start_page) | |
1715 | return page_address(page); | |
1716 | ||
1717 | @@ -105,8 +106,10 @@ | |
1718 | unsigned long vaddr = (unsigned long) kvaddr; | |
1719 | unsigned int idx = type + KM_TYPE_NR*smp_processor_id(); | |
1720 | ||
1721 | - if (vaddr < KMAP_FIX_BEGIN) // FIXME | |
1722 | + if (vaddr < KMAP_FIX_BEGIN) { // FIXME | |
1723 | + preempt_enable(); | |
1724 | return; | |
1725 | + } | |
1726 | ||
1727 | if (vaddr != KMAP_FIX_BEGIN + idx * PAGE_SIZE) | |
1728 | BUG(); | |
1729 | @@ -118,6 +121,7 @@ | |
1730 | pte_clear(kmap_pte+idx); | |
1731 | flush_tlb_page(0, vaddr); | |
1732 | #endif | |
1733 | + preempt_enable(); | |
1734 | } | |
1735 | ||
1736 | #endif /* __KERNEL__ */ | |
1737 | diff -urN linux-2.4.20/include/asm-ppc/hw_irq.h linux/include/asm-ppc/hw_irq.h | |
1738 | --- linux-2.4.20/include/asm-ppc/hw_irq.h 2002-11-28 18:53:15.000000000 -0500 | |
1739 | +++ linux/include/asm-ppc/hw_irq.h 2002-12-11 02:34:48.000000000 -0500 | |
1740 | @@ -22,6 +22,12 @@ | |
1741 | #define __save_flags(flags) __save_flags_ptr((unsigned long *)&flags) | |
1742 | #define __save_and_cli(flags) ({__save_flags(flags);__cli();}) | |
1743 | ||
1744 | +#define mfmsr() ({unsigned int rval; \ | |
1745 | + asm volatile("mfmsr %0" : "=r" (rval)); rval;}) | |
1746 | +#define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v)) | |
1747 | + | |
1748 | +#define irqs_disabled() ((mfmsr() & MSR_EE) == 0) | |
1749 | + | |
1750 | extern void do_lost_interrupts(unsigned long); | |
1751 | ||
1752 | #define mask_irq(irq) ({if (irq_desc[irq].handler && irq_desc[irq].handler->disable) irq_desc[irq].handler->disable(irq);}) | |
1753 | diff -urN linux-2.4.20/include/asm-ppc/mmu_context.h linux/include/asm-ppc/mmu_context.h | |
1754 | --- linux-2.4.20/include/asm-ppc/mmu_context.h 2001-10-02 12:12:44.000000000 -0400 | |
1755 | +++ linux/include/asm-ppc/mmu_context.h 2002-12-11 02:34:48.000000000 -0500 | |
1756 | @@ -158,6 +158,10 @@ | |
1757 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |
1758 | struct task_struct *tsk, int cpu) | |
1759 | { | |
1760 | +#ifdef CONFIG_PREEMPT | |
1761 | + if (preempt_get_count() == 0) | |
1762 | + BUG(); | |
1763 | +#endif | |
1764 | tsk->thread.pgdir = next->pgd; | |
1765 | get_mmu_context(next); | |
1766 | set_context(next->context, next->pgd); | |
1767 | diff -urN linux-2.4.20/include/asm-ppc/pgalloc.h linux/include/asm-ppc/pgalloc.h | |
1768 | --- linux-2.4.20/include/asm-ppc/pgalloc.h 2001-05-21 18:02:06.000000000 -0400 | |
1769 | +++ linux/include/asm-ppc/pgalloc.h 2002-12-11 02:34:48.000000000 -0500 | |
1770 | @@ -68,20 +68,26 @@ | |
1771 | { | |
1772 | unsigned long *ret; | |
1773 | ||
1774 | + preempt_disable(); | |
1775 | if ((ret = pgd_quicklist) != NULL) { | |
1776 | pgd_quicklist = (unsigned long *)(*ret); | |
1777 | ret[0] = 0; | |
1778 | pgtable_cache_size--; | |
1779 | + preempt_enable(); | |
1780 | - } else | |
1780 | + } else { | |
1781 | + preempt_enable(); | |
1782 | ret = (unsigned long *)get_pgd_slow(); | |
1782 | + } | |
1783 | return (pgd_t *)ret; | |
1784 | } | |
1785 | ||
1786 | extern __inline__ void free_pgd_fast(pgd_t *pgd) | |
1787 | { | |
1788 | + preempt_disable(); | |
1789 | *(unsigned long **)pgd = pgd_quicklist; | |
1790 | pgd_quicklist = (unsigned long *) pgd; | |
1791 | pgtable_cache_size++; | |
1792 | + preempt_enable(); | |
1793 | } | |
1794 | ||
1795 | extern __inline__ void free_pgd_slow(pgd_t *pgd) | |
1796 | @@ -120,19 +125,23 @@ | |
1797 | { | |
1798 | unsigned long *ret; | |
1799 | ||
1800 | + preempt_disable(); | |
1801 | if ((ret = pte_quicklist) != NULL) { | |
1802 | pte_quicklist = (unsigned long *)(*ret); | |
1803 | ret[0] = 0; | |
1804 | pgtable_cache_size--; | |
1805 | } | |
1806 | + preempt_enable(); | |
1807 | return (pte_t *)ret; | |
1808 | } | |
1809 | ||
1810 | extern __inline__ void pte_free_fast(pte_t *pte) | |
1811 | { | |
1812 | + preempt_disable(); | |
1813 | *(unsigned long **)pte = pte_quicklist; | |
1814 | pte_quicklist = (unsigned long *) pte; | |
1815 | pgtable_cache_size++; | |
1816 | + preempt_enable(); | |
1817 | } | |
1818 | ||
1819 | extern __inline__ void pte_free_slow(pte_t *pte) | |
1820 | diff -urN linux-2.4.20/include/asm-ppc/smplock.h linux/include/asm-ppc/smplock.h | |
1821 | --- linux-2.4.20/include/asm-ppc/smplock.h 2001-11-02 20:43:54.000000000 -0500 | |
1822 | +++ linux/include/asm-ppc/smplock.h 2002-12-11 02:34:48.000000000 -0500 | |
1823 | @@ -15,7 +15,15 @@ | |
1824 | ||
1825 | extern spinlock_t kernel_flag; | |
1826 | ||
1827 | +#ifdef CONFIG_SMP | |
1828 | #define kernel_locked() spin_is_locked(&kernel_flag) | |
1829 | +#else | |
1830 | +#ifdef CONFIG_PREEMPT | |
1831 | +#define kernel_locked() preempt_get_count() | |
1832 | +#else | |
1833 | +#define kernel_locked() 1 | |
1834 | +#endif | |
1835 | +#endif | |
1836 | ||
1837 | /* | |
1838 | * Release global kernel lock and global interrupt lock | |
1839 | @@ -47,8 +55,14 @@ | |
1840 | */ | |
1841 | static __inline__ void lock_kernel(void) | |
1842 | { | |
1843 | +#ifdef CONFIG_PREEMPT | |
1844 | + if (current->lock_depth == -1) | |
1845 | + spin_lock(&kernel_flag); | |
1846 | + ++current->lock_depth; | |
1847 | +#else | |
1848 | if (!++current->lock_depth) | |
1849 | spin_lock(&kernel_flag); | |
1850 | +#endif | |
1851 | } | |
1852 | ||
1853 | static __inline__ void unlock_kernel(void) | |
1854 | diff -urN linux-2.4.20/include/asm-ppc/softirq.h linux/include/asm-ppc/softirq.h | |
1855 | --- linux-2.4.20/include/asm-ppc/softirq.h 2001-09-08 15:02:31.000000000 -0400 | |
1856 | +++ linux/include/asm-ppc/softirq.h 2002-12-11 02:34:48.000000000 -0500 | |
1857 | @@ -10,6 +10,7 @@ | |
1858 | ||
1859 | #define local_bh_disable() \ | |
1860 | do { \ | |
1861 | + preempt_disable(); \ | |
1862 | local_bh_count(smp_processor_id())++; \ | |
1863 | barrier(); \ | |
1864 | } while (0) | |
1865 | @@ -18,9 +19,10 @@ | |
1866 | do { \ | |
1867 | barrier(); \ | |
1868 | local_bh_count(smp_processor_id())--; \ | |
1869 | + preempt_enable(); \ | |
1870 | } while (0) | |
1871 | ||
1872 | -#define local_bh_enable() \ | |
1873 | +#define _local_bh_enable() \ | |
1874 | do { \ | |
1875 | if (!--local_bh_count(smp_processor_id()) \ | |
1876 | && softirq_pending(smp_processor_id())) { \ | |
1877 | @@ -28,6 +30,12 @@ | |
1878 | } \ | |
1879 | } while (0) | |
1880 | ||
1881 | +#define local_bh_enable() \ | |
1882 | +do { \ | |
1883 | + _local_bh_enable(); \ | |
1884 | + preempt_enable(); \ | |
1885 | +} while (0) | |
1886 | + | |
1887 | #define in_softirq() (local_bh_count(smp_processor_id()) != 0) | |
1888 | ||
1889 | #endif /* __ASM_SOFTIRQ_H */ | |
1890 | diff -urN linux-2.4.20/include/asm-sh/hardirq.h linux/include/asm-sh/hardirq.h | |
1891 | --- linux-2.4.20/include/asm-sh/hardirq.h 2001-09-08 15:29:09.000000000 -0400 | |
1892 | +++ linux/include/asm-sh/hardirq.h 2002-12-11 02:34:48.000000000 -0500 | |
1893 | @@ -34,6 +34,8 @@ | |
1894 | ||
1895 | #define synchronize_irq() barrier() | |
1896 | ||
1897 | +#define release_irqlock(cpu) do { } while (0) | |
1898 | + | |
1899 | #else | |
1900 | ||
1901 | #error Super-H SMP is not available | |
1902 | diff -urN linux-2.4.20/include/asm-sh/smplock.h linux/include/asm-sh/smplock.h | |
1903 | --- linux-2.4.20/include/asm-sh/smplock.h 2001-09-08 15:29:09.000000000 -0400 | |
1904 | +++ linux/include/asm-sh/smplock.h 2002-12-11 02:34:48.000000000 -0500 | |
1905 | @@ -9,15 +9,88 @@ | |
1906 | ||
1907 | #include <linux/config.h> | |
1908 | ||
1909 | -#ifndef CONFIG_SMP | |
1910 | - | |
1911 | +#if !defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT) | |
1912 | +/* | |
1913 | + * Should never happen, since linux/smp_lock.h catches this case; | |
1914 | + * but in case this file is included directly with neither SMP nor | |
1915 | + * PREEMPT configuration, provide same dummys as linux/smp_lock.h | |
1916 | + */ | |
1917 | #define lock_kernel() do { } while(0) | |
1918 | #define unlock_kernel() do { } while(0) | |
1919 | -#define release_kernel_lock(task, cpu, depth) ((depth) = 1) | |
1920 | -#define reacquire_kernel_lock(task, cpu, depth) do { } while(0) | |
1921 | +#define release_kernel_lock(task, cpu) do { } while(0) | |
1922 | +#define reacquire_kernel_lock(task) do { } while(0) | |
1923 | +#define kernel_locked() 1 | |
1924 | + | |
1925 | +#else /* CONFIG_SMP || CONFIG_PREEMPT */ | |
1926 | + | |
1927 | +#ifdef CONFIG_SMP | |
1928 | +#error "We do not support SMP on SH yet" | |
1929 | +#endif | |
1930 | +/* | |
1931 | + * Default SMP lock implementation (i.e. the i386 version) | |
1932 | + */ | |
1933 | + | |
1934 | +#include <linux/interrupt.h> | |
1935 | +#include <linux/spinlock.h> | |
1936 | + | |
1937 | +extern spinlock_t kernel_flag; | |
1938 | +#define lock_bkl() spin_lock(&kernel_flag) | |
1939 | +#define unlock_bkl() spin_unlock(&kernel_flag) | |
1940 | ||
1941 | +#ifdef CONFIG_SMP | |
1942 | +#define kernel_locked() spin_is_locked(&kernel_flag) | |
1943 | +#elif defined(CONFIG_PREEMPT) | |
1944 | +#define kernel_locked() preempt_get_count() | |
1945 | +#else /* neither */ | |
1946 | +#define kernel_locked() 1 | |
1947 | +#endif | |
1948 | + | |
1949 | +/* | |
1950 | + * Release global kernel lock and global interrupt lock | |
1951 | + */ | |
1952 | +#define release_kernel_lock(task, cpu) \ | |
1953 | +do { \ | |
1954 | + if (task->lock_depth >= 0) \ | |
1955 | + spin_unlock(&kernel_flag); \ | |
1956 | + release_irqlock(cpu); \ | |
1957 | + __sti(); \ | |
1958 | +} while (0) | |
1959 | + | |
1960 | +/* | |
1961 | + * Re-acquire the kernel lock | |
1962 | + */ | |
1963 | +#define reacquire_kernel_lock(task) \ | |
1964 | +do { \ | |
1965 | + if (task->lock_depth >= 0) \ | |
1966 | + spin_lock(&kernel_flag); \ | |
1967 | +} while (0) | |
1968 | + | |
1969 | +/* | |
1970 | + * Getting the big kernel lock. | |
1971 | + * | |
1972 | + * This cannot happen asynchronously, | |
1973 | + * so we only need to worry about other | |
1974 | + * CPUs. | |
1975 | + */ | |
1976 | +static __inline__ void lock_kernel(void) | |
1977 | +{ | |
1978 | +#ifdef CONFIG_PREEMPT | |
1979 | + if (current->lock_depth == -1) | |
1980 | + spin_lock(&kernel_flag); | |
1981 | + ++current->lock_depth; | |
1982 | #else | |
1983 | -#error "We do not support SMP on SH" | |
1984 | -#endif /* CONFIG_SMP */ | |
1985 | + if (!++current->lock_depth) | |
1986 | + spin_lock(&kernel_flag); | |
1987 | +#endif | |
1988 | +} | |
1989 | + | |
1990 | +static __inline__ void unlock_kernel(void) | |
1991 | +{ | |
1992 | + if (current->lock_depth < 0) | |
1993 | + BUG(); | |
1994 | + if (--current->lock_depth < 0) | |
1995 | + spin_unlock(&kernel_flag); | |
1996 | +} | |
1997 | +#endif /* CONFIG_SMP || CONFIG_PREEMPT */ | |
1998 | ||
1999 | #endif /* __ASM_SH_SMPLOCK_H */ | |
2000 | diff -urN linux-2.4.20/include/asm-sh/softirq.h linux/include/asm-sh/softirq.h | |
2001 | --- linux-2.4.20/include/asm-sh/softirq.h 2001-09-08 15:29:09.000000000 -0400 | |
2002 | +++ linux/include/asm-sh/softirq.h 2002-12-11 02:34:48.000000000 -0500 | |
2003 | @@ -6,6 +6,7 @@ | |
2004 | ||
2005 | #define local_bh_disable() \ | |
2006 | do { \ | |
2007 | + preempt_disable(); \ | |
2008 | local_bh_count(smp_processor_id())++; \ | |
2009 | barrier(); \ | |
2010 | } while (0) | |
2011 | @@ -14,6 +15,7 @@ | |
2012 | do { \ | |
2013 | barrier(); \ | |
2014 | local_bh_count(smp_processor_id())--; \ | |
2015 | + preempt_enable(); \ | |
2016 | } while (0) | |
2017 | ||
2018 | #define local_bh_enable() \ | |
2019 | @@ -23,6 +25,7 @@ | |
2020 | && softirq_pending(smp_processor_id())) { \ | |
2021 | do_softirq(); \ | |
2022 | } \ | |
2023 | + preempt_enable(); \ | |
2024 | } while (0) | |
2025 | ||
2026 | #define in_softirq() (local_bh_count(smp_processor_id()) != 0) | |
2027 | diff -urN linux-2.4.20/include/asm-sh/system.h linux/include/asm-sh/system.h | |
2028 | --- linux-2.4.20/include/asm-sh/system.h 2001-09-08 15:29:09.000000000 -0400 | |
2029 | +++ linux/include/asm-sh/system.h 2002-12-11 02:34:48.000000000 -0500 | |
2030 | @@ -285,4 +285,17 @@ | |
2031 | void disable_hlt(void); | |
2032 | void enable_hlt(void); | |
2033 | ||
2034 | +/* | |
2035 | + * irqs_disabled - are interrupts disabled? | |
2036 | + */ | |
2037 | +static inline int irqs_disabled(void) | |
2038 | +{ | |
2039 | + unsigned long flags; | |
2040 | + | |
2041 | + __save_flags(flags); | |
2042 | + if (flags & 0x000000f0) | |
2043 | + return 1; | |
2044 | + return 0; | |
2045 | +} | |
2046 | + | |
2047 | #endif | |
2048 | diff -urN linux-2.4.20/include/linux/brlock.h linux/include/linux/brlock.h | |
2049 | --- linux-2.4.20/include/linux/brlock.h 2002-11-28 18:53:15.000000000 -0500 | |
2050 | +++ linux/include/linux/brlock.h 2002-12-11 02:34:48.000000000 -0500 | |
2051 | @@ -171,11 +171,11 @@ | |
2052 | } | |
2053 | ||
2054 | #else | |
2055 | -# define br_read_lock(idx) ((void)(idx)) | |
2056 | -# define br_read_unlock(idx) ((void)(idx)) | |
2057 | -# define br_write_lock(idx) ((void)(idx)) | |
2058 | -# define br_write_unlock(idx) ((void)(idx)) | |
2059 | -#endif | |
2060 | +# define br_read_lock(idx) ({ (void)(idx); preempt_disable(); }) | |
2061 | +# define br_read_unlock(idx) ({ (void)(idx); preempt_enable(); }) | |
2062 | +# define br_write_lock(idx) ({ (void)(idx); preempt_disable(); }) | |
2063 | +# define br_write_unlock(idx) ({ (void)(idx); preempt_enable(); }) | |
2064 | +#endif /* CONFIG_SMP */ | |
2065 | ||
2066 | /* | |
2067 | * Now enumerate all of the possible sw/hw IRQ protected | |
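On UP kernels the big-reader lock stubs used to compile to nothing; with preemption they must still mark a critical section, so they become preempt toggles while keeping the (void)(idx) evaluation that silences unused-variable warnings. A sketch of that macro shape in isolation (GCC statement expressions, as in the patch; the counter is a stand-in):

#include <stdio.h>

static int preempt_count;
#define preempt_disable() (++preempt_count)
#define preempt_enable()  (--preempt_count)

/* UP + PREEMPT stubs: evaluate idx (for -Wunused), then toggle preemption */
#define br_read_lock(idx)   ({ (void)(idx); preempt_disable(); })
#define br_read_unlock(idx) ({ (void)(idx); preempt_enable(); })

int main(void)
{
    int idx = 0;
    br_read_lock(idx);
    printf("inside br section, preempt_count=%d\n", preempt_count);
    br_read_unlock(idx);
    return 0;
}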
2068 | diff -urN linux-2.4.20/include/linux/dcache.h linux/include/linux/dcache.h | |
2069 | --- linux-2.4.20/include/linux/dcache.h 2002-11-28 18:53:15.000000000 -0500 | |
2070 | +++ linux/include/linux/dcache.h 2002-12-11 02:34:48.000000000 -0500 | |
2071 | @@ -127,31 +127,6 @@ | |
2072 | ||
2073 | extern spinlock_t dcache_lock; | |
2074 | ||
2075 | -/** | |
2076 | - * d_drop - drop a dentry | |
2077 | - * @dentry: dentry to drop | |
2078 | - * | |
2079 | - * d_drop() unhashes the entry from the parent | |
2080 | - * dentry hashes, so that it won't be found through | |
2081 | - * a VFS lookup any more. Note that this is different | |
2082 | - * from deleting the dentry - d_delete will try to | |
2083 | - * mark the dentry negative if possible, giving a | |
2084 | - * successful _negative_ lookup, while d_drop will | |
2085 | - * just make the cache lookup fail. | |
2086 | - * | |
2087 | - * d_drop() is used mainly for stuff that wants | |
2088 | - * to invalidate a dentry for some reason (NFS | |
2089 | - * timeouts or autofs deletes). | |
2090 | - */ | |
2091 | - | |
2092 | -static __inline__ void d_drop(struct dentry * dentry) | |
2093 | -{ | |
2094 | - spin_lock(&dcache_lock); | |
2095 | - list_del(&dentry->d_hash); | |
2096 | - INIT_LIST_HEAD(&dentry->d_hash); | |
2097 | - spin_unlock(&dcache_lock); | |
2098 | -} | |
2099 | - | |
2100 | static __inline__ int dname_external(struct dentry *d) | |
2101 | { | |
2102 | return d->d_name.name != d->d_iname; | |
2103 | @@ -276,3 +251,34 @@ | |
2104 | #endif /* __KERNEL__ */ | |
2105 | ||
2106 | #endif /* __LINUX_DCACHE_H */ | |
2107 | + | |
2108 | +#if !defined(__LINUX_DCACHE_H_INLINES) && defined(_TASK_STRUCT_DEFINED) | |
2109 | +#define __LINUX_DCACHE_H_INLINES | |
2110 | + | |
2111 | +#ifdef __KERNEL__ | |
2112 | +/** | |
2113 | + * d_drop - drop a dentry | |
2114 | + * @dentry: dentry to drop | |
2115 | + * | |
2116 | + * d_drop() unhashes the entry from the parent | |
2117 | + * dentry hashes, so that it won't be found through | |
2118 | + * a VFS lookup any more. Note that this is different | |
2119 | + * from deleting the dentry - d_delete will try to | |
2120 | + * mark the dentry negative if possible, giving a | |
2121 | + * successful _negative_ lookup, while d_drop will | |
2122 | + * just make the cache lookup fail. | |
2123 | + * | |
2124 | + * d_drop() is used mainly for stuff that wants | |
2125 | + * to invalidate a dentry for some reason (NFS | |
2126 | + * timeouts or autofs deletes). | |
2127 | + */ | |
2128 | + | |
2129 | +static __inline__ void d_drop(struct dentry * dentry) | |
2130 | +{ | |
2131 | + spin_lock(&dcache_lock); | |
2132 | + list_del(&dentry->d_hash); | |
2133 | + INIT_LIST_HEAD(&dentry->d_hash); | |
2134 | + spin_unlock(&dcache_lock); | |
2135 | +} | |
2136 | +#endif | |
2137 | +#endif | |
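d_drop() now expands spin_lock(), which with CONFIG_PREEMPT dereferences current->preempt_count, yet dcache.h is normally parsed before task_struct exists. The patch breaks the cycle by moving the inline below a !defined(__LINUX_DCACHE_H_INLINES) && defined(_TASK_STRUCT_DEFINED) gate that sits outside the main include guard, and having sched.h re-include the header once the task structure is defined. A single-file model of the two-pass trick (both "headers" collapsed into one translation unit for demonstration; all names are illustrative):

#include <stdio.h>

/* --- pass 1: dcache-like header; the task structure is not defined yet --- */
#ifndef DCACHE_H
#define DCACHE_H
/* only declarations that do not need struct task go here */
#endif

/* --- sched-like header defines the task structure --- */
struct task { int preempt_count; };
#define TASK_DEFINED

/* --- pass 2: the tail section, legal only now that struct task exists --- */
#if !defined(DCACHE_H_INLINES) && defined(TASK_DEFINED)
#define DCACHE_H_INLINES
static inline void preempt_disable(struct task *t) { ++t->preempt_count; }
#endif

int main(void)
{
    struct task t = { 0 };
    preempt_disable(&t);
    printf("preempt_count=%d\n", t.preempt_count);
    return 0;
}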
2138 | diff -urN linux-2.4.20/include/linux/fs_struct.h linux/include/linux/fs_struct.h | |
2139 | --- linux-2.4.20/include/linux/fs_struct.h 2001-07-13 18:10:44.000000000 -0400 | |
2140 | +++ linux/include/linux/fs_struct.h 2002-12-11 02:34:48.000000000 -0500 | |
2141 | @@ -20,6 +20,15 @@ | |
2142 | extern void exit_fs(struct task_struct *); | |
2143 | extern void set_fs_altroot(void); | |
2144 | ||
2145 | +struct fs_struct *copy_fs_struct(struct fs_struct *old); | |
2146 | +void put_fs_struct(struct fs_struct *fs); | |
2147 | + | |
2148 | +#endif | |
2149 | +#endif | |
2150 | + | |
2151 | +#if !defined(_LINUX_FS_STRUCT_H_INLINES) && defined(_TASK_STRUCT_DEFINED) | |
2152 | +#define _LINUX_FS_STRUCT_H_INLINES | |
2153 | +#ifdef __KERNEL__ | |
2154 | /* | |
2155 | * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values. | |
2156 | * It can block. Requires the big lock held. | |
2157 | @@ -65,9 +74,5 @@ | |
2158 | mntput(old_pwdmnt); | |
2159 | } | |
2160 | } | |
2161 | - | |
2162 | -struct fs_struct *copy_fs_struct(struct fs_struct *old); | |
2163 | -void put_fs_struct(struct fs_struct *fs); | |
2164 | - | |
2165 | #endif | |
2166 | #endif | |
2167 | diff -urN linux-2.4.20/include/linux/sched.h linux/include/linux/sched.h | |
2168 | --- linux-2.4.20/include/linux/sched.h 2002-11-28 18:53:15.000000000 -0500 | |
2169 | +++ linux/include/linux/sched.h 2002-12-11 02:34:48.000000000 -0500 | |
2170 | @@ -91,6 +91,7 @@ | |
2171 | #define TASK_UNINTERRUPTIBLE 2 | |
2172 | #define TASK_ZOMBIE 4 | |
2173 | #define TASK_STOPPED 8 | |
2174 | +#define PREEMPT_ACTIVE 0x4000000 | |
2175 | ||
2176 | #define __set_task_state(tsk, state_value) \ | |
2177 | do { (tsk)->state = (state_value); } while (0) | |
2178 | @@ -157,6 +158,9 @@ | |
2179 | #define MAX_SCHEDULE_TIMEOUT LONG_MAX | |
2180 | extern signed long FASTCALL(schedule_timeout(signed long timeout)); | |
2181 | asmlinkage void schedule(void); | |
2182 | +#ifdef CONFIG_PREEMPT | |
2183 | +asmlinkage void preempt_schedule(void); | |
2184 | +#endif | |
2185 | ||
2186 | extern int schedule_task(struct tq_struct *task); | |
2187 | extern void flush_scheduled_tasks(void); | |
2188 | @@ -289,7 +293,7 @@ | |
2189 | * offsets of these are hardcoded elsewhere - touch with care | |
2190 | */ | |
2191 | volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ | |
2192 | - unsigned long flags; /* per process flags, defined below */ | |
2193 | + int preempt_count; /* 0 => preemptable, <0 => BUG */ | |
2194 | int sigpending; | |
2195 | mm_segment_t addr_limit; /* thread address space: | |
2196 | 0-0xBFFFFFFF for user-thead | |
2197 | @@ -331,6 +335,7 @@ | |
2198 | struct mm_struct *active_mm; | |
2199 | struct list_head local_pages; | |
2200 | unsigned int allocation_order, nr_local_pages; | |
2201 | + unsigned long flags; | |
2202 | ||
2203 | /* task state */ | |
2204 | struct linux_binfmt *binfmt; | |
2205 | @@ -955,5 +960,10 @@ | |
2206 | __cond_resched(); | |
2207 | } | |
2208 | ||
2209 | +#define _TASK_STRUCT_DEFINED | |
2210 | +#include <linux/dcache.h> | |
2211 | +#include <linux/tqueue.h> | |
2212 | +#include <linux/fs_struct.h> | |
2213 | + | |
2214 | #endif /* __KERNEL__ */ | |
2215 | #endif | |
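preempt_count takes the task_struct slot that flags used to occupy because, as the context notes, these early fields are addressed by hardcoded offsets elsewhere (flags is re-added lower down); PREEMPT_ACTIVE (0x4000000) is OR-ed into the count while a preemption is in flight so schedule() can tell it apart from a voluntary call. The flag arithmetic, using the patch's constant, in a runnable sketch:

#include <assert.h>
#include <stdio.h>

#define PREEMPT_ACTIVE 0x4000000

int main(void)
{
    int preempt_count = 0;

    preempt_count += PREEMPT_ACTIVE;        /* entering preempt_schedule() */
    assert(preempt_count & PREEMPT_ACTIVE); /* schedule() sees: preemption */

    preempt_count -= PREEMPT_ACTIVE;        /* back from schedule() */
    assert(preempt_count == 0);             /* ordinary nesting is intact */
    puts("PREEMPT_ACTIVE marks an in-flight kernel preemption");
    return 0;
}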
2216 | diff -urN linux-2.4.20/include/linux/smp_lock.h linux/include/linux/smp_lock.h | |
2217 | --- linux-2.4.20/include/linux/smp_lock.h 2001-11-22 14:46:27.000000000 -0500 | |
2218 | +++ linux/include/linux/smp_lock.h 2002-12-11 02:34:48.000000000 -0500 | |
2219 | @@ -3,7 +3,7 @@ | |
2220 | ||
2221 | #include <linux/config.h> | |
2222 | ||
2223 | -#ifndef CONFIG_SMP | |
2224 | +#if !defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT) | |
2225 | ||
2226 | #define lock_kernel() do { } while(0) | |
2227 | #define unlock_kernel() do { } while(0) | |
2228 | diff -urN linux-2.4.20/include/linux/spinlock.h linux/include/linux/spinlock.h | |
2229 | --- linux-2.4.20/include/linux/spinlock.h 2002-11-28 18:53:15.000000000 -0500 | |
2230 | +++ linux/include/linux/spinlock.h 2002-12-11 02:34:48.000000000 -0500 | |
2231 | @@ -2,6 +2,7 @@ | |
2232 | #define __LINUX_SPINLOCK_H | |
2233 | ||
2234 | #include <linux/config.h> | |
2235 | +#include <linux/compiler.h> | |
2236 | ||
2237 | /* | |
2238 | * These are the generic versions of the spinlocks and read-write | |
2239 | @@ -62,8 +63,10 @@ | |
2240 | ||
2241 | #if (DEBUG_SPINLOCKS < 1) | |
2242 | ||
2243 | +#ifndef CONFIG_PREEMPT | |
2244 | #define atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic) | |
2245 | #define ATOMIC_DEC_AND_LOCK | |
2246 | +#endif | |
2247 | ||
2248 | /* | |
2249 | * Your basic spinlocks, allowing only a single CPU anywhere | |
2250 | @@ -80,11 +83,11 @@ | |
2251 | #endif | |
2252 | ||
2253 | #define spin_lock_init(lock) do { } while(0) | |
2254 | -#define spin_lock(lock) (void)(lock) /* Not "unused variable". */ | |
2255 | +#define _raw_spin_lock(lock) (void)(lock) /* Not "unused variable". */ | |
2256 | #define spin_is_locked(lock) (0) | |
2257 | -#define spin_trylock(lock) ({1; }) | |
2258 | +#define _raw_spin_trylock(lock) ({1; }) | |
2259 | #define spin_unlock_wait(lock) do { } while(0) | |
2260 | -#define spin_unlock(lock) do { } while(0) | |
2261 | +#define _raw_spin_unlock(lock) do { } while(0) | |
2262 | ||
2263 | #elif (DEBUG_SPINLOCKS < 2) | |
2264 | ||
2265 | @@ -144,13 +147,78 @@ | |
2266 | #endif | |
2267 | ||
2268 | #define rwlock_init(lock) do { } while(0) | |
2269 | -#define read_lock(lock) (void)(lock) /* Not "unused variable". */ | |
2270 | -#define read_unlock(lock) do { } while(0) | |
2271 | -#define write_lock(lock) (void)(lock) /* Not "unused variable". */ | |
2272 | -#define write_unlock(lock) do { } while(0) | |
2273 | +#define _raw_read_lock(lock) (void)(lock) /* Not "unused variable". */ | |
2274 | +#define _raw_read_unlock(lock) do { } while(0) | |
2275 | +#define _raw_write_lock(lock) (void)(lock) /* Not "unused variable". */ | |
2276 | +#define _raw_write_unlock(lock) do { } while(0) | |
2277 | ||
2278 | #endif /* !SMP */ | |
2279 | ||
2280 | +#ifdef CONFIG_PREEMPT | |
2281 | + | |
2282 | +#define preempt_get_count() (current->preempt_count) | |
2283 | +#define preempt_is_disabled() (preempt_get_count() != 0) | |
2284 | + | |
2285 | +#define preempt_disable() \ | |
2286 | +do { \ | |
2287 | + ++current->preempt_count; \ | |
2288 | + barrier(); \ | |
2289 | +} while (0) | |
2290 | + | |
2291 | +#define preempt_enable_no_resched() \ | |
2292 | +do { \ | |
2293 | + --current->preempt_count; \ | |
2294 | + barrier(); \ | |
2295 | +} while (0) | |
2296 | + | |
2297 | +#define preempt_enable() \ | |
2298 | +do { \ | |
2299 | + --current->preempt_count; \ | |
2300 | + barrier(); \ | |
2301 | + if (unlikely(current->preempt_count < current->need_resched)) \ | |
2302 | + preempt_schedule(); \ | |
2303 | +} while (0) | |
2304 | + | |
2305 | +#define spin_lock(lock) \ | |
2306 | +do { \ | |
2307 | + preempt_disable(); \ | |
2308 | + _raw_spin_lock(lock); \ | |
2309 | +} while(0) | |
2310 | + | |
2311 | +#define spin_trylock(lock) ({preempt_disable(); _raw_spin_trylock(lock) ? \ | |
2312 | + 1 : ({preempt_enable(); 0;});}) | |
2313 | +#define spin_unlock(lock) \ | |
2314 | +do { \ | |
2315 | + _raw_spin_unlock(lock); \ | |
2316 | + preempt_enable(); \ | |
2317 | +} while (0) | |
2318 | + | |
2319 | +#define read_lock(lock) ({preempt_disable(); _raw_read_lock(lock);}) | |
2320 | +#define read_unlock(lock) ({_raw_read_unlock(lock); preempt_enable();}) | |
2321 | +#define write_lock(lock) ({preempt_disable(); _raw_write_lock(lock);}) | |
2322 | +#define write_unlock(lock) ({_raw_write_unlock(lock); preempt_enable();}) | |
2323 | +#define write_trylock(lock) ({preempt_disable();_raw_write_trylock(lock) ? \ | |
2324 | + 1 : ({preempt_enable(); 0;});}) | |
2325 | + | |
2326 | +#else | |
2327 | + | |
2328 | +#define preempt_get_count() (0) | |
2329 | +#define preempt_is_disabled() (1) | |
2330 | +#define preempt_disable() do { } while (0) | |
2331 | +#define preempt_enable_no_resched() do {} while(0) | |
2332 | +#define preempt_enable() do { } while (0) | |
2333 | + | |
2334 | +#define spin_lock(lock) _raw_spin_lock(lock) | |
2335 | +#define spin_trylock(lock) _raw_spin_trylock(lock) | |
2336 | +#define spin_unlock(lock) _raw_spin_unlock(lock) | |
2337 | + | |
2338 | +#define read_lock(lock) _raw_read_lock(lock) | |
2339 | +#define read_unlock(lock) _raw_read_unlock(lock) | |
2340 | +#define write_lock(lock) _raw_write_lock(lock) | |
2341 | +#define write_unlock(lock) _raw_write_unlock(lock) | |
2342 | +#define write_trylock(lock) _raw_write_trylock(lock) | |
2343 | +#endif | |
2344 | + | |
2345 | /* "lock on reference count zero" */ | |
2346 | #ifndef ATOMIC_DEC_AND_LOCK | |
2347 | #include <asm/atomic.h> | |
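This hunk is the core of the patch: every architecture's lock primitive was renamed to _raw_*, and the generic wrappers make taking any spinlock implicitly disable preemption and releasing it re-enable (and possibly reschedule). Note the trylock wrappers, which must undo the preempt_disable() on failure. A compilable model of the wrapping discipline (the _raw_ bodies below are trivial stand-ins, roughly the UP case; statement expressions are GCC extensions, as in the patch):

#include <assert.h>
#include <stdio.h>

typedef struct { int locked; } spinlock_t;

static int preempt_count;
static void preempt_disable(void) { ++preempt_count; }
static void preempt_enable(void)  { --preempt_count; /* maybe preempt_schedule() */ }

static void _raw_spin_lock(spinlock_t *l)    { l->locked = 1; }
static void _raw_spin_unlock(spinlock_t *l)  { l->locked = 0; }
static int  _raw_spin_trylock(spinlock_t *l) { return l->locked ? 0 : (l->locked = 1); }

#define spin_lock(l)    do { preempt_disable(); _raw_spin_lock(l); } while (0)
#define spin_unlock(l)  do { _raw_spin_unlock(l); preempt_enable(); } while (0)
#define spin_trylock(l) ({ preempt_disable(); \
                           _raw_spin_trylock(l) ? 1 : ({ preempt_enable(); 0; }); })

int main(void)
{
    spinlock_t lock = { 0 };

    spin_lock(&lock);
    assert(preempt_count == 1);    /* critical section: no preemption */
    assert(!spin_trylock(&lock));  /* failed trylock restores the count */
    assert(preempt_count == 1);
    spin_unlock(&lock);
    assert(preempt_count == 0);
    puts("lock region == preempt-off region");
    return 0;
}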
2348 | diff -urN linux-2.4.20/include/linux/tqueue.h linux/include/linux/tqueue.h | |
2349 | --- linux-2.4.20/include/linux/tqueue.h 2001-11-22 14:46:19.000000000 -0500 | |
2350 | +++ linux/include/linux/tqueue.h 2002-12-11 02:34:48.000000000 -0500 | |
2351 | @@ -94,6 +94,22 @@ | |
2352 | extern spinlock_t tqueue_lock; | |
2353 | ||
2354 | /* | |
2355 | + * Call all "bottom halves" on a given list. | |
2356 | + */ | |
2357 | + | |
2358 | +extern void __run_task_queue(task_queue *list); | |
2359 | + | |
2360 | +static inline void run_task_queue(task_queue *list) | |
2361 | +{ | |
2362 | + if (TQ_ACTIVE(*list)) | |
2363 | + __run_task_queue(list); | |
2364 | +} | |
2365 | + | |
2366 | +#endif /* _LINUX_TQUEUE_H */ | |
2367 | + | |
2368 | +#if !defined(_LINUX_TQUEUE_H_INLINES) && defined(_TASK_STRUCT_DEFINED) | |
2369 | +#define _LINUX_TQUEUE_H_INLINES | |
2370 | +/* | |
2371 | * Queue a task on a tq. Return non-zero if it was successfully | |
2372 | * added. | |
2373 | */ | |
2374 | @@ -109,17 +125,4 @@ | |
2375 | } | |
2376 | return ret; | |
2377 | } | |
2378 | - | |
2379 | -/* | |
2380 | - * Call all "bottom halfs" on a given list. | |
2381 | - */ | |
2382 | - | |
2383 | -extern void __run_task_queue(task_queue *list); | |
2384 | - | |
2385 | -static inline void run_task_queue(task_queue *list) | |
2386 | -{ | |
2387 | - if (TQ_ACTIVE(*list)) | |
2388 | - __run_task_queue(list); | |
2389 | -} | |
2390 | - | |
2391 | -#endif /* _LINUX_TQUEUE_H */ | |
2392 | +#endif | |
2393 | diff -urN linux-2.4.20/kernel/exit.c linux/kernel/exit.c | |
2394 | --- linux-2.4.20/kernel/exit.c 2002-11-28 18:53:15.000000000 -0500 | |
2395 | +++ linux/kernel/exit.c 2002-12-11 02:34:48.000000000 -0500 | |
2396 | @@ -313,8 +313,8 @@ | |
2397 | /* more a memory barrier than a real lock */ | |
2398 | task_lock(tsk); | |
2399 | tsk->mm = NULL; | |
2400 | - task_unlock(tsk); | |
2401 | enter_lazy_tlb(mm, current, smp_processor_id()); | |
2402 | + task_unlock(tsk); | |
2403 | mmput(mm); | |
2404 | } | |
2405 | } | |
2406 | @@ -435,6 +435,11 @@ | |
2407 | tsk->flags |= PF_EXITING; | |
2408 | del_timer_sync(&tsk->real_timer); | |
2409 | ||
2410 | + if (unlikely(preempt_get_count())) | |
2411 | + printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n", | |
2412 | + current->comm, current->pid, | |
2413 | + preempt_get_count()); | |
2414 | + | |
2415 | fake_volatile: | |
2416 | #ifdef CONFIG_BSD_PROCESS_ACCT | |
2417 | acct_process(code); | |
2418 | diff -urN linux-2.4.20/kernel/fork.c linux/kernel/fork.c | |
2419 | --- linux-2.4.20/kernel/fork.c 2002-11-28 18:53:15.000000000 -0500 | |
2420 | +++ linux/kernel/fork.c 2002-12-11 02:34:48.000000000 -0500 | |
2421 | @@ -629,6 +629,13 @@ | |
2422 | if (p->binfmt && p->binfmt->module) | |
2423 | __MOD_INC_USE_COUNT(p->binfmt->module); | |
2424 | ||
2425 | +#ifdef CONFIG_PREEMPT | |
2426 | + /* | |
2427 | + * Continue with preemption disabled as part of the context | |
2428 | + * switch, so start with preempt_count set to 1. | |
2429 | + */ | |
2430 | + p->preempt_count = 1; | |
2431 | +#endif | |
2432 | p->did_exec = 0; | |
2433 | p->swappable = 0; | |
2434 | p->state = TASK_UNINTERRUPTIBLE; | |
2435 | diff -urN linux-2.4.20/kernel/ksyms.c linux/kernel/ksyms.c | |
2436 | --- linux-2.4.20/kernel/ksyms.c 2002-11-28 18:53:15.000000000 -0500 | |
2437 | +++ linux/kernel/ksyms.c 2002-12-11 02:34:48.000000000 -0500 | |
2438 | @@ -450,6 +450,9 @@ | |
2439 | EXPORT_SYMBOL(interruptible_sleep_on); | |
2440 | EXPORT_SYMBOL(interruptible_sleep_on_timeout); | |
2441 | EXPORT_SYMBOL(schedule); | |
2442 | +#ifdef CONFIG_PREEMPT | |
2443 | +EXPORT_SYMBOL(preempt_schedule); | |
2444 | +#endif | |
2445 | EXPORT_SYMBOL(schedule_timeout); | |
2446 | EXPORT_SYMBOL(yield); | |
2447 | EXPORT_SYMBOL(__cond_resched); | |
2448 | diff -urN linux-2.4.20/kernel/sched.c linux/kernel/sched.c | |
2449 | --- linux-2.4.20/kernel/sched.c 2002-11-28 18:53:15.000000000 -0500 | |
2450 | +++ linux/kernel/sched.c 2002-12-11 02:34:48.000000000 -0500 | |
2451 | @@ -489,7 +489,7 @@ | |
2452 | task_lock(prev); | |
2453 | task_release_cpu(prev); | |
2454 | mb(); | |
2455 | - if (prev->state == TASK_RUNNING) | |
2456 | + if (task_on_runqueue(prev)) | |
2457 | goto needs_resched; | |
2458 | ||
2459 | out_unlock: | |
2460 | @@ -519,7 +519,7 @@ | |
2461 | goto out_unlock; | |
2462 | ||
2463 | spin_lock_irqsave(&runqueue_lock, flags); | |
2464 | - if ((prev->state == TASK_RUNNING) && !task_has_cpu(prev)) | |
2465 | + if (task_on_runqueue(prev) && !task_has_cpu(prev)) | |
2466 | reschedule_idle(prev); | |
2467 | spin_unlock_irqrestore(&runqueue_lock, flags); | |
2468 | goto out_unlock; | |
2469 | @@ -532,6 +532,7 @@ | |
2470 | asmlinkage void schedule_tail(struct task_struct *prev) | |
2471 | { | |
2472 | __schedule_tail(prev); | |
2473 | + preempt_enable(); | |
2474 | } | |
2475 | ||
2476 | /* | |
2477 | @@ -551,9 +552,10 @@ | |
2478 | struct list_head *tmp; | |
2479 | int this_cpu, c; | |
2480 | ||
2481 | - | |
2482 | spin_lock_prefetch(&runqueue_lock); | |
2483 | ||
2484 | + preempt_disable(); | |
2485 | + | |
2486 | BUG_ON(!current->active_mm); | |
2487 | need_resched_back: | |
2488 | prev = current; | |
2489 | @@ -581,6 +583,14 @@ | |
2490 | move_last_runqueue(prev); | |
2491 | } | |
2492 | ||
2493 | +#ifdef CONFIG_PREEMPT | |
2494 | + /* | |
2495 | + * entering from preempt_schedule, off a kernel preemption, | |
2496 | + * go straight to picking the next task. | |
2497 | + */ | |
2498 | + if (unlikely(preempt_get_count() & PREEMPT_ACTIVE)) | |
2499 | + goto treat_like_run; | |
2500 | +#endif | |
2501 | switch (prev->state) { | |
2502 | case TASK_INTERRUPTIBLE: | |
2503 | if (signal_pending(prev)) { | |
2504 | @@ -591,6 +601,9 @@ | |
2505 | del_from_runqueue(prev); | |
2506 | case TASK_RUNNING:; | |
2507 | } | |
2508 | +#ifdef CONFIG_PREEMPT | |
2509 | + treat_like_run: | |
2510 | +#endif | |
2511 | prev->need_resched = 0; | |
2512 | ||
2513 | /* | |
2514 | @@ -699,9 +712,31 @@ | |
2515 | reacquire_kernel_lock(current); | |
2516 | if (current->need_resched) | |
2517 | goto need_resched_back; | |
2518 | + preempt_enable_no_resched(); | |
2519 | return; | |
2520 | } | |
2521 | ||
2522 | +#ifdef CONFIG_PREEMPT | |
2523 | +/* | |
2524 | + * this is the entry point to schedule() from in-kernel preemption | |
2525 | + */ | |
2526 | +asmlinkage void preempt_schedule(void) | |
2527 | +{ | |
2528 | + if (unlikely(irqs_disabled())) | |
2529 | + return; | |
2530 | + | |
2531 | +need_resched: | |
2532 | + current->preempt_count += PREEMPT_ACTIVE; | |
2533 | + schedule(); | |
2534 | + current->preempt_count -= PREEMPT_ACTIVE; | |
2535 | + | |
2536 | + /* we could miss a preemption opportunity between schedule and now */ | |
2537 | + barrier(); | |
2538 | + if (unlikely(current->need_resched)) | |
2539 | + goto need_resched; | |
2540 | +} | |
2541 | +#endif /* CONFIG_PREEMPT */ | |
2542 | + | |
2543 | /* | |
2544 | * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just wake everything | |
2545 | * up. If it's an exclusive wakeup (nr_exclusive == small +ve number) then we wake all the | |
2546 | @@ -1327,6 +1362,13 @@ | |
2547 | sched_data->curr = current; | |
2548 | sched_data->last_schedule = get_cycles(); | |
2549 | clear_bit(current->processor, &wait_init_idle); | |
2550 | +#ifdef CONFIG_PREEMPT | |
2551 | + /* | |
2552 | + * fix up the preempt_count for non-CPU0 idle threads | |
2553 | + */ | |
2554 | + if (current->processor) | |
2555 | + current->preempt_count = 0; | |
2556 | +#endif | |
2557 | } | |
2558 | ||
2559 | extern void init_timervecs (void); | |
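preempt_schedule() tags the task with PREEMPT_ACTIVE so schedule()'s state switch is skipped (a preempted task must stay on the runqueue even if it was halfway to sleeping), and it loops because need_resched can be set again in the window after schedule() returns. A condensed, runnable restatement of that control flow (the scheduler body is a stub):

#include <stdio.h>

#define PREEMPT_ACTIVE 0x4000000

static int preempt_count;
static int need_resched = 1;

static void schedule(void)
{
    /* in the real kernel: if (preempt_count & PREEMPT_ACTIVE),
     * skip the state switch and leave prev runnable */
    printf("schedule(), PREEMPT_ACTIVE=%d\n",
           !!(preempt_count & PREEMPT_ACTIVE));
    need_resched = 0;
}

static void preempt_schedule(void)
{
need_resched_again:
    preempt_count += PREEMPT_ACTIVE;
    schedule();
    preempt_count -= PREEMPT_ACTIVE;
    /* a wakeup may have landed after schedule() returned */
    if (need_resched)
        goto need_resched_again;
}

int main(void)
{
    preempt_schedule();
    return 0;
}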
2560 | diff -urN linux-2.4.20/lib/dec_and_lock.c linux/lib/dec_and_lock.c | |
2561 | --- linux-2.4.20/lib/dec_and_lock.c 2001-10-03 12:11:26.000000000 -0400 | |
2562 | +++ linux/lib/dec_and_lock.c 2002-12-11 02:34:48.000000000 -0500 | |
2563 | @@ -1,5 +1,6 @@ | |
2564 | #include <linux/module.h> | |
2565 | #include <linux/spinlock.h> | |
2566 | +#include <linux/sched.h> | |
2567 | #include <asm/atomic.h> | |
2568 | ||
2569 | /* | |
2570 | diff -urN linux-2.4.20/MAINTAINERS linux/MAINTAINERS | |
2571 | --- linux-2.4.20/MAINTAINERS 2002-11-28 18:53:08.000000000 -0500 | |
2572 | +++ linux/MAINTAINERS 2002-12-11 02:34:48.000000000 -0500 | |
2573 | @@ -1310,6 +1310,14 @@ | |
2574 | M: mostrows@styx.uwaterloo.ca | |
2575 | S: Maintained | |
2576 | ||
2577 | +PREEMPTIBLE KERNEL | |
2578 | +P: Robert M. Love | |
2579 | +M: rml@tech9.net | |
2580 | +L: linux-kernel@vger.kernel.org | |
2581 | +L: kpreempt-tech@lists.sourceforge.net | |
2582 | +W: http://tech9.net/rml/linux | |
2583 | +S: Supported | |
2584 | + | |
2585 | PROMISE DC4030 CACHING DISK CONTROLLER DRIVER | |
2586 | P: Peter Denison | |
2587 | M: promise@pnd-pc.demon.co.uk | |
2588 | diff -urN linux-2.4.20/mm/slab.c linux/mm/slab.c | |
2589 | --- linux-2.4.20/mm/slab.c 2002-11-28 18:53:15.000000000 -0500 | |
2590 | +++ linux/mm/slab.c 2002-12-11 02:34:48.000000000 -0500 | |
2591 | @@ -49,7 +49,8 @@ | |
2592 | * constructors and destructors are called without any locking. | |
2593 | * Several members in kmem_cache_t and slab_t never change, they | |
2594 | * are accessed without any locking. | |
2595 | - * The per-cpu arrays are never accessed from the wrong cpu, no locking. | |
2596 | + * The per-cpu arrays are never accessed from the wrong cpu, no locking, | |
2597 | + * and local interrupts are disabled so slab code is preempt-safe. | |
2598 | * The non-constant members are protected with a per-cache irq spinlock. | |
2599 | * | |
2600 | * Further notes from the original documentation: | |
2601 | diff -urN linux-2.4.20/net/core/dev.c linux/net/core/dev.c | |
2602 | --- linux-2.4.20/net/core/dev.c 2002-11-28 18:53:15.000000000 -0500 | |
2603 | +++ linux/net/core/dev.c 2002-12-11 02:34:48.000000000 -0500 | |
2604 | @@ -1049,9 +1049,15 @@ | |
2605 | int cpu = smp_processor_id(); | |
2606 | ||
2607 | if (dev->xmit_lock_owner != cpu) { | |
2608 | + /* | |
2609 | + * The spin_lock effectively does a preempt lock, but | |
2610 | + * we are about to drop that... | |
2611 | + */ | |
2612 | + preempt_disable(); | |
2613 | spin_unlock(&dev->queue_lock); | |
2614 | spin_lock(&dev->xmit_lock); | |
2615 | dev->xmit_lock_owner = cpu; | |
2616 | + preempt_enable(); | |
2617 | ||
2618 | if (!netif_queue_stopped(dev)) { | |
2619 | if (netdev_nit) | |
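Between releasing queue_lock and taking xmit_lock no spinlock is held, so with this patch nothing would otherwise keep the task from being preempted and migrated while the cached smp_processor_id() result is still in use; the explicit disable/enable pair covers exactly that window. A sketch of the hand-over-hand pattern (locks and counters are user-space stand-ins):

#include <stdio.h>

static int preempt_count;
static void preempt_disable(void) { ++preempt_count; }
static void preempt_enable(void)  { --preempt_count; }

static void spin_lock(int *l)   { preempt_disable(); *l = 1; }
static void spin_unlock(int *l) { *l = 0; preempt_enable(); }

int main(void)
{
    int queue_lock = 0, xmit_lock = 0;

    spin_lock(&queue_lock);
    /* ... decide we need the other lock ... */
    preempt_disable();           /* keep smp_processor_id() stable ... */
    spin_unlock(&queue_lock);    /* window: no spinlock held here */
    spin_lock(&xmit_lock);
    preempt_enable();            /* ... until the new lock pins us again */

    printf("count inside xmit section: %d\n", preempt_count);  /* 1 */
    spin_unlock(&xmit_lock);
    return 0;
}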
2620 | diff -urN linux-2.4.20/net/core/skbuff.c linux/net/core/skbuff.c | |
2621 | --- linux-2.4.20/net/core/skbuff.c 2002-08-02 20:39:46.000000000 -0400 | |
2622 | +++ linux/net/core/skbuff.c 2002-12-11 02:34:48.000000000 -0500 | |
2623 | @@ -111,33 +111,37 @@ | |
2624 | ||
2625 | static __inline__ struct sk_buff *skb_head_from_pool(void) | |
2626 | { | |
2627 | - struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list; | |
2628 | + struct sk_buff_head *list; | |
2629 | + struct sk_buff *skb = NULL; | |
2630 | + unsigned long flags; | |
2631 | ||
2632 | - if (skb_queue_len(list)) { | |
2633 | - struct sk_buff *skb; | |
2634 | - unsigned long flags; | |
2635 | + local_irq_save(flags); | |
2636 | ||
2637 | - local_irq_save(flags); | |
2638 | + list = &skb_head_pool[smp_processor_id()].list; | |
2639 | + | |
2640 | + if (skb_queue_len(list)) | |
2641 | skb = __skb_dequeue(list); | |
2642 | - local_irq_restore(flags); | |
2643 | - return skb; | |
2644 | - } | |
2645 | - return NULL; | |
2646 | + | |
2647 | + local_irq_restore(flags); | |
2648 | + return skb; | |
2649 | } | |
2650 | ||
2651 | static __inline__ void skb_head_to_pool(struct sk_buff *skb) | |
2652 | { | |
2653 | - struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list; | |
2654 | + struct sk_buff_head *list; | |
2655 | + unsigned long flags; | |
2656 | ||
2657 | - if (skb_queue_len(list) < sysctl_hot_list_len) { | |
2658 | - unsigned long flags; | |
2659 | + local_irq_save(flags); | |
2660 | + list = &skb_head_pool[smp_processor_id()].list; | |
2661 | ||
2662 | - local_irq_save(flags); | |
2663 | + if (skb_queue_len(list) < sysctl_hot_list_len) { | |
2664 | __skb_queue_head(list, skb); | |
2665 | local_irq_restore(flags); | |
2666 | ||
2667 | return; | |
2668 | } | |
2669 | + | |
2670 | + local_irq_restore(flags); | |
2671 | kmem_cache_free(skbuff_head_cache, skb); | |
2672 | } | |
2673 | ||
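The skbuff rework moves the per-CPU pool lookup inside local_irq_save(): resolving the pointer first could leave the task on another CPU by the time the pool is touched, and disabling local interrupts suffices because the preemption check rides the interrupt-return path. A model of "resolve the per-CPU pointer only while pinned" (the array and counters are stand-ins):

#include <stdio.h>

#define NR_CPUS 2
static int pool_len[NR_CPUS];

static int cpu = 0;   /* stand-in for smp_processor_id() */
static int irqs_off;

static void local_irq_save(void)    { irqs_off = 1; }  /* also pins the CPU */
static void local_irq_restore(void) { irqs_off = 0; }

static void skb_head_to_pool(void)
{
    local_irq_save();
    /* the per-CPU pointer is looked up only after pinning */
    int *len = &pool_len[cpu];
    ++*len;
    local_irq_restore();
}

int main(void)
{
    skb_head_to_pool();
    printf("cpu0 pool length: %d (irqs restored: %d)\n",
           pool_len[0], !irqs_off);
    return 0;
}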
2674 | diff -urN linux-2.4.20/net/socket.c linux/net/socket.c | |
2675 | --- linux-2.4.20/net/socket.c 2002-11-28 18:53:16.000000000 -0500 | |
2676 | +++ linux/net/socket.c 2002-12-11 02:34:48.000000000 -0500 | |
2677 | @@ -132,7 +132,7 @@ | |
2678 | ||
2679 | static struct net_proto_family *net_families[NPROTO]; | |
2680 | ||
2681 | -#ifdef CONFIG_SMP | |
2682 | +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT) | |
2683 | static atomic_t net_family_lockct = ATOMIC_INIT(0); | |
2684 | static spinlock_t net_family_lock = SPIN_LOCK_UNLOCKED; | |
2685 | ||
2686 | diff -urN linux-2.4.20/net/sunrpc/pmap_clnt.c linux/net/sunrpc/pmap_clnt.c | |
2687 | --- linux-2.4.20/net/sunrpc/pmap_clnt.c 2002-08-02 20:39:46.000000000 -0400 | |
2688 | +++ linux/net/sunrpc/pmap_clnt.c 2002-12-11 02:34:48.000000000 -0500 | |
2689 | @@ -12,6 +12,7 @@ | |
2690 | #include <linux/config.h> | |
2691 | #include <linux/types.h> | |
2692 | #include <linux/socket.h> | |
2693 | +#include <linux/sched.h> | |
2694 | #include <linux/kernel.h> | |
2695 | #include <linux/errno.h> | |
2696 | #include <linux/uio.h> |