preempt-kernel-rml-2.4.20-3.patch (packages/kernel.git)
2 CREDITS | 2
3 Documentation/Configure.help | 11 ++++
4 Documentation/preempt-locking.txt | 104 ++++++++++++++++++++++++++++++++++++++
5 MAINTAINERS | 8 ++
6 arch/alpha/kernel/process.c | 1
7 arch/arm/config.in | 2
8 arch/arm/kernel/entry-armv.S | 40 ++++++++++++++
9 arch/arm/tools/getconstants.c | 6 ++
10 arch/i386/config.in | 8 ++
11 arch/i386/kernel/cpuid.c | 4 +
12 arch/i386/kernel/entry.S | 49 +++++++++++++++++
13 arch/i386/kernel/i387.c | 3 +
14 arch/i386/kernel/ioport.c | 5 +
15 arch/i386/kernel/irq.c | 15 ++++-
16 arch/i386/kernel/ldt.c | 2
17 arch/i386/kernel/microcode.c | 3 +
18 arch/i386/kernel/msr.c | 15 +++--
19 arch/i386/kernel/mtrr.c | 6 ++
20 arch/i386/kernel/smp.c | 29 ++++++++--
21 arch/i386/kernel/traps.c | 2
22 arch/i386/lib/dec_and_lock.c | 1
23 arch/i386/mm/init.c | 2
24 arch/mips/config-shared.in | 1
25 arch/mips/kernel/i8259.c | 1
26 arch/mips/kernel/irq.c | 29 ++++++++++
27 arch/mips/mm/extable.c | 1
28 arch/ppc/config.in | 2
29 arch/ppc/kernel/entry.S | 40 ++++++++++++++
30 arch/ppc/kernel/irq.c | 52 ++++++++++++++++---
31 arch/ppc/kernel/mk_defs.c | 3 +
32 arch/ppc/kernel/open_pic.c | 9 ++-
33 arch/ppc/kernel/setup.c | 14 +++++
34 arch/ppc/kernel/temp.c | 8 ++
35 arch/ppc/lib/dec_and_lock.c | 1
36 arch/ppc/mm/tlb.c | 16 +++++
37 arch/sh/config.in | 1
38 arch/sh/kernel/entry.S | 104 +++++++++++++++++++++++++++++++++++---
39 arch/sh/kernel/irq.c | 17 ++++++
40 drivers/ieee1394/csr.c | 1
41 drivers/sound/sound_core.c | 1
42 fs/adfs/map.c | 1
43 fs/exec.c | 2
44 fs/fat/cache.c | 1
45 fs/nls/nls_base.c | 1
46 include/asm-arm/dma.h | 1
47 include/asm-arm/hardirq.h | 1
48 include/asm-arm/pgalloc.h | 8 ++
49 include/asm-arm/smplock.h | 11 ++++
50 include/asm-arm/softirq.h | 8 +-
51 include/asm-arm/system.h | 7 ++
52 include/asm-i386/desc.h | 5 +
53 include/asm-i386/hardirq.h | 14 +++--
54 include/asm-i386/highmem.h | 7 ++
55 include/asm-i386/hw_irq.h | 19 +++++-
56 include/asm-i386/i387.h | 3 -
57 include/asm-i386/pgalloc.h | 12 ++++
58 include/asm-i386/smplock.h | 14 +++++
59 include/asm-i386/softirq.h | 11 ++--
60 include/asm-i386/spinlock.h | 18 +++---
61 include/asm-i386/system.h | 7 ++
62 include/asm-mips/smplock.h | 15 +++++
63 include/asm-mips/softirq.h | 3 +
64 include/asm-mips/system.h | 14 +++++
65 include/asm-ppc/dma.h | 1
66 include/asm-ppc/hardirq.h | 9 ++-
67 include/asm-ppc/highmem.h | 6 +-
68 include/asm-ppc/hw_irq.h | 6 ++
69 include/asm-ppc/mmu_context.h | 4 +
70 include/asm-ppc/pgalloc.h | 9 +++
71 include/asm-ppc/smplock.h | 14 +++++
72 include/asm-ppc/softirq.h | 13 ++++
73 include/asm-sh/hardirq.h | 2
74 include/asm-sh/smplock.h | 85 ++++++++++++++++++++++++++++---
75 include/asm-sh/softirq.h | 3 +
76 include/asm-sh/system.h | 13 ++++
77 include/linux/brlock.h | 10 +--
78 include/linux/dcache.h | 56 +++++++++++---------
79 include/linux/fs_struct.h | 13 +++-
80 include/linux/sched.h | 12 ++++
81 include/linux/smp_lock.h | 2
82 include/linux/spinlock.h | 82 +++++++++++++++++++++++++++--
83 include/linux/tqueue.h | 31 ++++++-----
84 kernel/exit.c | 9 ++-
85 kernel/fork.c | 7 ++
86 kernel/ksyms.c | 3 +
87 kernel/sched.c | 48 ++++++++++++++++-
88 kernel/softirq.c | 13 +++-
89 lib/dec_and_lock.c | 1
90 mm/slab.c | 5 +
91 net/core/dev.c | 11 +++-
92 net/core/skbuff.c | 30 ++++++----
93 net/socket.c | 2
94 net/sunrpc/pmap_clnt.c | 1
95 93 files changed, 1138 insertions(+), 165 deletions(-)
96
97
98diff -urN linux-2.4.20/arch/alpha/kernel/process.c linux/arch/alpha/kernel/process.c
99--- linux-2.4.20/arch/alpha/kernel/process.c 2001-09-30 15:26:08.000000000 -0400
100+++ linux/arch/alpha/kernel/process.c 2003-04-11 17:03:05.182081640 -0400
101@@ -186,6 +186,7 @@
102 args.mode = mode;
103 args.restart_cmd = restart_cmd;
104 #ifdef CONFIG_SMP
105+ preempt_disable();
106 smp_call_function(common_shutdown_1, &args, 1, 0);
107 #endif
108 common_shutdown_1(&args);
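
The hunk above is the first instance of a pattern repeated throughout this patch: code that pairs smp_call_function() (which runs a handler on the other CPUs) with a direct call on the local CPU must not migrate between the two, so it is bracketed with preempt_disable()/preempt_enable(). A minimal sketch of the calling pattern follows; do_local_work() and run_on_all_cpus() are purely illustrative names, and the Alpha hunk above adds only the disable half, presumably because the shutdown path never returns.

#include <linux/smp.h>
#include <linux/spinlock.h>     /* preempt_disable()/preempt_enable() live here in this patch */

static void do_local_work(void *info)   /* hypothetical per-CPU handler */
{
        /* work that must run exactly once on every CPU */
}

static void run_on_all_cpus(void *info)
{
        preempt_disable();                              /* pin the task to this CPU */
        smp_call_function(do_local_work, info, 1, 0);   /* run it on all other CPUs */
        do_local_work(info);                            /* and on the CPU we are pinned to */
        preempt_enable();
}
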
109diff -urN linux-2.4.20/arch/arm/config.in linux/arch/arm/config.in
110--- linux-2.4.20/arch/arm/config.in 2002-11-28 18:53:09.000000000 -0500
111+++ linux/arch/arm/config.in 2003-04-11 17:02:55.318581120 -0400
112@@ -372,7 +372,7 @@
113 else
114 define_bool CONFIG_DISCONTIGMEM n
115 fi
116-
117+dep_bool 'Preemptible Kernel' CONFIG_PREEMPT $CONFIG_CPU_32
118 endmenu
119
120 mainmenu_option next_comment
121diff -urN linux-2.4.20/arch/arm/kernel/entry-armv.S linux/arch/arm/kernel/entry-armv.S
122--- linux-2.4.20/arch/arm/kernel/entry-armv.S 2002-08-02 20:39:42.000000000 -0400
123+++ linux/arch/arm/kernel/entry-armv.S 2003-04-11 17:02:55.393569720 -0400
124@@ -697,6 +697,12 @@
125 add r4, sp, #S_SP
126 mov r6, lr
127 stmia r4, {r5, r6, r7, r8, r9} @ save sp_SVC, lr_SVC, pc, cpsr, old_ro
128+#ifdef CONFIG_PREEMPT
129+ get_current_task r9
130+ ldr r8, [r9, #TSK_PREEMPT]
131+ add r8, r8, #1
132+ str r8, [r9, #TSK_PREEMPT]
133+#endif
134 1: get_irqnr_and_base r0, r6, r5, lr
135 movne r1, sp
136 @
137@@ -704,6 +710,25 @@
138 @
139 adrsvc ne, lr, 1b
140 bne do_IRQ
141+#ifdef CONFIG_PREEMPT
142+2: ldr r8, [r9, #TSK_PREEMPT]
143+ subs r8, r8, #1
144+ bne 3f
145+ ldr r7, [r9, #TSK_NEED_RESCHED]
146+ teq r7, #0
147+ beq 3f
148+ ldr r6, .LCirqstat
149+ ldr r0, [r6, #IRQSTAT_BH_COUNT]
150+ teq r0, #0
151+ bne 3f
152+ mov r0, #MODE_SVC
153+ msr cpsr_c, r0 @ enable interrupts
154+ bl SYMBOL_NAME(preempt_schedule)
155+ mov r0, #I_BIT | MODE_SVC
156+ msr cpsr_c, r0 @ disable interrupts
157+ b 2b
158+3: str r8, [r9, #TSK_PREEMPT]
159+#endif
160 ldr r0, [sp, #S_PSR] @ irqs are already disabled
161 msr spsr, r0
162 ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
163@@ -761,6 +786,9 @@
164 .LCprocfns: .word SYMBOL_NAME(processor)
165 #endif
166 .LCfp: .word SYMBOL_NAME(fp_enter)
167+#ifdef CONFIG_PREEMPT
168+.LCirqstat: .word SYMBOL_NAME(irq_stat)
169+#endif
170
171 irq_prio_table
172
173@@ -801,6 +829,12 @@
174 stmdb r8, {sp, lr}^
175 alignment_trap r4, r7, __temp_irq
176 zero_fp
177+ get_current_task tsk
178+#ifdef CONFIG_PREEMPT
179+ ldr r0, [tsk, #TSK_PREEMPT]
180+ add r0, r0, #1
181+ str r0, [tsk, #TSK_PREEMPT]
182+#endif
183 1: get_irqnr_and_base r0, r6, r5, lr
184 movne r1, sp
185 adrsvc ne, lr, 1b
186@@ -808,8 +842,12 @@
187 @ routine called with r0 = irq number, r1 = struct pt_regs *
188 @
189 bne do_IRQ
190+#ifdef CONFIG_PREEMPT
191+ ldr r0, [tsk, #TSK_PREEMPT]
192+ sub r0, r0, #1
193+ str r0, [tsk, #TSK_PREEMPT]
194+#endif
195 mov why, #0
196- get_current_task tsk
197 b ret_to_user
198
199 .align 5
200diff -urN linux-2.4.20/arch/arm/tools/getconstants.c linux/arch/arm/tools/getconstants.c
201--- linux-2.4.20/arch/arm/tools/getconstants.c 2001-10-11 12:04:57.000000000 -0400
202+++ linux/arch/arm/tools/getconstants.c 2003-04-11 17:02:55.394569568 -0400
203@@ -13,6 +13,7 @@
204
205 #include <asm/pgtable.h>
206 #include <asm/uaccess.h>
207+#include <asm/hardirq.h>
208
209 /*
210 * Make sure that the compiler and target are compatible.
211@@ -39,6 +40,11 @@
212 DEFN("TSS_SAVE", OFF_TSK(thread.save));
213 DEFN("TSS_FPESAVE", OFF_TSK(thread.fpstate.soft.save));
214
215+#ifdef CONFIG_PREEMPT
216+DEFN("TSK_PREEMPT", OFF_TSK(preempt_count));
217+DEFN("IRQSTAT_BH_COUNT", (unsigned long)&(((irq_cpustat_t *)0)->__local_bh_count));
218+#endif
219+
220 #ifdef CONFIG_CPU_32
221 DEFN("TSS_DOMAIN", OFF_TSK(thread.domain));
222
223diff -urN linux-2.4.20/arch/i386/config.in linux/arch/i386/config.in
224--- linux-2.4.20/arch/i386/config.in 2002-11-28 18:53:09.000000000 -0500
225+++ linux/arch/i386/config.in 2003-04-11 17:02:55.395569416 -0400
226@@ -206,6 +206,7 @@
227 bool 'Math emulation' CONFIG_MATH_EMULATION
228 bool 'MTRR (Memory Type Range Register) support' CONFIG_MTRR
229 bool 'Symmetric multi-processing support' CONFIG_SMP
230+bool 'Preemptible Kernel' CONFIG_PREEMPT
231 if [ "$CONFIG_SMP" != "y" ]; then
232 bool 'Local APIC support on uniprocessors' CONFIG_X86_UP_APIC
233 dep_bool 'IO-APIC support on uniprocessors' CONFIG_X86_UP_IOAPIC $CONFIG_X86_UP_APIC
234@@ -224,9 +225,12 @@
235 define_bool CONFIG_X86_TSC y
236 fi
237
238-if [ "$CONFIG_SMP" = "y" -a "$CONFIG_X86_CMPXCHG" = "y" ]; then
239- define_bool CONFIG_HAVE_DEC_LOCK y
240+if [ "$CONFIG_SMP" = "y" -o "$CONFIG_PREEMPT" = "y" ]; then
241+ if [ "$CONFIG_X86_CMPXCHG" = "y" ]; then
242+ define_bool CONFIG_HAVE_DEC_LOCK y
243+ fi
244 fi
245+
246 endmenu
247
248 mainmenu_option next_comment
249diff -urN linux-2.4.20/arch/i386/kernel/cpuid.c linux/arch/i386/kernel/cpuid.c
250--- linux-2.4.20/arch/i386/kernel/cpuid.c 2001-10-11 12:04:57.000000000 -0400
251+++ linux/arch/i386/kernel/cpuid.c 2003-04-11 17:03:05.217076320 -0400
252@@ -60,7 +60,8 @@
253 static inline void do_cpuid(int cpu, u32 reg, u32 *data)
254 {
255 struct cpuid_command cmd;
256-
257+
258+ preempt_disable();
259 if ( cpu == smp_processor_id() ) {
260 cpuid(reg, &data[0], &data[1], &data[2], &data[3]);
261 } else {
262@@ -70,6 +71,7 @@
263
264 smp_call_function(cpuid_smp_cpuid, &cmd, 1, 1);
265 }
266+ preempt_enable();
267 }
268 #else /* ! CONFIG_SMP */
269
270diff -urN linux-2.4.20/arch/i386/kernel/entry.S linux/arch/i386/kernel/entry.S
271--- linux-2.4.20/arch/i386/kernel/entry.S 2002-11-28 18:53:09.000000000 -0500
272+++ linux/arch/i386/kernel/entry.S 2003-04-11 17:02:55.397569112 -0400
273@@ -73,7 +73,7 @@
274 * these are offsets into the task-struct.
275 */
276 state = 0
277-flags = 4
278+preempt_count = 4
279 sigpending = 8
280 addr_limit = 12
281 exec_domain = 16
282@@ -81,8 +81,28 @@
283 tsk_ptrace = 24
284 processor = 52
285
286+/* These are offsets into the irq_stat structure
287+ * There is one per cpu and it is aligned to 32
288+ * byte boundry (we put that here as a shift count)
289+ */
290+irq_array_shift = CONFIG_X86_L1_CACHE_SHIFT
291+
292+irq_stat_local_irq_count = 4
293+irq_stat_local_bh_count = 8
294+
295 ENOSYS = 38
296
297+#ifdef CONFIG_SMP
298+#define GET_CPU_INDX movl processor(%ebx),%eax; \
299+ shll $irq_array_shift,%eax
300+#define GET_CURRENT_CPU_INDX GET_CURRENT(%ebx); \
301+ GET_CPU_INDX
302+#define CPU_INDX (,%eax)
303+#else
304+#define GET_CPU_INDX
305+#define GET_CURRENT_CPU_INDX GET_CURRENT(%ebx)
306+#define CPU_INDX
307+#endif
308
309 #define SAVE_ALL \
310 cld; \
311@@ -255,12 +275,30 @@
312 ALIGN
313 ENTRY(ret_from_intr)
314 GET_CURRENT(%ebx)
315+#ifdef CONFIG_PREEMPT
316+ cli
317+ decl preempt_count(%ebx)
318+#endif
319 ret_from_exception:
320 movl EFLAGS(%esp),%eax # mix EFLAGS and CS
321 movb CS(%esp),%al
322 testl $(VM_MASK | 3),%eax # return to VM86 mode or non-supervisor?
323 jne ret_from_sys_call
324+#ifdef CONFIG_PREEMPT
325+ cmpl $0,preempt_count(%ebx)
326+ jnz restore_all
327+ cmpl $0,need_resched(%ebx)
328+ jz restore_all
329+ movl SYMBOL_NAME(irq_stat)+irq_stat_local_bh_count CPU_INDX,%ecx
330+ addl SYMBOL_NAME(irq_stat)+irq_stat_local_irq_count CPU_INDX,%ecx
331+ jnz restore_all
332+ incl preempt_count(%ebx)
333+ sti
334+ call SYMBOL_NAME(preempt_schedule)
335+ jmp ret_from_intr
336+#else
337 jmp restore_all
338+#endif
339
340 ALIGN
341 reschedule:
342@@ -297,6 +335,9 @@
343 GET_CURRENT(%ebx)
344 call *%edi
345 addl $8,%esp
346+#ifdef CONFIG_PREEMPT
347+ cli
348+#endif
349 jmp ret_from_exception
350
351 ENTRY(coprocessor_error)
352@@ -316,12 +357,18 @@
353 movl %cr0,%eax
354 testl $0x4,%eax # EM (math emulation bit)
355 jne device_not_available_emulate
356+#ifdef CONFIG_PREEMPT
357+ cli
358+#endif
359 call SYMBOL_NAME(math_state_restore)
360 jmp ret_from_exception
361 device_not_available_emulate:
362 pushl $0 # temporary storage for ORIG_EIP
363 call SYMBOL_NAME(math_emulate)
364 addl $4,%esp
365+#ifdef CONFIG_PREEMPT
366+ cli
367+#endif
368 jmp ret_from_exception
369
370 ENTRY(debug)
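
The CONFIG_PREEMPT additions to ret_from_intr/ret_from_exception above are easier to follow as C. The function below is only a readability sketch of the control flow the assembly implements, not code from the patch: returning_to_user(), ret_from_sys_call() and restore_all() stand in for the corresponding labels and flag tests, while preempt_count, need_resched, local_irq_count() and local_bh_count() are the real fields and macros the assembly touches.

void ret_from_intr_sketch(struct task_struct *curr, int cpu)
{
        for (;;) {
                __cli();                                /* interrupts off */
                curr->preempt_count--;                  /* undo the bump done at irq entry */
                if (returning_to_user()) {              /* VM86 or CPL != 0 */
                        ret_from_sys_call();
                        return;
                }
                if (curr->preempt_count != 0)           /* nested region: not safe */
                        break;
                if (curr->need_resched == 0)            /* nobody wants the CPU */
                        break;
                if (local_irq_count(cpu) + local_bh_count(cpu) != 0)
                        break;                          /* still in irq/softirq context */
                curr->preempt_count++;                  /* keep nested interrupts out */
                __sti();
                preempt_schedule();
                /* loop: re-check everything with interrupts off again */
        }
        restore_all();                                  /* ordinary register restore */
}
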
371diff -urN linux-2.4.20/arch/i386/kernel/i387.c linux/arch/i386/kernel/i387.c
372--- linux-2.4.20/arch/i386/kernel/i387.c 2002-08-02 20:39:42.000000000 -0400
373+++ linux/arch/i386/kernel/i387.c 2003-04-11 17:02:55.398568960 -0400
374@@ -10,6 +10,7 @@
375
376 #include <linux/config.h>
377 #include <linux/sched.h>
378+#include <linux/spinlock.h>
379 #include <linux/init.h>
380 #include <asm/processor.h>
381 #include <asm/i387.h>
382@@ -89,6 +90,8 @@
383 {
384 struct task_struct *tsk = current;
385
386+ preempt_disable();
387+
388 if (tsk->flags & PF_USEDFPU) {
389 __save_init_fpu(tsk);
390 return;
391diff -urN linux-2.4.20/arch/i386/kernel/ioport.c linux/arch/i386/kernel/ioport.c
392--- linux-2.4.20/arch/i386/kernel/ioport.c 1999-07-19 18:22:48.000000000 -0400
393+++ linux/arch/i386/kernel/ioport.c 2003-04-11 17:03:05.218076168 -0400
394@@ -55,7 +55,7 @@
395 asmlinkage int sys_ioperm(unsigned long from, unsigned long num, int turn_on)
396 {
397 struct thread_struct * t = &current->thread;
398- struct tss_struct * tss = init_tss + smp_processor_id();
399+ struct tss_struct * tss;
400
401 if ((from + num <= from) || (from + num > IO_BITMAP_SIZE*32))
402 return -EINVAL;
403@@ -66,6 +66,8 @@
404 * IO bitmap up. ioperm() is much less timing critical than clone(),
405 * this is why we delay this operation until now:
406 */
407+ preempt_disable();
408+ tss = init_tss + smp_processor_id();
409 if (!t->ioperm) {
410 /*
411 * just in case ...
412@@ -83,6 +85,7 @@
413 */
414 set_bitmap(t->io_bitmap, from, num, !turn_on);
415 set_bitmap(tss->io_bitmap, from, num, !turn_on);
416+ preempt_enable();
417
418 return 0;
419 }
420diff -urN linux-2.4.20/arch/i386/kernel/irq.c linux/arch/i386/kernel/irq.c
421--- linux-2.4.20/arch/i386/kernel/irq.c 2002-11-28 18:53:09.000000000 -0500
422+++ linux/arch/i386/kernel/irq.c 2003-04-11 17:03:05.255070544 -0400
423@@ -283,9 +283,11 @@
424 show("wait_on_irq");
425 count = ~0;
426 }
427+ preempt_disable();
428 __sti();
429 SYNC_OTHER_CORES(cpu);
430 __cli();
431+ preempt_enable_no_resched();
432 if (irqs_running())
433 continue;
434 if (global_irq_lock)
435@@ -359,8 +361,9 @@
436
437 __save_flags(flags);
438 if (flags & (1 << EFLAGS_IF_SHIFT)) {
439- int cpu = smp_processor_id();
440+ int cpu;
441 __cli();
442+ cpu = smp_processor_id();
443 if (!local_irq_count(cpu))
444 get_irqlock(cpu);
445 }
446@@ -368,11 +371,14 @@
447
448 void __global_sti(void)
449 {
450- int cpu = smp_processor_id();
451+ int cpu;
452
453+ preempt_disable();
454+ cpu = smp_processor_id();
455 if (!local_irq_count(cpu))
456 release_irqlock(cpu);
457 __sti();
458+ preempt_enable();
459 }
460
461 /*
462@@ -387,13 +393,15 @@
463 int retval;
464 int local_enabled;
465 unsigned long flags;
466- int cpu = smp_processor_id();
467+ int cpu;
468
469 __save_flags(flags);
470 local_enabled = (flags >> EFLAGS_IF_SHIFT) & 1;
471 /* default to local */
472 retval = 2 + local_enabled;
473
474+ preempt_disable();
475+ cpu = smp_processor_id();
476 /* check for global flags if we're not in an interrupt */
477 if (!local_irq_count(cpu)) {
478 if (local_enabled)
479@@ -401,6 +409,7 @@
480 if (global_irq_holder == cpu)
481 retval = 0;
482 }
483+ preempt_enable();
484 return retval;
485 }
486
487diff -urN linux-2.4.20/arch/i386/kernel/ldt.c linux/arch/i386/kernel/ldt.c
488--- linux-2.4.20/arch/i386/kernel/ldt.c 2001-10-17 17:46:29.000000000 -0400
489+++ linux/arch/i386/kernel/ldt.c 2003-04-11 17:03:05.322060360 -0400
490@@ -92,6 +92,7 @@
491 * the GDT index of the LDT is allocated dynamically, and is
492 * limited by MAX_LDT_DESCRIPTORS.
493 */
494+ preempt_disable();
495 down_write(&mm->mmap_sem);
496 if (!mm->context.segments) {
497 void * segments = vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE);
498@@ -144,6 +145,7 @@
499
500 out_unlock:
501 up_write(&mm->mmap_sem);
502+ preempt_enable();
503 out:
504 return error;
505 }
506diff -urN linux-2.4.20/arch/i386/kernel/microcode.c linux/arch/i386/kernel/microcode.c
507--- linux-2.4.20/arch/i386/kernel/microcode.c 2002-08-02 20:39:42.000000000 -0400
508+++ linux/arch/i386/kernel/microcode.c 2003-04-11 17:03:05.323060208 -0400
509@@ -182,11 +182,14 @@
510 int i, error = 0, err;
511 struct microcode *m;
512
513+ preempt_disable();
514 if (smp_call_function(do_update_one, NULL, 1, 1) != 0) {
515 printk(KERN_ERR "microcode: IPI timeout, giving up\n");
516+ preempt_enable();
517 return -EIO;
518 }
519 do_update_one(NULL);
520+ preempt_enable();
521
522 for (i=0; i<smp_num_cpus; i++) {
523 err = update_req[i].err;
524diff -urN linux-2.4.20/arch/i386/kernel/msr.c linux/arch/i386/kernel/msr.c
525--- linux-2.4.20/arch/i386/kernel/msr.c 2001-10-11 12:04:57.000000000 -0400
526+++ linux/arch/i386/kernel/msr.c 2003-04-11 17:03:05.359054736 -0400
527@@ -114,8 +114,9 @@
528 {
529 struct msr_command cmd;
530
531+ preempt_disable();
532 if ( cpu == smp_processor_id() ) {
533- return wrmsr_eio(reg, eax, edx);
534+ cmd.err = wrmsr_eio(reg, eax, edx);
535 } else {
536 cmd.cpu = cpu;
537 cmd.reg = reg;
538@@ -123,16 +124,19 @@
539 cmd.data[1] = edx;
540
541 smp_call_function(msr_smp_wrmsr, &cmd, 1, 1);
542- return cmd.err;
543 }
544+
545+ preempt_enable();
546+ return cmd.err;
547 }
548
549 static inline int do_rdmsr(int cpu, u32 reg, u32 *eax, u32 *edx)
550 {
551 struct msr_command cmd;
552
553+ preempt_disable();
554 if ( cpu == smp_processor_id() ) {
555- return rdmsr_eio(reg, eax, edx);
556+ cmd.err = rdmsr_eio(reg, eax, edx);
557 } else {
558 cmd.cpu = cpu;
559 cmd.reg = reg;
560@@ -141,9 +145,10 @@
561
562 *eax = cmd.data[0];
563 *edx = cmd.data[1];
564-
565- return cmd.err;
566 }
567+
568+ preempt_enable();
569+ return cmd.err;
570 }
571
572 #else /* ! CONFIG_SMP */
573diff -urN linux-2.4.20/arch/i386/kernel/mtrr.c linux/arch/i386/kernel/mtrr.c
574--- linux-2.4.20/arch/i386/kernel/mtrr.c 2002-08-02 20:39:42.000000000 -0400
575+++ linux/arch/i386/kernel/mtrr.c 2003-04-11 17:03:05.397048960 -0400
576@@ -1057,6 +1057,9 @@
577 wait_barrier_execute = TRUE;
578 wait_barrier_cache_enable = TRUE;
579 atomic_set (&undone_count, smp_num_cpus - 1);
580+
581+ preempt_disable();
582+
583 /* Start the ball rolling on other CPUs */
584 if (smp_call_function (ipi_handler, &data, 1, 0) != 0)
585 panic ("mtrr: timed out waiting for other CPUs\n");
586@@ -1082,6 +1085,9 @@
587 then enable the local cache and return */
588 wait_barrier_cache_enable = FALSE;
589 set_mtrr_done (&ctxt);
590+
591+ preempt_enable();
592+
593 } /* End Function set_mtrr_smp */
594
595
596diff -urN linux-2.4.20/arch/i386/kernel/smp.c linux/arch/i386/kernel/smp.c
597--- linux-2.4.20/arch/i386/kernel/smp.c 2002-11-28 18:53:09.000000000 -0500
598+++ linux/arch/i386/kernel/smp.c 2003-04-11 17:03:05.435043184 -0400
599@@ -357,10 +357,14 @@
600
601 asmlinkage void smp_invalidate_interrupt (void)
602 {
603- unsigned long cpu = smp_processor_id();
604+ unsigned long cpu;
605+
606+ preempt_disable();
607+
608+ cpu = smp_processor_id();
609
610 if (!test_bit(cpu, &flush_cpumask))
611- return;
612+ goto out;
613 /*
614 * This was a BUG() but until someone can quote me the
615 * line from the intel manual that guarantees an IPI to
616@@ -381,6 +385,8 @@
617 }
618 ack_APIC_irq();
619 clear_bit(cpu, &flush_cpumask);
620+out:
621+ preempt_enable();
622 }
623
624 static void flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
625@@ -430,17 +436,22 @@
626 void flush_tlb_current_task(void)
627 {
628 struct mm_struct *mm = current->mm;
629- unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
630+ unsigned long cpu_mask;
631
632+ preempt_disable();
633+ cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
634 local_flush_tlb();
635 if (cpu_mask)
636 flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
637+ preempt_enable();
638 }
639
640 void flush_tlb_mm (struct mm_struct * mm)
641 {
642- unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
643+ unsigned long cpu_mask;
644
645+ preempt_disable();
646+ cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
647 if (current->active_mm == mm) {
648 if (current->mm)
649 local_flush_tlb();
650@@ -449,13 +460,16 @@
651 }
652 if (cpu_mask)
653 flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
654+ preempt_enable();
655 }
656
657 void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
658 {
659 struct mm_struct *mm = vma->vm_mm;
660- unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
661+ unsigned long cpu_mask;
662
663+ preempt_disable();
664+ cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
665 if (current->active_mm == mm) {
666 if(current->mm)
667 __flush_tlb_one(va);
668@@ -465,6 +479,7 @@
669
670 if (cpu_mask)
671 flush_tlb_others(cpu_mask, mm, va);
672+ preempt_enable();
673 }
674
675 static inline void do_flush_tlb_all_local(void)
676@@ -483,9 +498,11 @@
677
678 void flush_tlb_all(void)
679 {
680+ preempt_disable();
681 smp_call_function (flush_tlb_all_ipi,0,1,1);
682
683 do_flush_tlb_all_local();
684+ preempt_enable();
685 }
686
687 /*
688@@ -569,7 +586,7 @@
689 static void stop_this_cpu (void * dummy)
690 {
691 /*
692- * Remove this CPU:
693+ * Remove this CPU: assumes preemption is disabled
694 */
695 clear_bit(smp_processor_id(), &cpu_online_map);
696 __cli();
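
In the flush_tlb_* hunks above, the point of moving the cpu_mask computation inside preempt_disable() is ordering: a mask meaning "every CPU except mine" is only valid while the task is guaranteed to stay on that CPU until both the local flush and the cross-CPU IPIs have been issued. A condensed sketch of the resulting flush_tlb_mm(), where flush_tlb_others(), FLUSH_ALL and local_flush_tlb() are the existing helpers in this file and the real function also handles the lazy-TLB case:

void flush_tlb_mm_sketch(struct mm_struct *mm)
{
        unsigned long cpu_mask;

        preempt_disable();                      /* smp_processor_id() is now stable */
        cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
        if (current->active_mm == mm && current->mm)
                local_flush_tlb();              /* flush the CPU we are pinned to */
        if (cpu_mask)
                flush_tlb_others(cpu_mask, mm, FLUSH_ALL);      /* IPI the rest */
        preempt_enable();
}
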
697diff -urN linux-2.4.20/arch/i386/kernel/traps.c linux/arch/i386/kernel/traps.c
698--- linux-2.4.20/arch/i386/kernel/traps.c 2002-11-28 18:53:09.000000000 -0500
699+++ linux/arch/i386/kernel/traps.c 2003-04-11 17:02:55.401568504 -0400
700@@ -751,6 +751,8 @@
701 *
702 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
703 * Don't touch unless you *really* know how it works.
704+ *
705+ * Must be called with kernel preemption disabled.
706 */
707 asmlinkage void math_state_restore(struct pt_regs regs)
708 {
709diff -urN linux-2.4.20/arch/i386/lib/dec_and_lock.c linux/arch/i386/lib/dec_and_lock.c
710--- linux-2.4.20/arch/i386/lib/dec_and_lock.c 2000-07-07 21:20:16.000000000 -0400
711+++ linux/arch/i386/lib/dec_and_lock.c 2003-04-11 17:02:55.401568504 -0400
712@@ -8,6 +8,7 @@
713 */
714
715 #include <linux/spinlock.h>
716+#include <linux/sched.h>
717 #include <asm/atomic.h>
718
719 int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
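
The hunk above only adds an include, presumably because the preempt-aware spinlock macros expanded in this file now reference current->preempt_count, which requires <linux/sched.h>. The function it lives in is worth a usage note: atomic_dec_and_lock() is the "drop a reference and take the lock only on the final put" helper that the config.in change earlier in this patch also builds for CONFIG_PREEMPT kernels. A typical caller, with struct counted and counted_put() as illustrative names only:

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/atomic.h>

struct counted {
        atomic_t refcnt;
        /* ... payload ... */
};

static spinlock_t counted_lock = SPIN_LOCK_UNLOCKED;   /* protects the owning list */

void counted_put(struct counted *c)
{
        /* Returns nonzero, with the lock held, only when the count hits zero. */
        if (atomic_dec_and_lock(&c->refcnt, &counted_lock)) {
                /* unlink c from its list here, then drop the lock and free it */
                spin_unlock(&counted_lock);
                kfree(c);
        }
}
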
720diff -urN linux-2.4.20/arch/i386/mm/init.c linux/arch/i386/mm/init.c
721--- linux-2.4.20/arch/i386/mm/init.c 2002-11-28 18:53:09.000000000 -0500
722+++ linux/arch/i386/mm/init.c 2003-04-11 17:03:05.471037712 -0400
723@@ -46,6 +46,7 @@
724 int do_check_pgt_cache(int low, int high)
725 {
726 int freed = 0;
727+ preempt_disable();
728 if(pgtable_cache_size > high) {
729 do {
730 if (pgd_quicklist) {
731@@ -62,6 +63,7 @@
732 }
733 } while(pgtable_cache_size > low);
734 }
735+ preempt_enable();
736 return freed;
737 }
738
739diff -urN linux-2.4.20/arch/mips/config-shared.in linux/arch/mips/config-shared.in
740--- linux-2.4.20/arch/mips/config-shared.in 2002-11-28 18:53:09.000000000 -0500
741+++ linux/arch/mips/config-shared.in 2003-04-11 17:02:55.403568200 -0400
742@@ -615,6 +615,7 @@
743 define_bool CONFIG_HOTPLUG_PCI n
744 fi
745
746+dep_bool 'Preemptible Kernel' CONFIG_PREEMPT $CONFIG_NEW_IRQ
747 bool 'System V IPC' CONFIG_SYSVIPC
748 bool 'BSD Process Accounting' CONFIG_BSD_PROCESS_ACCT
749 bool 'Sysctl support' CONFIG_SYSCTL
750diff -urN linux-2.4.20/arch/mips/kernel/i8259.c linux/arch/mips/kernel/i8259.c
751--- linux-2.4.20/arch/mips/kernel/i8259.c 2002-11-28 18:53:10.000000000 -0500
752+++ linux/arch/mips/kernel/i8259.c 2003-04-11 17:02:55.475557256 -0400
753@@ -8,6 +8,7 @@
754 * Copyright (C) 1992 Linus Torvalds
755 * Copyright (C) 1994 - 2000 Ralf Baechle
756 */
757+#include <linux/sched.h>
758 #include <linux/delay.h>
759 #include <linux/init.h>
760 #include <linux/ioport.h>
761diff -urN linux-2.4.20/arch/mips/kernel/irq.c linux/arch/mips/kernel/irq.c
762--- linux-2.4.20/arch/mips/kernel/irq.c 2002-11-28 18:53:10.000000000 -0500
763+++ linux/arch/mips/kernel/irq.c 2003-04-11 17:02:55.514551328 -0400
764@@ -8,6 +8,8 @@
765 * Copyright (C) 1992 Linus Torvalds
766 * Copyright (C) 1994 - 2000 Ralf Baechle
767 */
768+
769+#include <linux/sched.h>
770 #include <linux/config.h>
771 #include <linux/kernel.h>
772 #include <linux/delay.h>
773@@ -19,11 +21,13 @@
774 #include <linux/slab.h>
775 #include <linux/mm.h>
776 #include <linux/random.h>
777-#include <linux/sched.h>
778+#include <linux/spinlock.h>
779+#include <linux/ptrace.h>
780
781 #include <asm/atomic.h>
782 #include <asm/system.h>
783 #include <asm/uaccess.h>
784+#include <asm/debug.h>
785
786 /*
787 * Controller mappings for all interrupt sources:
788@@ -429,6 +433,8 @@
789 struct irqaction * action;
790 unsigned int status;
791
792+ preempt_disable();
793+
794 kstat.irqs[cpu][irq]++;
795 spin_lock(&desc->lock);
796 desc->handler->ack(irq);
797@@ -490,6 +496,27 @@
798
799 if (softirq_pending(cpu))
800 do_softirq();
801+
802+#if defined(CONFIG_PREEMPT)
803+ while (--current->preempt_count == 0) {
804+ db_assert(intr_off());
805+ db_assert(!in_interrupt());
806+
807+ if (current->need_resched == 0) {
808+ break;
809+ }
810+
811+ current->preempt_count ++;
812+ sti();
813+ if (user_mode(regs)) {
814+ schedule();
815+ } else {
816+ preempt_schedule();
817+ }
818+ cli();
819+ }
820+#endif
821+
822 return 1;
823 }
824
825diff -urN linux-2.4.20/arch/mips/mm/extable.c linux/arch/mips/mm/extable.c
826--- linux-2.4.20/arch/mips/mm/extable.c 2002-11-28 18:53:10.000000000 -0500
827+++ linux/arch/mips/mm/extable.c 2003-04-11 17:02:55.515551176 -0400
828@@ -3,6 +3,7 @@
829 */
830 #include <linux/config.h>
831 #include <linux/module.h>
832+#include <linux/sched.h>
833 #include <linux/spinlock.h>
834 #include <asm/uaccess.h>
835
836diff -urN linux-2.4.20/arch/ppc/config.in linux/arch/ppc/config.in
837--- linux-2.4.20/arch/ppc/config.in 2002-11-28 18:53:11.000000000 -0500
838+++ linux/arch/ppc/config.in 2003-04-11 17:02:55.516551024 -0400
839@@ -112,6 +112,8 @@
840 bool ' Distribute interrupts on all CPUs by default' CONFIG_IRQ_ALL_CPUS
841 fi
842
843+bool 'Preemptible kernel support' CONFIG_PREEMPT
844+
845 if [ "$CONFIG_6xx" = "y" -a "$CONFIG_8260" = "n" ];then
846 bool 'AltiVec Support' CONFIG_ALTIVEC
847 bool 'Thermal Management Support' CONFIG_TAU
848diff -urN linux-2.4.20/arch/ppc/kernel/entry.S linux/arch/ppc/kernel/entry.S
849--- linux-2.4.20/arch/ppc/kernel/entry.S 2002-11-28 18:53:11.000000000 -0500
850+++ linux/arch/ppc/kernel/entry.S 2003-04-11 17:03:05.473037408 -0400
851@@ -278,6 +278,46 @@
852 */
853 cmpi 0,r3,0
854 beq restore
855+#ifdef CONFIG_PREEMPT
856+ lwz r3,PREEMPT_COUNT(r2)
857+ cmpi 0,r3,1
858+ bge ret_from_except
859+ lwz r5,_MSR(r1)
860+ andi. r5,r5,MSR_PR
861+ bne do_signal_ret
862+ lwz r5,NEED_RESCHED(r2)
863+ cmpi 0,r5,0
864+ beq ret_from_except
865+ lis r3,irq_stat@h
866+ ori r3,r3,irq_stat@l
867+#ifdef CONFIG_SMP
868+ lwz r5,CPU(r2)
869+ rlwinm r5,r5,5,0,26
870+ add r3,r3,r5
871+#endif
872+ lwz r5,4(r3)
873+ lwz r3,8(r3)
874+ add r3,r3,r5
875+ cmpi 0,r3,0
876+ bne ret_from_except
877+ lwz r3,PREEMPT_COUNT(r2)
878+ addi r3,r3,1
879+ stw r3,PREEMPT_COUNT(r2)
880+ mfmsr r0
881+ ori r0,r0,MSR_EE
882+ mtmsr r0
883+ sync
884+ bl preempt_schedule
885+ mfmsr r0
886+ rlwinm r0,r0,0,17,15
887+ mtmsr r0
888+ sync
889+ lwz r3,PREEMPT_COUNT(r2)
890+ subi r3,r3,1
891+ stw r3,PREEMPT_COUNT(r2)
892+ li r3,1
893+ b ret_from_intercept
894+#endif /* CONFIG_PREEMPT */
895 .globl ret_from_except
896 ret_from_except:
897 lwz r3,_MSR(r1) /* Returning to user mode? */
898diff -urN linux-2.4.20/arch/ppc/kernel/irq.c linux/arch/ppc/kernel/irq.c
899--- linux-2.4.20/arch/ppc/kernel/irq.c 2002-11-28 18:53:11.000000000 -0500
900+++ linux/arch/ppc/kernel/irq.c 2003-04-11 17:03:54.067649904 -0400
901@@ -556,6 +556,34 @@
902 return 1; /* lets ret_from_int know we can do checks */
903 }
904
905+#ifdef CONFIG_PREEMPT
906+int
907+preempt_intercept(struct pt_regs *regs)
908+{
909+ int ret;
910+
911+ preempt_disable();
912+
913+ switch(regs->trap) {
914+ case 0x500:
915+ ret = do_IRQ(regs);
916+ break;
917+#ifndef CONFIG_4xx
918+ case 0x900:
919+#else
920+ case 0x1000:
921+#endif
922+ ret = timer_interrupt(regs);
923+ break;
924+ default:
925+ BUG();
926+ }
927+
928+ preempt_enable();
929+ return ret;
930+}
931+#endif /* CONFIG_PREEMPT */
932+
933 unsigned long probe_irq_on (void)
934 {
935 return 0;
936@@ -652,11 +680,13 @@
937 show("wait_on_irq");
938 count = ~0;
939 }
940+ preempt_disable();
941 __sti();
942 /* don't worry about the lock race Linus found
943 * on intel here. -- Cort
944 */
945 __cli();
946+ preempt_enable_no_resched();
947 if (atomic_read(&global_irq_count))
948 continue;
949 if (global_irq_lock)
950@@ -732,6 +762,8 @@
951 global_irq_holder = cpu;
952 }
953
954+#define EFLAGS_IF_SHIFT 15
955+
956 /*
957 * A global "cli()" while in an interrupt context
958 * turns into just a local cli(). Interrupts
959@@ -749,9 +781,10 @@
960 unsigned long flags;
961
962 __save_flags(flags);
963- if (flags & (1 << 15)) {
964- int cpu = smp_processor_id();
965+ if (flags & (1 << EFLAGS_IF_SHIFT)) {
966+ int cpu;
967 __cli();
968+ cpu = smp_processor_id();
969 if (!local_irq_count(cpu))
970 get_irqlock(cpu);
971 }
972@@ -759,11 +792,14 @@
973
974 void __global_sti(void)
975 {
976- int cpu = smp_processor_id();
977+ int cpu;
978
979+ preempt_disable();
980+ cpu = smp_processor_id();
981 if (!local_irq_count(cpu))
982 release_irqlock(cpu);
983 __sti();
984+ preempt_enable();
985 }
986
987 /*
988@@ -778,19 +814,23 @@
989 int retval;
990 int local_enabled;
991 unsigned long flags;
992+ int cpu;
993
994 __save_flags(flags);
995- local_enabled = (flags >> 15) & 1;
996+ local_enabled = (flags >> EFLAGS_IF_SHIFT) & 1;
997 /* default to local */
998 retval = 2 + local_enabled;
999
1000 /* check for global flags if we're not in an interrupt */
1001- if (!local_irq_count(smp_processor_id())) {
1002+ preempt_disable();
1003+ cpu = smp_processor_id();
1004+ if (!local_irq_count(cpu)) {
1005 if (local_enabled)
1006 retval = 1;
1007- if (global_irq_holder == (unsigned char) smp_processor_id())
1008+ if (global_irq_holder == cpu)
1009 retval = 0;
1010 }
1011+ preempt_enable();
1012 return retval;
1013 }
1014
1015diff -urN linux-2.4.20/arch/ppc/kernel/mk_defs.c linux/arch/ppc/kernel/mk_defs.c
1016--- linux-2.4.20/arch/ppc/kernel/mk_defs.c 2001-08-28 09:58:33.000000000 -0400
1017+++ linux/arch/ppc/kernel/mk_defs.c 2003-04-11 17:02:55.598538560 -0400
1018@@ -42,6 +42,9 @@
1019 DEFINE(SIGPENDING, offsetof(struct task_struct, sigpending));
1020 DEFINE(THREAD, offsetof(struct task_struct, thread));
1021 DEFINE(MM, offsetof(struct task_struct, mm));
1022+#ifdef CONFIG_PREEMPT
1023+ DEFINE(PREEMPT_COUNT, offsetof(struct task_struct, preempt_count));
1024+#endif
1025 DEFINE(ACTIVE_MM, offsetof(struct task_struct, active_mm));
1026 DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
1027 DEFINE(KSP, offsetof(struct thread_struct, ksp));
1028diff -urN linux-2.4.20/arch/ppc/kernel/open_pic.c linux/arch/ppc/kernel/open_pic.c
1029--- linux-2.4.20/arch/ppc/kernel/open_pic.c 2002-11-28 18:53:11.000000000 -0500
1030+++ linux/arch/ppc/kernel/open_pic.c 2003-04-11 17:03:05.592019320 -0400
1031@@ -567,19 +567,24 @@
1032 void __init do_openpic_setup_cpu(void)
1033 {
1034 int i;
1035- u32 msk = 1 << smp_hw_index[smp_processor_id()];
1036+#ifdef CONFIG_IRQ_ALL_CPUS
1037+ u32 msk;
1038+#endif /* CONFIG_IRQ_ALL_CPUS */
1039
1040 spin_lock(&openpic_setup_lock);
1041
1042 #ifdef CONFIG_IRQ_ALL_CPUS
1043+ msk = 1 << smp_hw_index[smp_processor_id()];
1044+
1045 /* let the openpic know we want intrs. default affinity
1046 * is 0xffffffff until changed via /proc
1047 * That's how it's done on x86. If we want it differently, then
1048 * we should make sure we also change the default values of irq_affinity
1049 * in irq.c.
1050 */
1051- for (i = 0; i < NumSources; i++)
1052+ for (i = 0; i < NumSources; i++) {
1053 openpic_mapirq(i, msk, ~0U);
1054+ }
1055 #endif /* CONFIG_IRQ_ALL_CPUS */
1056 openpic_set_priority(0);
1057
1058diff -urN linux-2.4.20/arch/ppc/kernel/setup.c linux/arch/ppc/kernel/setup.c
1059--- linux-2.4.20/arch/ppc/kernel/setup.c 2002-11-28 18:53:11.000000000 -0500
1060+++ linux/arch/ppc/kernel/setup.c 2003-04-11 17:02:55.635532936 -0400
1061@@ -498,6 +498,20 @@
1062 strcpy(cmd_line, CONFIG_CMDLINE);
1063 #endif /* CONFIG_CMDLINE */
1064
1065+#ifdef CONFIG_PREEMPT
1066+ /* Override the irq routines for external & timer interrupts here,
1067+ * as the MMU has only been minimally setup at this point and
1068+ * there are no protections on page zero.
1069+ */
1070+ {
1071+ extern int preempt_intercept(struct pt_regs *);
1072+
1073+ do_IRQ_intercept = (unsigned long) &preempt_intercept;
1074+ timer_interrupt_intercept = (unsigned long) &preempt_intercept;
1075+
1076+ }
1077+#endif /* CONFIG_PREEMPT */
1078+
1079 platform_init(r3, r4, r5, r6, r7);
1080
1081 if (ppc_md.progress)
1082diff -urN linux-2.4.20/arch/ppc/kernel/temp.c linux/arch/ppc/kernel/temp.c
1083--- linux-2.4.20/arch/ppc/kernel/temp.c 2001-08-28 09:58:33.000000000 -0400
1084+++ linux/arch/ppc/kernel/temp.c 2003-04-11 17:03:05.593019168 -0400
1085@@ -138,7 +138,7 @@
1086
1087 static void tau_timeout(void * info)
1088 {
1089- unsigned long cpu = smp_processor_id();
1090+ unsigned long cpu;
1091 unsigned long flags;
1092 int size;
1093 int shrink;
1094@@ -146,6 +146,8 @@
1095 /* disabling interrupts *should* be okay */
1096 save_flags(flags); cli();
1097
1098+ cpu = smp_processor_id();
1099+
1100 #ifndef CONFIG_TAU_INT
1101 TAUupdate(cpu);
1102 #endif
1103@@ -191,13 +193,15 @@
1104
1105 static void tau_timeout_smp(unsigned long unused)
1106 {
1107-
1108 /* schedule ourselves to be run again */
1109 mod_timer(&tau_timer, jiffies + shrink_timer) ;
1110+
1111+ preempt_disable();
1112 #ifdef CONFIG_SMP
1113 smp_call_function(tau_timeout, NULL, 1, 0);
1114 #endif
1115 tau_timeout(NULL);
1116+ preempt_enable();
1117 }
1118
1119 /*
1120diff -urN linux-2.4.20/arch/ppc/lib/dec_and_lock.c linux/arch/ppc/lib/dec_and_lock.c
1121--- linux-2.4.20/arch/ppc/lib/dec_and_lock.c 2001-11-16 13:10:08.000000000 -0500
1122+++ linux/arch/ppc/lib/dec_and_lock.c 2003-04-11 17:02:55.636532784 -0400
1123@@ -1,4 +1,5 @@
1124 #include <linux/module.h>
1125+#include <linux/sched.h>
1126 #include <linux/spinlock.h>
1127 #include <asm/atomic.h>
1128 #include <asm/system.h>
1129diff -urN linux-2.4.20/arch/ppc/mm/tlb.c linux/arch/ppc/mm/tlb.c
1130--- linux-2.4.20/arch/ppc/mm/tlb.c 2001-08-28 09:58:33.000000000 -0400
1131+++ linux/arch/ppc/mm/tlb.c 2003-04-11 17:03:05.630013544 -0400
1132@@ -61,11 +61,14 @@
1133 * we can and should dispense with flush_tlb_all().
1134 * -- paulus.
1135 */
1136+
1137+ preempt_disable();
1138 local_flush_tlb_range(&init_mm, TASK_SIZE, ~0UL);
1139
1140 #ifdef CONFIG_SMP
1141 smp_send_tlb_invalidate(0);
1142 #endif /* CONFIG_SMP */
1143+ preempt_enable();
1144 }
1145
1146 /*
1147@@ -76,8 +79,10 @@
1148 void
1149 local_flush_tlb_mm(struct mm_struct *mm)
1150 {
1151+ preempt_disable();
1152 if (Hash == 0) {
1153 _tlbia();
1154+ preempt_enable();
1155 return;
1156 }
1157
1158@@ -91,6 +96,7 @@
1159 #ifdef CONFIG_SMP
1160 smp_send_tlb_invalidate(0);
1161 #endif
1162+ preempt_enable();
1163 }
1164
1165 void
1166@@ -100,8 +106,10 @@
1167 pmd_t *pmd;
1168 pte_t *pte;
1169
1170+ preempt_disable();
1171 if (Hash == 0) {
1172 _tlbie(vmaddr);
1173+ preempt_enable();
1174 return;
1175 }
1176 mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
1177@@ -114,6 +122,7 @@
1178 #ifdef CONFIG_SMP
1179 smp_send_tlb_invalidate(0);
1180 #endif
1181+ preempt_enable();
1182 }
1183
1184
1185@@ -130,13 +139,17 @@
1186 unsigned long pmd_end;
1187 unsigned int ctx = mm->context;
1188
1189+ preempt_disable();
1190 if (Hash == 0) {
1191 _tlbia();
1192+ preempt_enable();
1193 return;
1194 }
1195 start &= PAGE_MASK;
1196- if (start >= end)
1197+ if (start >= end) {
1198+ preempt_enable();
1199 return;
1200+ }
1201 pmd = pmd_offset(pgd_offset(mm, start), start);
1202 do {
1203 pmd_end = (start + PGDIR_SIZE) & PGDIR_MASK;
1204@@ -159,4 +172,5 @@
1205 #ifdef CONFIG_SMP
1206 smp_send_tlb_invalidate(0);
1207 #endif
1208+ preempt_enable();
1209 }
1210diff -urN linux-2.4.20/arch/sh/config.in linux/arch/sh/config.in
1211--- linux-2.4.20/arch/sh/config.in 2002-11-28 18:53:11.000000000 -0500
1212+++ linux/arch/sh/config.in 2003-04-11 17:02:55.637532632 -0400
1213@@ -124,6 +124,7 @@
1214 hex 'Physical memory start address' CONFIG_MEMORY_START 08000000
1215 hex 'Physical memory size' CONFIG_MEMORY_SIZE 00400000
1216 fi
1217+bool 'Preemptible Kernel' CONFIG_PREEMPT
1218 endmenu
1219
1220 if [ "$CONFIG_SH_HP690" = "y" ]; then
1221diff -urN linux-2.4.20/arch/sh/kernel/entry.S linux/arch/sh/kernel/entry.S
1222--- linux-2.4.20/arch/sh/kernel/entry.S 2002-08-02 20:39:43.000000000 -0400
1223+++ linux/arch/sh/kernel/entry.S 2003-04-11 17:02:55.709521688 -0400
1224@@ -60,10 +60,18 @@
1225 /*
1226 * These are offsets into the task-struct.
1227 */
1228-flags = 4
1229+preempt_count = 4
1230 sigpending = 8
1231 need_resched = 20
1232 tsk_ptrace = 24
1233+flags = 84
1234+
1235+/*
1236+ * These offsets are into irq_stat.
1237+ * (Find irq_cpustat_t in asm-sh/hardirq.h)
1238+ */
1239+local_irq_count = 8
1240+local_bh_count = 12
1241
1242 PT_TRACESYS = 0x00000002
1243 PF_USEDFPU = 0x00100000
1244@@ -143,7 +151,7 @@
1245 mov.l __INV_IMASK, r11; \
1246 stc sr, r10; \
1247 and r11, r10; \
1248- stc k_g_imask, r11; \
1249+ stc k_g_imask, r11; \
1250 or r11, r10; \
1251 ldc r10, sr
1252
1253@@ -304,8 +312,8 @@
1254 mov.l @(tsk_ptrace,r0), r0 ! Is current PTRACE_SYSCALL'd?
1255 mov #PT_TRACESYS, r1
1256 tst r1, r0
1257- bt ret_from_syscall
1258- bra syscall_ret_trace
1259+ bf syscall_ret_trace
1260+ bra ret_from_syscall
1261 nop
1262
1263 .align 2
1264@@ -505,8 +513,6 @@
1265 .long syscall_ret_trace
1266 __syscall_ret:
1267 .long syscall_ret
1268-__INV_IMASK:
1269- .long 0xffffff0f ! ~(IMASK)
1270
1271
1272 .align 2
1273@@ -518,7 +524,84 @@
1274 .align 2
1275 1: .long SYMBOL_NAME(schedule)
1276
1277+#ifdef CONFIG_PREEMPT
1278+ !
1279+ ! Returning from interrupt during kernel mode: check if
1280+ ! preempt_schedule should be called. If need_resched flag
1281+ ! is set, preempt_count is zero, and we're not currently
1282+ ! in an interrupt handler (local irq or bottom half) then
1283+ ! call preempt_schedule.
1284+ !
1285+ ! Increment preempt_count to prevent a nested interrupt
1286+ ! from reentering preempt_schedule, then decrement after
1287+ ! and drop through to regular interrupt return which will
1288+ ! jump back and check again in case such an interrupt did
1289+ ! come in (and didn't preempt due to preempt_count).
1290+ !
1291+ ! NOTE: because we just checked that preempt_count was
1292+ ! zero before getting to the call, can't we use immediate
1293+ ! values (1 and 0) rather than inc/dec? Also, rather than
1294+ ! drop through to ret_from_irq, we already know this thread
1295+ ! is kernel mode, can't we go direct to ret_from_kirq? In
1296+ ! fact, with proper interrupt nesting and so forth could
1297+ ! the loop simply be on the need_resched w/o checking the
1298+ ! other stuff again? Optimize later...
1299+ !
1300+ .align 2
1301+ret_from_kirq:
1302+ ! Nonzero preempt_count prevents scheduling
1303+ stc k_current, r1
1304+ mov.l @(preempt_count,r1), r0
1305+ cmp/eq #0, r0
1306+ bf restore_all
1307+ ! Zero need_resched prevents scheduling
1308+ mov.l @(need_resched,r1), r0
1309+ cmp/eq #0, r0
1310+ bt restore_all
1311+ ! If in_interrupt(), don't schedule
1312+ mov.l __irq_stat, r1
1313+ mov.l @(local_irq_count,r1), r0
1314+ mov.l @(local_bh_count,r1), r1
1315+ or r1, r0
1316+ cmp/eq #0, r0
1317+ bf restore_all
1318+ ! Allow scheduling using preempt_schedule
1319+ ! Adjust preempt_count and SR as needed.
1320+ stc k_current, r1
1321+ mov.l @(preempt_count,r1), r0 ! Could replace this ...
1322+ add #1, r0 ! ... and this w/mov #1?
1323+ mov.l r0, @(preempt_count,r1)
1324+ STI()
1325+ mov.l __preempt_schedule, r0
1326+ jsr @r0
1327+ nop
1328+ /* CLI */
1329+ stc sr, r0
1330+ or #0xf0, r0
1331+ ldc r0, sr
1332+ !
1333+ stc k_current, r1
1334+ mov.l @(preempt_count,r1), r0 ! Could replace this ...
1335+ add #-1, r0 ! ... and this w/mov #0?
1336+ mov.l r0, @(preempt_count,r1)
1337+ ! Maybe should bra ret_from_kirq, or loop over need_resched?
1338+ ! For now, fall through to ret_from_irq again...
1339+#endif /* CONFIG_PREEMPT */
1340+
1341 ret_from_irq:
1342+ mov #OFF_SR, r0
1343+ mov.l @(r0,r15), r0 ! get status register
1344+ shll r0
1345+ shll r0 ! kernel space?
1346+#ifndef CONFIG_PREEMPT
1347+ bt restore_all ! Yes, it's from kernel, go back soon
1348+#else /* CONFIG_PREEMPT */
1349+ bt ret_from_kirq ! From kernel: maybe preempt_schedule
1350+#endif /* CONFIG_PREEMPT */
1351+ !
1352+ bra ret_from_syscall
1353+ nop
1354+
1355 ret_from_exception:
1356 mov #OFF_SR, r0
1357 mov.l @(r0,r15), r0 ! get status register
1358@@ -564,6 +647,13 @@
1359 .long SYMBOL_NAME(do_signal)
1360 __irq_stat:
1361 .long SYMBOL_NAME(irq_stat)
1362+#ifdef CONFIG_PREEMPT
1363+__preempt_schedule:
1364+ .long SYMBOL_NAME(preempt_schedule)
1365+#endif /* CONFIG_PREEMPT */
1366+__INV_IMASK:
1367+ .long 0xffffff0f ! ~(IMASK)
1368+
1369
1370 .align 2
1371 restore_all:
1372@@ -679,7 +769,7 @@
1373 __fpu_prepare_fd:
1374 .long SYMBOL_NAME(fpu_prepare_fd)
1375 __init_task_flags:
1376- .long SYMBOL_NAME(init_task_union)+4
1377+ .long SYMBOL_NAME(init_task_union)+flags
1378 __PF_USEDFPU:
1379 .long PF_USEDFPU
1380 #endif
1381diff -urN linux-2.4.20/arch/sh/kernel/irq.c linux/arch/sh/kernel/irq.c
1382--- linux-2.4.20/arch/sh/kernel/irq.c 2001-09-08 15:29:09.000000000 -0400
1383+++ linux/arch/sh/kernel/irq.c 2003-04-11 17:02:55.711521384 -0400
1384@@ -229,6 +229,14 @@
1385 struct irqaction * action;
1386 unsigned int status;
1387
1388+ /*
1389+ * At this point we're now about to actually call handlers,
1390+ * and interrupts might get reenabled during them... bump
1391+ * preempt_count to prevent any preemption while the handler
1392+ * called here is pending...
1393+ */
1394+ preempt_disable();
1395+
1396 /* Get IRQ number */
1397 asm volatile("stc r2_bank, %0\n\t"
1398 "shlr2 %0\n\t"
1399@@ -298,8 +306,17 @@
1400 desc->handler->end(irq);
1401 spin_unlock(&desc->lock);
1402
1403+
1404 if (softirq_pending(cpu))
1405 do_softirq();
1406+
1407+ /*
1408+ * We're done with the handlers, interrupts should be
1409+ * currently disabled; decrement preempt_count now so
1410+ * as we return preemption may be allowed...
1411+ */
1412+ preempt_enable_no_resched();
1413+
1414 return 1;
1415 }
1416
1417diff -urN linux-2.4.20/CREDITS linux/CREDITS
1418--- linux-2.4.20/CREDITS 2002-11-28 18:53:08.000000000 -0500
1419+++ linux/CREDITS 2003-04-11 17:02:55.789509528 -0400
1420@@ -1001,8 +1001,8 @@
1421
1422 N: Nigel Gamble
1423 E: nigel@nrg.org
1424-E: nigel@sgi.com
1425 D: Interrupt-driven printer driver
1426+D: Preemptible kernel
1427 S: 120 Alley Way
1428 S: Mountain View, California 94040
1429 S: USA
1430diff -urN linux-2.4.20/Documentation/Configure.help linux/Documentation/Configure.help
1431--- linux-2.4.20/Documentation/Configure.help 2002-11-28 18:53:08.000000000 -0500
1432+++ linux/Documentation/Configure.help 2003-04-11 17:02:55.883495240 -0400
1433@@ -279,6 +279,17 @@
1434 If you have a system with several CPUs, you do not need to say Y
1435 here: the local APIC will be used automatically.
1436
1437+Preemptible Kernel
1438+CONFIG_PREEMPT
1439+ This option reduces the latency of the kernel when reacting to
1440+ real-time or interactive events by allowing a low priority process to
1441+ be preempted even if it is in kernel mode executing a system call.
1442+ This allows applications to run more reliably even when the system is
1443+ under load.
1444+
1445+ Say Y here if you are building a kernel for a desktop, embedded or
1446+ real-time system. Say N if you are unsure.
1447+
1448 Kernel math emulation
1449 CONFIG_MATH_EMULATION
1450 Linux can emulate a math coprocessor (used for floating point
1451diff -urN linux-2.4.20/Documentation/preempt-locking.txt linux/Documentation/preempt-locking.txt
1452--- linux-2.4.20/Documentation/preempt-locking.txt 1969-12-31 19:00:00.000000000 -0500
1453+++ linux/Documentation/preempt-locking.txt 2003-04-11 17:02:55.940486576 -0400
1454@@ -0,0 +1,104 @@
1455+ Proper Locking Under a Preemptible Kernel:
1456+ Keeping Kernel Code Preempt-Safe
1457+ Robert Love <rml@tech9.net>
1458+ Last Updated: 22 Jan 2002
1459+
1460+
1461+INTRODUCTION
1462+
1463+
1464+A preemptible kernel creates new locking issues. The issues are the same as
1465+those under SMP: concurrency and reentrancy. Thankfully, the Linux preemptible
1466+kernel model leverages existing SMP locking mechanisms. Thus, the kernel
1467+requires explicit additional locking for very few additional situations.
1468+
1469+This document is for all kernel hackers. Developing code in the kernel
1470+requires protecting these situations.
1471+
1472+
1473+RULE #1: Per-CPU data structures need explicit protection
1474+
1475+
1476+Two similar problems arise. An example code snippet:
1477+
1478+ struct this_needs_locking tux[NR_CPUS];
1479+ tux[smp_processor_id()] = some_value;
1480+ /* task is preempted here... */
1481+ something = tux[smp_processor_id()];
1482+
1483+First, since the data is per-CPU, it may not have explicit SMP locking, but
1484+require it otherwise. Second, when a preempted task is finally rescheduled,
1485+the previous value of smp_processor_id may not equal the current. You must
1486+protect these situations by disabling preemption around them.
1487+
1488+
1489+RULE #2: CPU state must be protected.
1490+
1491+
1492+Under preemption, the state of the CPU must be protected. This is arch-
1493+dependent, but includes CPU structures and state not preserved over a context
1494+switch. For example, on x86, entering and exiting FPU mode is now a critical
1495+section that must occur while preemption is disabled. Think what would happen
1496+if the kernel is executing a floating-point instruction and is then preempted.
1497+Remember, the kernel does not save FPU state except for user tasks. Therefore,
1498+upon preemption, the FPU registers will be sold to the lowest bidder. Thus,
1499+preemption must be disabled around such regions.
1500+
1501+Note, some FPU functions are already explicitly preempt safe. For example,
1502+kernel_fpu_begin and kernel_fpu_end will disable and enable preemption.
1503+However, math_state_restore must be called with preemption disabled.
1504+
1505+
1506+RULE #3: Lock acquire and release must be performed by same task
1507+
1508+
1509+A lock acquired in one task must be released by the same task. This
1510+means you can't do oddball things like acquire a lock and go off to
1511+play while another task releases it. If you want to do something
1512+like this, acquire and release the task in the same code path and
1513+have the caller wait on an event by the other task.
1514+
1515+
1516+SOLUTION
1517+
1518+
1519+Data protection under preemption is achieved by disabling preemption for the
1520+duration of the critical region.
1521+
1522+preempt_enable() decrement the preempt counter
1523+preempt_disable() increment the preempt counter
1524+preempt_enable_no_resched() decrement, but do not immediately preempt
1525+preempt_get_count() return the preempt counter
1526+
1527+The functions are nestable. In other words, you can call preempt_disable
1528+n-times in a code path, and preemption will not be reenabled until the n-th
1529+call to preempt_enable. The preempt statements define to nothing if
1530+preemption is not enabled.
1531+
1532+Note that you do not need to explicitly prevent preemption if you are holding
1533+any locks or interrupts are disabled, since preemption is implicitly disabled
1534+in those cases.
1535+
1536+Example:
1537+
1538+ cpucache_t *cc; /* this is per-CPU */
1539+ preempt_disable();
1540+ cc = cc_data(searchp);
1541+ if (cc && cc->avail) {
1542+ __free_block(searchp, cc_entry(cc), cc->avail);
1543+ cc->avail = 0;
1544+ }
1545+ preempt_enable();
1546+ return 0;
1547+
1548+Notice how the preemption statements must encompass every reference of the
1549+critical variables. Another example:
1550+
1551+ int buf[NR_CPUS];
1552+ set_cpu_val(buf);
1553+ if (buf[smp_processor_id()] == -1) printf(KERN_INFO "wee!\n");
1554+ spin_lock(&buf_lock);
1555+ /* ... */
1556+
1557+This code is not preempt-safe, but see how easily we can fix it by simply
1558+moving the spin_lock up two lines.
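
The two fragments at the end of the new document show the idea; a slightly fuller, self-contained sketch of the same rules follows. The hit_count array and count_hit() are illustrative names, not from the patch; preempt_disable(), preempt_enable() and preempt_get_count() are the calls the document defines.

#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>     /* the preempt_* macros live here in this patch */

static int hit_count[NR_CPUS];          /* per-CPU data: RULE #1 applies */

void count_hit(void)
{
        int cpu;

        preempt_disable();              /* pin: smp_processor_id() stays valid */
        cpu = smp_processor_id();
        hit_count[cpu]++;

        preempt_disable();              /* nesting is allowed ... */
        /* ... preempt_get_count() is now 2 ... */
        preempt_enable();               /* ... and this alone does not re-enable preemption */

        hit_count[cpu]++;               /* still on the same CPU */
        preempt_enable();               /* count drops to 0: preemption possible again */
}

As the document notes, holding a spinlock or running with interrupts disabled already implies the same guarantee, so the explicit pair is only needed for lock-free per-CPU accesses like this one.
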
1559diff -urN linux-2.4.20/drivers/ieee1394/csr.c linux/drivers/ieee1394/csr.c
1560--- linux-2.4.20/drivers/ieee1394/csr.c 2002-11-28 18:53:13.000000000 -0500
1561+++ linux/drivers/ieee1394/csr.c 2003-04-11 17:02:55.941486424 -0400
1562@@ -10,6 +10,7 @@
1563 */
1564
1565 #include <linux/string.h>
1566+#include <linux/sched.h>
1567
1568 #include "ieee1394_types.h"
1569 #include "hosts.h"
1570diff -urN linux-2.4.20/drivers/sound/sound_core.c linux/drivers/sound/sound_core.c
1571--- linux-2.4.20/drivers/sound/sound_core.c 2001-09-30 15:26:08.000000000 -0400
1572+++ linux/drivers/sound/sound_core.c 2003-04-11 17:02:55.977480952 -0400
1573@@ -37,6 +37,7 @@
1574 #include <linux/config.h>
1575 #include <linux/module.h>
1576 #include <linux/init.h>
1577+#include <linux/sched.h>
1578 #include <linux/slab.h>
1579 #include <linux/types.h>
1580 #include <linux/kernel.h>
1581diff -urN linux-2.4.20/fs/adfs/map.c linux/fs/adfs/map.c
1582--- linux-2.4.20/fs/adfs/map.c 2001-10-25 16:53:53.000000000 -0400
1583+++ linux/fs/adfs/map.c 2003-04-11 17:02:56.014475328 -0400
1584@@ -12,6 +12,7 @@
1585 #include <linux/fs.h>
1586 #include <linux/adfs_fs.h>
1587 #include <linux/spinlock.h>
1588+#include <linux/sched.h>
1589
1590 #include "adfs.h"
1591
1592diff -urN linux-2.4.20/fs/exec.c linux/fs/exec.c
1593--- linux-2.4.20/fs/exec.c 2002-11-28 18:53:15.000000000 -0500
1594+++ linux/fs/exec.c 2003-04-11 17:02:56.050469856 -0400
1595@@ -440,8 +440,8 @@
1596 active_mm = current->active_mm;
1597 current->mm = mm;
1598 current->active_mm = mm;
1599- task_unlock(current);
1600 activate_mm(active_mm, mm);
1601+ task_unlock(current);
1602 mm_release();
1603 if (old_mm) {
1604 if (active_mm != old_mm) BUG();
1605diff -urN linux-2.4.20/fs/fat/cache.c linux/fs/fat/cache.c
1606--- linux-2.4.20/fs/fat/cache.c 2001-10-12 16:48:42.000000000 -0400
1607+++ linux/fs/fat/cache.c 2003-04-11 17:02:56.085464536 -0400
1608@@ -14,6 +14,7 @@
1609 #include <linux/string.h>
1610 #include <linux/stat.h>
1611 #include <linux/fat_cvf.h>
1612+#include <linux/sched.h>
1613
1614 #if 0
1615 # define PRINTK(x) printk x
1616diff -urN linux-2.4.20/fs/nls/nls_base.c linux/fs/nls/nls_base.c
1617--- linux-2.4.20/fs/nls/nls_base.c 2002-08-02 20:39:45.000000000 -0400
1618+++ linux/fs/nls/nls_base.c 2003-04-11 17:02:56.121459064 -0400
1619@@ -18,6 +18,7 @@
1620 #ifdef CONFIG_KMOD
1621 #include <linux/kmod.h>
1622 #endif
1623+#include <linux/sched.h>
1624 #include <linux/spinlock.h>
1625
1626 static struct nls_table *tables;
1627diff -urN linux-2.4.20/include/asm-arm/dma.h linux/include/asm-arm/dma.h
1628--- linux-2.4.20/include/asm-arm/dma.h 2001-08-12 14:14:00.000000000 -0400
1629+++ linux/include/asm-arm/dma.h 2003-04-11 17:02:56.155453896 -0400
1630@@ -5,6 +5,7 @@
1631
1632 #include <linux/config.h>
1633 #include <linux/spinlock.h>
1634+#include <linux/sched.h>
1635 #include <asm/system.h>
1636 #include <asm/memory.h>
1637 #include <asm/scatterlist.h>
1638diff -urN linux-2.4.20/include/asm-arm/hardirq.h linux/include/asm-arm/hardirq.h
1639--- linux-2.4.20/include/asm-arm/hardirq.h 2001-10-11 12:04:57.000000000 -0400
1640+++ linux/include/asm-arm/hardirq.h 2003-04-11 17:02:56.156453744 -0400
1641@@ -34,6 +34,7 @@
1642 #define irq_exit(cpu,irq) (local_irq_count(cpu)--)
1643
1644 #define synchronize_irq() do { } while (0)
1645+#define release_irqlock(cpu) do { } while (0)
1646
1647 #else
1648 #error SMP not supported
1649diff -urN linux-2.4.20/include/asm-arm/pgalloc.h linux/include/asm-arm/pgalloc.h
1650--- linux-2.4.20/include/asm-arm/pgalloc.h 2001-08-12 14:14:00.000000000 -0400
1651+++ linux/include/asm-arm/pgalloc.h 2003-04-11 17:02:56.191448424 -0400
1652@@ -57,40 +57,48 @@
1653 {
1654 unsigned long *ret;
1655
1656+ preempt_disable();
1657 if ((ret = pgd_quicklist) != NULL) {
1658 pgd_quicklist = (unsigned long *)__pgd_next(ret);
1659 ret[1] = ret[2];
1660 clean_dcache_entry(ret + 1);
1661 pgtable_cache_size--;
1662 }
1663+ preempt_enable();
1664 return (pgd_t *)ret;
1665 }
1666
1667 static inline void free_pgd_fast(pgd_t *pgd)
1668 {
1669+ preempt_disable();
1670 __pgd_next(pgd) = (unsigned long) pgd_quicklist;
1671 pgd_quicklist = (unsigned long *) pgd;
1672 pgtable_cache_size++;
1673+ preempt_enable();
1674 }
1675
1676 static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
1677 {
1678 unsigned long *ret;
1679
1680+ preempt_disable();
1681 if((ret = pte_quicklist) != NULL) {
1682 pte_quicklist = (unsigned long *)__pte_next(ret);
1683 ret[0] = 0;
1684 clean_dcache_entry(ret);
1685 pgtable_cache_size--;
1686 }
1687+ preempt_enable();
1688 return (pte_t *)ret;
1689 }
1690
1691 static inline void free_pte_fast(pte_t *pte)
1692 {
1693+ preempt_disable();
1694 __pte_next(pte) = (unsigned long) pte_quicklist;
1695 pte_quicklist = (unsigned long *) pte;
1696 pgtable_cache_size++;
1697+ preempt_enable();
1698 }
1699
1700 #else /* CONFIG_NO_PGT_CACHE */
1701diff -urN linux-2.4.20/include/asm-arm/smplock.h linux/include/asm-arm/smplock.h
1702--- linux-2.4.20/include/asm-arm/smplock.h 2001-08-12 14:14:00.000000000 -0400
1703+++ linux/include/asm-arm/smplock.h 2003-04-11 17:02:56.227442952 -0400
1704@@ -3,12 +3,17 @@
1705 *
1706 * Default SMP lock implementation
1707 */
1708+#include <linux/config.h>
1709 #include <linux/interrupt.h>
1710 #include <linux/spinlock.h>
1711
1712 extern spinlock_t kernel_flag;
1713
1714+#ifdef CONFIG_PREEMPT
1715+#define kernel_locked() preempt_get_count()
1716+#else
1717 #define kernel_locked() spin_is_locked(&kernel_flag)
1718+#endif
1719
1720 /*
1721 * Release global kernel lock and global interrupt lock
1722@@ -40,8 +45,14 @@
1723 */
1724 static inline void lock_kernel(void)
1725 {
1726+#ifdef CONFIG_PREEMPT
1727+ if (current->lock_depth == -1)
1728+ spin_lock(&kernel_flag);
1729+ ++current->lock_depth;
1730+#else
1731 if (!++current->lock_depth)
1732 spin_lock(&kernel_flag);
1733+#endif
1734 }
1735
1736 static inline void unlock_kernel(void)
1737diff -urN linux-2.4.20/include/asm-arm/softirq.h linux/include/asm-arm/softirq.h
1738--- linux-2.4.20/include/asm-arm/softirq.h 2001-09-08 15:02:31.000000000 -0400
1739+++ linux/include/asm-arm/softirq.h 2003-04-11 17:02:56.228442800 -0400
1740@@ -5,20 +5,22 @@
1741 #include <asm/hardirq.h>
1742
1743 #define __cpu_bh_enable(cpu) \
1744- do { barrier(); local_bh_count(cpu)--; } while (0)
1745+ do { barrier(); local_bh_count(cpu)--; preempt_enable(); } while (0)
1746 #define cpu_bh_disable(cpu) \
1747- do { local_bh_count(cpu)++; barrier(); } while (0)
1748+ do { preempt_disable(); local_bh_count(cpu)++; barrier(); } while (0)
1749
1750 #define local_bh_disable() cpu_bh_disable(smp_processor_id())
1751 #define __local_bh_enable() __cpu_bh_enable(smp_processor_id())
1752
1753 #define in_softirq() (local_bh_count(smp_processor_id()) != 0)
1754
1755-#define local_bh_enable() \
1756+#define _local_bh_enable() \
1757 do { \
1758 unsigned int *ptr = &local_bh_count(smp_processor_id()); \
1759 if (!--*ptr && ptr[-2]) \
1760 __asm__("bl%? __do_softirq": : : "lr");/* out of line */\
1761 } while (0)
1762
1763+#define local_bh_enable() do { _local_bh_enable(); preempt_enable(); } while (0)
1764+
1765 #endif /* __ASM_SOFTIRQ_H */
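
With the change above, cpu_bh_disable()/__cpu_bh_enable() (and therefore local_bh_disable()/local_bh_enable()) also adjust the preempt count, so code that shares data with a bottom half keeps its existing protection on a preemptible kernel without further changes. A sketch, with pending_skb and stash_pending() as illustrative names:

#include <linux/interrupt.h>    /* local_bh_disable()/local_bh_enable() */
#include <linux/skbuff.h>

static struct sk_buff *pending_skb;     /* also written from a softirq handler */

void stash_pending(struct sk_buff *skb)
{
        local_bh_disable();     /* blocks softirqs on this CPU and, now, preemption */
        pending_skb = skb;
        local_bh_enable();      /* may run pending softirqs, then allows preemption again */
}
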
1766diff -urN linux-2.4.20/include/asm-arm/system.h linux/include/asm-arm/system.h
1767--- linux-2.4.20/include/asm-arm/system.h 2000-11-27 20:07:59.000000000 -0500
1768+++ linux/include/asm-arm/system.h 2003-04-11 17:02:56.228442800 -0400
1769@@ -62,6 +62,13 @@
1770 #define local_irq_disable() __cli()
1771 #define local_irq_enable() __sti()
1772
1773+#define irqs_disabled() \
1774+({ \
1775+ unsigned long cpsr_val; \
1776+ asm ("mrs %0, cpsr" : "=r" (cpsr_val)); \
1777+ cpsr_val & 128; \
1778+})
1779+
1780 #ifdef CONFIG_SMP
1781 #error SMP not supported
1782
1783diff -urN linux-2.4.20/include/asm-i386/desc.h linux/include/asm-i386/desc.h
1784--- linux-2.4.20/include/asm-i386/desc.h 2001-07-26 16:40:32.000000000 -0400
1785+++ linux/include/asm-i386/desc.h 2003-04-11 17:03:05.667007920 -0400
1786@@ -71,9 +71,12 @@
1787
1788 static inline void clear_LDT(void)
1789 {
1790- int cpu = smp_processor_id();
1791+ int cpu;
1792+ preempt_disable();
1793+ cpu = smp_processor_id();
1794 set_ldt_desc(cpu, &default_ldt[0], 5);
1795 __load_LDT(cpu);
1796+ preempt_enable();
1797 }
1798
1799 /*
1800diff -urN linux-2.4.20/include/asm-i386/hardirq.h linux/include/asm-i386/hardirq.h
1801--- linux-2.4.20/include/asm-i386/hardirq.h 2001-11-22 14:46:19.000000000 -0500
1802+++ linux/include/asm-i386/hardirq.h 2003-04-11 17:02:56.263437480 -0400
1803@@ -19,12 +19,16 @@
1804
1805 /*
1806 * Are we in an interrupt context? Either doing bottom half
1807- * or hardware interrupt processing?
1808+ * or hardware interrupt processing? Note the preempt check,
1809+ * this is both a bugfix and an optimization. If we are
1810+ * preemptible, we cannot be in an interrupt.
1811 */
1812-#define in_interrupt() ({ int __cpu = smp_processor_id(); \
1813- (local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
1814+#define in_interrupt() (preempt_is_disabled() && \
1815+ ({unsigned long __cpu = smp_processor_id(); \
1816+ (local_irq_count(__cpu) + local_bh_count(__cpu) != 0); }))
1817
1818-#define in_irq() (local_irq_count(smp_processor_id()) != 0)
1819+#define in_irq() (preempt_is_disabled() && \
1820+ (local_irq_count(smp_processor_id()) != 0))
1821
1822 #ifndef CONFIG_SMP
1823
1824@@ -36,6 +40,8 @@
1825
1826 #define synchronize_irq() barrier()
1827
1828+#define release_irqlock(cpu) do { } while (0)
1829+
1830 #else
1831
1832 #include <asm/atomic.h>
1833diff -urN linux-2.4.20/include/asm-i386/highmem.h linux/include/asm-i386/highmem.h
1834--- linux-2.4.20/include/asm-i386/highmem.h 2002-08-02 20:39:45.000000000 -0400
1835+++ linux/include/asm-i386/highmem.h 2003-04-11 17:02:56.297432312 -0400
1836@@ -88,6 +88,7 @@
1837 enum fixed_addresses idx;
1838 unsigned long vaddr;
1839
1840+ preempt_disable();
1841 if (page < highmem_start_page)
1842 return page_address(page);
1843
1844@@ -109,8 +110,10 @@
1845 unsigned long vaddr = (unsigned long) kvaddr;
1846 enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
1847
1848- if (vaddr < FIXADDR_START) // FIXME
1849+ if (vaddr < FIXADDR_START) { // FIXME
1850+ preempt_enable();
1851 return;
1852+ }
1853
1854 if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
1855 out_of_line_bug();
1856@@ -122,6 +125,8 @@
1857 pte_clear(kmap_pte-idx);
1858 __flush_tlb_one(vaddr);
1859 #endif
1860+
1861+ preempt_enable();
1862 }
1863
1864 #endif /* __KERNEL__ */
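Annotation, not part of the patch: kmap_atomic() selects a fixed per-CPU fixmap slot via smp_processor_id(), so the preempt_disable() added on entry and the preempt_enable() added in kunmap_atomic() pin the task to one CPU for the whole map/use/unmap window. A sketch of the expected usage pattern under the patched kernel; page, buf and len are placeholders:

	/* sketch only: atomic kmaps must stay short and non-sleeping */
	void *vaddr = kmap_atomic(page, KM_USER0);	/* disables preemption */
	memcpy(vaddr, buf, len);			/* brief per-CPU work */
	kunmap_atomic(vaddr, KM_USER0);			/* re-enables preemption */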
1865diff -urN linux-2.4.20/include/asm-i386/hw_irq.h linux/include/asm-i386/hw_irq.h
1866--- linux-2.4.20/include/asm-i386/hw_irq.h 2001-11-22 14:46:18.000000000 -0500
1867+++ linux/include/asm-i386/hw_irq.h 2003-04-11 17:02:56.333426840 -0400
1868@@ -95,6 +95,18 @@
1869 #define __STR(x) #x
1870 #define STR(x) __STR(x)
1871
1872+#define GET_CURRENT \
1873+ "movl %esp, %ebx\n\t" \
1874+ "andl $-8192, %ebx\n\t"
1875+
1876+#ifdef CONFIG_PREEMPT
1877+#define BUMP_LOCK_COUNT \
1878+ GET_CURRENT \
1879+ "incl 4(%ebx)\n\t"
1880+#else
1881+#define BUMP_LOCK_COUNT
1882+#endif
1883+
1884 #define SAVE_ALL \
1885 "cld\n\t" \
1886 "pushl %es\n\t" \
1887@@ -108,15 +120,12 @@
1888 "pushl %ebx\n\t" \
1889 "movl $" STR(__KERNEL_DS) ",%edx\n\t" \
1890 "movl %edx,%ds\n\t" \
1891- "movl %edx,%es\n\t"
1892+ "movl %edx,%es\n\t" \
1893+ BUMP_LOCK_COUNT
1894
1895 #define IRQ_NAME2(nr) nr##_interrupt(void)
1896 #define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
1897
1898-#define GET_CURRENT \
1899- "movl %esp, %ebx\n\t" \
1900- "andl $-8192, %ebx\n\t"
1901-
1902 /*
1903 * SMP has a few special interrupts for IPI messages
1904 */
1905diff -urN linux-2.4.20/include/asm-i386/i387.h linux/include/asm-i386/i387.h
1906--- linux-2.4.20/include/asm-i386/i387.h 2002-08-02 20:39:45.000000000 -0400
1907+++ linux/include/asm-i386/i387.h 2003-04-11 17:02:56.333426840 -0400
1908@@ -12,6 +12,7 @@
1909 #define __ASM_I386_I387_H
1910
1911 #include <linux/sched.h>
1912+#include <linux/spinlock.h>
1913 #include <asm/processor.h>
1914 #include <asm/sigcontext.h>
1915 #include <asm/user.h>
1916@@ -24,7 +25,7 @@
1917 extern void restore_fpu( struct task_struct *tsk );
1918
1919 extern void kernel_fpu_begin(void);
1920-#define kernel_fpu_end() stts()
1921+#define kernel_fpu_end() do { stts(); preempt_enable(); } while(0)
1922
1923
1924 #define unlazy_fpu( tsk ) do { \
1925diff -urN linux-2.4.20/include/asm-i386/pgalloc.h linux/include/asm-i386/pgalloc.h
1926--- linux-2.4.20/include/asm-i386/pgalloc.h 2002-08-02 20:39:45.000000000 -0400
1927+++ linux/include/asm-i386/pgalloc.h 2003-04-11 17:02:56.334426688 -0400
1928@@ -75,20 +75,26 @@
1929 {
1930 unsigned long *ret;
1931
1932+ preempt_disable();
1933 if ((ret = pgd_quicklist) != NULL) {
1934 pgd_quicklist = (unsigned long *)(*ret);
1935 ret[0] = 0;
1936 pgtable_cache_size--;
1937- } else
1938+ preempt_enable();
1939+ } else {
1940+ preempt_enable();
1941 ret = (unsigned long *)get_pgd_slow();
1942+ }
1943 return (pgd_t *)ret;
1944 }
1945
1946 static inline void free_pgd_fast(pgd_t *pgd)
1947 {
1948+ preempt_disable();
1949 *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
1950 pgd_quicklist = (unsigned long *) pgd;
1951 pgtable_cache_size++;
1952+ preempt_enable();
1953 }
1954
1955 static inline void free_pgd_slow(pgd_t *pgd)
1956@@ -119,19 +125,23 @@
1957 {
1958 unsigned long *ret;
1959
1960+ preempt_disable();
1961 if ((ret = (unsigned long *)pte_quicklist) != NULL) {
1962 pte_quicklist = (unsigned long *)(*ret);
1963 ret[0] = ret[1];
1964 pgtable_cache_size--;
1965 }
1966+ preempt_enable();
1967 return (pte_t *)ret;
1968 }
1969
1970 static inline void pte_free_fast(pte_t *pte)
1971 {
1972+ preempt_disable();
1973 *(unsigned long *)pte = (unsigned long) pte_quicklist;
1974 pte_quicklist = (unsigned long *) pte;
1975 pgtable_cache_size++;
1976+ preempt_enable();
1977 }
1978
1979 static __inline__ void pte_free_slow(pte_t *pte)
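Annotation, not part of the patch: the pgd/pte quicklists and pgtable_cache_size are per-CPU caches updated by plain, non-atomic read-modify-write sequences with no spinlock, which was safe only while nothing else could run on the same CPU in the middle of the sequence. With kernel preemption, the brackets added above close the following window:

	/* sketch only: the interleaving that preempt_disable()/preempt_enable() prevents */
	*(unsigned long *)pte = (unsigned long) pte_quicklist;	/* element -> old head */
	/* a preemption here can run another task on this CPU that pushes or
	 * pops the quicklist, so the store below would publish a stale head */
	pte_quicklist = (unsigned long *) pte;			/* head -> element */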
1980diff -urN linux-2.4.20/include/asm-i386/smplock.h linux/include/asm-i386/smplock.h
1981--- linux-2.4.20/include/asm-i386/smplock.h 2002-08-02 20:39:45.000000000 -0400
1982+++ linux/include/asm-i386/smplock.h 2003-04-11 17:02:56.369421368 -0400
1983@@ -11,7 +11,15 @@
1984 extern spinlock_cacheline_t kernel_flag_cacheline;
1985 #define kernel_flag kernel_flag_cacheline.lock
1986
1987+#ifdef CONFIG_SMP
1988 #define kernel_locked() spin_is_locked(&kernel_flag)
1989+#else
1990+#ifdef CONFIG_PREEMPT
1991+#define kernel_locked() preempt_get_count()
1992+#else
1993+#define kernel_locked() 1
1994+#endif
1995+#endif
1996
1997 /*
1998 * Release global kernel lock and global interrupt lock
1999@@ -43,6 +51,11 @@
2000 */
2001 static __inline__ void lock_kernel(void)
2002 {
2003+#ifdef CONFIG_PREEMPT
2004+ if (current->lock_depth == -1)
2005+ spin_lock(&kernel_flag);
2006+ ++current->lock_depth;
2007+#else
2008 #if 1
2009 if (!++current->lock_depth)
2010 spin_lock(&kernel_flag);
2011@@ -55,6 +68,7 @@
2012 :"=m" (__dummy_lock(&kernel_flag)),
2013 "=m" (current->lock_depth));
2014 #endif
2015+#endif
2016 }
2017
2018 static __inline__ void unlock_kernel(void)
2019diff -urN linux-2.4.20/include/asm-i386/softirq.h linux/include/asm-i386/softirq.h
2020--- linux-2.4.20/include/asm-i386/softirq.h 2002-08-02 20:39:45.000000000 -0400
2021+++ linux/include/asm-i386/softirq.h 2003-04-11 17:03:05.668007768 -0400
2022@@ -5,14 +5,15 @@
2023 #include <asm/hardirq.h>
2024
2025 #define __cpu_bh_enable(cpu) \
2026- do { barrier(); local_bh_count(cpu)--; } while (0)
2027+ do { barrier(); local_bh_count(cpu)--; preempt_enable(); } while (0)
2028 #define cpu_bh_disable(cpu) \
2029- do { local_bh_count(cpu)++; barrier(); } while (0)
2030+ do { preempt_disable(); local_bh_count(cpu)++; barrier(); } while (0)
2031
2032 #define local_bh_disable() cpu_bh_disable(smp_processor_id())
2033 #define __local_bh_enable() __cpu_bh_enable(smp_processor_id())
2034
2035-#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
2036+#define in_softirq() (preempt_is_disabled() && \
2037+ (local_bh_count(smp_processor_id()) != 0))
2038
2039 /*
2040 * NOTE: this assembly code assumes:
2041@@ -22,7 +23,7 @@
2042 * If you change the offsets in irq_stat then you have to
2043 * update this code as well.
2044 */
2045-#define local_bh_enable() \
2046+#define _local_bh_enable() \
2047 do { \
2048 unsigned int *ptr = &local_bh_count(smp_processor_id()); \
2049 \
2050@@ -45,4 +46,6 @@
2051 /* no registers clobbered */ ); \
2052 } while (0)
2053
2054+#define local_bh_enable() do { _local_bh_enable(); preempt_enable(); } while (0)
2055+
2056 #endif /* __ASM_SOFTIRQ_H */
2057diff -urN linux-2.4.20/include/asm-i386/spinlock.h linux/include/asm-i386/spinlock.h
2058--- linux-2.4.20/include/asm-i386/spinlock.h 2002-11-28 18:53:15.000000000 -0500
2059+++ linux/include/asm-i386/spinlock.h 2003-04-11 17:02:56.406415744 -0400
2060@@ -77,7 +77,7 @@
2061 :"=m" (lock->lock) : : "memory"
2062
2063
2064-static inline void spin_unlock(spinlock_t *lock)
2065+static inline void _raw_spin_unlock(spinlock_t *lock)
2066 {
2067 #if SPINLOCK_DEBUG
2068 if (lock->magic != SPINLOCK_MAGIC)
2069@@ -97,7 +97,7 @@
2070 :"=q" (oldval), "=m" (lock->lock) \
2071 :"0" (oldval) : "memory"
2072
2073-static inline void spin_unlock(spinlock_t *lock)
2074+static inline void _raw_spin_unlock(spinlock_t *lock)
2075 {
2076 char oldval = 1;
2077 #if SPINLOCK_DEBUG
2078@@ -113,7 +113,7 @@
2079
2080 #endif
2081
2082-static inline int spin_trylock(spinlock_t *lock)
2083+static inline int _raw_spin_trylock(spinlock_t *lock)
2084 {
2085 char oldval;
2086 __asm__ __volatile__(
2087@@ -123,7 +123,7 @@
2088 return oldval > 0;
2089 }
2090
2091-static inline void spin_lock(spinlock_t *lock)
2092+static inline void _raw_spin_lock(spinlock_t *lock)
2093 {
2094 #if SPINLOCK_DEBUG
2095 __label__ here;
2096@@ -179,7 +179,7 @@
2097 */
2098 /* the spinlock helpers are in arch/i386/kernel/semaphore.c */
2099
2100-static inline void read_lock(rwlock_t *rw)
2101+static inline void _raw_read_lock(rwlock_t *rw)
2102 {
2103 #if SPINLOCK_DEBUG
2104 if (rw->magic != RWLOCK_MAGIC)
2105@@ -188,7 +188,7 @@
2106 __build_read_lock(rw, "__read_lock_failed");
2107 }
2108
2109-static inline void write_lock(rwlock_t *rw)
2110+static inline void _raw_write_lock(rwlock_t *rw)
2111 {
2112 #if SPINLOCK_DEBUG
2113 if (rw->magic != RWLOCK_MAGIC)
2114@@ -197,10 +197,10 @@
2115 __build_write_lock(rw, "__write_lock_failed");
2116 }
2117
2118-#define read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
2119-#define write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
2120+#define _raw_read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
2121+#define _raw_write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
2122
2123-static inline int write_trylock(rwlock_t *lock)
2124+static inline int _raw_write_trylock(rwlock_t *lock)
2125 {
2126 atomic_t *count = (atomic_t *)lock;
2127 if (atomic_sub_and_test(RW_LOCK_BIAS, count))
2128diff -urN linux-2.4.20/include/asm-i386/system.h linux/include/asm-i386/system.h
2129--- linux-2.4.20/include/asm-i386/system.h 2002-11-28 18:53:15.000000000 -0500
2130+++ linux/include/asm-i386/system.h 2003-04-11 17:02:56.441410424 -0400
2131@@ -322,6 +322,13 @@
2132 /* used in the idle loop; sti takes one instruction cycle to complete */
2133 #define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
2134
2135+#define irqs_disabled() \
2136+({ \
2137+ unsigned long flags; \
2138+ __save_flags(flags); \
2139+ !(flags & (1<<9)); \
2140+})
2141+
2142 /* For spinlocks etc */
2143 #define local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
2144 #define local_irq_restore(x) __restore_flags(x)
2145diff -urN linux-2.4.20/include/asm-mips/smplock.h linux/include/asm-mips/smplock.h
2146--- linux-2.4.20/include/asm-mips/smplock.h 2002-08-02 20:39:45.000000000 -0400
2147+++ linux/include/asm-mips/smplock.h 2003-04-11 17:02:56.476405104 -0400
2148@@ -5,12 +5,21 @@
2149 *
2150 * Default SMP lock implementation
2151 */
2152+#include <linux/config.h>
2153 #include <linux/interrupt.h>
2154 #include <linux/spinlock.h>
2155
2156 extern spinlock_t kernel_flag;
2157
2158+#ifdef CONFIG_SMP
2159 #define kernel_locked() spin_is_locked(&kernel_flag)
2160+#else
2161+#ifdef CONFIG_PREEMPT
2162+#define kernel_locked() preempt_get_count()
2163+#else
2164+#define kernel_locked() 1
2165+#endif
2166+#endif
2167
2168 /*
2169 * Release global kernel lock and global interrupt lock
2170@@ -42,8 +51,14 @@
2171 */
2172 extern __inline__ void lock_kernel(void)
2173 {
2174+#ifdef CONFIG_PREEMPT
2175+ if (current->lock_depth == -1)
2176+ spin_lock(&kernel_flag);
2177+ ++current->lock_depth;
2178+#else
2179 if (!++current->lock_depth)
2180 spin_lock(&kernel_flag);
2181+#endif
2182 }
2183
2184 extern __inline__ void unlock_kernel(void)
2185diff -urN linux-2.4.20/include/asm-mips/softirq.h linux/include/asm-mips/softirq.h
2186--- linux-2.4.20/include/asm-mips/softirq.h 2002-11-28 18:53:15.000000000 -0500
2187+++ linux/include/asm-mips/softirq.h 2003-04-11 17:02:56.512399632 -0400
2188@@ -15,6 +15,7 @@
2189
2190 static inline void cpu_bh_disable(int cpu)
2191 {
2192+ preempt_disable();
2193 local_bh_count(cpu)++;
2194 barrier();
2195 }
2196@@ -23,6 +24,7 @@
2197 {
2198 barrier();
2199 local_bh_count(cpu)--;
2200+ preempt_enable();
2201 }
2202
2203
2204@@ -36,6 +38,7 @@
2205 cpu = smp_processor_id(); \
2206 if (!--local_bh_count(cpu) && softirq_pending(cpu)) \
2207 do_softirq(); \
2208+ preempt_enable(); \
2209 } while (0)
2210
2211 #define in_softirq() (local_bh_count(smp_processor_id()) != 0)
2212diff -urN linux-2.4.20/include/asm-mips/system.h linux/include/asm-mips/system.h
2213--- linux-2.4.20/include/asm-mips/system.h 2002-11-28 18:53:15.000000000 -0500
2214+++ linux/include/asm-mips/system.h 2003-04-11 17:02:56.513399480 -0400
2215@@ -322,4 +322,18 @@
2216 #define die_if_kernel(msg, regs) \
2217 __die_if_kernel(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)
2218
2219+extern __inline__ int intr_on(void)
2220+{
2221+ unsigned long flags;
2222+ save_flags(flags);
2223+ return flags & 1;
2224+}
2225+
2226+extern __inline__ int intr_off(void)
2227+{
2228+ return ! intr_on();
2229+}
2230+
2231+#define irqs_disabled() intr_off()
2232+
2233 #endif /* _ASM_SYSTEM_H */
2234diff -urN linux-2.4.20/include/asm-ppc/dma.h linux/include/asm-ppc/dma.h
2235--- linux-2.4.20/include/asm-ppc/dma.h 2001-05-21 18:02:06.000000000 -0400
2236+++ linux/include/asm-ppc/dma.h 2003-04-11 17:02:56.550393856 -0400
2237@@ -14,6 +14,7 @@
2238 #include <linux/config.h>
2239 #include <asm/io.h>
2240 #include <linux/spinlock.h>
2241+#include <linux/sched.h>
2242 #include <asm/system.h>
2243
2244 /*
2245diff -urN linux-2.4.20/include/asm-ppc/hardirq.h linux/include/asm-ppc/hardirq.h
2246--- linux-2.4.20/include/asm-ppc/hardirq.h 2002-11-28 18:53:15.000000000 -0500
2247+++ linux/include/asm-ppc/hardirq.h 2003-04-11 17:03:05.705002144 -0400
2248@@ -34,10 +34,12 @@
2249 * Are we in an interrupt context? Either doing bottom half
2250 * or hardware interrupt processing?
2251 */
2252-#define in_interrupt() ({ int __cpu = smp_processor_id(); \
2253- (local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
2254+#define in_interrupt() (preempt_is_disabled() && \
2255+ ({ unsigned long __cpu = smp_processor_id(); \
2256+ (local_irq_count(__cpu) + local_bh_count(__cpu) != 0); }))
2257
2258-#define in_irq() (local_irq_count(smp_processor_id()) != 0)
2259+#define in_irq() (preempt_is_disabled() && \
2260+ (local_irq_count(smp_processor_id()) != 0))
2261
2262 #ifndef CONFIG_SMP
2263
2264@@ -48,6 +50,7 @@
2265 #define hardirq_exit(cpu) (local_irq_count(cpu)--)
2266
2267 #define synchronize_irq() do { } while (0)
2268+#define release_irqlock(cpu) do { } while (0)
2269
2270 #else /* CONFIG_SMP */
2271
2272diff -urN linux-2.4.20/include/asm-ppc/highmem.h linux/include/asm-ppc/highmem.h
2273--- linux-2.4.20/include/asm-ppc/highmem.h 2001-07-02 17:34:57.000000000 -0400
2274+++ linux/include/asm-ppc/highmem.h 2003-04-11 17:02:56.587388232 -0400
2275@@ -84,6 +84,7 @@
2276 unsigned int idx;
2277 unsigned long vaddr;
2278
2279+ preempt_disable();
2280 if (page < highmem_start_page)
2281 return page_address(page);
2282
2283@@ -105,8 +106,10 @@
2284 unsigned long vaddr = (unsigned long) kvaddr;
2285 unsigned int idx = type + KM_TYPE_NR*smp_processor_id();
2286
2287- if (vaddr < KMAP_FIX_BEGIN) // FIXME
2288+ if (vaddr < KMAP_FIX_BEGIN) { // FIXME
2289+ preempt_enable();
2290 return;
2291+ }
2292
2293 if (vaddr != KMAP_FIX_BEGIN + idx * PAGE_SIZE)
2294 BUG();
2295@@ -118,6 +121,7 @@
2296 pte_clear(kmap_pte+idx);
2297 flush_tlb_page(0, vaddr);
2298 #endif
2299+ preempt_enable();
2300 }
2301
2302 #endif /* __KERNEL__ */
2303diff -urN linux-2.4.20/include/asm-ppc/hw_irq.h linux/include/asm-ppc/hw_irq.h
2304--- linux-2.4.20/include/asm-ppc/hw_irq.h 2002-11-28 18:53:15.000000000 -0500
2305+++ linux/include/asm-ppc/hw_irq.h 2003-04-11 17:02:56.623382760 -0400
2306@@ -22,6 +22,12 @@
2307 #define __save_flags(flags) __save_flags_ptr((unsigned long *)&flags)
2308 #define __save_and_cli(flags) ({__save_flags(flags);__cli();})
2309
2310+#define mfmsr() ({unsigned int rval; \
2311+ asm volatile("mfmsr %0" : "=r" (rval)); rval;})
2312+#define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v))
2313+
2314+#define irqs_disabled() ((mfmsr() & MSR_EE) == 0)
2315+
2316 extern void do_lost_interrupts(unsigned long);
2317
2318 #define mask_irq(irq) ({if (irq_desc[irq].handler && irq_desc[irq].handler->disable) irq_desc[irq].handler->disable(irq);})
2319diff -urN linux-2.4.20/include/asm-ppc/mmu_context.h linux/include/asm-ppc/mmu_context.h
2320--- linux-2.4.20/include/asm-ppc/mmu_context.h 2001-10-02 12:12:44.000000000 -0400
2321+++ linux/include/asm-ppc/mmu_context.h 2003-04-11 17:02:56.624382608 -0400
2322@@ -158,6 +158,10 @@
2323 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
2324 struct task_struct *tsk, int cpu)
2325 {
2326+#ifdef CONFIG_PREEMPT
2327+ if (preempt_get_count() == 0)
2328+ BUG();
2329+#endif
2330 tsk->thread.pgdir = next->pgd;
2331 get_mmu_context(next);
2332 set_context(next->context, next->pgd);
2333diff -urN linux-2.4.20/include/asm-ppc/pgalloc.h linux/include/asm-ppc/pgalloc.h
2334--- linux-2.4.20/include/asm-ppc/pgalloc.h 2001-05-21 18:02:06.000000000 -0400
2335+++ linux/include/asm-ppc/pgalloc.h 2003-04-11 17:02:56.662376832 -0400
2336@@ -68,20 +68,25 @@
2337 {
2338 unsigned long *ret;
2339
2340+ preempt_disable();
2341 if ((ret = pgd_quicklist) != NULL) {
2342 pgd_quicklist = (unsigned long *)(*ret);
2343 ret[0] = 0;
2344 pgtable_cache_size--;
2345+ preempt_enable();
2346 } else
2347+ preempt_enable();
2348 ret = (unsigned long *)get_pgd_slow();
2349 return (pgd_t *)ret;
2350 }
2351
2352 extern __inline__ void free_pgd_fast(pgd_t *pgd)
2353 {
2354+ preempt_disable();
2355 *(unsigned long **)pgd = pgd_quicklist;
2356 pgd_quicklist = (unsigned long *) pgd;
2357 pgtable_cache_size++;
2358+ preempt_enable();
2359 }
2360
2361 extern __inline__ void free_pgd_slow(pgd_t *pgd)
2362@@ -120,19 +125,23 @@
2363 {
2364 unsigned long *ret;
2365
2366+ preempt_disable();
2367 if ((ret = pte_quicklist) != NULL) {
2368 pte_quicklist = (unsigned long *)(*ret);
2369 ret[0] = 0;
2370 pgtable_cache_size--;
2371 }
2372+ preempt_enable();
2373 return (pte_t *)ret;
2374 }
2375
2376 extern __inline__ void pte_free_fast(pte_t *pte)
2377 {
2378+ preempt_disable();
2379 *(unsigned long **)pte = pte_quicklist;
2380 pte_quicklist = (unsigned long *) pte;
2381 pgtable_cache_size++;
2382+ preempt_enable();
2383 }
2384
2385 extern __inline__ void pte_free_slow(pte_t *pte)
2386diff -urN linux-2.4.20/include/asm-ppc/smplock.h linux/include/asm-ppc/smplock.h
2387--- linux-2.4.20/include/asm-ppc/smplock.h 2001-11-02 20:43:54.000000000 -0500
2388+++ linux/include/asm-ppc/smplock.h 2003-04-11 17:02:56.698371360 -0400
2389@@ -15,7 +15,15 @@
2390
2391 extern spinlock_t kernel_flag;
2392
2393+#ifdef CONFIG_SMP
2394 #define kernel_locked() spin_is_locked(&kernel_flag)
2395+#else
2396+#ifdef CONFIG_PREEMPT
2397+#define kernel_locked() preempt_get_count()
2398+#else
2399+#define kernel_locked() 1
2400+#endif
2401+#endif
2402
2403 /*
2404 * Release global kernel lock and global interrupt lock
2405@@ -47,8 +55,14 @@
2406 */
2407 static __inline__ void lock_kernel(void)
2408 {
2409+#ifdef CONFIG_PREEMPT
2410+ if (current->lock_depth == -1)
2411+ spin_lock(&kernel_flag);
2412+ ++current->lock_depth;
2413+#else
2414 if (!++current->lock_depth)
2415 spin_lock(&kernel_flag);
2416+#endif
2417 }
2418
2419 static __inline__ void unlock_kernel(void)
2420diff -urN linux-2.4.20/include/asm-ppc/softirq.h linux/include/asm-ppc/softirq.h
2421--- linux-2.4.20/include/asm-ppc/softirq.h 2001-09-08 15:02:31.000000000 -0400
2422+++ linux/include/asm-ppc/softirq.h 2003-04-11 17:03:05.741996520 -0400
2423@@ -10,6 +10,7 @@
2424
2425 #define local_bh_disable() \
2426 do { \
2427+ preempt_disable(); \
2428 local_bh_count(smp_processor_id())++; \
2429 barrier(); \
2430 } while (0)
2431@@ -18,9 +19,10 @@
2432 do { \
2433 barrier(); \
2434 local_bh_count(smp_processor_id())--; \
2435+ preempt_enable(); \
2436 } while (0)
2437
2438-#define local_bh_enable() \
2439+#define _local_bh_enable() \
2440 do { \
2441 if (!--local_bh_count(smp_processor_id()) \
2442 && softirq_pending(smp_processor_id())) { \
2443@@ -28,7 +30,14 @@
2444 } \
2445 } while (0)
2446
2447-#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
2448+#define local_bh_enable() \
2449+do { \
2450+ _local_bh_enable(); \
2451+ preempt_enable(); \
2452+} while (0)
2453+
2454+#define in_softirq() (preempt_is_disabled() && \
2455+ (local_bh_count(smp_processor_id()) != 0))
2456
2457 #endif /* __ASM_SOFTIRQ_H */
2458 #endif /* __KERNEL__ */
2459diff -urN linux-2.4.20/include/asm-sh/hardirq.h linux/include/asm-sh/hardirq.h
2460--- linux-2.4.20/include/asm-sh/hardirq.h 2001-09-08 15:29:09.000000000 -0400
2461+++ linux/include/asm-sh/hardirq.h 2003-04-11 17:02:56.737365432 -0400
2462@@ -34,6 +34,8 @@
2463
2464 #define synchronize_irq() barrier()
2465
2466+#define release_irqlock(cpu) do { } while (0)
2467+
2468 #else
2469
2470 #error Super-H SMP is not available
2471diff -urN linux-2.4.20/include/asm-sh/smplock.h linux/include/asm-sh/smplock.h
2472--- linux-2.4.20/include/asm-sh/smplock.h 2001-09-08 15:29:09.000000000 -0400
2473+++ linux/include/asm-sh/smplock.h 2003-04-11 17:02:56.738365280 -0400
2474@@ -9,15 +9,88 @@
2475
2476 #include <linux/config.h>
2477
2478-#ifndef CONFIG_SMP
2479-
2480+#if !defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT)
2481+/*
2482+ * Should never happen, since linux/smp_lock.h catches this case;
2483+ * but in case this file is included directly with neither SMP nor
2484+ * PREEMPT configuration, provide same dummys as linux/smp_lock.h
2485+ */
2486 #define lock_kernel() do { } while(0)
2487 #define unlock_kernel() do { } while(0)
2488-#define release_kernel_lock(task, cpu, depth) ((depth) = 1)
2489-#define reacquire_kernel_lock(task, cpu, depth) do { } while(0)
2490+#define release_kernel_lock(task, cpu) do { } while(0)
2491+#define reacquire_kernel_lock(task) do { } while(0)
2492+#define kernel_locked() 1
2493+
2494+#else /* CONFIG_SMP || CONFIG_PREEMPT */
2495+
2496+#ifdef CONFIG_SMP
2497+#error "We do not support SMP on SH yet"
2498+#endif
2499+/*
2500+ * Default SMP lock implementation (i.e. the i386 version)
2501+ */
2502+
2503+#include <linux/interrupt.h>
2504+#include <linux/spinlock.h>
2505+
2506+extern spinlock_t kernel_flag;
2507+#define lock_bkl() spin_lock(&kernel_flag)
2508+#define unlock_bkl() spin_unlock(&kernel_flag)
2509
2510+#ifdef CONFIG_SMP
2511+#define kernel_locked() spin_is_locked(&kernel_flag)
2512+#elif defined(CONFIG_PREEMPT)
2513+#define kernel_locked() preempt_get_count()
2514+#else /* neither */
2515+#define kernel_locked() 1
2516+#endif
2517+
2518+/*
2519+ * Release global kernel lock and global interrupt lock
2520+ */
2521+#define release_kernel_lock(task, cpu) \
2522+do { \
2523+ if (task->lock_depth >= 0) \
2524+ spin_unlock(&kernel_flag); \
2525+ release_irqlock(cpu); \
2526+ __sti(); \
2527+} while (0)
2528+
2529+/*
2530+ * Re-acquire the kernel lock
2531+ */
2532+#define reacquire_kernel_lock(task) \
2533+do { \
2534+ if (task->lock_depth >= 0) \
2535+ spin_lock(&kernel_flag); \
2536+} while (0)
2537+
2538+/*
2539+ * Getting the big kernel lock.
2540+ *
2541+ * This cannot happen asynchronously,
2542+ * so we only need to worry about other
2543+ * CPU's.
2544+ */
2545+static __inline__ void lock_kernel(void)
2546+{
2547+#ifdef CONFIG_PREEMPT
2548+ if (current->lock_depth == -1)
2549+ spin_lock(&kernel_flag);
2550+ ++current->lock_depth;
2551 #else
2552-#error "We do not support SMP on SH"
2553-#endif /* CONFIG_SMP */
2554+ if (!++current->lock_depth)
2555+ spin_lock(&kernel_flag);
2556+#endif
2557+}
2558+
2559+static __inline__ void unlock_kernel(void)
2560+{
2561+ if (current->lock_depth < 0)
2562+ BUG();
2563+ if (--current->lock_depth < 0)
2564+ spin_unlock(&kernel_flag);
2565+}
2566+#endif /* CONFIG_SMP || CONFIG_PREEMPT */
2567
2568 #endif /* __ASM_SH_SMPLOCK_H */
2569diff -urN linux-2.4.20/include/asm-sh/softirq.h linux/include/asm-sh/softirq.h
2570--- linux-2.4.20/include/asm-sh/softirq.h 2001-09-08 15:29:09.000000000 -0400
2571+++ linux/include/asm-sh/softirq.h 2003-04-11 17:02:56.775359656 -0400
2572@@ -6,6 +6,7 @@
2573
2574 #define local_bh_disable() \
2575 do { \
2576+ preempt_disable(); \
2577 local_bh_count(smp_processor_id())++; \
2578 barrier(); \
2579 } while (0)
2580@@ -14,6 +15,7 @@
2581 do { \
2582 barrier(); \
2583 local_bh_count(smp_processor_id())--; \
2584+ preempt_enable(); \
2585 } while (0)
2586
2587 #define local_bh_enable() \
2588@@ -23,6 +25,7 @@
2589 && softirq_pending(smp_processor_id())) { \
2590 do_softirq(); \
2591 } \
2592+ preempt_enable(); \
2593 } while (0)
2594
2595 #define in_softirq() (local_bh_count(smp_processor_id()) != 0)
2596diff -urN linux-2.4.20/include/asm-sh/system.h linux/include/asm-sh/system.h
2597--- linux-2.4.20/include/asm-sh/system.h 2001-09-08 15:29:09.000000000 -0400
2598+++ linux/include/asm-sh/system.h 2003-04-11 17:02:56.776359504 -0400
2599@@ -285,4 +285,17 @@
2600 void disable_hlt(void);
2601 void enable_hlt(void);
2602
2603+/*
2604+ * irqs_disabled - are interrupts disabled?
2605+ */
2606+static inline int irqs_disabled(void)
2607+{
2608+ unsigned long flags;
2609+
2610+ __save_flags(flags);
2611+ if (flags & 0x000000f0)
2612+ return 1;
2613+ return 0;
2614+}
2615+
2616 #endif
2617diff -urN linux-2.4.20/include/linux/brlock.h linux/include/linux/brlock.h
2618--- linux-2.4.20/include/linux/brlock.h 2002-11-28 18:53:15.000000000 -0500
2619+++ linux/include/linux/brlock.h 2003-04-11 17:02:56.812354032 -0400
2620@@ -171,11 +171,11 @@
2621 }
2622
2623 #else
2624-# define br_read_lock(idx) ((void)(idx))
2625-# define br_read_unlock(idx) ((void)(idx))
2626-# define br_write_lock(idx) ((void)(idx))
2627-# define br_write_unlock(idx) ((void)(idx))
2628-#endif
2629+# define br_read_lock(idx) ({ (void)(idx); preempt_disable(); })
2630+# define br_read_unlock(idx) ({ (void)(idx); preempt_enable(); })
2631+# define br_write_lock(idx) ({ (void)(idx); preempt_disable(); })
2632+# define br_write_unlock(idx) ({ (void)(idx); preempt_enable(); })
2633+#endif /* CONFIG_SMP */
2634
2635 /*
2636 * Now enumerate all of the possible sw/hw IRQ protected
2637diff -urN linux-2.4.20/include/linux/dcache.h linux/include/linux/dcache.h
2638--- linux-2.4.20/include/linux/dcache.h 2002-11-28 18:53:15.000000000 -0500
2639+++ linux/include/linux/dcache.h 2003-04-11 17:02:56.849348408 -0400
2640@@ -127,31 +127,6 @@
2641
2642 extern spinlock_t dcache_lock;
2643
2644-/**
2645- * d_drop - drop a dentry
2646- * @dentry: dentry to drop
2647- *
2648- * d_drop() unhashes the entry from the parent
2649- * dentry hashes, so that it won't be found through
2650- * a VFS lookup any more. Note that this is different
2651- * from deleting the dentry - d_delete will try to
2652- * mark the dentry negative if possible, giving a
2653- * successful _negative_ lookup, while d_drop will
2654- * just make the cache lookup fail.
2655- *
2656- * d_drop() is used mainly for stuff that wants
2657- * to invalidate a dentry for some reason (NFS
2658- * timeouts or autofs deletes).
2659- */
2660-
2661-static __inline__ void d_drop(struct dentry * dentry)
2662-{
2663- spin_lock(&dcache_lock);
2664- list_del(&dentry->d_hash);
2665- INIT_LIST_HEAD(&dentry->d_hash);
2666- spin_unlock(&dcache_lock);
2667-}
2668-
2669 static __inline__ int dname_external(struct dentry *d)
2670 {
2671 return d->d_name.name != d->d_iname;
2672@@ -276,3 +251,34 @@
2673 #endif /* __KERNEL__ */
2674
2675 #endif /* __LINUX_DCACHE_H */
2676+
2677+#if !defined(__LINUX_DCACHE_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
2678+#define __LINUX_DCACHE_H_INLINES
2679+
2680+#ifdef __KERNEL__
2681+/**
2682+ * d_drop - drop a dentry
2683+ * @dentry: dentry to drop
2684+ *
2685+ * d_drop() unhashes the entry from the parent
2686+ * dentry hashes, so that it won't be found through
2687+ * a VFS lookup any more. Note that this is different
2688+ * from deleting the dentry - d_delete will try to
2689+ * mark the dentry negative if possible, giving a
2690+ * successful _negative_ lookup, while d_drop will
2691+ * just make the cache lookup fail.
2692+ *
2693+ * d_drop() is used mainly for stuff that wants
2694+ * to invalidate a dentry for some reason (NFS
2695+ * timeouts or autofs deletes).
2696+ */
2697+
2698+static __inline__ void d_drop(struct dentry * dentry)
2699+{
2700+ spin_lock(&dcache_lock);
2701+ list_del(&dentry->d_hash);
2702+ INIT_LIST_HEAD(&dentry->d_hash);
2703+ spin_unlock(&dcache_lock);
2704+}
2705+#endif
2706+#endif
2707diff -urN linux-2.4.20/include/linux/fs_struct.h linux/include/linux/fs_struct.h
2708--- linux-2.4.20/include/linux/fs_struct.h 2001-07-13 18:10:44.000000000 -0400
2709+++ linux/include/linux/fs_struct.h 2003-04-11 17:02:56.852347952 -0400
2710@@ -20,6 +20,15 @@
2711 extern void exit_fs(struct task_struct *);
2712 extern void set_fs_altroot(void);
2713
2714+struct fs_struct *copy_fs_struct(struct fs_struct *old);
2715+void put_fs_struct(struct fs_struct *fs);
2716+
2717+#endif
2718+#endif
2719+
2720+#if !defined(_LINUX_FS_STRUCT_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
2721+#define _LINUX_FS_STRUCT_H_INLINES
2722+#ifdef __KERNEL__
2723 /*
2724 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
2725 * It can block. Requires the big lock held.
2726@@ -65,9 +74,5 @@
2727 mntput(old_pwdmnt);
2728 }
2729 }
2730-
2731-struct fs_struct *copy_fs_struct(struct fs_struct *old);
2732-void put_fs_struct(struct fs_struct *fs);
2733-
2734 #endif
2735 #endif
2736diff -urN linux-2.4.20/include/linux/sched.h linux/include/linux/sched.h
2737--- linux-2.4.20/include/linux/sched.h 2002-11-28 18:53:15.000000000 -0500
2738+++ linux/include/linux/sched.h 2003-04-11 17:02:56.908339440 -0400
2739@@ -91,6 +91,7 @@
2740 #define TASK_UNINTERRUPTIBLE 2
2741 #define TASK_ZOMBIE 4
2742 #define TASK_STOPPED 8
2743+#define PREEMPT_ACTIVE 0x4000000
2744
2745 #define __set_task_state(tsk, state_value) \
2746 do { (tsk)->state = (state_value); } while (0)
2747@@ -157,6 +158,9 @@
2748 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
2749 extern signed long FASTCALL(schedule_timeout(signed long timeout));
2750 asmlinkage void schedule(void);
2751+#ifdef CONFIG_PREEMPT
2752+asmlinkage void preempt_schedule(void);
2753+#endif
2754
2755 extern int schedule_task(struct tq_struct *task);
2756 extern void flush_scheduled_tasks(void);
2757@@ -289,7 +293,7 @@
2758 * offsets of these are hardcoded elsewhere - touch with care
2759 */
2760 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
2761- unsigned long flags; /* per process flags, defined below */
2762+ int preempt_count; /* 0 => preemptable, <0 => BUG */
2763 int sigpending;
2764 mm_segment_t addr_limit; /* thread address space:
2765 0-0xBFFFFFFF for user-thead
2766@@ -331,6 +335,7 @@
2767 struct mm_struct *active_mm;
2768 struct list_head local_pages;
2769 unsigned int allocation_order, nr_local_pages;
2770+ unsigned long flags;
2771
2772 /* task state */
2773 struct linux_binfmt *binfmt;
2774@@ -955,5 +960,10 @@
2775 __cond_resched();
2776 }
2777
2778+#define _TASK_STRUCT_DEFINED
2779+#include <linux/dcache.h>
2780+#include <linux/tqueue.h>
2781+#include <linux/fs_struct.h>
2782+
2783 #endif /* __KERNEL__ */
2784 #endif
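Annotation, not part of the patch: the sched.h hunk above puts preempt_count immediately after state and moves flags further down because, as the retained comment warns, these offsets are hardcoded elsewhere: the BUMP_LOCK_COUNT assembly added to asm-i386/hw_irq.h masks %esp to find the task_struct at the bottom of the 8 KB stack and then executes "incl 4(%ebx)", i.e. it bumps whatever 32-bit field sits at byte offset 4. A small, self-contained illustration of that layout assumption (struct task_struct_head and the whole program are hypothetical):

	#include <stddef.h>
	#include <stdio.h>

	struct task_struct_head {		/* first two members as the patch lays them out */
		volatile long state;		/* byte offset 0 */
		int preempt_count;		/* byte offset 4 on 32-bit x86 */
	};

	int main(void)
	{
		/* prints 4 when built as 32-bit code, matching "incl 4(%ebx)" */
		printf("preempt_count offset = %zu\n",
		       offsetof(struct task_struct_head, preempt_count));
		return 0;
	}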
2785diff -urN linux-2.4.20/include/linux/smp_lock.h linux/include/linux/smp_lock.h
2786--- linux-2.4.20/include/linux/smp_lock.h 2001-11-22 14:46:27.000000000 -0500
2787+++ linux/include/linux/smp_lock.h 2003-04-11 17:02:56.944333968 -0400
2788@@ -3,7 +3,7 @@
2789
2790 #include <linux/config.h>
2791
2792-#ifndef CONFIG_SMP
2793+#if !defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT)
2794
2795 #define lock_kernel() do { } while(0)
2796 #define unlock_kernel() do { } while(0)
2797diff -urN linux-2.4.20/include/linux/spinlock.h linux/include/linux/spinlock.h
2798--- linux-2.4.20/include/linux/spinlock.h 2002-11-28 18:53:15.000000000 -0500
2799+++ linux/include/linux/spinlock.h 2003-04-11 17:02:56.981328344 -0400
2800@@ -2,6 +2,7 @@
2801 #define __LINUX_SPINLOCK_H
2802
2803 #include <linux/config.h>
2804+#include <linux/compiler.h>
2805
2806 /*
2807 * These are the generic versions of the spinlocks and read-write
2808@@ -62,8 +63,10 @@
2809
2810 #if (DEBUG_SPINLOCKS < 1)
2811
2812+#ifndef CONFIG_PREEMPT
2813 #define atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
2814 #define ATOMIC_DEC_AND_LOCK
2815+#endif
2816
2817 /*
2818 * Your basic spinlocks, allowing only a single CPU anywhere
2819@@ -80,11 +83,11 @@
2820 #endif
2821
2822 #define spin_lock_init(lock) do { } while(0)
2823-#define spin_lock(lock) (void)(lock) /* Not "unused variable". */
2824+#define _raw_spin_lock(lock) (void)(lock) /* Not "unused variable". */
2825 #define spin_is_locked(lock) (0)
2826-#define spin_trylock(lock) ({1; })
2827+#define _raw_spin_trylock(lock) ({1; })
2828 #define spin_unlock_wait(lock) do { } while(0)
2829-#define spin_unlock(lock) do { } while(0)
2830+#define _raw_spin_unlock(lock) do { } while(0)
2831
2832 #elif (DEBUG_SPINLOCKS < 2)
2833
2834@@ -144,13 +147,78 @@
2835 #endif
2836
2837 #define rwlock_init(lock) do { } while(0)
2838-#define read_lock(lock) (void)(lock) /* Not "unused variable". */
2839-#define read_unlock(lock) do { } while(0)
2840-#define write_lock(lock) (void)(lock) /* Not "unused variable". */
2841-#define write_unlock(lock) do { } while(0)
2842+#define _raw_read_lock(lock) (void)(lock) /* Not "unused variable". */
2843+#define _raw_read_unlock(lock) do { } while(0)
2844+#define _raw_write_lock(lock) (void)(lock) /* Not "unused variable". */
2845+#define _raw_write_unlock(lock) do { } while(0)
2846
2847 #endif /* !SMP */
2848
2849+#ifdef CONFIG_PREEMPT
2850+
2851+#define preempt_get_count() (current->preempt_count)
2852+#define preempt_is_disabled() (preempt_get_count() != 0)
2853+
2854+#define preempt_disable() \
2855+do { \
2856+ ++current->preempt_count; \
2857+ barrier(); \
2858+} while (0)
2859+
2860+#define preempt_enable_no_resched() \
2861+do { \
2862+ --current->preempt_count; \
2863+ barrier(); \
2864+} while (0)
2865+
2866+#define preempt_enable() \
2867+do { \
2868+ --current->preempt_count; \
2869+ barrier(); \
2870+ if (unlikely(current->preempt_count < current->need_resched)) \
2871+ preempt_schedule(); \
2872+} while (0)
2873+
2874+#define spin_lock(lock) \
2875+do { \
2876+ preempt_disable(); \
2877+ _raw_spin_lock(lock); \
2878+} while(0)
2879+
2880+#define spin_trylock(lock) ({preempt_disable(); _raw_spin_trylock(lock) ? \
2881+ 1 : ({preempt_enable(); 0;});})
2882+#define spin_unlock(lock) \
2883+do { \
2884+ _raw_spin_unlock(lock); \
2885+ preempt_enable(); \
2886+} while (0)
2887+
2888+#define read_lock(lock) ({preempt_disable(); _raw_read_lock(lock);})
2889+#define read_unlock(lock) ({_raw_read_unlock(lock); preempt_enable();})
2890+#define write_lock(lock) ({preempt_disable(); _raw_write_lock(lock);})
2891+#define write_unlock(lock) ({_raw_write_unlock(lock); preempt_enable();})
2892+#define write_trylock(lock) ({preempt_disable();_raw_write_trylock(lock) ? \
2893+ 1 : ({preempt_enable(); 0;});})
2894+
2895+#else
2896+
2897+#define preempt_get_count() (0)
2898+#define preempt_is_disabled() (1)
2899+#define preempt_disable() do { } while (0)
2900+#define preempt_enable_no_resched() do {} while(0)
2901+#define preempt_enable() do { } while (0)
2902+
2903+#define spin_lock(lock) _raw_spin_lock(lock)
2904+#define spin_trylock(lock) _raw_spin_trylock(lock)
2905+#define spin_unlock(lock) _raw_spin_unlock(lock)
2906+
2907+#define read_lock(lock) _raw_read_lock(lock)
2908+#define read_unlock(lock) _raw_read_unlock(lock)
2909+#define write_lock(lock) _raw_write_lock(lock)
2910+#define write_unlock(lock) _raw_write_unlock(lock)
2911+#define write_trylock(lock) _raw_write_trylock(lock)
2912+#endif
2913+
2914 /* "lock on reference count zero" */
2915 #ifndef ATOMIC_DEC_AND_LOCK
2916 #include <asm/atomic.h>
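Annotation, not part of the patch: this linux/spinlock.h hunk is the core of the mechanism. The arch lock primitives were renamed to _raw_* precisely so the generic spin_lock()/spin_unlock() and rwlock wrappers could bracket them with preempt_disable()/preempt_enable(), turning every lock-held region into a non-preemptible region on UP as well as SMP. The "preempt_count < current->need_resched" test in preempt_enable() is a single-compare form of "the count just reached 0 and need_resched is set", which works because need_resched is only ever 0 or 1. A short sketch of how the brackets nest (some_lock and the function are placeholders):

	static spinlock_t some_lock = SPIN_LOCK_UNLOCKED;	/* placeholder lock */

	static void nested_bracket_example(void)		/* sketch only */
	{
		preempt_disable();		/* preempt_count: 0 -> 1 */
		spin_lock(&some_lock);		/* 1 -> 2, then _raw_spin_lock() */
		/* lock-protected or per-CPU work, no sleeping */
		spin_unlock(&some_lock);	/* _raw_spin_unlock(), 2 -> 1 */
		preempt_enable();		/* 1 -> 0; may call preempt_schedule() */
	}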
2917diff -urN linux-2.4.20/include/linux/tqueue.h linux/include/linux/tqueue.h
2918--- linux-2.4.20/include/linux/tqueue.h 2001-11-22 14:46:19.000000000 -0500
2919+++ linux/include/linux/tqueue.h 2003-04-11 17:02:56.989327128 -0400
2920@@ -94,6 +94,22 @@
2921 extern spinlock_t tqueue_lock;
2922
2923 /*
2924+ * Call all "bottom halfs" on a given list.
2925+ */
2926+
2927+extern void __run_task_queue(task_queue *list);
2928+
2929+static inline void run_task_queue(task_queue *list)
2930+{
2931+ if (TQ_ACTIVE(*list))
2932+ __run_task_queue(list);
2933+}
2934+
2935+#endif /* _LINUX_TQUEUE_H */
2936+
2937+#if !defined(_LINUX_TQUEUE_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
2938+#define _LINUX_TQUEUE_H_INLINES
2939+/*
2940 * Queue a task on a tq. Return non-zero if it was successfully
2941 * added.
2942 */
2943@@ -109,17 +125,4 @@
2944 }
2945 return ret;
2946 }
2947-
2948-/*
2949- * Call all "bottom halfs" on a given list.
2950- */
2951-
2952-extern void __run_task_queue(task_queue *list);
2953-
2954-static inline void run_task_queue(task_queue *list)
2955-{
2956- if (TQ_ACTIVE(*list))
2957- __run_task_queue(list);
2958-}
2959-
2960-#endif /* _LINUX_TQUEUE_H */
2961+#endif
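Annotation, not part of the patch: the dcache.h, fs_struct.h and tqueue.h moves above all address one include-order problem: spin_lock() now dereferences current->preempt_count, so inline helpers that take spinlocks can only be compiled after task_struct is fully defined. sched.h therefore defines _TASK_STRUCT_DEFINED and re-includes those three headers at its end, and each header compiles its lock-taking inlines only behind that guard. The guard pattern, condensed:

	/* sketch of the two-pass guard used by the relocated inlines */
	#if !defined(__LINUX_DCACHE_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
	#define __LINUX_DCACHE_H_INLINES
	/* inlines that call spin_lock()/spin_unlock() live here; they are only
	 * seen on the second inclusion, after linux/sched.h defined task_struct */
	#endif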
2962diff -urN linux-2.4.20/kernel/exit.c linux/kernel/exit.c
2963--- linux-2.4.20/kernel/exit.c 2002-11-28 18:53:15.000000000 -0500
2964+++ linux/kernel/exit.c 2003-04-11 17:03:05.778990896 -0400
2965@@ -282,7 +282,9 @@
2966 current->mm = NULL;
2967 /* active_mm is still 'mm' */
2968 atomic_inc(&mm->mm_count);
2969+ preempt_disable();
2970 enter_lazy_tlb(mm, current, smp_processor_id());
2971+ preempt_enable();
2972 return mm;
2973 }
2974
2975@@ -313,8 +315,8 @@
2976 /* more a memory barrier than a real lock */
2977 task_lock(tsk);
2978 tsk->mm = NULL;
2979- task_unlock(tsk);
2980 enter_lazy_tlb(mm, current, smp_processor_id());
2981+ task_unlock(tsk);
2982 mmput(mm);
2983 }
2984 }
2985@@ -435,6 +437,11 @@
2986 tsk->flags |= PF_EXITING;
2987 del_timer_sync(&tsk->real_timer);
2988
2989+ if (unlikely(preempt_get_count()))
2990+ printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
2991+ current->comm, current->pid,
2992+ preempt_get_count());
2993+
2994 fake_volatile:
2995 #ifdef CONFIG_BSD_PROCESS_ACCT
2996 acct_process(code);
2997diff -urN linux-2.4.20/kernel/fork.c linux/kernel/fork.c
2998--- linux-2.4.20/kernel/fork.c 2002-11-28 18:53:15.000000000 -0500
2999+++ linux/kernel/fork.c 2003-04-11 17:02:57.063315880 -0400
3000@@ -629,6 +629,13 @@
3001 if (p->binfmt && p->binfmt->module)
3002 __MOD_INC_USE_COUNT(p->binfmt->module);
3003
3004+#ifdef CONFIG_PREEMPT
3005+ /*
3006+ * Continue with preemption disabled as part of the context
3007+ * switch, so start with preempt_count set to 1.
3008+ */
3009+ p->preempt_count = 1;
3010+#endif
3011 p->did_exec = 0;
3012 p->swappable = 0;
3013 p->state = TASK_UNINTERRUPTIBLE;
3014diff -urN linux-2.4.20/kernel/ksyms.c linux/kernel/ksyms.c
3015--- linux-2.4.20/kernel/ksyms.c 2002-11-28 18:53:15.000000000 -0500
3016+++ linux/kernel/ksyms.c 2003-04-11 17:02:57.098310560 -0400
3017@@ -450,6 +450,9 @@
3018 EXPORT_SYMBOL(interruptible_sleep_on);
3019 EXPORT_SYMBOL(interruptible_sleep_on_timeout);
3020 EXPORT_SYMBOL(schedule);
3021+#ifdef CONFIG_PREEMPT
3022+EXPORT_SYMBOL(preempt_schedule);
3023+#endif
3024 EXPORT_SYMBOL(schedule_timeout);
3025 EXPORT_SYMBOL(yield);
3026 EXPORT_SYMBOL(__cond_resched);
3027diff -urN linux-2.4.20/kernel/sched.c linux/kernel/sched.c
3028--- linux-2.4.20/kernel/sched.c 2002-11-28 18:53:15.000000000 -0500
3029+++ linux/kernel/sched.c 2003-04-11 17:02:57.135304936 -0400
3030@@ -489,7 +489,7 @@
3031 task_lock(prev);
3032 task_release_cpu(prev);
3033 mb();
3034- if (prev->state == TASK_RUNNING)
3035+ if (task_on_runqueue(prev))
3036 goto needs_resched;
3037
3038 out_unlock:
3039@@ -519,7 +519,7 @@
3040 goto out_unlock;
3041
3042 spin_lock_irqsave(&runqueue_lock, flags);
3043- if ((prev->state == TASK_RUNNING) && !task_has_cpu(prev))
3044+ if (task_on_runqueue(prev) && !task_has_cpu(prev))
3045 reschedule_idle(prev);
3046 spin_unlock_irqrestore(&runqueue_lock, flags);
3047 goto out_unlock;
3048@@ -532,6 +532,7 @@
3049 asmlinkage void schedule_tail(struct task_struct *prev)
3050 {
3051 __schedule_tail(prev);
3052+ preempt_enable();
3053 }
3054
3055 /*
3056@@ -551,9 +552,10 @@
3057 struct list_head *tmp;
3058 int this_cpu, c;
3059
3060-
3061 spin_lock_prefetch(&runqueue_lock);
3062
3063+ preempt_disable();
3064+
3065 BUG_ON(!current->active_mm);
3066 need_resched_back:
3067 prev = current;
3068@@ -581,6 +583,14 @@
3069 move_last_runqueue(prev);
3070 }
3071
3072+#ifdef CONFIG_PREEMPT
3073+ /*
3074+ * entering from preempt_schedule, off a kernel preemption,
3075+ * go straight to picking the next task.
3076+ */
3077+ if (unlikely(preempt_get_count() & PREEMPT_ACTIVE))
3078+ goto treat_like_run;
3079+#endif
3080 switch (prev->state) {
3081 case TASK_INTERRUPTIBLE:
3082 if (signal_pending(prev)) {
3083@@ -591,6 +601,9 @@
3084 del_from_runqueue(prev);
3085 case TASK_RUNNING:;
3086 }
3087+#ifdef CONFIG_PREEMPT
3088+ treat_like_run:
3089+#endif
3090 prev->need_resched = 0;
3091
3092 /*
3093@@ -699,9 +712,31 @@
3094 reacquire_kernel_lock(current);
3095 if (current->need_resched)
3096 goto need_resched_back;
3097+ preempt_enable_no_resched();
3098 return;
3099 }
3100
3101+#ifdef CONFIG_PREEMPT
3102+/*
3103+ * this is the entry point to schedule() from in-kernel preemption
3104+ */
3105+asmlinkage void preempt_schedule(void)
3106+{
3107+ if (unlikely(irqs_disabled()))
3108+ return;
3109+
3110+need_resched:
3111+ current->preempt_count += PREEMPT_ACTIVE;
3112+ schedule();
3113+ current->preempt_count -= PREEMPT_ACTIVE;
3114+
3115+ /* we could miss a preemption opportunity between schedule and now */
3116+ barrier();
3117+ if (unlikely(current->need_resched))
3118+ goto need_resched;
3119+}
3120+#endif /* CONFIG_PREEMPT */
3121+
3122 /*
3123 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just wake everything
3124 * up. If it's an exclusive wakeup (nr_exclusive == small +ve number) then we wake all the
3125@@ -1327,6 +1362,13 @@
3126 sched_data->curr = current;
3127 sched_data->last_schedule = get_cycles();
3128 clear_bit(current->processor, &wait_init_idle);
3129+#ifdef CONFIG_PREEMPT
3130+ /*
3131+ * fix up the preempt_count for non-CPU0 idle threads
3132+ */
3133+ if (current->processor)
3134+ current->preempt_count = 0;
3135+#endif
3136 }
3137
3138 extern void init_timervecs (void);
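Annotation, not part of the patch: preempt_schedule() is entered when a preemption point (preempt_enable(), and in the full patch also the interrupt-return path in the arch entry code) finds preempt_count back at zero while need_resched is set. It bails out if interrupts are disabled, and it adds PREEMPT_ACTIVE to preempt_count around schedule() so the treat_like_run shortcut above skips the prev->state switch; a task preempted after it has already set TASK_INTERRUPTIBLE is therefore left on the runqueue rather than being put half-way to sleep. A condensed sketch of the caller side (the simplified form of the preempt_enable() macro in linux/spinlock.h):

	/* sketch only: how an enable point hands off to the scheduler */
	--current->preempt_count;
	if (current->preempt_count == 0 && current->need_resched)
		preempt_schedule();	/* preempt_count += PREEMPT_ACTIVE;    */
					/* schedule();  takes treat_like_run   */
					/* preempt_count -= PREEMPT_ACTIVE;    */
					/* loops if need_resched rose again    */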
3139diff -urN linux-2.4.20/kernel/softirq.c linux/kernel/softirq.c
3140--- linux-2.4.20/kernel/softirq.c 2002-11-28 18:53:15.000000000 -0500
3141+++ linux/kernel/softirq.c 2003-04-11 17:03:05.853979496 -0400
3142@@ -60,7 +60,7 @@
3143
3144 asmlinkage void do_softirq()
3145 {
3146- int cpu = smp_processor_id();
3147+ int cpu;
3148 __u32 pending;
3149 unsigned long flags;
3150 __u32 mask;
3151@@ -70,6 +70,8 @@
3152
3153 local_irq_save(flags);
3154
3155+ cpu = smp_processor_id();
3156+
3157 pending = softirq_pending(cpu);
3158
3159 if (pending) {
3160@@ -151,10 +153,11 @@
3161
3162 void __tasklet_schedule(struct tasklet_struct *t)
3163 {
3164- int cpu = smp_processor_id();
3165+ int cpu;
3166 unsigned long flags;
3167
3168 local_irq_save(flags);
3169+ cpu = smp_processor_id();
3170 t->next = tasklet_vec[cpu].list;
3171 tasklet_vec[cpu].list = t;
3172 cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
3173@@ -175,10 +178,11 @@
3174
3175 static void tasklet_action(struct softirq_action *a)
3176 {
3177- int cpu = smp_processor_id();
3178+ int cpu;
3179 struct tasklet_struct *list;
3180
3181 local_irq_disable();
3182+ cpu = smp_processor_id();
3183 list = tasklet_vec[cpu].list;
3184 tasklet_vec[cpu].list = NULL;
3185 local_irq_enable();
3186@@ -209,10 +213,11 @@
3187
3188 static void tasklet_hi_action(struct softirq_action *a)
3189 {
3190- int cpu = smp_processor_id();
3191+ int cpu;
3192 struct tasklet_struct *list;
3193
3194 local_irq_disable();
3195+ cpu = smp_processor_id();
3196 list = tasklet_hi_vec[cpu].list;
3197 tasklet_hi_vec[cpu].list = NULL;
3198 local_irq_enable();
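Annotation, not part of the patch: the softirq.c hunks above, like the later net/core ones, are all the same mechanical fix. Once the kernel can preempt, a task may migrate to another CPU at any point where preempt_count is zero and interrupts are enabled, so smp_processor_id() is now sampled only after local_irq_save()/local_irq_disable(), which also keeps preemption away on that CPU. The shape of the transformation; use_per_cpu_data() is a placeholder:

	/* sketch only: pre-patch shape, racy under kernel preemption */
	unsigned long flags;
	int cpu = smp_processor_id();	/* the task can migrate right after this read */
	local_irq_save(flags);
	use_per_cpu_data(cpu);		/* may now touch another CPU's data */
	local_irq_restore(flags);

	/* patched shape: read the id only once interrupts are off */
	local_irq_save(flags);
	cpu = smp_processor_id();	/* stable until local_irq_restore() */
	use_per_cpu_data(cpu);
	local_irq_restore(flags);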
3199diff -urN linux-2.4.20/lib/dec_and_lock.c linux/lib/dec_and_lock.c
3200--- linux-2.4.20/lib/dec_and_lock.c 2001-10-03 12:11:26.000000000 -0400
3201+++ linux/lib/dec_and_lock.c 2003-04-11 17:02:57.173299160 -0400
3202@@ -1,5 +1,6 @@
3203 #include <linux/module.h>
3204 #include <linux/spinlock.h>
3205+#include <linux/sched.h>
3206 #include <asm/atomic.h>
3207
3208 /*
3209diff -urN linux-2.4.20/MAINTAINERS linux/MAINTAINERS
3210--- linux-2.4.20/MAINTAINERS 2002-11-28 18:53:08.000000000 -0500
3211+++ linux/MAINTAINERS 2003-04-11 17:02:57.244288368 -0400
3212@@ -1310,6 +1310,14 @@
3213 M: mostrows@styx.uwaterloo.ca
3214 S: Maintained
3215
3216+PREEMPTIBLE KERNEL
3217+P: Robert M. Love
3218+M: rml@tech9.net
3219+L: linux-kernel@vger.kernel.org
3220+L: kpreempt-tech@lists.sourceforge.net
3221+W: http://tech9.net/rml/linux
3222+S: Supported
3223+
3224 PROMISE DC4030 CACHING DISK CONTROLLER DRIVER
3225 P: Peter Denison
3226 M: promise@pnd-pc.demon.co.uk
3227diff -urN linux-2.4.20/mm/slab.c linux/mm/slab.c
3228--- linux-2.4.20/mm/slab.c 2002-11-28 18:53:15.000000000 -0500
3229+++ linux/mm/slab.c 2003-04-11 17:03:05.946965360 -0400
3230@@ -49,7 +49,8 @@
3231 * constructors and destructors are called without any locking.
3232 * Several members in kmem_cache_t and slab_t never change, they
3233 * are accessed without any locking.
3234- * The per-cpu arrays are never accessed from the wrong cpu, no locking.
3235+ * The per-cpu arrays are never accessed from the wrong cpu, no locking,
3236+ * and local interrupts are disabled so slab code is preempt-safe.
3237 * The non-constant members are protected with a per-cache irq spinlock.
3238 *
3239 * Further notes from the original documentation:
3240@@ -858,12 +859,14 @@
3241 */
3242 static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg)
3243 {
3244+ preempt_disable();
3245 local_irq_disable();
3246 func(arg);
3247 local_irq_enable();
3248
3249 if (smp_call_function(func, arg, 1, 1))
3250 BUG();
3251+ preempt_enable();
3252 }
3253 typedef struct ccupdate_struct_s
3254 {
3255diff -urN linux-2.4.20/net/core/dev.c linux/net/core/dev.c
3256--- linux-2.4.20/net/core/dev.c 2002-11-28 18:53:15.000000000 -0500
3257+++ linux/net/core/dev.c 2003-04-11 17:03:06.026953200 -0400
3258@@ -1049,9 +1049,15 @@
3259 int cpu = smp_processor_id();
3260
3261 if (dev->xmit_lock_owner != cpu) {
3262+ /*
3263+		 * The spin_lock effectively does a preempt lock, but
3264+ * we are about to drop that...
3265+ */
3266+ preempt_disable();
3267 spin_unlock(&dev->queue_lock);
3268 spin_lock(&dev->xmit_lock);
3269 dev->xmit_lock_owner = cpu;
3270+ preempt_enable();
3271
3272 if (!netif_queue_stopped(dev)) {
3273 if (netdev_nit)
3274@@ -1230,7 +1236,7 @@
3275
3276 int netif_rx(struct sk_buff *skb)
3277 {
3278- int this_cpu = smp_processor_id();
3279+ int this_cpu;
3280 struct softnet_data *queue;
3281 unsigned long flags;
3282
3283@@ -1240,9 +1246,10 @@
3284 /* The code is rearranged so that the path is the most
3285 short when CPU is congested, but is still operating.
3286 */
3287- queue = &softnet_data[this_cpu];
3288
3289 local_irq_save(flags);
3290+ this_cpu = smp_processor_id();
3291+ queue = &softnet_data[this_cpu];
3292
3293 netdev_rx_stat[this_cpu].total++;
3294 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
3295diff -urN linux-2.4.20/net/core/skbuff.c linux/net/core/skbuff.c
3296--- linux-2.4.20/net/core/skbuff.c 2002-08-02 20:39:46.000000000 -0400
3297+++ linux/net/core/skbuff.c 2003-04-11 17:02:57.333274840 -0400
3298@@ -111,33 +111,37 @@
3299
3300 static __inline__ struct sk_buff *skb_head_from_pool(void)
3301 {
3302- struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;
3303+ struct sk_buff_head *list;
3304+ struct sk_buff *skb = NULL;
3305+ unsigned long flags;
3306
3307- if (skb_queue_len(list)) {
3308- struct sk_buff *skb;
3309- unsigned long flags;
3310+ local_irq_save(flags);
3311
3312- local_irq_save(flags);
3313+ list = &skb_head_pool[smp_processor_id()].list;
3314+
3315+ if (skb_queue_len(list))
3316 skb = __skb_dequeue(list);
3317- local_irq_restore(flags);
3318- return skb;
3319- }
3320- return NULL;
3321+
3322+ local_irq_restore(flags);
3323+ return skb;
3324 }
3325
3326 static __inline__ void skb_head_to_pool(struct sk_buff *skb)
3327 {
3328- struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;
3329+ struct sk_buff_head *list;
3330+ unsigned long flags;
3331
3332- if (skb_queue_len(list) < sysctl_hot_list_len) {
3333- unsigned long flags;
3334+ local_irq_save(flags);
3335+ list = &skb_head_pool[smp_processor_id()].list;
3336
3337- local_irq_save(flags);
3338+ if (skb_queue_len(list) < sysctl_hot_list_len) {
3339 __skb_queue_head(list, skb);
3340 local_irq_restore(flags);
3341
3342 return;
3343 }
3344+
3345+ local_irq_restore(flags);
3346 kmem_cache_free(skbuff_head_cache, skb);
3347 }
3348
3349diff -urN linux-2.4.20/net/socket.c linux/net/socket.c
3350--- linux-2.4.20/net/socket.c 2002-11-28 18:53:16.000000000 -0500
3351+++ linux/net/socket.c 2003-04-11 17:02:57.374268608 -0400
3352@@ -132,7 +132,7 @@
3353
3354 static struct net_proto_family *net_families[NPROTO];
3355
3356-#ifdef CONFIG_SMP
3357+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
3358 static atomic_t net_family_lockct = ATOMIC_INIT(0);
3359 static spinlock_t net_family_lock = SPIN_LOCK_UNLOCKED;
3360
3361diff -urN linux-2.4.20/net/sunrpc/pmap_clnt.c linux/net/sunrpc/pmap_clnt.c
3362--- linux-2.4.20/net/sunrpc/pmap_clnt.c 2002-08-02 20:39:46.000000000 -0400
3363+++ linux/net/sunrpc/pmap_clnt.c 2003-04-11 17:02:57.409263288 -0400
3364@@ -12,6 +12,7 @@
3365 #include <linux/config.h>
3366 #include <linux/types.h>
3367 #include <linux/socket.h>
3368+#include <linux/sched.h>
3369 #include <linux/kernel.h>
3370 #include <linux/errno.h>
3371 #include <linux/uio.h>