2 Documentation/Configure.help | 11 ++++
3 Documentation/preempt-locking.txt | 104 ++++++++++++++++++++++++++++++++++++++
6 arch/arm/kernel/entry-armv.S | 40 ++++++++++++++
7 arch/arm/tools/getconstants.c | 6 ++
8 arch/i386/config.in | 8 ++
9 arch/i386/kernel/entry.S | 49 +++++++++++++++++
10 arch/i386/kernel/i387.c | 3 +
11 arch/i386/kernel/smp.c | 24 ++++++--
12 arch/i386/kernel/traps.c | 2
13 arch/i386/lib/dec_and_lock.c | 1
14 arch/mips/config-shared.in | 1
15 arch/mips/kernel/i8259.c | 1
16 arch/mips/kernel/irq.c | 29 ++++++++++
17 arch/mips/mm/extable.c | 1
18 arch/ppc/config.in | 2
19 arch/ppc/kernel/entry.S | 35 ++++++++++++
20 arch/ppc/kernel/irq.c | 28 ++++++++++
21 arch/ppc/kernel/mk_defs.c | 3 +
22 arch/ppc/kernel/setup.c | 14 +++++
23 arch/ppc/lib/dec_and_lock.c | 1
25 arch/sh/kernel/entry.S | 104 +++++++++++++++++++++++++++++++++++---
26 arch/sh/kernel/irq.c | 17 ++++++
27 drivers/ieee1394/csr.c | 1
28 drivers/sound/sound_core.c | 1
33 include/asm-arm/dma.h | 1
34 include/asm-arm/hardirq.h | 1
35 include/asm-arm/pgalloc.h | 8 ++
36 include/asm-arm/smplock.h | 11 ++++
37 include/asm-arm/softirq.h | 8 +-
38 include/asm-arm/system.h | 7 ++
39 include/asm-i386/hardirq.h | 14 +++--
40 include/asm-i386/highmem.h | 7 ++
41 include/asm-i386/hw_irq.h | 19 +++++-
42 include/asm-i386/i387.h | 3 -
43 include/asm-i386/pgalloc.h | 12 ++++
44 include/asm-i386/smplock.h | 14 +++++
45 include/asm-i386/softirq.h | 8 +-
46 include/asm-i386/spinlock.h | 18 +++---
47 include/asm-i386/system.h | 7 ++
48 include/asm-mips/smplock.h | 15 +++++
49 include/asm-mips/softirq.h | 3 +
50 include/asm-mips/system.h | 14 +++++
51 include/asm-ppc/dma.h | 1
52 include/asm-ppc/hardirq.h | 1
53 include/asm-ppc/highmem.h | 6 +-
54 include/asm-ppc/hw_irq.h | 6 ++
55 include/asm-ppc/mmu_context.h | 4 +
56 include/asm-ppc/pgalloc.h | 9 +++
57 include/asm-ppc/smplock.h | 14 +++++
58 include/asm-ppc/softirq.h | 10 +++
59 include/asm-sh/hardirq.h | 2
60 include/asm-sh/smplock.h | 85 ++++++++++++++++++++++++++++---
61 include/asm-sh/softirq.h | 3 +
62 include/asm-sh/system.h | 13 ++++
63 include/linux/brlock.h | 10 +--
64 include/linux/dcache.h | 56 +++++++++++---------
65 include/linux/fs_struct.h | 13 +++-
66 include/linux/sched.h | 12 ++++
67 include/linux/smp_lock.h | 2
68 include/linux/spinlock.h | 82 +++++++++++++++++++++++++++--
69 include/linux/tqueue.h | 31 ++++++-----
73 kernel/sched.c | 48 ++++++++++++++++-
74 lib/dec_and_lock.c | 1
77 net/core/skbuff.c | 30 ++++++----
79 net/sunrpc/pmap_clnt.c | 1
80 79 files changed, 1011 insertions(+), 131 deletions(-)
83 diff -urN linux-2.4.20/arch/arm/config.in linux/arch/arm/config.in
84 --- linux-2.4.20/arch/arm/config.in 2002-11-28 18:53:09.000000000 -0500
85 +++ linux/arch/arm/config.in 2002-12-11 02:34:47.000000000 -0500
88 define_bool CONFIG_DISCONTIGMEM n
91 +dep_bool 'Preemptible Kernel' CONFIG_PREEMPT $CONFIG_CPU_32
94 mainmenu_option next_comment
95 diff -urN linux-2.4.20/arch/arm/kernel/entry-armv.S linux/arch/arm/kernel/entry-armv.S
96 --- linux-2.4.20/arch/arm/kernel/entry-armv.S 2002-08-02 20:39:42.000000000 -0400
97 +++ linux/arch/arm/kernel/entry-armv.S 2002-12-11 02:34:47.000000000 -0500
101 stmia r4, {r5, r6, r7, r8, r9} @ save sp_SVC, lr_SVC, pc, cpsr, old_ro
102 +#ifdef CONFIG_PREEMPT
103 + get_current_task r9
104 + ldr r8, [r9, #TSK_PREEMPT]
106 + str r8, [r9, #TSK_PREEMPT]
108 1: get_irqnr_and_base r0, r6, r5, lr
115 +#ifdef CONFIG_PREEMPT
116 +2: ldr r8, [r9, #TSK_PREEMPT]
119 + ldr r7, [r9, #TSK_NEED_RESCHED]
123 + ldr r0, [r6, #IRQSTAT_BH_COUNT]
127 + msr cpsr_c, r0 @ enable interrupts
128 + bl SYMBOL_NAME(preempt_schedule)
129 + mov r0, #I_BIT | MODE_SVC
130 + msr cpsr_c, r0 @ disable interrupts
132 +3: str r8, [r9, #TSK_PREEMPT]
134 ldr r0, [sp, #S_PSR] @ irqs are already disabled
136 ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
138 .LCprocfns: .word SYMBOL_NAME(processor)
140 .LCfp: .word SYMBOL_NAME(fp_enter)
141 +#ifdef CONFIG_PREEMPT
142 +.LCirqstat: .word SYMBOL_NAME(irq_stat)
149 alignment_trap r4, r7, __temp_irq
151 + get_current_task tsk
152 +#ifdef CONFIG_PREEMPT
153 + ldr r0, [tsk, #TSK_PREEMPT]
155 + str r0, [tsk, #TSK_PREEMPT]
157 1: get_irqnr_and_base r0, r6, r5, lr
161 @ routine called with r0 = irq number, r1 = struct pt_regs *
164 +#ifdef CONFIG_PREEMPT
165 + ldr r0, [tsk, #TSK_PREEMPT]
167 + str r0, [tsk, #TSK_PREEMPT]
170 - get_current_task tsk
174 diff -urN linux-2.4.20/arch/arm/tools/getconstants.c linux/arch/arm/tools/getconstants.c
175 --- linux-2.4.20/arch/arm/tools/getconstants.c 2001-10-11 12:04:57.000000000 -0400
176 +++ linux/arch/arm/tools/getconstants.c 2002-12-11 02:34:47.000000000 -0500
179 #include <asm/pgtable.h>
180 #include <asm/uaccess.h>
181 +#include <asm/hardirq.h>
184 * Make sure that the compiler and target are compatible.
186 DEFN("TSS_SAVE", OFF_TSK(thread.save));
187 DEFN("TSS_FPESAVE", OFF_TSK(thread.fpstate.soft.save));
189 +#ifdef CONFIG_PREEMPT
190 +DEFN("TSK_PREEMPT", OFF_TSK(preempt_count));
191 +DEFN("IRQSTAT_BH_COUNT", (unsigned long)&(((irq_cpustat_t *)0)->__local_bh_count));
195 DEFN("TSS_DOMAIN", OFF_TSK(thread.domain));
197 diff -urN linux-2.4.20/arch/i386/config.in linux/arch/i386/config.in
198 --- linux-2.4.20/arch/i386/config.in 2002-11-28 18:53:09.000000000 -0500
199 +++ linux/arch/i386/config.in 2002-12-11 02:34:47.000000000 -0500
201 bool 'Math emulation' CONFIG_MATH_EMULATION
202 bool 'MTRR (Memory Type Range Register) support' CONFIG_MTRR
203 bool 'Symmetric multi-processing support' CONFIG_SMP
204 +bool 'Preemptible Kernel' CONFIG_PREEMPT
205 if [ "$CONFIG_SMP" != "y" ]; then
206 bool 'Local APIC support on uniprocessors' CONFIG_X86_UP_APIC
207 dep_bool 'IO-APIC support on uniprocessors' CONFIG_X86_UP_IOAPIC $CONFIG_X86_UP_APIC
209 define_bool CONFIG_X86_TSC y
212 -if [ "$CONFIG_SMP" = "y" -a "$CONFIG_X86_CMPXCHG" = "y" ]; then
213 - define_bool CONFIG_HAVE_DEC_LOCK y
214 +if [ "$CONFIG_SMP" = "y" -o "$CONFIG_PREEMPT" = "y" ]; then
215 + if [ "$CONFIG_X86_CMPXCHG" = "y" ]; then
216 + define_bool CONFIG_HAVE_DEC_LOCK y
222 mainmenu_option next_comment
223 diff -urN linux-2.4.20/arch/i386/kernel/entry.S linux/arch/i386/kernel/entry.S
224 --- linux-2.4.20/arch/i386/kernel/entry.S 2002-11-28 18:53:09.000000000 -0500
225 +++ linux/arch/i386/kernel/entry.S 2002-12-11 02:34:47.000000000 -0500
227 * these are offsets into the task-struct.
239 +/* These are offsets into the irq_stat structure
240 + * There is one per cpu and it is aligned to 32
241 + * byte boundary (we put that here as a shift count)
243 +irq_array_shift = CONFIG_X86_L1_CACHE_SHIFT
245 +irq_stat_local_irq_count = 4
246 +irq_stat_local_bh_count = 8
251 +#define GET_CPU_INDX movl processor(%ebx),%eax; \
252 + shll $irq_array_shift,%eax
253 +#define GET_CURRENT_CPU_INDX GET_CURRENT(%ebx); \
255 +#define CPU_INDX (,%eax)
257 +#define GET_CPU_INDX
258 +#define GET_CURRENT_CPU_INDX GET_CURRENT(%ebx)
264 @@ -255,12 +275,30 @@
268 +#ifdef CONFIG_PREEMPT
270 + decl preempt_count(%ebx)
273 movl EFLAGS(%esp),%eax # mix EFLAGS and CS
275 testl $(VM_MASK | 3),%eax # return to VM86 mode or non-supervisor?
276 jne ret_from_sys_call
277 +#ifdef CONFIG_PREEMPT
278 + cmpl $0,preempt_count(%ebx)
280 + cmpl $0,need_resched(%ebx)
282 + movl SYMBOL_NAME(irq_stat)+irq_stat_local_bh_count CPU_INDX,%ecx
283 + addl SYMBOL_NAME(irq_stat)+irq_stat_local_irq_count CPU_INDX,%ecx
285 + incl preempt_count(%ebx)
287 + call SYMBOL_NAME(preempt_schedule)
299 +#ifdef CONFIG_PREEMPT
302 jmp ret_from_exception
304 ENTRY(coprocessor_error)
305 @@ -316,12 +357,18 @@
307 testl $0x4,%eax # EM (math emulation bit)
308 jne device_not_available_emulate
309 +#ifdef CONFIG_PREEMPT
312 call SYMBOL_NAME(math_state_restore)
313 jmp ret_from_exception
314 device_not_available_emulate:
315 pushl $0 # temporary storage for ORIG_EIP
316 call SYMBOL_NAME(math_emulate)
318 +#ifdef CONFIG_PREEMPT
321 jmp ret_from_exception
324 diff -urN linux-2.4.20/arch/i386/kernel/i387.c linux/arch/i386/kernel/i387.c
325 --- linux-2.4.20/arch/i386/kernel/i387.c 2002-08-02 20:39:42.000000000 -0400
326 +++ linux/arch/i386/kernel/i387.c 2002-12-11 02:34:47.000000000 -0500
329 #include <linux/config.h>
330 #include <linux/sched.h>
331 +#include <linux/spinlock.h>
332 #include <linux/init.h>
333 #include <asm/processor.h>
334 #include <asm/i387.h>
337 struct task_struct *tsk = current;
341 if (tsk->flags & PF_USEDFPU) {
342 __save_init_fpu(tsk);
344 diff -urN linux-2.4.20/arch/i386/kernel/smp.c linux/arch/i386/kernel/smp.c
345 --- linux-2.4.20/arch/i386/kernel/smp.c 2002-11-28 18:53:09.000000000 -0500
346 +++ linux/arch/i386/kernel/smp.c 2002-12-11 02:34:47.000000000 -0500
347 @@ -357,10 +357,13 @@
349 asmlinkage void smp_invalidate_interrupt (void)
351 - unsigned long cpu = smp_processor_id();
356 + cpu = smp_processor_id();
357 if (!test_bit(cpu, &flush_cpumask))
361 * This was a BUG() but until someone can quote me the
362 * line from the intel manual that guarantees an IPI to
366 clear_bit(cpu, &flush_cpumask);
371 static void flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
372 @@ -430,17 +435,22 @@
373 void flush_tlb_current_task(void)
375 struct mm_struct *mm = current->mm;
376 - unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
377 + unsigned long cpu_mask;
380 + cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
383 flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
387 void flush_tlb_mm (struct mm_struct * mm)
389 - unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
390 + unsigned long cpu_mask;
393 + cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
394 if (current->active_mm == mm) {
397 @@ -449,13 +459,16 @@
400 flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
404 void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
406 struct mm_struct *mm = vma->vm_mm;
407 - unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
408 + unsigned long cpu_mask;
411 + cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
412 if (current->active_mm == mm) {
418 flush_tlb_others(cpu_mask, mm, va);
422 static inline void do_flush_tlb_all_local(void)
423 diff -urN linux-2.4.20/arch/i386/kernel/traps.c linux/arch/i386/kernel/traps.c
424 --- linux-2.4.20/arch/i386/kernel/traps.c 2002-11-28 18:53:09.000000000 -0500
425 +++ linux/arch/i386/kernel/traps.c 2002-12-11 02:34:47.000000000 -0500
428 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
429 * Don't touch unless you *really* know how it works.
431 + * Must be called with kernel preemption disabled.
433 asmlinkage void math_state_restore(struct pt_regs regs)
435 diff -urN linux-2.4.20/arch/i386/lib/dec_and_lock.c linux/arch/i386/lib/dec_and_lock.c
436 --- linux-2.4.20/arch/i386/lib/dec_and_lock.c 2000-07-07 21:20:16.000000000 -0400
437 +++ linux/arch/i386/lib/dec_and_lock.c 2002-12-11 02:34:47.000000000 -0500
441 #include <linux/spinlock.h>
442 +#include <linux/sched.h>
443 #include <asm/atomic.h>
445 int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
446 diff -urN linux-2.4.20/arch/mips/config-shared.in linux/arch/mips/config-shared.in
447 --- linux-2.4.20/arch/mips/config-shared.in 2002-11-28 18:53:09.000000000 -0500
448 +++ linux/arch/mips/config-shared.in 2002-12-11 02:34:47.000000000 -0500
450 define_bool CONFIG_HOTPLUG_PCI n
453 +dep_bool 'Preemptible Kernel' CONFIG_PREEMPT $CONFIG_NEW_IRQ
454 bool 'System V IPC' CONFIG_SYSVIPC
455 bool 'BSD Process Accounting' CONFIG_BSD_PROCESS_ACCT
456 bool 'Sysctl support' CONFIG_SYSCTL
457 diff -urN linux-2.4.20/arch/mips/kernel/i8259.c linux/arch/mips/kernel/i8259.c
458 --- linux-2.4.20/arch/mips/kernel/i8259.c 2002-11-28 18:53:10.000000000 -0500
459 +++ linux/arch/mips/kernel/i8259.c 2002-12-11 02:34:47.000000000 -0500
461 * Copyright (C) 1992 Linus Torvalds
462 * Copyright (C) 1994 - 2000 Ralf Baechle
464 +#include <linux/sched.h>
465 #include <linux/delay.h>
466 #include <linux/init.h>
467 #include <linux/ioport.h>
468 diff -urN linux-2.4.20/arch/mips/kernel/irq.c linux/arch/mips/kernel/irq.c
469 --- linux-2.4.20/arch/mips/kernel/irq.c 2002-11-28 18:53:10.000000000 -0500
470 +++ linux/arch/mips/kernel/irq.c 2002-12-11 02:34:47.000000000 -0500
472 * Copyright (C) 1992 Linus Torvalds
473 * Copyright (C) 1994 - 2000 Ralf Baechle
476 +#include <linux/sched.h>
477 #include <linux/config.h>
478 #include <linux/kernel.h>
479 #include <linux/delay.h>
481 #include <linux/slab.h>
482 #include <linux/mm.h>
483 #include <linux/random.h>
484 -#include <linux/sched.h>
485 +#include <linux/spinlock.h>
486 +#include <linux/ptrace.h>
488 #include <asm/atomic.h>
489 #include <asm/system.h>
490 #include <asm/uaccess.h>
491 +#include <asm/debug.h>
494 * Controller mappings for all interrupt sources:
496 struct irqaction * action;
501 kstat.irqs[cpu][irq]++;
502 spin_lock(&desc->lock);
503 desc->handler->ack(irq);
506 if (softirq_pending(cpu))
509 +#if defined(CONFIG_PREEMPT)
510 + while (--current->preempt_count == 0) {
511 + db_assert(intr_off());
512 + db_assert(!in_interrupt());
514 + if (current->need_resched == 0) {
518 + current->preempt_count ++;
520 + if (user_mode(regs)) {
523 + preempt_schedule();
532 diff -urN linux-2.4.20/arch/mips/mm/extable.c linux/arch/mips/mm/extable.c
533 --- linux-2.4.20/arch/mips/mm/extable.c 2002-11-28 18:53:10.000000000 -0500
534 +++ linux/arch/mips/mm/extable.c 2002-12-11 02:34:47.000000000 -0500
537 #include <linux/config.h>
538 #include <linux/module.h>
539 +#include <linux/sched.h>
540 #include <linux/spinlock.h>
541 #include <asm/uaccess.h>
543 diff -urN linux-2.4.20/arch/ppc/config.in linux/arch/ppc/config.in
544 --- linux-2.4.20/arch/ppc/config.in 2002-11-28 18:53:11.000000000 -0500
545 +++ linux/arch/ppc/config.in 2002-12-11 02:34:47.000000000 -0500
547 bool ' Distribute interrupts on all CPUs by default' CONFIG_IRQ_ALL_CPUS
550 +bool 'Preemptible kernel support' CONFIG_PREEMPT
552 if [ "$CONFIG_6xx" = "y" -a "$CONFIG_8260" = "n" ];then
553 bool 'AltiVec Support' CONFIG_ALTIVEC
554 bool 'Thermal Management Support' CONFIG_TAU
555 diff -urN linux-2.4.20/arch/ppc/kernel/entry.S linux/arch/ppc/kernel/entry.S
556 --- linux-2.4.20/arch/ppc/kernel/entry.S 2002-11-28 18:53:11.000000000 -0500
557 +++ linux/arch/ppc/kernel/entry.S 2002-12-11 02:34:47.000000000 -0500
562 +#ifdef CONFIG_PREEMPT
563 + lwz r3,PREEMPT_COUNT(r2)
565 + bge ret_from_except
569 + lwz r5,NEED_RESCHED(r2)
571 + beq ret_from_except
573 + ori r3,r3,irq_stat@l
578 + bne ret_from_except
579 + lwz r3,PREEMPT_COUNT(r2)
581 + stw r3,PREEMPT_COUNT(r2)
586 + bl preempt_schedule
588 + rlwinm r0,r0,0,17,15
591 + lwz r3,PREEMPT_COUNT(r2)
593 + stw r3,PREEMPT_COUNT(r2)
595 + b ret_from_intercept
596 +#endif /* CONFIG_PREEMPT */
597 .globl ret_from_except
599 lwz r3,_MSR(r1) /* Returning to user mode? */
600 diff -urN linux-2.4.20/arch/ppc/kernel/irq.c linux/arch/ppc/kernel/irq.c
601 --- linux-2.4.20/arch/ppc/kernel/irq.c 2002-11-28 18:53:11.000000000 -0500
602 +++ linux/arch/ppc/kernel/irq.c 2002-12-11 02:34:47.000000000 -0500
604 return 1; /* lets ret_from_int know we can do checks */
607 +#ifdef CONFIG_PREEMPT
609 +preempt_intercept(struct pt_regs *regs)
615 + switch(regs->trap) {
617 + ret = do_IRQ(regs);
624 + ret = timer_interrupt(regs);
633 +#endif /* CONFIG_PREEMPT */
635 unsigned long probe_irq_on (void)
638 diff -urN linux-2.4.20/arch/ppc/kernel/mk_defs.c linux/arch/ppc/kernel/mk_defs.c
639 --- linux-2.4.20/arch/ppc/kernel/mk_defs.c 2001-08-28 09:58:33.000000000 -0400
640 +++ linux/arch/ppc/kernel/mk_defs.c 2002-12-11 02:34:47.000000000 -0500
642 DEFINE(SIGPENDING, offsetof(struct task_struct, sigpending));
643 DEFINE(THREAD, offsetof(struct task_struct, thread));
644 DEFINE(MM, offsetof(struct task_struct, mm));
645 +#ifdef CONFIG_PREEMPT
646 + DEFINE(PREEMPT_COUNT, offsetof(struct task_struct, preempt_count));
648 DEFINE(ACTIVE_MM, offsetof(struct task_struct, active_mm));
649 DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
650 DEFINE(KSP, offsetof(struct thread_struct, ksp));
651 diff -urN linux-2.4.20/arch/ppc/kernel/setup.c linux/arch/ppc/kernel/setup.c
652 --- linux-2.4.20/arch/ppc/kernel/setup.c 2002-11-28 18:53:11.000000000 -0500
653 +++ linux/arch/ppc/kernel/setup.c 2002-12-11 02:34:47.000000000 -0500
655 strcpy(cmd_line, CONFIG_CMDLINE);
656 #endif /* CONFIG_CMDLINE */
658 +#ifdef CONFIG_PREEMPT
659 + /* Override the irq routines for external & timer interrupts here,
660 + * as the MMU has only been minimally setup at this point and
661 + * there are no protections on page zero.
664 + extern int preempt_intercept(struct pt_regs *);
666 + do_IRQ_intercept = (unsigned long) &preempt_intercept;
667 + timer_interrupt_intercept = (unsigned long) &preempt_intercept;
670 +#endif /* CONFIG_PREEMPT */
672 platform_init(r3, r4, r5, r6, r7);
675 diff -urN linux-2.4.20/arch/ppc/lib/dec_and_lock.c linux/arch/ppc/lib/dec_and_lock.c
676 --- linux-2.4.20/arch/ppc/lib/dec_and_lock.c 2001-11-16 13:10:08.000000000 -0500
677 +++ linux/arch/ppc/lib/dec_and_lock.c 2002-12-11 02:34:47.000000000 -0500
679 #include <linux/module.h>
680 +#include <linux/sched.h>
681 #include <linux/spinlock.h>
682 #include <asm/atomic.h>
683 #include <asm/system.h>
684 diff -urN linux-2.4.20/arch/sh/config.in linux/arch/sh/config.in
685 --- linux-2.4.20/arch/sh/config.in 2002-11-28 18:53:11.000000000 -0500
686 +++ linux/arch/sh/config.in 2002-12-11 02:34:47.000000000 -0500
688 hex 'Physical memory start address' CONFIG_MEMORY_START 08000000
689 hex 'Physical memory size' CONFIG_MEMORY_SIZE 00400000
691 +bool 'Preemptible Kernel' CONFIG_PREEMPT
694 if [ "$CONFIG_SH_HP690" = "y" ]; then
695 diff -urN linux-2.4.20/arch/sh/kernel/entry.S linux/arch/sh/kernel/entry.S
696 --- linux-2.4.20/arch/sh/kernel/entry.S 2002-08-02 20:39:43.000000000 -0400
697 +++ linux/arch/sh/kernel/entry.S 2002-12-11 02:34:47.000000000 -0500
700 * These are offsets into the task-struct.
710 + * These offsets are into irq_stat.
711 + * (Find irq_cpustat_t in asm-sh/hardirq.h)
716 PT_TRACESYS = 0x00000002
717 PF_USEDFPU = 0x00100000
719 mov.l __INV_IMASK, r11; \
722 - stc k_g_imask, r11; \
723 + stc k_g_imask, r11; \
728 mov.l @(tsk_ptrace,r0), r0 ! Is current PTRACE_SYSCALL'd?
731 - bt ret_from_syscall
732 - bra syscall_ret_trace
733 + bf syscall_ret_trace
734 + bra ret_from_syscall
739 .long syscall_ret_trace
743 - .long 0xffffff0f ! ~(IMASK)
749 1: .long SYMBOL_NAME(schedule)
751 +#ifdef CONFIG_PREEMPT
753 + ! Returning from interrupt during kernel mode: check if
754 + ! preempt_schedule should be called. If need_resched flag
755 + ! is set, preempt_count is zero, and we're not currently
756 + ! in an interrupt handler (local irq or bottom half) then
757 + ! call preempt_schedule.
759 + ! Increment preempt_count to prevent a nested interrupt
760 + ! from reentering preempt_schedule, then decrement after
761 + ! and drop through to regular interrupt return which will
762 + ! jump back and check again in case such an interrupt did
763 + ! come in (and didn't preempt due to preempt_count).
765 + ! NOTE: because we just checked that preempt_count was
766 + ! zero before getting to the call, can't we use immediate
767 + ! values (1 and 0) rather than inc/dec? Also, rather than
768 + ! drop through to ret_from_irq, we already know this thread
769 + ! is kernel mode, can't we go direct to ret_from_kirq? In
770 + ! fact, with proper interrupt nesting and so forth could
771 + ! the loop simply be on the need_resched w/o checking the
772 + ! other stuff again? Optimize later...
776 + ! Nonzero preempt_count prevents scheduling
778 + mov.l @(preempt_count,r1), r0
781 + ! Zero need_resched prevents scheduling
782 + mov.l @(need_resched,r1), r0
785 + ! If in_interrupt(), don't schedule
786 + mov.l __irq_stat, r1
787 + mov.l @(local_irq_count,r1), r0
788 + mov.l @(local_bh_count,r1), r1
792 + ! Allow scheduling using preempt_schedule
793 + ! Adjust preempt_count and SR as needed.
795 + mov.l @(preempt_count,r1), r0 ! Could replace this ...
796 + add #1, r0 ! ... and this w/mov #1?
797 + mov.l r0, @(preempt_count,r1)
799 + mov.l __preempt_schedule, r0
808 + mov.l @(preempt_count,r1), r0 ! Could replace this ...
809 + add #-1, r0 ! ... and this w/mov #0?
810 + mov.l r0, @(preempt_count,r1)
811 + ! Maybe should bra ret_from_kirq, or loop over need_resched?
812 + ! For now, fall through to ret_from_irq again...
813 +#endif /* CONFIG_PREEMPT */
817 + mov.l @(r0,r15), r0 ! get status register
819 + shll r0 ! kernel space?
820 +#ifndef CONFIG_PREEMPT
821 + bt restore_all ! Yes, it's from kernel, go back soon
822 +#else /* CONFIG_PREEMPT */
823 + bt ret_from_kirq ! From kernel: maybe preempt_schedule
824 +#endif /* CONFIG_PREEMPT */
826 + bra ret_from_syscall
831 mov.l @(r0,r15), r0 ! get status register
833 .long SYMBOL_NAME(do_signal)
835 .long SYMBOL_NAME(irq_stat)
836 +#ifdef CONFIG_PREEMPT
838 + .long SYMBOL_NAME(preempt_schedule)
839 +#endif /* CONFIG_PREEMPT */
841 + .long 0xffffff0f ! ~(IMASK)
848 .long SYMBOL_NAME(fpu_prepare_fd)
850 - .long SYMBOL_NAME(init_task_union)+4
851 + .long SYMBOL_NAME(init_task_union)+flags
855 diff -urN linux-2.4.20/arch/sh/kernel/irq.c linux/arch/sh/kernel/irq.c
856 --- linux-2.4.20/arch/sh/kernel/irq.c 2001-09-08 15:29:09.000000000 -0400
857 +++ linux/arch/sh/kernel/irq.c 2002-12-11 02:34:47.000000000 -0500
859 struct irqaction * action;
863 + * At this point we're now about to actually call handlers,
864 + * and interrupts might get reenabled during them... bump
865 + * preempt_count to prevent any preemption while the handler
866 + * called here is pending...
871 asm volatile("stc r2_bank, %0\n\t"
874 desc->handler->end(irq);
875 spin_unlock(&desc->lock);
878 if (softirq_pending(cpu))
882 + * We're done with the handlers, interrupts should be
883 + * currently disabled; decrement preempt_count now so
884 + * as we return preemption may be allowed...
886 + preempt_enable_no_resched();
891 diff -urN linux-2.4.20/CREDITS linux/CREDITS
892 --- linux-2.4.20/CREDITS 2002-11-28 18:53:08.000000000 -0500
893 +++ linux/CREDITS 2002-12-11 02:34:47.000000000 -0500
894 @@ -1001,8 +1001,8 @@
899 D: Interrupt-driven printer driver
900 +D: Preemptible kernel
902 S: Mountain View, California 94040
904 diff -urN linux-2.4.20/Documentation/Configure.help linux/Documentation/Configure.help
905 --- linux-2.4.20/Documentation/Configure.help 2002-11-28 18:53:08.000000000 -0500
906 +++ linux/Documentation/Configure.help 2002-12-11 02:34:47.000000000 -0500
908 If you have a system with several CPUs, you do not need to say Y
909 here: the local APIC will be used automatically.
913 + This option reduces the latency of the kernel when reacting to
914 + real-time or interactive events by allowing a low priority process to
915 + be preempted even if it is in kernel mode executing a system call.
916 + This allows applications to run more reliably even when the system is
919 + Say Y here if you are building a kernel for a desktop, embedded or
920 + real-time system. Say N if you are unsure.
922 Kernel math emulation
923 CONFIG_MATH_EMULATION
924 Linux can emulate a math coprocessor (used for floating point
925 diff -urN linux-2.4.20/Documentation/preempt-locking.txt linux/Documentation/preempt-locking.txt
926 --- linux-2.4.20/Documentation/preempt-locking.txt 1969-12-31 19:00:00.000000000 -0500
927 +++ linux/Documentation/preempt-locking.txt 2002-12-11 02:34:47.000000000 -0500
929 + Proper Locking Under a Preemptible Kernel:
930 + Keeping Kernel Code Preempt-Safe
931 + Robert Love <rml@tech9.net>
932 + Last Updated: 22 Jan 2002
938 +A preemptible kernel creates new locking issues. The issues are the same as
939 +those under SMP: concurrency and reentrancy. Thankfully, the Linux preemptible
940 +kernel model leverages existing SMP locking mechanisms. Thus, the kernel
941 +requires explicit additional locking for very few additional situations.
943 +This document is for all kernel hackers. Developing code in the kernel
944 +requires protecting these situations.
947 +RULE #1: Per-CPU data structures need explicit protection
950 +Two similar problems arise. An example code snippet:
952 + struct this_needs_locking tux[NR_CPUS];
953 + tux[smp_processor_id()] = some_value;
954 + /* task is preempted here... */
955 + something = tux[smp_processor_id()];
957 +First, since the data is per-CPU, it may not have explicit SMP locking, but
958 +require it otherwise. Second, when a preempted task is finally rescheduled,
959 +the previous value of smp_processor_id may not equal the current. You must
960 +protect these situations by disabling preemption around them.
963 +RULE #2: CPU state must be protected.
966 +Under preemption, the state of the CPU must be protected. This is arch-
967 +dependent, but includes CPU structures and state not preserved over a context
968 +switch. For example, on x86, entering and exiting FPU mode is now a critical
969 +section that must occur while preemption is disabled. Think what would happen
970 +if the kernel is executing a floating-point instruction and is then preempted.
971 +Remember, the kernel does not save FPU state except for user tasks. Therefore,
972 +upon preemption, the FPU registers will be sold to the lowest bidder. Thus,
973 +preemption must be disabled around such regions.
975 +Note, some FPU functions are already explicitly preempt safe. For example,
976 +kernel_fpu_begin and kernel_fpu_end will disable and enable preemption.
977 +However, math_state_restore must be called with preemption disabled.
980 +RULE #3: Lock acquire and release must be performed by same task
983 +A lock acquired in one task must be released by the same task. This
984 +means you can't do oddball things like acquire a lock and go off to
985 +play while another task releases it. If you want to do something
986 +like this, acquire and release the lock in the same code path and
987 +have the caller wait on an event by the other task.
993 +Data protection under preemption is achieved by disabling preemption for the
994 +duration of the critical region.
996 +preempt_enable() decrement the preempt counter
997 +preempt_disable() increment the preempt counter
998 +preempt_enable_no_resched() decrement, but do not immediately preempt
999 +preempt_get_count() return the preempt counter
1001 +The functions are nestable. In other words, you can call preempt_disable
1002 +n-times in a code path, and preemption will not be reenabled until the n-th
1003 +call to preempt_enable. The preempt statements define to nothing if
1004 +preemption is not enabled.
1006 +Note that you do not need to explicitly prevent preemption if you are holding
1007 +any locks or interrupts are disabled, since preemption is implicitly disabled
1012 + cpucache_t *cc; /* this is per-CPU */
1013 + preempt_disable();
1014 + cc = cc_data(searchp);
1015 + if (cc && cc->avail) {
1016 + __free_block(searchp, cc_entry(cc), cc->avail);
1022 +Notice how the preemption statements must encompass every reference of the
1023 +critical variables. Another example:
1027 + if (buf[smp_processor_id()] == -1) printk(KERN_INFO "wee!\n");
1028 + spin_lock(&buf_lock);
1031 +This code is not preempt-safe, but see how easily we can fix it by simply
1032 +moving the spin_lock up two lines.
1033 diff -urN linux-2.4.20/drivers/ieee1394/csr.c linux/drivers/ieee1394/csr.c
1034 --- linux-2.4.20/drivers/ieee1394/csr.c 2002-11-28 18:53:13.000000000 -0500
1035 +++ linux/drivers/ieee1394/csr.c 2002-12-11 02:34:47.000000000 -0500
1039 #include <linux/string.h>
1040 +#include <linux/sched.h>
1042 #include "ieee1394_types.h"
1044 diff -urN linux-2.4.20/drivers/sound/sound_core.c linux/drivers/sound/sound_core.c
1045 --- linux-2.4.20/drivers/sound/sound_core.c 2001-09-30 15:26:08.000000000 -0400
1046 +++ linux/drivers/sound/sound_core.c 2002-12-11 02:34:47.000000000 -0500
1048 #include <linux/config.h>
1049 #include <linux/module.h>
1050 #include <linux/init.h>
1051 +#include <linux/sched.h>
1052 #include <linux/slab.h>
1053 #include <linux/types.h>
1054 #include <linux/kernel.h>
1055 diff -urN linux-2.4.20/fs/adfs/map.c linux/fs/adfs/map.c
1056 --- linux-2.4.20/fs/adfs/map.c 2001-10-25 16:53:53.000000000 -0400
1057 +++ linux/fs/adfs/map.c 2002-12-11 02:34:47.000000000 -0500
1059 #include <linux/fs.h>
1060 #include <linux/adfs_fs.h>
1061 #include <linux/spinlock.h>
1062 +#include <linux/sched.h>
1066 diff -urN linux-2.4.20/fs/exec.c linux/fs/exec.c
1067 --- linux-2.4.20/fs/exec.c 2002-11-28 18:53:15.000000000 -0500
1068 +++ linux/fs/exec.c 2002-12-11 02:34:47.000000000 -0500
1070 active_mm = current->active_mm;
1072 current->active_mm = mm;
1073 - task_unlock(current);
1074 activate_mm(active_mm, mm);
1075 + task_unlock(current);
1078 if (active_mm != old_mm) BUG();
1079 diff -urN linux-2.4.20/fs/fat/cache.c linux/fs/fat/cache.c
1080 --- linux-2.4.20/fs/fat/cache.c 2001-10-12 16:48:42.000000000 -0400
1081 +++ linux/fs/fat/cache.c 2002-12-11 02:34:47.000000000 -0500
1083 #include <linux/string.h>
1084 #include <linux/stat.h>
1085 #include <linux/fat_cvf.h>
1086 +#include <linux/sched.h>
1089 # define PRINTK(x) printk x
1090 diff -urN linux-2.4.20/fs/nls/nls_base.c linux/fs/nls/nls_base.c
1091 --- linux-2.4.20/fs/nls/nls_base.c 2002-08-02 20:39:45.000000000 -0400
1092 +++ linux/fs/nls/nls_base.c 2002-12-11 02:34:47.000000000 -0500
1095 #include <linux/kmod.h>
1097 +#include <linux/sched.h>
1098 #include <linux/spinlock.h>
1100 static struct nls_table *tables;
1101 diff -urN linux-2.4.20/include/asm-arm/dma.h linux/include/asm-arm/dma.h
1102 --- linux-2.4.20/include/asm-arm/dma.h 2001-08-12 14:14:00.000000000 -0400
1103 +++ linux/include/asm-arm/dma.h 2002-12-11 02:34:47.000000000 -0500
1106 #include <linux/config.h>
1107 #include <linux/spinlock.h>
1108 +#include <linux/sched.h>
1109 #include <asm/system.h>
1110 #include <asm/memory.h>
1111 #include <asm/scatterlist.h>
1112 diff -urN linux-2.4.20/include/asm-arm/hardirq.h linux/include/asm-arm/hardirq.h
1113 --- linux-2.4.20/include/asm-arm/hardirq.h 2001-10-11 12:04:57.000000000 -0400
1114 +++ linux/include/asm-arm/hardirq.h 2002-12-11 02:34:47.000000000 -0500
1116 #define irq_exit(cpu,irq) (local_irq_count(cpu)--)
1118 #define synchronize_irq() do { } while (0)
1119 +#define release_irqlock(cpu) do { } while (0)
1122 #error SMP not supported
1123 diff -urN linux-2.4.20/include/asm-arm/pgalloc.h linux/include/asm-arm/pgalloc.h
1124 --- linux-2.4.20/include/asm-arm/pgalloc.h 2001-08-12 14:14:00.000000000 -0400
1125 +++ linux/include/asm-arm/pgalloc.h 2002-12-11 02:34:47.000000000 -0500
1130 + preempt_disable();
1131 if ((ret = pgd_quicklist) != NULL) {
1132 pgd_quicklist = (unsigned long *)__pgd_next(ret);
1134 clean_dcache_entry(ret + 1);
1135 pgtable_cache_size--;
1138 return (pgd_t *)ret;
1141 static inline void free_pgd_fast(pgd_t *pgd)
1143 + preempt_disable();
1144 __pgd_next(pgd) = (unsigned long) pgd_quicklist;
1145 pgd_quicklist = (unsigned long *) pgd;
1146 pgtable_cache_size++;
1150 static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
1154 + preempt_disable();
1155 if((ret = pte_quicklist) != NULL) {
1156 pte_quicklist = (unsigned long *)__pte_next(ret);
1158 clean_dcache_entry(ret);
1159 pgtable_cache_size--;
1162 return (pte_t *)ret;
1165 static inline void free_pte_fast(pte_t *pte)
1167 + preempt_disable();
1168 __pte_next(pte) = (unsigned long) pte_quicklist;
1169 pte_quicklist = (unsigned long *) pte;
1170 pgtable_cache_size++;
1174 #else /* CONFIG_NO_PGT_CACHE */
1175 diff -urN linux-2.4.20/include/asm-arm/smplock.h linux/include/asm-arm/smplock.h
1176 --- linux-2.4.20/include/asm-arm/smplock.h 2001-08-12 14:14:00.000000000 -0400
1177 +++ linux/include/asm-arm/smplock.h 2002-12-11 02:34:47.000000000 -0500
1180 * Default SMP lock implementation
1182 +#include <linux/config.h>
1183 #include <linux/interrupt.h>
1184 #include <linux/spinlock.h>
1186 extern spinlock_t kernel_flag;
1188 +#ifdef CONFIG_PREEMPT
1189 +#define kernel_locked() preempt_get_count()
1191 #define kernel_locked() spin_is_locked(&kernel_flag)
1195 * Release global kernel lock and global interrupt lock
1198 static inline void lock_kernel(void)
1200 +#ifdef CONFIG_PREEMPT
1201 + if (current->lock_depth == -1)
1202 + spin_lock(&kernel_flag);
1203 + ++current->lock_depth;
1205 if (!++current->lock_depth)
1206 spin_lock(&kernel_flag);
1210 static inline void unlock_kernel(void)
1211 diff -urN linux-2.4.20/include/asm-arm/softirq.h linux/include/asm-arm/softirq.h
1212 --- linux-2.4.20/include/asm-arm/softirq.h 2001-09-08 15:02:31.000000000 -0400
1213 +++ linux/include/asm-arm/softirq.h 2002-12-11 02:34:47.000000000 -0500
1215 #include <asm/hardirq.h>
1217 #define __cpu_bh_enable(cpu) \
1218 - do { barrier(); local_bh_count(cpu)--; } while (0)
1219 + do { barrier(); local_bh_count(cpu)--; preempt_enable(); } while (0)
1220 #define cpu_bh_disable(cpu) \
1221 - do { local_bh_count(cpu)++; barrier(); } while (0)
1222 + do { preempt_disable(); local_bh_count(cpu)++; barrier(); } while (0)
1224 #define local_bh_disable() cpu_bh_disable(smp_processor_id())
1225 #define __local_bh_enable() __cpu_bh_enable(smp_processor_id())
1227 #define in_softirq() (local_bh_count(smp_processor_id()) != 0)
1229 -#define local_bh_enable() \
1230 +#define _local_bh_enable() \
1232 unsigned int *ptr = &local_bh_count(smp_processor_id()); \
1233 if (!--*ptr && ptr[-2]) \
1234 __asm__("bl%? __do_softirq": : : "lr");/* out of line */\
1237 +#define local_bh_enable() do { _local_bh_enable(); preempt_enable(); } while (0)
1239 #endif /* __ASM_SOFTIRQ_H */
1240 diff -urN linux-2.4.20/include/asm-arm/system.h linux/include/asm-arm/system.h
1241 --- linux-2.4.20/include/asm-arm/system.h 2000-11-27 20:07:59.000000000 -0500
1242 +++ linux/include/asm-arm/system.h 2002-12-11 02:34:47.000000000 -0500
1244 #define local_irq_disable() __cli()
1245 #define local_irq_enable() __sti()
1247 +#define irqs_disabled() \
1249 + unsigned long cpsr_val; \
1250 + asm ("mrs %0, cpsr" : "=r" (cpsr_val)); \
1255 #error SMP not supported
1257 diff -urN linux-2.4.20/include/asm-i386/hardirq.h linux/include/asm-i386/hardirq.h
1258 --- linux-2.4.20/include/asm-i386/hardirq.h 2001-11-22 14:46:19.000000000 -0500
1259 +++ linux/include/asm-i386/hardirq.h 2002-12-11 02:34:47.000000000 -0500
1263 * Are we in an interrupt context? Either doing bottom half
1264 - * or hardware interrupt processing?
1265 + * or hardware interrupt processing? Note the preempt check,
1266 + * this is both a bugfix and an optimization. If we are
1267 + * preemptible, we cannot be in an interrupt.
1269 -#define in_interrupt() ({ int __cpu = smp_processor_id(); \
1270 - (local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
1271 +#define in_interrupt() (preempt_is_disabled() && \
1272 + ({unsigned long __cpu = smp_processor_id(); \
1273 + (local_irq_count(__cpu) + local_bh_count(__cpu) != 0); }))
1275 -#define in_irq() (local_irq_count(smp_processor_id()) != 0)
1276 +#define in_irq() (preempt_is_disabled() && \
1277 + (local_irq_count(smp_processor_id()) != 0))
1283 #define synchronize_irq() barrier()
1285 +#define release_irqlock(cpu) do { } while (0)
1289 #include <asm/atomic.h>
1290 diff -urN linux-2.4.20/include/asm-i386/highmem.h linux/include/asm-i386/highmem.h
1291 --- linux-2.4.20/include/asm-i386/highmem.h 2002-08-02 20:39:45.000000000 -0400
1292 +++ linux/include/asm-i386/highmem.h 2002-12-11 02:34:47.000000000 -0500
1294 enum fixed_addresses idx;
1295 unsigned long vaddr;
1297 + preempt_disable();
1298 if (page < highmem_start_page)
1299 return page_address(page);
1301 @@ -109,8 +110,10 @@
1302 unsigned long vaddr = (unsigned long) kvaddr;
1303 enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
1305 - if (vaddr < FIXADDR_START) // FIXME
1306 + if (vaddr < FIXADDR_START) { // FIXME
1311 if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
1314 pte_clear(kmap_pte-idx);
1315 __flush_tlb_one(vaddr);
1321 #endif /* __KERNEL__ */
1322 diff -urN linux-2.4.20/include/asm-i386/hw_irq.h linux/include/asm-i386/hw_irq.h
1323 --- linux-2.4.20/include/asm-i386/hw_irq.h 2001-11-22 14:46:18.000000000 -0500
1324 +++ linux/include/asm-i386/hw_irq.h 2002-12-11 02:34:47.000000000 -0500
1327 #define STR(x) __STR(x)
1329 +#define GET_CURRENT \
1330 + "movl %esp, %ebx\n\t" \
1331 + "andl $-8192, %ebx\n\t"
1333 +#ifdef CONFIG_PREEMPT
1334 +#define BUMP_LOCK_COUNT \
1336 + "incl 4(%ebx)\n\t"
1338 +#define BUMP_LOCK_COUNT
1344 @@ -108,15 +120,12 @@
1346 "movl $" STR(__KERNEL_DS) ",%edx\n\t" \
1347 "movl %edx,%ds\n\t" \
1348 - "movl %edx,%es\n\t"
1349 + "movl %edx,%es\n\t" \
1352 #define IRQ_NAME2(nr) nr##_interrupt(void)
1353 #define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
1355 -#define GET_CURRENT \
1356 - "movl %esp, %ebx\n\t" \
1357 - "andl $-8192, %ebx\n\t"
1360 * SMP has a few special interrupts for IPI messages
1362 diff -urN linux-2.4.20/include/asm-i386/i387.h linux/include/asm-i386/i387.h
1363 --- linux-2.4.20/include/asm-i386/i387.h 2002-08-02 20:39:45.000000000 -0400
1364 +++ linux/include/asm-i386/i387.h 2002-12-11 02:34:47.000000000 -0500
1366 #define __ASM_I386_I387_H
1368 #include <linux/sched.h>
1369 +#include <linux/spinlock.h>
1370 #include <asm/processor.h>
1371 #include <asm/sigcontext.h>
1372 #include <asm/user.h>
1374 extern void restore_fpu( struct task_struct *tsk );
1376 extern void kernel_fpu_begin(void);
1377 -#define kernel_fpu_end() stts()
1378 +#define kernel_fpu_end() do { stts(); preempt_enable(); } while(0)
1381 #define unlazy_fpu( tsk ) do { \
1382 diff -urN linux-2.4.20/include/asm-i386/pgalloc.h linux/include/asm-i386/pgalloc.h
1383 --- linux-2.4.20/include/asm-i386/pgalloc.h 2002-08-02 20:39:45.000000000 -0400
1384 +++ linux/include/asm-i386/pgalloc.h 2002-12-11 02:34:47.000000000 -0500
1389 + preempt_disable();
1390 if ((ret = pgd_quicklist) != NULL) {
1391 pgd_quicklist = (unsigned long *)(*ret);
1393 pgtable_cache_size--;
1398 ret = (unsigned long *)get_pgd_slow();
1400 return (pgd_t *)ret;
1403 static inline void free_pgd_fast(pgd_t *pgd)
1405 + preempt_disable();
1406 *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
1407 pgd_quicklist = (unsigned long *) pgd;
1408 pgtable_cache_size++;
1412 static inline void free_pgd_slow(pgd_t *pgd)
1413 @@ -119,19 +125,23 @@
1417 + preempt_disable();
1418 if ((ret = (unsigned long *)pte_quicklist) != NULL) {
1419 pte_quicklist = (unsigned long *)(*ret);
1421 pgtable_cache_size--;
1424 return (pte_t *)ret;
1427 static inline void pte_free_fast(pte_t *pte)
1429 + preempt_disable();
1430 *(unsigned long *)pte = (unsigned long) pte_quicklist;
1431 pte_quicklist = (unsigned long *) pte;
1432 pgtable_cache_size++;
1436 static __inline__ void pte_free_slow(pte_t *pte)
1437 diff -urN linux-2.4.20/include/asm-i386/smplock.h linux/include/asm-i386/smplock.h
1438 --- linux-2.4.20/include/asm-i386/smplock.h 2002-08-02 20:39:45.000000000 -0400
1439 +++ linux/include/asm-i386/smplock.h 2002-12-11 02:34:47.000000000 -0500
1441 extern spinlock_cacheline_t kernel_flag_cacheline;
1442 #define kernel_flag kernel_flag_cacheline.lock
1445 #define kernel_locked() spin_is_locked(&kernel_flag)
1447 +#ifdef CONFIG_PREEMPT
1448 +#define kernel_locked() preempt_get_count()
1450 +#define kernel_locked() 1
1455 * Release global kernel lock and global interrupt lock
1458 static __inline__ void lock_kernel(void)
1460 +#ifdef CONFIG_PREEMPT
1461 + if (current->lock_depth == -1)
1462 + spin_lock(&kernel_flag);
1463 + ++current->lock_depth;
1466 if (!++current->lock_depth)
1467 spin_lock(&kernel_flag);
1469 :"=m" (__dummy_lock(&kernel_flag)),
1470 "=m" (current->lock_depth));
1475 static __inline__ void unlock_kernel(void)
1476 diff -urN linux-2.4.20/include/asm-i386/softirq.h linux/include/asm-i386/softirq.h
1477 --- linux-2.4.20/include/asm-i386/softirq.h 2002-08-02 20:39:45.000000000 -0400
1478 +++ linux/include/asm-i386/softirq.h 2002-12-11 02:34:48.000000000 -0500
1480 #include <asm/hardirq.h>
1482 #define __cpu_bh_enable(cpu) \
1483 - do { barrier(); local_bh_count(cpu)--; } while (0)
1484 + do { barrier(); local_bh_count(cpu)--; preempt_enable(); } while (0)
1485 #define cpu_bh_disable(cpu) \
1486 - do { local_bh_count(cpu)++; barrier(); } while (0)
1487 + do { preempt_disable(); local_bh_count(cpu)++; barrier(); } while (0)
1489 #define local_bh_disable() cpu_bh_disable(smp_processor_id())
1490 #define __local_bh_enable() __cpu_bh_enable(smp_processor_id())
1492 * If you change the offsets in irq_stat then you have to
1493 * update this code as well.
1495 -#define local_bh_enable() \
1496 +#define _local_bh_enable() \
1498 unsigned int *ptr = &local_bh_count(smp_processor_id()); \
1501 /* no registers clobbered */ ); \
1504 +#define local_bh_enable() do { _local_bh_enable(); preempt_enable(); } while (0)
1506 #endif /* __ASM_SOFTIRQ_H */
1507 diff -urN linux-2.4.20/include/asm-i386/spinlock.h linux/include/asm-i386/spinlock.h
1508 --- linux-2.4.20/include/asm-i386/spinlock.h 2002-11-28 18:53:15.000000000 -0500
1509 +++ linux/include/asm-i386/spinlock.h 2002-12-11 02:34:48.000000000 -0500
1511 :"=m" (lock->lock) : : "memory"
1514 -static inline void spin_unlock(spinlock_t *lock)
1515 +static inline void _raw_spin_unlock(spinlock_t *lock)
1518 if (lock->magic != SPINLOCK_MAGIC)
1520 :"=q" (oldval), "=m" (lock->lock) \
1521 :"0" (oldval) : "memory"
1523 -static inline void spin_unlock(spinlock_t *lock)
1524 +static inline void _raw_spin_unlock(spinlock_t *lock)
1532 -static inline int spin_trylock(spinlock_t *lock)
1533 +static inline int _raw_spin_trylock(spinlock_t *lock)
1536 __asm__ __volatile__(
1541 -static inline void spin_lock(spinlock_t *lock)
1542 +static inline void _raw_spin_lock(spinlock_t *lock)
1548 /* the spinlock helpers are in arch/i386/kernel/semaphore.c */
1550 -static inline void read_lock(rwlock_t *rw)
1551 +static inline void _raw_read_lock(rwlock_t *rw)
1554 if (rw->magic != RWLOCK_MAGIC)
1556 __build_read_lock(rw, "__read_lock_failed");
1559 -static inline void write_lock(rwlock_t *rw)
1560 +static inline void _raw_write_lock(rwlock_t *rw)
1563 if (rw->magic != RWLOCK_MAGIC)
1564 @@ -197,10 +197,10 @@
1565 __build_write_lock(rw, "__write_lock_failed");
1568 -#define read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
1569 -#define write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
1570 +#define _raw_read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
1571 +#define _raw_write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
1573 -static inline int write_trylock(rwlock_t *lock)
1574 +static inline int _raw_write_trylock(rwlock_t *lock)
1576 atomic_t *count = (atomic_t *)lock;
1577 if (atomic_sub_and_test(RW_LOCK_BIAS, count))
1578 diff -urN linux-2.4.20/include/asm-i386/system.h linux/include/asm-i386/system.h
1579 --- linux-2.4.20/include/asm-i386/system.h 2002-11-28 18:53:15.000000000 -0500
1580 +++ linux/include/asm-i386/system.h 2002-12-11 02:34:48.000000000 -0500
1581 @@ -322,6 +322,13 @@
1582 /* used in the idle loop; sti takes one instruction cycle to complete */
1583 #define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
1585 +#define irqs_disabled() \
1587 + unsigned long flags; \
1588 + __save_flags(flags); \
1589 + !(flags & (1<<9)); \
1592 /* For spinlocks etc */
1593 #define local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
1594 #define local_irq_restore(x) __restore_flags(x)
1595 diff -urN linux-2.4.20/include/asm-mips/smplock.h linux/include/asm-mips/smplock.h
1596 --- linux-2.4.20/include/asm-mips/smplock.h 2002-08-02 20:39:45.000000000 -0400
1597 +++ linux/include/asm-mips/smplock.h 2002-12-11 02:34:48.000000000 -0500
1600 * Default SMP lock implementation
1602 +#include <linux/config.h>
1603 #include <linux/interrupt.h>
1604 #include <linux/spinlock.h>
1606 extern spinlock_t kernel_flag;
1609 #define kernel_locked() spin_is_locked(&kernel_flag)
1611 +#ifdef CONFIG_PREEMPT
1612 +#define kernel_locked() preempt_get_count()
1614 +#define kernel_locked() 1
1619 * Release global kernel lock and global interrupt lock
1622 extern __inline__ void lock_kernel(void)
1624 +#ifdef CONFIG_PREEMPT
1625 + if (current->lock_depth == -1)
1626 + spin_lock(&kernel_flag);
1627 + ++current->lock_depth;
1629 if (!++current->lock_depth)
1630 spin_lock(&kernel_flag);
1634 extern __inline__ void unlock_kernel(void)
1635 diff -urN linux-2.4.20/include/asm-mips/softirq.h linux/include/asm-mips/softirq.h
1636 --- linux-2.4.20/include/asm-mips/softirq.h 2002-11-28 18:53:15.000000000 -0500
1637 +++ linux/include/asm-mips/softirq.h 2002-12-11 02:34:48.000000000 -0500
1640 static inline void cpu_bh_disable(int cpu)
1642 + preempt_disable();
1643 local_bh_count(cpu)++;
1649 local_bh_count(cpu)--;
1655 cpu = smp_processor_id(); \
1656 if (!--local_bh_count(cpu) && softirq_pending(cpu)) \
1658 + preempt_enable(); \
1661 #define in_softirq() (local_bh_count(smp_processor_id()) != 0)
1662 diff -urN linux-2.4.20/include/asm-mips/system.h linux/include/asm-mips/system.h
1663 --- linux-2.4.20/include/asm-mips/system.h 2002-11-28 18:53:15.000000000 -0500
1664 +++ linux/include/asm-mips/system.h 2002-12-11 02:34:48.000000000 -0500
1665 @@ -322,4 +322,18 @@
1666 #define die_if_kernel(msg, regs) \
1667 __die_if_kernel(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)
1669 +extern __inline__ int intr_on(void)
1671 + unsigned long flags;
1672 + save_flags(flags);
1676 +extern __inline__ int intr_off(void)
1678 + return ! intr_on();
1681 +#define irqs_disabled() intr_off()
1683 #endif /* _ASM_SYSTEM_H */
1684 diff -urN linux-2.4.20/include/asm-ppc/dma.h linux/include/asm-ppc/dma.h
1685 --- linux-2.4.20/include/asm-ppc/dma.h 2001-05-21 18:02:06.000000000 -0400
1686 +++ linux/include/asm-ppc/dma.h 2002-12-11 02:34:48.000000000 -0500
1688 #include <linux/config.h>
1690 #include <linux/spinlock.h>
1691 +#include <linux/sched.h>
1692 #include <asm/system.h>
1695 diff -urN linux-2.4.20/include/asm-ppc/hardirq.h linux/include/asm-ppc/hardirq.h
1696 --- linux-2.4.20/include/asm-ppc/hardirq.h 2002-11-28 18:53:15.000000000 -0500
1697 +++ linux/include/asm-ppc/hardirq.h 2002-12-11 02:34:48.000000000 -0500
1699 #define hardirq_exit(cpu) (local_irq_count(cpu)--)
1701 #define synchronize_irq() do { } while (0)
1702 +#define release_irqlock(cpu) do { } while (0)
1704 #else /* CONFIG_SMP */
1706 diff -urN linux-2.4.20/include/asm-ppc/highmem.h linux/include/asm-ppc/highmem.h
1707 --- linux-2.4.20/include/asm-ppc/highmem.h 2001-07-02 17:34:57.000000000 -0400
1708 +++ linux/include/asm-ppc/highmem.h 2002-12-11 02:34:48.000000000 -0500
1711 unsigned long vaddr;
1713 + preempt_disable();
1714 if (page < highmem_start_page)
1715 return page_address(page);
1717 @@ -105,8 +106,10 @@
1718 unsigned long vaddr = (unsigned long) kvaddr;
1719 unsigned int idx = type + KM_TYPE_NR*smp_processor_id();
1721 - if (vaddr < KMAP_FIX_BEGIN) // FIXME
1722 + if (vaddr < KMAP_FIX_BEGIN) { // FIXME
1727 if (vaddr != KMAP_FIX_BEGIN + idx * PAGE_SIZE)
1730 pte_clear(kmap_pte+idx);
1731 flush_tlb_page(0, vaddr);
1736 #endif /* __KERNEL__ */
1737 diff -urN linux-2.4.20/include/asm-ppc/hw_irq.h linux/include/asm-ppc/hw_irq.h
1738 --- linux-2.4.20/include/asm-ppc/hw_irq.h 2002-11-28 18:53:15.000000000 -0500
1739 +++ linux/include/asm-ppc/hw_irq.h 2002-12-11 02:34:48.000000000 -0500
1741 #define __save_flags(flags) __save_flags_ptr((unsigned long *)&flags)
1742 #define __save_and_cli(flags) ({__save_flags(flags);__cli();})
1744 +#define mfmsr() ({unsigned int rval; \
1745 + asm volatile("mfmsr %0" : "=r" (rval)); rval;})
1746 +#define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v))
1748 +#define irqs_disabled() ((mfmsr() & MSR_EE) == 0)
1750 extern void do_lost_interrupts(unsigned long);
1752 #define mask_irq(irq) ({if (irq_desc[irq].handler && irq_desc[irq].handler->disable) irq_desc[irq].handler->disable(irq);})
1753 diff -urN linux-2.4.20/include/asm-ppc/mmu_context.h linux/include/asm-ppc/mmu_context.h
1754 --- linux-2.4.20/include/asm-ppc/mmu_context.h 2001-10-02 12:12:44.000000000 -0400
1755 +++ linux/include/asm-ppc/mmu_context.h 2002-12-11 02:34:48.000000000 -0500
1756 @@ -158,6 +158,10 @@
1757 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
1758 struct task_struct *tsk, int cpu)
1760 +#ifdef CONFIG_PREEMPT
1761 + if (preempt_get_count() == 0)
1764 tsk->thread.pgdir = next->pgd;
1765 get_mmu_context(next);
1766 set_context(next->context, next->pgd);
1767 diff -urN linux-2.4.20/include/asm-ppc/pgalloc.h linux/include/asm-ppc/pgalloc.h
1768 --- linux-2.4.20/include/asm-ppc/pgalloc.h 2001-05-21 18:02:06.000000000 -0400
1769 +++ linux/include/asm-ppc/pgalloc.h 2002-12-11 02:34:48.000000000 -0500
1774 + preempt_disable();
1775 if ((ret = pgd_quicklist) != NULL) {
1776 pgd_quicklist = (unsigned long *)(*ret);
1778 pgtable_cache_size--;
1782 ret = (unsigned long *)get_pgd_slow();
1783 return (pgd_t *)ret;
1786 extern __inline__ void free_pgd_fast(pgd_t *pgd)
1788 + preempt_disable();
1789 *(unsigned long **)pgd = pgd_quicklist;
1790 pgd_quicklist = (unsigned long *) pgd;
1791 pgtable_cache_size++;
1795 extern __inline__ void free_pgd_slow(pgd_t *pgd)
1796 @@ -120,19 +125,23 @@
1800 + preempt_disable();
1801 if ((ret = pte_quicklist) != NULL) {
1802 pte_quicklist = (unsigned long *)(*ret);
1804 pgtable_cache_size--;
1807 return (pte_t *)ret;
1810 extern __inline__ void pte_free_fast(pte_t *pte)
1812 + preempt_disable();
1813 *(unsigned long **)pte = pte_quicklist;
1814 pte_quicklist = (unsigned long *) pte;
1815 pgtable_cache_size++;
1819 extern __inline__ void pte_free_slow(pte_t *pte)
1820 diff -urN linux-2.4.20/include/asm-ppc/smplock.h linux/include/asm-ppc/smplock.h
1821 --- linux-2.4.20/include/asm-ppc/smplock.h 2001-11-02 20:43:54.000000000 -0500
1822 +++ linux/include/asm-ppc/smplock.h 2002-12-11 02:34:48.000000000 -0500
1825 extern spinlock_t kernel_flag;
1828 #define kernel_locked() spin_is_locked(&kernel_flag)
1830 +#ifdef CONFIG_PREEMPT
1831 +#define kernel_locked() preempt_get_count()
1833 +#define kernel_locked() 1
1838 * Release global kernel lock and global interrupt lock
1841 static __inline__ void lock_kernel(void)
1843 +#ifdef CONFIG_PREEMPT
1844 + if (current->lock_depth == -1)
1845 + spin_lock(&kernel_flag);
1846 + ++current->lock_depth;
1848 if (!++current->lock_depth)
1849 spin_lock(&kernel_flag);
1853 static __inline__ void unlock_kernel(void)
1854 diff -urN linux-2.4.20/include/asm-ppc/softirq.h linux/include/asm-ppc/softirq.h
1855 --- linux-2.4.20/include/asm-ppc/softirq.h 2001-09-08 15:02:31.000000000 -0400
1856 +++ linux/include/asm-ppc/softirq.h 2002-12-11 02:34:48.000000000 -0500
1859 #define local_bh_disable() \
1861 + preempt_disable(); \
1862 local_bh_count(smp_processor_id())++; \
1868 local_bh_count(smp_processor_id())--; \
1869 + preempt_enable(); \
1872 -#define local_bh_enable() \
1873 +#define _local_bh_enable() \
1875 if (!--local_bh_count(smp_processor_id()) \
1876 && softirq_pending(smp_processor_id())) { \
1881 +#define local_bh_enable() \
1883 + _local_bh_enable(); \
1884 + preempt_enable(); \
1887 #define in_softirq() (local_bh_count(smp_processor_id()) != 0)
1889 #endif /* __ASM_SOFTIRQ_H */
1890 diff -urN linux-2.4.20/include/asm-sh/hardirq.h linux/include/asm-sh/hardirq.h
1891 --- linux-2.4.20/include/asm-sh/hardirq.h 2001-09-08 15:29:09.000000000 -0400
1892 +++ linux/include/asm-sh/hardirq.h 2002-12-11 02:34:48.000000000 -0500
1895 #define synchronize_irq() barrier()
1897 +#define release_irqlock(cpu) do { } while (0)
1901 #error Super-H SMP is not available
1902 diff -urN linux-2.4.20/include/asm-sh/smplock.h linux/include/asm-sh/smplock.h
1903 --- linux-2.4.20/include/asm-sh/smplock.h 2001-09-08 15:29:09.000000000 -0400
1904 +++ linux/include/asm-sh/smplock.h 2002-12-11 02:34:48.000000000 -0500
1907 #include <linux/config.h>
1911 +#if !defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT)
1913 + * Should never happen, since linux/smp_lock.h catches this case;
1914 + * but in case this file is included directly with neither SMP nor
1915 + * PREEMPT configuration, provide same dummies as linux/smp_lock.h
1917 #define lock_kernel() do { } while(0)
1918 #define unlock_kernel() do { } while(0)
1919 -#define release_kernel_lock(task, cpu, depth) ((depth) = 1)
1920 -#define reacquire_kernel_lock(task, cpu, depth) do { } while(0)
1921 +#define release_kernel_lock(task, cpu) do { } while(0)
1922 +#define reacquire_kernel_lock(task) do { } while(0)
1923 +#define kernel_locked() 1
1925 +#else /* CONFIG_SMP || CONFIG_PREEMPT */
1928 +#error "We do not support SMP on SH yet"
1931 + * Default SMP lock implementation (i.e. the i386 version)
1934 +#include <linux/interrupt.h>
1935 +#include <linux/spinlock.h>
1937 +extern spinlock_t kernel_flag;
1938 +#define lock_bkl() spin_lock(&kernel_flag)
1939 +#define unlock_bkl() spin_unlock(&kernel_flag)
1942 +#define kernel_locked() spin_is_locked(&kernel_flag)
1943 +#elif CONFIG_PREEMPT
1944 +#define kernel_locked() preempt_get_count()
1945 +#else /* neither */
1946 +#define kernel_locked() 1
1950 + * Release global kernel lock and global interrupt lock
1952 +#define release_kernel_lock(task, cpu) \
1954 + if (task->lock_depth >= 0) \
1955 + spin_unlock(&kernel_flag); \
1956 + release_irqlock(cpu); \
1961 + * Re-acquire the kernel lock
1963 +#define reacquire_kernel_lock(task) \
1965 + if (task->lock_depth >= 0) \
1966 + spin_lock(&kernel_flag); \
1970 + * Getting the big kernel lock.
1972 + * This cannot happen asynchronously,
1973 + * so we only need to worry about other
1976 +static __inline__ void lock_kernel(void)
1978 +#ifdef CONFIG_PREEMPT
1979 + if (current->lock_depth == -1)
1980 + spin_lock(&kernel_flag);
1981 + ++current->lock_depth;
1983 -#error "We do not support SMP on SH"
1984 -#endif /* CONFIG_SMP */
1985 + if (!++current->lock_depth)
1986 + spin_lock(&kernel_flag);
1990 +static __inline__ void unlock_kernel(void)
1992 + if (current->lock_depth < 0)
1994 + if (--current->lock_depth < 0)
1995 + spin_unlock(&kernel_flag);
1997 +#endif /* CONFIG_SMP || CONFIG_PREEMPT */
1999 #endif /* __ASM_SH_SMPLOCK_H */
2000 diff -urN linux-2.4.20/include/asm-sh/softirq.h linux/include/asm-sh/softirq.h
2001 --- linux-2.4.20/include/asm-sh/softirq.h 2001-09-08 15:29:09.000000000 -0400
2002 +++ linux/include/asm-sh/softirq.h 2002-12-11 02:34:48.000000000 -0500
2005 #define local_bh_disable() \
2007 + preempt_disable(); \
2008 local_bh_count(smp_processor_id())++; \
2014 local_bh_count(smp_processor_id())--; \
2015 + preempt_enable(); \
2018 #define local_bh_enable() \
2020 && softirq_pending(smp_processor_id())) { \
2023 + preempt_enable(); \
2026 #define in_softirq() (local_bh_count(smp_processor_id()) != 0)
2027 diff -urN linux-2.4.20/include/asm-sh/system.h linux/include/asm-sh/system.h
2028 --- linux-2.4.20/include/asm-sh/system.h 2001-09-08 15:29:09.000000000 -0400
2029 +++ linux/include/asm-sh/system.h 2002-12-11 02:34:48.000000000 -0500
2030 @@ -285,4 +285,17 @@
2031 void disable_hlt(void);
2032 void enable_hlt(void);
2035 + * irqs_disabled - are interrupts disabled?
2037 +static inline int irqs_disabled(void)
2039 + unsigned long flags;
2041 + __save_flags(flags);
2042 + if (flags & 0x000000f0)
2048 diff -urN linux-2.4.20/include/linux/brlock.h linux/include/linux/brlock.h
2049 --- linux-2.4.20/include/linux/brlock.h 2002-11-28 18:53:15.000000000 -0500
2050 +++ linux/include/linux/brlock.h 2002-12-11 02:34:48.000000000 -0500
2051 @@ -171,11 +171,11 @@
2055 -# define br_read_lock(idx) ((void)(idx))
2056 -# define br_read_unlock(idx) ((void)(idx))
2057 -# define br_write_lock(idx) ((void)(idx))
2058 -# define br_write_unlock(idx) ((void)(idx))
2060 +# define br_read_lock(idx) ({ (void)(idx); preempt_disable(); })
2061 +# define br_read_unlock(idx) ({ (void)(idx); preempt_enable(); })
2062 +# define br_write_lock(idx) ({ (void)(idx); preempt_disable(); })
2063 +# define br_write_unlock(idx) ({ (void)(idx); preempt_enable(); })
2064 +#endif /* CONFIG_SMP */
2067 * Now enumerate all of the possible sw/hw IRQ protected
2068 diff -urN linux-2.4.20/include/linux/dcache.h linux/include/linux/dcache.h
2069 --- linux-2.4.20/include/linux/dcache.h 2002-11-28 18:53:15.000000000 -0500
2070 +++ linux/include/linux/dcache.h 2002-12-11 02:34:48.000000000 -0500
2071 @@ -127,31 +127,6 @@
2073 extern spinlock_t dcache_lock;
2076 - * d_drop - drop a dentry
2077 - * @dentry: dentry to drop
2079 - * d_drop() unhashes the entry from the parent
2080 - * dentry hashes, so that it won't be found through
2081 - * a VFS lookup any more. Note that this is different
2082 - * from deleting the dentry - d_delete will try to
2083 - * mark the dentry negative if possible, giving a
2084 - * successful _negative_ lookup, while d_drop will
2085 - * just make the cache lookup fail.
2087 - * d_drop() is used mainly for stuff that wants
2088 - * to invalidate a dentry for some reason (NFS
2089 - * timeouts or autofs deletes).
2092 -static __inline__ void d_drop(struct dentry * dentry)
2094 - spin_lock(&dcache_lock);
2095 - list_del(&dentry->d_hash);
2096 - INIT_LIST_HEAD(&dentry->d_hash);
2097 - spin_unlock(&dcache_lock);
2100 static __inline__ int dname_external(struct dentry *d)
2102 return d->d_name.name != d->d_iname;
2103 @@ -276,3 +251,34 @@
2104 #endif /* __KERNEL__ */
2106 #endif /* __LINUX_DCACHE_H */
2108 +#if !defined(__LINUX_DCACHE_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
2109 +#define __LINUX_DCACHE_H_INLINES
2113 + * d_drop - drop a dentry
2114 + * @dentry: dentry to drop
2116 + * d_drop() unhashes the entry from the parent
2117 + * dentry hashes, so that it won't be found through
2118 + * a VFS lookup any more. Note that this is different
2119 + * from deleting the dentry - d_delete will try to
2120 + * mark the dentry negative if possible, giving a
2121 + * successful _negative_ lookup, while d_drop will
2122 + * just make the cache lookup fail.
2124 + * d_drop() is used mainly for stuff that wants
2125 + * to invalidate a dentry for some reason (NFS
2126 + * timeouts or autofs deletes).
2129 +static __inline__ void d_drop(struct dentry * dentry)
2131 + spin_lock(&dcache_lock);
2132 + list_del(&dentry->d_hash);
2133 + INIT_LIST_HEAD(&dentry->d_hash);
2134 + spin_unlock(&dcache_lock);
2138 diff -urN linux-2.4.20/include/linux/fs_struct.h linux/include/linux/fs_struct.h
2139 --- linux-2.4.20/include/linux/fs_struct.h 2001-07-13 18:10:44.000000000 -0400
2140 +++ linux/include/linux/fs_struct.h 2002-12-11 02:34:48.000000000 -0500
2142 extern void exit_fs(struct task_struct *);
2143 extern void set_fs_altroot(void);
2145 +struct fs_struct *copy_fs_struct(struct fs_struct *old);
2146 +void put_fs_struct(struct fs_struct *fs);
2151 +#if !defined(_LINUX_FS_STRUCT_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
2152 +#define _LINUX_FS_STRUCT_H_INLINES
2155 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
2156 * It can block. Requires the big lock held.
2162 -struct fs_struct *copy_fs_struct(struct fs_struct *old);
2163 -void put_fs_struct(struct fs_struct *fs);
2167 diff -urN linux-2.4.20/include/linux/sched.h linux/include/linux/sched.h
2168 --- linux-2.4.20/include/linux/sched.h 2002-11-28 18:53:15.000000000 -0500
2169 +++ linux/include/linux/sched.h 2002-12-11 02:34:48.000000000 -0500
2171 #define TASK_UNINTERRUPTIBLE 2
2172 #define TASK_ZOMBIE 4
2173 #define TASK_STOPPED 8
2174 +#define PREEMPT_ACTIVE 0x4000000
2176 #define __set_task_state(tsk, state_value) \
2177 do { (tsk)->state = (state_value); } while (0)
2179 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
2180 extern signed long FASTCALL(schedule_timeout(signed long timeout));
2181 asmlinkage void schedule(void);
2182 +#ifdef CONFIG_PREEMPT
2183 +asmlinkage void preempt_schedule(void);
2186 extern int schedule_task(struct tq_struct *task);
2187 extern void flush_scheduled_tasks(void);
2189 * offsets of these are hardcoded elsewhere - touch with care
2191 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
2192 - unsigned long flags; /* per process flags, defined below */
2193 + int preempt_count; /* 0 => preemptable, <0 => BUG */
2195 mm_segment_t addr_limit; /* thread address space:
2196 0-0xBFFFFFFF for user-thead
2198 struct mm_struct *active_mm;
2199 struct list_head local_pages;
2200 unsigned int allocation_order, nr_local_pages;
2201 + unsigned long flags;
2204 struct linux_binfmt *binfmt;
2205 @@ -955,5 +960,10 @@
2209 +#define _TASK_STRUCT_DEFINED
2210 +#include <linux/dcache.h>
2211 +#include <linux/tqueue.h>
2212 +#include <linux/fs_struct.h>
2214 #endif /* __KERNEL__ */
2216 diff -urN linux-2.4.20/include/linux/smp_lock.h linux/include/linux/smp_lock.h
2217 --- linux-2.4.20/include/linux/smp_lock.h 2001-11-22 14:46:27.000000000 -0500
2218 +++ linux/include/linux/smp_lock.h 2002-12-11 02:34:48.000000000 -0500
2221 #include <linux/config.h>
2224 +#if !defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT)
2226 #define lock_kernel() do { } while(0)
2227 #define unlock_kernel() do { } while(0)
2228 diff -urN linux-2.4.20/include/linux/spinlock.h linux/include/linux/spinlock.h
2229 --- linux-2.4.20/include/linux/spinlock.h 2002-11-28 18:53:15.000000000 -0500
2230 +++ linux/include/linux/spinlock.h 2002-12-11 02:34:48.000000000 -0500
2232 #define __LINUX_SPINLOCK_H
2234 #include <linux/config.h>
2235 +#include <linux/compiler.h>
2238 * These are the generic versions of the spinlocks and read-write
2241 #if (DEBUG_SPINLOCKS < 1)
2243 +#ifndef CONFIG_PREEMPT
2244 #define atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
2245 #define ATOMIC_DEC_AND_LOCK
2249 * Your basic spinlocks, allowing only a single CPU anywhere
2253 #define spin_lock_init(lock) do { } while(0)
2254 -#define spin_lock(lock) (void)(lock) /* Not "unused variable". */
2255 +#define _raw_spin_lock(lock) (void)(lock) /* Not "unused variable". */
2256 #define spin_is_locked(lock) (0)
2257 -#define spin_trylock(lock) ({1; })
2258 +#define _raw_spin_trylock(lock) ({1; })
2259 #define spin_unlock_wait(lock) do { } while(0)
2260 -#define spin_unlock(lock) do { } while(0)
2261 +#define _raw_spin_unlock(lock) do { } while(0)
2263 #elif (DEBUG_SPINLOCKS < 2)
2265 @@ -144,13 +147,78 @@
2268 #define rwlock_init(lock) do { } while(0)
2269 -#define read_lock(lock) (void)(lock) /* Not "unused variable". */
2270 -#define read_unlock(lock) do { } while(0)
2271 -#define write_lock(lock) (void)(lock) /* Not "unused variable". */
2272 -#define write_unlock(lock) do { } while(0)
2273 +#define _raw_read_lock(lock) (void)(lock) /* Not "unused variable". */
2274 +#define _raw_read_unlock(lock) do { } while(0)
2275 +#define _raw_write_lock(lock) (void)(lock) /* Not "unused variable". */
2276 +#define _raw_write_unlock(lock) do { } while(0)
2280 +#ifdef CONFIG_PREEMPT
2282 +#define preempt_get_count() (current->preempt_count)
2283 +#define preempt_is_disabled() (preempt_get_count() != 0)
2285 +#define preempt_disable() \
2287 + ++current->preempt_count; \
2291 +#define preempt_enable_no_resched() \
2293 + --current->preempt_count; \
2297 +#define preempt_enable() \
2299 + --current->preempt_count; \
2301 + if (unlikely(current->preempt_count < current->need_resched)) \
2302 + preempt_schedule(); \
2305 +#define spin_lock(lock) \
2307 + preempt_disable(); \
2308 + _raw_spin_lock(lock); \
2311 +#define spin_trylock(lock) ({preempt_disable(); _raw_spin_trylock(lock) ? \
2312 + 1 : ({preempt_enable(); 0;});})
2313 +#define spin_unlock(lock) \
2315 + _raw_spin_unlock(lock); \
2316 + preempt_enable(); \
2319 +#define read_lock(lock) ({preempt_disable(); _raw_read_lock(lock);})
2320 +#define read_unlock(lock) ({_raw_read_unlock(lock); preempt_enable();})
2321 +#define write_lock(lock) ({preempt_disable(); _raw_write_lock(lock);})
2322 +#define write_unlock(lock) ({_raw_write_unlock(lock); preempt_enable();})
2323 +#define write_trylock(lock) ({preempt_disable();_raw_write_trylock(lock) ? \
2324 + 1 : ({preempt_enable(); 0;});})
2328 +#define preempt_get_count() (0)
2329 +#define preempt_is_disabled() (1)
2330 +#define preempt_disable() do { } while (0)
2331 +#define preempt_enable_no_resched() do {} while(0)
2332 +#define preempt_enable() do { } while (0)
2334 +#define spin_lock(lock) _raw_spin_lock(lock)
2335 +#define spin_trylock(lock) _raw_spin_trylock(lock)
2336 +#define spin_unlock(lock) _raw_spin_unlock(lock)
2338 +#define read_lock(lock) _raw_read_lock(lock)
2339 +#define read_unlock(lock) _raw_read_unlock(lock)
2340 +#define write_lock(lock) _raw_write_lock(lock)
2341 +#define write_unlock(lock) _raw_write_unlock(lock)
2342 +#define write_trylock(lock) _raw_write_trylock(lock)
2345 /* "lock on reference count zero" */
2346 #ifndef ATOMIC_DEC_AND_LOCK
2347 #include <asm/atomic.h>
2348 diff -urN linux-2.4.20/include/linux/tqueue.h linux/include/linux/tqueue.h
2349 --- linux-2.4.20/include/linux/tqueue.h 2001-11-22 14:46:19.000000000 -0500
2350 +++ linux/include/linux/tqueue.h 2002-12-11 02:34:48.000000000 -0500
2352 extern spinlock_t tqueue_lock;
2355 + * Call all "bottom halves" on a given list.
2358 +extern void __run_task_queue(task_queue *list);
2360 +static inline void run_task_queue(task_queue *list)
2362 + if (TQ_ACTIVE(*list))
2363 + __run_task_queue(list);
2366 +#endif /* _LINUX_TQUEUE_H */
2368 +#if !defined(_LINUX_TQUEUE_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
2369 +#define _LINUX_TQUEUE_H_INLINES
2371 * Queue a task on a tq. Return non-zero if it was successfully
2374 @@ -109,17 +125,4 @@
2380 - * Call all "bottom halfs" on a given list.
2383 -extern void __run_task_queue(task_queue *list);
2385 -static inline void run_task_queue(task_queue *list)
2387 - if (TQ_ACTIVE(*list))
2388 - __run_task_queue(list);
2391 -#endif /* _LINUX_TQUEUE_H */
2393 diff -urN linux-2.4.20/kernel/exit.c linux/kernel/exit.c
2394 --- linux-2.4.20/kernel/exit.c 2002-11-28 18:53:15.000000000 -0500
2395 +++ linux/kernel/exit.c 2002-12-11 02:34:48.000000000 -0500
2397 /* more a memory barrier than a real lock */
2401 enter_lazy_tlb(mm, current, smp_processor_id());
2406 @@ -435,6 +435,11 @@
2407 tsk->flags |= PF_EXITING;
2408 del_timer_sync(&tsk->real_timer);
2410 + if (unlikely(preempt_get_count()))
2411 + printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
2412 + current->comm, current->pid,
2413 + preempt_get_count());
2416 #ifdef CONFIG_BSD_PROCESS_ACCT
2418 diff -urN linux-2.4.20/kernel/fork.c linux/kernel/fork.c
2419 --- linux-2.4.20/kernel/fork.c 2002-11-28 18:53:15.000000000 -0500
2420 +++ linux/kernel/fork.c 2002-12-11 02:34:48.000000000 -0500
2421 @@ -629,6 +629,13 @@
2422 if (p->binfmt && p->binfmt->module)
2423 __MOD_INC_USE_COUNT(p->binfmt->module);
2425 +#ifdef CONFIG_PREEMPT
2427 + * Continue with preemption disabled as part of the context
2428 + * switch, so start with preempt_count set to 1.
2430 + p->preempt_count = 1;
2434 p->state = TASK_UNINTERRUPTIBLE;
2435 diff -urN linux-2.4.20/kernel/ksyms.c linux/kernel/ksyms.c
2436 --- linux-2.4.20/kernel/ksyms.c 2002-11-28 18:53:15.000000000 -0500
2437 +++ linux/kernel/ksyms.c 2002-12-11 02:34:48.000000000 -0500
2439 EXPORT_SYMBOL(interruptible_sleep_on);
2440 EXPORT_SYMBOL(interruptible_sleep_on_timeout);
2441 EXPORT_SYMBOL(schedule);
2442 +#ifdef CONFIG_PREEMPT
2443 +EXPORT_SYMBOL(preempt_schedule);
2445 EXPORT_SYMBOL(schedule_timeout);
2446 EXPORT_SYMBOL(yield);
2447 EXPORT_SYMBOL(__cond_resched);
2448 diff -urN linux-2.4.20/kernel/sched.c linux/kernel/sched.c
2449 --- linux-2.4.20/kernel/sched.c 2002-11-28 18:53:15.000000000 -0500
2450 +++ linux/kernel/sched.c 2002-12-11 02:34:48.000000000 -0500
2453 task_release_cpu(prev);
2455 - if (prev->state == TASK_RUNNING)
2456 + if (task_on_runqueue(prev))
2463 spin_lock_irqsave(&runqueue_lock, flags);
2464 - if ((prev->state == TASK_RUNNING) && !task_has_cpu(prev))
2465 + if (task_on_runqueue(prev) && !task_has_cpu(prev))
2466 reschedule_idle(prev);
2467 spin_unlock_irqrestore(&runqueue_lock, flags);
2470 asmlinkage void schedule_tail(struct task_struct *prev)
2472 __schedule_tail(prev);
2477 @@ -551,9 +552,10 @@
2478 struct list_head *tmp;
2482 spin_lock_prefetch(&runqueue_lock);
2484 + preempt_disable();
2486 BUG_ON(!current->active_mm);
2489 @@ -581,6 +583,14 @@
2490 move_last_runqueue(prev);
2493 +#ifdef CONFIG_PREEMPT
2495 + * entering from preempt_schedule, off a kernel preemption,
2496 + * go straight to picking the next task.
2498 + if (unlikely(preempt_get_count() & PREEMPT_ACTIVE))
2499 + goto treat_like_run;
2501 switch (prev->state) {
2502 case TASK_INTERRUPTIBLE:
2503 if (signal_pending(prev)) {
2505 del_from_runqueue(prev);
2508 +#ifdef CONFIG_PREEMPT
2511 prev->need_resched = 0;
2514 @@ -699,9 +712,31 @@
2515 reacquire_kernel_lock(current);
2516 if (current->need_resched)
2517 goto need_resched_back;
2518 + preempt_enable_no_resched();
2522 +#ifdef CONFIG_PREEMPT
2524 + * this is the entry point to schedule() from in-kernel preemption
2526 +asmlinkage void preempt_schedule(void)
2528 + if (unlikely(irqs_disabled()))
2532 + current->preempt_count += PREEMPT_ACTIVE;
2534 + current->preempt_count -= PREEMPT_ACTIVE;
2536 + /* we could miss a preemption opportunity between schedule and now */
2538 + if (unlikely(current->need_resched))
2539 + goto need_resched;
2541 +#endif /* CONFIG_PREEMPT */
2544 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just wake everything
2545 * up. If it's an exclusive wakeup (nr_exclusive == small +ve number) then we wake all the
2546 @@ -1327,6 +1362,13 @@
2547 sched_data->curr = current;
2548 sched_data->last_schedule = get_cycles();
2549 clear_bit(current->processor, &wait_init_idle);
2550 +#ifdef CONFIG_PREEMPT
2552 + * fix up the preempt_count for non-CPU0 idle threads
2554 + if (current->processor)
2555 + current->preempt_count = 0;
2559 extern void init_timervecs (void);
2560 diff -urN linux-2.4.20/lib/dec_and_lock.c linux/lib/dec_and_lock.c
2561 --- linux-2.4.20/lib/dec_and_lock.c 2001-10-03 12:11:26.000000000 -0400
2562 +++ linux/lib/dec_and_lock.c 2002-12-11 02:34:48.000000000 -0500
2564 #include <linux/module.h>
2565 #include <linux/spinlock.h>
2566 +#include <linux/sched.h>
2567 #include <asm/atomic.h>
2570 diff -urN linux-2.4.20/MAINTAINERS linux/MAINTAINERS
2571 --- linux-2.4.20/MAINTAINERS 2002-11-28 18:53:08.000000000 -0500
2572 +++ linux/MAINTAINERS 2002-12-11 02:34:48.000000000 -0500
2573 @@ -1310,6 +1310,14 @@
2574 M: mostrows@styx.uwaterloo.ca
2580 +L: linux-kernel@vger.kernel.org
2581 +L: kpreempt-tech@lists.sourceforge.net
2582 +W: http://tech9.net/rml/linux
2585 PROMISE DC4030 CACHING DISK CONTROLLER DRIVER
2587 M: promise@pnd-pc.demon.co.uk
2588 diff -urN linux-2.4.20/mm/slab.c linux/mm/slab.c
2589 --- linux-2.4.20/mm/slab.c 2002-11-28 18:53:15.000000000 -0500
2590 +++ linux/mm/slab.c 2002-12-11 02:34:48.000000000 -0500
2592 * constructors and destructors are called without any locking.
2593 * Several members in kmem_cache_t and slab_t never change, they
2594 * are accessed without any locking.
2595 - * The per-cpu arrays are never accessed from the wrong cpu, no locking.
2596 + * The per-cpu arrays are never accessed from the wrong cpu, no locking,
2597 + * and local interrupts are disabled so slab code is preempt-safe.
2598 * The non-constant members are protected with a per-cache irq spinlock.
2600 * Further notes from the original documentation:
2601 diff -urN linux-2.4.20/net/core/dev.c linux/net/core/dev.c
2602 --- linux-2.4.20/net/core/dev.c 2002-11-28 18:53:15.000000000 -0500
2603 +++ linux/net/core/dev.c 2002-12-11 02:34:48.000000000 -0500
2604 @@ -1049,9 +1049,15 @@
2605 int cpu = smp_processor_id();
2607 if (dev->xmit_lock_owner != cpu) {
2609 + * The spin_lock effectively does a preempt lock, but
2610 + * we are about to drop that...
2612 + preempt_disable();
2613 spin_unlock(&dev->queue_lock);
2614 spin_lock(&dev->xmit_lock);
2615 dev->xmit_lock_owner = cpu;
2618 if (!netif_queue_stopped(dev)) {
2620 diff -urN linux-2.4.20/net/core/skbuff.c linux/net/core/skbuff.c
2621 --- linux-2.4.20/net/core/skbuff.c 2002-08-02 20:39:46.000000000 -0400
2622 +++ linux/net/core/skbuff.c 2002-12-11 02:34:48.000000000 -0500
2623 @@ -111,33 +111,37 @@
2625 static __inline__ struct sk_buff *skb_head_from_pool(void)
2627 - struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;
2628 + struct sk_buff_head *list;
2629 + struct sk_buff *skb = NULL;
2630 + unsigned long flags;
2632 - if (skb_queue_len(list)) {
2633 - struct sk_buff *skb;
2634 - unsigned long flags;
2635 + local_irq_save(flags);
2637 - local_irq_save(flags);
2638 + list = &skb_head_pool[smp_processor_id()].list;
2640 + if (skb_queue_len(list))
2641 skb = __skb_dequeue(list);
2642 - local_irq_restore(flags);
2647 + local_irq_restore(flags);
2651 static __inline__ void skb_head_to_pool(struct sk_buff *skb)
2653 - struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;
2654 + struct sk_buff_head *list;
2655 + unsigned long flags;
2657 - if (skb_queue_len(list) < sysctl_hot_list_len) {
2658 - unsigned long flags;
2659 + local_irq_save(flags);
2660 + list = &skb_head_pool[smp_processor_id()].list;
2662 - local_irq_save(flags);
2663 + if (skb_queue_len(list) < sysctl_hot_list_len) {
2664 __skb_queue_head(list, skb);
2665 local_irq_restore(flags);
2670 + local_irq_restore(flags);
2671 kmem_cache_free(skbuff_head_cache, skb);
2674 diff -urN linux-2.4.20/net/socket.c linux/net/socket.c
2675 --- linux-2.4.20/net/socket.c 2002-11-28 18:53:16.000000000 -0500
2676 +++ linux/net/socket.c 2002-12-11 02:34:48.000000000 -0500
2679 static struct net_proto_family *net_families[NPROTO];
2682 +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
2683 static atomic_t net_family_lockct = ATOMIC_INIT(0);
2684 static spinlock_t net_family_lock = SPIN_LOCK_UNLOCKED;
2686 diff -urN linux-2.4.20/net/sunrpc/pmap_clnt.c linux/net/sunrpc/pmap_clnt.c
2687 --- linux-2.4.20/net/sunrpc/pmap_clnt.c 2002-08-02 20:39:46.000000000 -0400
2688 +++ linux/net/sunrpc/pmap_clnt.c 2002-12-11 02:34:48.000000000 -0500
2690 #include <linux/config.h>
2691 #include <linux/types.h>
2692 #include <linux/socket.h>
2693 +#include <linux/sched.h>
2694 #include <linux/kernel.h>
2695 #include <linux/errno.h>
2696 #include <linux/uio.h>