3 Documentation/Configure.help | 11 ++++
4 Documentation/preempt-locking.txt | 104 ++++++++++++++++++++++++++++++++++++++
6 arch/alpha/kernel/process.c | 1
8 arch/arm/kernel/entry-armv.S | 40 ++++++++++++++
9 arch/arm/tools/getconstants.c | 6 ++
10 arch/i386/config.in | 8 ++
11 arch/i386/kernel/cpuid.c | 4 +
12 arch/i386/kernel/entry.S | 49 +++++++++++++++++
13 arch/i386/kernel/i387.c | 3 +
14 arch/i386/kernel/ioport.c | 5 +
15 arch/i386/kernel/irq.c | 15 ++++-
16 arch/i386/kernel/ldt.c | 2
17 arch/i386/kernel/microcode.c | 3 +
18 arch/i386/kernel/msr.c | 15 +++--
19 arch/i386/kernel/mtrr.c | 6 ++
20 arch/i386/kernel/smp.c | 29 ++++++++--
21 arch/i386/kernel/traps.c | 2
22 arch/i386/lib/dec_and_lock.c | 1
23 arch/i386/mm/init.c | 2
24 arch/mips/config-shared.in | 1
25 arch/mips/kernel/i8259.c | 1
26 arch/mips/kernel/irq.c | 29 ++++++++++
27 arch/mips/mm/extable.c | 1
28 arch/ppc/config.in | 2
29 arch/ppc/kernel/entry.S | 40 ++++++++++++++
30 arch/ppc/kernel/irq.c | 52 ++++++++++++++++---
31 arch/ppc/kernel/mk_defs.c | 3 +
32 arch/ppc/kernel/open_pic.c | 9 ++-
33 arch/ppc/kernel/setup.c | 14 +++++
34 arch/ppc/kernel/temp.c | 8 ++
35 arch/ppc/lib/dec_and_lock.c | 1
36 arch/ppc/mm/tlb.c | 16 +++++
38 arch/sh/kernel/entry.S | 104 +++++++++++++++++++++++++++++++++++---
39 arch/sh/kernel/irq.c | 17 ++++++
40 drivers/ieee1394/csr.c | 1
41 drivers/sound/sound_core.c | 1
46 include/asm-arm/dma.h | 1
47 include/asm-arm/hardirq.h | 1
48 include/asm-arm/pgalloc.h | 8 ++
49 include/asm-arm/smplock.h | 11 ++++
50 include/asm-arm/softirq.h | 8 +-
51 include/asm-arm/system.h | 7 ++
52 include/asm-i386/desc.h | 5 +
53 include/asm-i386/hardirq.h | 14 +++--
54 include/asm-i386/highmem.h | 7 ++
55 include/asm-i386/hw_irq.h | 19 +++++-
56 include/asm-i386/i387.h | 3 -
57 include/asm-i386/pgalloc.h | 12 ++++
58 include/asm-i386/smplock.h | 14 +++++
59 include/asm-i386/softirq.h | 11 ++--
60 include/asm-i386/spinlock.h | 18 +++---
61 include/asm-i386/system.h | 7 ++
62 include/asm-mips/smplock.h | 15 +++++
63 include/asm-mips/softirq.h | 3 +
64 include/asm-mips/system.h | 14 +++++
65 include/asm-ppc/dma.h | 1
66 include/asm-ppc/hardirq.h | 9 ++-
67 include/asm-ppc/highmem.h | 6 +-
68 include/asm-ppc/hw_irq.h | 6 ++
69 include/asm-ppc/mmu_context.h | 4 +
70 include/asm-ppc/pgalloc.h | 9 +++
71 include/asm-ppc/smplock.h | 14 +++++
72 include/asm-ppc/softirq.h | 13 ++++
73 include/asm-sh/hardirq.h | 2
74 include/asm-sh/smplock.h | 85 ++++++++++++++++++++++++++++---
75 include/asm-sh/softirq.h | 3 +
76 include/asm-sh/system.h | 13 ++++
77 include/linux/brlock.h | 10 +--
78 include/linux/dcache.h | 56 +++++++++++---------
79 include/linux/fs_struct.h | 13 +++-
80 include/linux/sched.h | 12 ++++
81 include/linux/smp_lock.h | 2
82 include/linux/spinlock.h | 82 +++++++++++++++++++++++++++--
83 include/linux/tqueue.h | 31 ++++++-----
87 kernel/sched.c | 48 ++++++++++++++++-
88 kernel/softirq.c | 13 +++-
89 lib/dec_and_lock.c | 1
91 net/core/dev.c | 11 +++-
92 net/core/skbuff.c | 30 ++++++----
94 net/sunrpc/pmap_clnt.c | 1
95 93 files changed, 1138 insertions(+), 165 deletions(-)
98 diff -urN linux-2.4.20/arch/alpha/kernel/process.c linux/arch/alpha/kernel/process.c
99 --- linux-2.4.20/arch/alpha/kernel/process.c 2001-09-30 15:26:08.000000000 -0400
100 +++ linux/arch/alpha/kernel/process.c 2003-04-11 17:03:05.182081640 -0400
103 args.restart_cmd = restart_cmd;
106 smp_call_function(common_shutdown_1, &args, 1, 0);
108 common_shutdown_1(&args);
109 diff -urN linux-2.4.20/arch/arm/config.in linux/arch/arm/config.in
110 --- linux-2.4.20/arch/arm/config.in 2002-11-28 18:53:09.000000000 -0500
111 +++ linux/arch/arm/config.in 2003-04-11 17:02:55.318581120 -0400
114 define_bool CONFIG_DISCONTIGMEM n
117 +dep_bool 'Preemptible Kernel' CONFIG_PREEMPT $CONFIG_CPU_32
120 mainmenu_option next_comment
121 diff -urN linux-2.4.20/arch/arm/kernel/entry-armv.S linux/arch/arm/kernel/entry-armv.S
122 --- linux-2.4.20/arch/arm/kernel/entry-armv.S 2002-08-02 20:39:42.000000000 -0400
123 +++ linux/arch/arm/kernel/entry-armv.S 2003-04-11 17:02:55.393569720 -0400
127 stmia r4, {r5, r6, r7, r8, r9} @ save sp_SVC, lr_SVC, pc, cpsr, old_ro
128 +#ifdef CONFIG_PREEMPT
129 + get_current_task r9
130 + ldr r8, [r9, #TSK_PREEMPT]
132 + str r8, [r9, #TSK_PREEMPT]
134 1: get_irqnr_and_base r0, r6, r5, lr
141 +#ifdef CONFIG_PREEMPT
142 +2: ldr r8, [r9, #TSK_PREEMPT]
145 + ldr r7, [r9, #TSK_NEED_RESCHED]
149 + ldr r0, [r6, #IRQSTAT_BH_COUNT]
153 + msr cpsr_c, r0 @ enable interrupts
154 + bl SYMBOL_NAME(preempt_schedule)
155 + mov r0, #I_BIT | MODE_SVC
156 + msr cpsr_c, r0 @ disable interrupts
158 +3: str r8, [r9, #TSK_PREEMPT]
160 ldr r0, [sp, #S_PSR] @ irqs are already disabled
162 ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
164 .LCprocfns: .word SYMBOL_NAME(processor)
166 .LCfp: .word SYMBOL_NAME(fp_enter)
167 +#ifdef CONFIG_PREEMPT
168 +.LCirqstat: .word SYMBOL_NAME(irq_stat)
175 alignment_trap r4, r7, __temp_irq
177 + get_current_task tsk
178 +#ifdef CONFIG_PREEMPT
179 + ldr r0, [tsk, #TSK_PREEMPT]
181 + str r0, [tsk, #TSK_PREEMPT]
183 1: get_irqnr_and_base r0, r6, r5, lr
187 @ routine called with r0 = irq number, r1 = struct pt_regs *
190 +#ifdef CONFIG_PREEMPT
191 + ldr r0, [tsk, #TSK_PREEMPT]
193 + str r0, [tsk, #TSK_PREEMPT]
196 - get_current_task tsk
200 diff -urN linux-2.4.20/arch/arm/tools/getconstants.c linux/arch/arm/tools/getconstants.c
201 --- linux-2.4.20/arch/arm/tools/getconstants.c 2001-10-11 12:04:57.000000000 -0400
202 +++ linux/arch/arm/tools/getconstants.c 2003-04-11 17:02:55.394569568 -0400
205 #include <asm/pgtable.h>
206 #include <asm/uaccess.h>
207 +#include <asm/hardirq.h>
210 * Make sure that the compiler and target are compatible.
212 DEFN("TSS_SAVE", OFF_TSK(thread.save));
213 DEFN("TSS_FPESAVE", OFF_TSK(thread.fpstate.soft.save));
215 +#ifdef CONFIG_PREEMPT
216 +DEFN("TSK_PREEMPT", OFF_TSK(preempt_count));
217 +DEFN("IRQSTAT_BH_COUNT", (unsigned long)&(((irq_cpustat_t *)0)->__local_bh_count));
221 DEFN("TSS_DOMAIN", OFF_TSK(thread.domain));
223 diff -urN linux-2.4.20/arch/i386/config.in linux/arch/i386/config.in
224 --- linux-2.4.20/arch/i386/config.in 2002-11-28 18:53:09.000000000 -0500
225 +++ linux/arch/i386/config.in 2003-04-11 17:02:55.395569416 -0400
227 bool 'Math emulation' CONFIG_MATH_EMULATION
228 bool 'MTRR (Memory Type Range Register) support' CONFIG_MTRR
229 bool 'Symmetric multi-processing support' CONFIG_SMP
230 +bool 'Preemptible Kernel' CONFIG_PREEMPT
231 if [ "$CONFIG_SMP" != "y" ]; then
232 bool 'Local APIC support on uniprocessors' CONFIG_X86_UP_APIC
233 dep_bool 'IO-APIC support on uniprocessors' CONFIG_X86_UP_IOAPIC $CONFIG_X86_UP_APIC
235 define_bool CONFIG_X86_TSC y
238 -if [ "$CONFIG_SMP" = "y" -a "$CONFIG_X86_CMPXCHG" = "y" ]; then
239 - define_bool CONFIG_HAVE_DEC_LOCK y
240 +if [ "$CONFIG_SMP" = "y" -o "$CONFIG_PREEMPT" = "y" ]; then
241 + if [ "$CONFIG_X86_CMPXCHG" = "y" ]; then
242 + define_bool CONFIG_HAVE_DEC_LOCK y
248 mainmenu_option next_comment
249 diff -urN linux-2.4.20/arch/i386/kernel/cpuid.c linux/arch/i386/kernel/cpuid.c
250 --- linux-2.4.20/arch/i386/kernel/cpuid.c 2001-10-11 12:04:57.000000000 -0400
251 +++ linux/arch/i386/kernel/cpuid.c 2003-04-11 17:03:05.217076320 -0400
253 static inline void do_cpuid(int cpu, u32 reg, u32 *data)
255 struct cpuid_command cmd;
259 if ( cpu == smp_processor_id() ) {
260 cpuid(reg, &data[0], &data[1], &data[2], &data[3]);
264 smp_call_function(cpuid_smp_cpuid, &cmd, 1, 1);
268 #else /* ! CONFIG_SMP */
270 diff -urN linux-2.4.20/arch/i386/kernel/entry.S linux/arch/i386/kernel/entry.S
271 --- linux-2.4.20/arch/i386/kernel/entry.S 2002-11-28 18:53:09.000000000 -0500
272 +++ linux/arch/i386/kernel/entry.S 2003-04-11 17:02:55.397569112 -0400
274 * these are offsets into the task-struct.
286 +/* These are offsets into the irq_stat structure
287 + * There is one per cpu and it is aligned to 32
288 + * byte boundry (we put that here as a shift count)
290 +irq_array_shift = CONFIG_X86_L1_CACHE_SHIFT
292 +irq_stat_local_irq_count = 4
293 +irq_stat_local_bh_count = 8
298 +#define GET_CPU_INDX movl processor(%ebx),%eax; \
299 + shll $irq_array_shift,%eax
300 +#define GET_CURRENT_CPU_INDX GET_CURRENT(%ebx); \
302 +#define CPU_INDX (,%eax)
304 +#define GET_CPU_INDX
305 +#define GET_CURRENT_CPU_INDX GET_CURRENT(%ebx)
311 @@ -255,12 +275,30 @@
315 +#ifdef CONFIG_PREEMPT
317 + decl preempt_count(%ebx)
320 movl EFLAGS(%esp),%eax # mix EFLAGS and CS
322 testl $(VM_MASK | 3),%eax # return to VM86 mode or non-supervisor?
323 jne ret_from_sys_call
324 +#ifdef CONFIG_PREEMPT
325 + cmpl $0,preempt_count(%ebx)
327 + cmpl $0,need_resched(%ebx)
329 + movl SYMBOL_NAME(irq_stat)+irq_stat_local_bh_count CPU_INDX,%ecx
330 + addl SYMBOL_NAME(irq_stat)+irq_stat_local_irq_count CPU_INDX,%ecx
332 + incl preempt_count(%ebx)
334 + call SYMBOL_NAME(preempt_schedule)
346 +#ifdef CONFIG_PREEMPT
349 jmp ret_from_exception
351 ENTRY(coprocessor_error)
352 @@ -316,12 +357,18 @@
354 testl $0x4,%eax # EM (math emulation bit)
355 jne device_not_available_emulate
356 +#ifdef CONFIG_PREEMPT
359 call SYMBOL_NAME(math_state_restore)
360 jmp ret_from_exception
361 device_not_available_emulate:
362 pushl $0 # temporary storage for ORIG_EIP
363 call SYMBOL_NAME(math_emulate)
365 +#ifdef CONFIG_PREEMPT
368 jmp ret_from_exception
371 diff -urN linux-2.4.20/arch/i386/kernel/i387.c linux/arch/i386/kernel/i387.c
372 --- linux-2.4.20/arch/i386/kernel/i387.c 2002-08-02 20:39:42.000000000 -0400
373 +++ linux/arch/i386/kernel/i387.c 2003-04-11 17:02:55.398568960 -0400
376 #include <linux/config.h>
377 #include <linux/sched.h>
378 +#include <linux/spinlock.h>
379 #include <linux/init.h>
380 #include <asm/processor.h>
381 #include <asm/i387.h>
384 struct task_struct *tsk = current;
388 if (tsk->flags & PF_USEDFPU) {
389 __save_init_fpu(tsk);
391 diff -urN linux-2.4.20/arch/i386/kernel/ioport.c linux/arch/i386/kernel/ioport.c
392 --- linux-2.4.20/arch/i386/kernel/ioport.c 1999-07-19 18:22:48.000000000 -0400
393 +++ linux/arch/i386/kernel/ioport.c 2003-04-11 17:03:05.218076168 -0400
395 asmlinkage int sys_ioperm(unsigned long from, unsigned long num, int turn_on)
397 struct thread_struct * t = ¤t->thread;
398 - struct tss_struct * tss = init_tss + smp_processor_id();
399 + struct tss_struct * tss;
401 if ((from + num <= from) || (from + num > IO_BITMAP_SIZE*32))
404 * IO bitmap up. ioperm() is much less timing critical than clone(),
405 * this is why we delay this operation until now:
408 + tss = init_tss + smp_processor_id();
414 set_bitmap(t->io_bitmap, from, num, !turn_on);
415 set_bitmap(tss->io_bitmap, from, num, !turn_on);
420 diff -urN linux-2.4.20/arch/i386/kernel/irq.c linux/arch/i386/kernel/irq.c
421 --- linux-2.4.20/arch/i386/kernel/irq.c 2002-11-28 18:53:09.000000000 -0500
422 +++ linux/arch/i386/kernel/irq.c 2003-04-11 17:03:05.255070544 -0400
429 SYNC_OTHER_CORES(cpu);
431 + preempt_enable_no_resched();
438 if (flags & (1 << EFLAGS_IF_SHIFT)) {
439 - int cpu = smp_processor_id();
442 + cpu = smp_processor_id();
443 if (!local_irq_count(cpu))
446 @@ -368,11 +371,14 @@
448 void __global_sti(void)
450 - int cpu = smp_processor_id();
454 + cpu = smp_processor_id();
455 if (!local_irq_count(cpu))
456 release_irqlock(cpu);
462 @@ -387,13 +393,15 @@
466 - int cpu = smp_processor_id();
470 local_enabled = (flags >> EFLAGS_IF_SHIFT) & 1;
471 /* default to local */
472 retval = 2 + local_enabled;
475 + cpu = smp_processor_id();
476 /* check for global flags if we're not in an interrupt */
477 if (!local_irq_count(cpu)) {
480 if (global_irq_holder == cpu)
487 diff -urN linux-2.4.20/arch/i386/kernel/ldt.c linux/arch/i386/kernel/ldt.c
488 --- linux-2.4.20/arch/i386/kernel/ldt.c 2001-10-17 17:46:29.000000000 -0400
489 +++ linux/arch/i386/kernel/ldt.c 2003-04-11 17:03:05.322060360 -0400
491 * the GDT index of the LDT is allocated dynamically, and is
492 * limited by MAX_LDT_DESCRIPTORS.
495 down_write(&mm->mmap_sem);
496 if (!mm->context.segments) {
497 void * segments = vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE);
501 up_write(&mm->mmap_sem);
506 diff -urN linux-2.4.20/arch/i386/kernel/microcode.c linux/arch/i386/kernel/microcode.c
507 --- linux-2.4.20/arch/i386/kernel/microcode.c 2002-08-02 20:39:42.000000000 -0400
508 +++ linux/arch/i386/kernel/microcode.c 2003-04-11 17:03:05.323060208 -0400
509 @@ -182,11 +182,14 @@
510 int i, error = 0, err;
514 if (smp_call_function(do_update_one, NULL, 1, 1) != 0) {
515 printk(KERN_ERR "microcode: IPI timeout, giving up\n");
522 for (i=0; i<smp_num_cpus; i++) {
523 err = update_req[i].err;
524 diff -urN linux-2.4.20/arch/i386/kernel/msr.c linux/arch/i386/kernel/msr.c
525 --- linux-2.4.20/arch/i386/kernel/msr.c 2001-10-11 12:04:57.000000000 -0400
526 +++ linux/arch/i386/kernel/msr.c 2003-04-11 17:03:05.359054736 -0400
529 struct msr_command cmd;
532 if ( cpu == smp_processor_id() ) {
533 - return wrmsr_eio(reg, eax, edx);
534 + cmd.err = wrmsr_eio(reg, eax, edx);
538 @@ -123,16 +124,19 @@
541 smp_call_function(msr_smp_wrmsr, &cmd, 1, 1);
549 static inline int do_rdmsr(int cpu, u32 reg, u32 *eax, u32 *edx)
551 struct msr_command cmd;
554 if ( cpu == smp_processor_id() ) {
555 - return rdmsr_eio(reg, eax, edx);
556 + cmd.err = rdmsr_eio(reg, eax, edx);
572 #else /* ! CONFIG_SMP */
573 diff -urN linux-2.4.20/arch/i386/kernel/mtrr.c linux/arch/i386/kernel/mtrr.c
574 --- linux-2.4.20/arch/i386/kernel/mtrr.c 2002-08-02 20:39:42.000000000 -0400
575 +++ linux/arch/i386/kernel/mtrr.c 2003-04-11 17:03:05.397048960 -0400
576 @@ -1057,6 +1057,9 @@
577 wait_barrier_execute = TRUE;
578 wait_barrier_cache_enable = TRUE;
579 atomic_set (&undone_count, smp_num_cpus - 1);
583 /* Start the ball rolling on other CPUs */
584 if (smp_call_function (ipi_handler, &data, 1, 0) != 0)
585 panic ("mtrr: timed out waiting for other CPUs\n");
586 @@ -1082,6 +1085,9 @@
587 then enable the local cache and return */
588 wait_barrier_cache_enable = FALSE;
589 set_mtrr_done (&ctxt);
593 } /* End Function set_mtrr_smp */
596 diff -urN linux-2.4.20/arch/i386/kernel/smp.c linux/arch/i386/kernel/smp.c
597 --- linux-2.4.20/arch/i386/kernel/smp.c 2002-11-28 18:53:09.000000000 -0500
598 +++ linux/arch/i386/kernel/smp.c 2003-04-11 17:03:05.435043184 -0400
599 @@ -357,10 +357,14 @@
601 asmlinkage void smp_invalidate_interrupt (void)
603 - unsigned long cpu = smp_processor_id();
608 + cpu = smp_processor_id();
610 if (!test_bit(cpu, &flush_cpumask))
614 * This was a BUG() but until someone can quote me the
615 * line from the intel manual that guarantees an IPI to
619 clear_bit(cpu, &flush_cpumask);
624 static void flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
625 @@ -430,17 +436,22 @@
626 void flush_tlb_current_task(void)
628 struct mm_struct *mm = current->mm;
629 - unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
630 + unsigned long cpu_mask;
633 + cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
636 flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
640 void flush_tlb_mm (struct mm_struct * mm)
642 - unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
643 + unsigned long cpu_mask;
646 + cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
647 if (current->active_mm == mm) {
650 @@ -449,13 +460,16 @@
653 flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
657 void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
659 struct mm_struct *mm = vma->vm_mm;
660 - unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
661 + unsigned long cpu_mask;
664 + cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
665 if (current->active_mm == mm) {
671 flush_tlb_others(cpu_mask, mm, va);
675 static inline void do_flush_tlb_all_local(void)
678 void flush_tlb_all(void)
681 smp_call_function (flush_tlb_all_ipi,0,1,1);
683 do_flush_tlb_all_local();
689 static void stop_this_cpu (void * dummy)
693 + * Remove this CPU: assumes preemption is disabled
695 clear_bit(smp_processor_id(), &cpu_online_map);
697 diff -urN linux-2.4.20/arch/i386/kernel/traps.c linux/arch/i386/kernel/traps.c
698 --- linux-2.4.20/arch/i386/kernel/traps.c 2002-11-28 18:53:09.000000000 -0500
699 +++ linux/arch/i386/kernel/traps.c 2003-04-11 17:02:55.401568504 -0400
702 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
703 * Don't touch unless you *really* know how it works.
705 + * Must be called with kernel preemption disabled.
707 asmlinkage void math_state_restore(struct pt_regs regs)
709 diff -urN linux-2.4.20/arch/i386/lib/dec_and_lock.c linux/arch/i386/lib/dec_and_lock.c
710 --- linux-2.4.20/arch/i386/lib/dec_and_lock.c 2000-07-07 21:20:16.000000000 -0400
711 +++ linux/arch/i386/lib/dec_and_lock.c 2003-04-11 17:02:55.401568504 -0400
715 #include <linux/spinlock.h>
716 +#include <linux/sched.h>
717 #include <asm/atomic.h>
719 int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
720 diff -urN linux-2.4.20/arch/i386/mm/init.c linux/arch/i386/mm/init.c
721 --- linux-2.4.20/arch/i386/mm/init.c 2002-11-28 18:53:09.000000000 -0500
722 +++ linux/arch/i386/mm/init.c 2003-04-11 17:03:05.471037712 -0400
724 int do_check_pgt_cache(int low, int high)
728 if(pgtable_cache_size > high) {
733 } while(pgtable_cache_size > low);
739 diff -urN linux-2.4.20/arch/mips/config-shared.in linux/arch/mips/config-shared.in
740 --- linux-2.4.20/arch/mips/config-shared.in 2002-11-28 18:53:09.000000000 -0500
741 +++ linux/arch/mips/config-shared.in 2003-04-11 17:02:55.403568200 -0400
743 define_bool CONFIG_HOTPLUG_PCI n
746 +dep_bool 'Preemptible Kernel' CONFIG_PREEMPT $CONFIG_NEW_IRQ
747 bool 'System V IPC' CONFIG_SYSVIPC
748 bool 'BSD Process Accounting' CONFIG_BSD_PROCESS_ACCT
749 bool 'Sysctl support' CONFIG_SYSCTL
750 diff -urN linux-2.4.20/arch/mips/kernel/i8259.c linux/arch/mips/kernel/i8259.c
751 --- linux-2.4.20/arch/mips/kernel/i8259.c 2002-11-28 18:53:10.000000000 -0500
752 +++ linux/arch/mips/kernel/i8259.c 2003-04-11 17:02:55.475557256 -0400
754 * Copyright (C) 1992 Linus Torvalds
755 * Copyright (C) 1994 - 2000 Ralf Baechle
757 +#include <linux/sched.h>
758 #include <linux/delay.h>
759 #include <linux/init.h>
760 #include <linux/ioport.h>
761 diff -urN linux-2.4.20/arch/mips/kernel/irq.c linux/arch/mips/kernel/irq.c
762 --- linux-2.4.20/arch/mips/kernel/irq.c 2002-11-28 18:53:10.000000000 -0500
763 +++ linux/arch/mips/kernel/irq.c 2003-04-11 17:02:55.514551328 -0400
765 * Copyright (C) 1992 Linus Torvalds
766 * Copyright (C) 1994 - 2000 Ralf Baechle
769 +#include <linux/sched.h>
770 #include <linux/config.h>
771 #include <linux/kernel.h>
772 #include <linux/delay.h>
774 #include <linux/slab.h>
775 #include <linux/mm.h>
776 #include <linux/random.h>
777 -#include <linux/sched.h>
778 +#include <linux/spinlock.h>
779 +#include <linux/ptrace.h>
781 #include <asm/atomic.h>
782 #include <asm/system.h>
783 #include <asm/uaccess.h>
784 +#include <asm/debug.h>
787 * Controller mappings for all interrupt sources:
789 struct irqaction * action;
794 kstat.irqs[cpu][irq]++;
795 spin_lock(&desc->lock);
796 desc->handler->ack(irq);
799 if (softirq_pending(cpu))
802 +#if defined(CONFIG_PREEMPT)
803 + while (--current->preempt_count == 0) {
804 + db_assert(intr_off());
805 + db_assert(!in_interrupt());
807 + if (current->need_resched == 0) {
811 + current->preempt_count ++;
813 + if (user_mode(regs)) {
816 + preempt_schedule();
825 diff -urN linux-2.4.20/arch/mips/mm/extable.c linux/arch/mips/mm/extable.c
826 --- linux-2.4.20/arch/mips/mm/extable.c 2002-11-28 18:53:10.000000000 -0500
827 +++ linux/arch/mips/mm/extable.c 2003-04-11 17:02:55.515551176 -0400
830 #include <linux/config.h>
831 #include <linux/module.h>
832 +#include <linux/sched.h>
833 #include <linux/spinlock.h>
834 #include <asm/uaccess.h>
836 diff -urN linux-2.4.20/arch/ppc/config.in linux/arch/ppc/config.in
837 --- linux-2.4.20/arch/ppc/config.in 2002-11-28 18:53:11.000000000 -0500
838 +++ linux/arch/ppc/config.in 2003-04-11 17:02:55.516551024 -0400
840 bool ' Distribute interrupts on all CPUs by default' CONFIG_IRQ_ALL_CPUS
843 +bool 'Preemptible kernel support' CONFIG_PREEMPT
845 if [ "$CONFIG_6xx" = "y" -a "$CONFIG_8260" = "n" ];then
846 bool 'AltiVec Support' CONFIG_ALTIVEC
847 bool 'Thermal Management Support' CONFIG_TAU
848 diff -urN linux-2.4.20/arch/ppc/kernel/entry.S linux/arch/ppc/kernel/entry.S
849 --- linux-2.4.20/arch/ppc/kernel/entry.S 2002-11-28 18:53:11.000000000 -0500
850 +++ linux/arch/ppc/kernel/entry.S 2003-04-11 17:03:05.473037408 -0400
855 +#ifdef CONFIG_PREEMPT
856 + lwz r3,PREEMPT_COUNT(r2)
858 + bge ret_from_except
862 + lwz r5,NEED_RESCHED(r2)
864 + beq ret_from_except
866 + ori r3,r3,irq_stat@l
869 + rlwinm r5,r5,5,0,26
876 + bne ret_from_except
877 + lwz r3,PREEMPT_COUNT(r2)
879 + stw r3,PREEMPT_COUNT(r2)
884 + bl preempt_schedule
886 + rlwinm r0,r0,0,17,15
889 + lwz r3,PREEMPT_COUNT(r2)
891 + stw r3,PREEMPT_COUNT(r2)
893 + b ret_from_intercept
894 +#endif /* CONFIG_PREEMPT */
895 .globl ret_from_except
897 lwz r3,_MSR(r1) /* Returning to user mode? */
898 diff -urN linux-2.4.20/arch/ppc/kernel/irq.c linux/arch/ppc/kernel/irq.c
899 --- linux-2.4.20/arch/ppc/kernel/irq.c 2002-11-28 18:53:11.000000000 -0500
900 +++ linux/arch/ppc/kernel/irq.c 2003-04-11 17:03:54.067649904 -0400
902 return 1; /* lets ret_from_int know we can do checks */
905 +#ifdef CONFIG_PREEMPT
907 +preempt_intercept(struct pt_regs *regs)
913 + switch(regs->trap) {
915 + ret = do_IRQ(regs);
922 + ret = timer_interrupt(regs);
931 +#endif /* CONFIG_PREEMPT */
933 unsigned long probe_irq_on (void)
936 @@ -652,11 +680,13 @@
942 /* don't worry about the lock race Linus found
943 * on intel here. -- Cort
946 + preempt_enable_no_resched();
947 if (atomic_read(&global_irq_count))
951 global_irq_holder = cpu;
954 +#define EFLAGS_IF_SHIFT 15
957 * A global "cli()" while in an interrupt context
958 * turns into just a local cli(). Interrupts
963 - if (flags & (1 << 15)) {
964 - int cpu = smp_processor_id();
965 + if (flags & (1 << EFLAGS_IF_SHIFT)) {
968 + cpu = smp_processor_id();
969 if (!local_irq_count(cpu))
972 @@ -759,11 +792,14 @@
974 void __global_sti(void)
976 - int cpu = smp_processor_id();
980 + cpu = smp_processor_id();
981 if (!local_irq_count(cpu))
982 release_irqlock(cpu);
988 @@ -778,19 +814,23 @@
995 - local_enabled = (flags >> 15) & 1;
996 + local_enabled = (flags >> EFLAGS_IF_SHIFT) & 1;
997 /* default to local */
998 retval = 2 + local_enabled;
1000 /* check for global flags if we're not in an interrupt */
1001 - if (!local_irq_count(smp_processor_id())) {
1002 + preempt_disable();
1003 + cpu = smp_processor_id();
1004 + if (!local_irq_count(cpu)) {
1007 - if (global_irq_holder == (unsigned char) smp_processor_id())
1008 + if (global_irq_holder == cpu)
1015 diff -urN linux-2.4.20/arch/ppc/kernel/mk_defs.c linux/arch/ppc/kernel/mk_defs.c
1016 --- linux-2.4.20/arch/ppc/kernel/mk_defs.c 2001-08-28 09:58:33.000000000 -0400
1017 +++ linux/arch/ppc/kernel/mk_defs.c 2003-04-11 17:02:55.598538560 -0400
1019 DEFINE(SIGPENDING, offsetof(struct task_struct, sigpending));
1020 DEFINE(THREAD, offsetof(struct task_struct, thread));
1021 DEFINE(MM, offsetof(struct task_struct, mm));
1022 +#ifdef CONFIG_PREEMPT
1023 + DEFINE(PREEMPT_COUNT, offsetof(struct task_struct, preempt_count));
1025 DEFINE(ACTIVE_MM, offsetof(struct task_struct, active_mm));
1026 DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
1027 DEFINE(KSP, offsetof(struct thread_struct, ksp));
1028 diff -urN linux-2.4.20/arch/ppc/kernel/open_pic.c linux/arch/ppc/kernel/open_pic.c
1029 --- linux-2.4.20/arch/ppc/kernel/open_pic.c 2002-11-28 18:53:11.000000000 -0500
1030 +++ linux/arch/ppc/kernel/open_pic.c 2003-04-11 17:03:05.592019320 -0400
1031 @@ -567,19 +567,24 @@
1032 void __init do_openpic_setup_cpu(void)
1035 - u32 msk = 1 << smp_hw_index[smp_processor_id()];
1036 +#ifdef CONFIG_IRQ_ALL_CPUS
1038 +#endif /* CONFIG_IRQ_ALL_CPUS */
1040 spin_lock(&openpic_setup_lock);
1042 #ifdef CONFIG_IRQ_ALL_CPUS
1043 + msk = 1 << smp_hw_index[smp_processor_id()];
1045 /* let the openpic know we want intrs. default affinity
1046 * is 0xffffffff until changed via /proc
1047 * That's how it's done on x86. If we want it differently, then
1048 * we should make sure we also change the default values of irq_affinity
1051 - for (i = 0; i < NumSources; i++)
1052 + for (i = 0; i < NumSources; i++) {
1053 openpic_mapirq(i, msk, ~0U);
1055 #endif /* CONFIG_IRQ_ALL_CPUS */
1056 openpic_set_priority(0);
1058 diff -urN linux-2.4.20/arch/ppc/kernel/setup.c linux/arch/ppc/kernel/setup.c
1059 --- linux-2.4.20/arch/ppc/kernel/setup.c 2002-11-28 18:53:11.000000000 -0500
1060 +++ linux/arch/ppc/kernel/setup.c 2003-04-11 17:02:55.635532936 -0400
1061 @@ -498,6 +498,20 @@
1062 strcpy(cmd_line, CONFIG_CMDLINE);
1063 #endif /* CONFIG_CMDLINE */
1065 +#ifdef CONFIG_PREEMPT
1066 + /* Override the irq routines for external & timer interrupts here,
1067 + * as the MMU has only been minimally setup at this point and
1068 + * there are no protections on page zero.
1071 + extern int preempt_intercept(struct pt_regs *);
1073 + do_IRQ_intercept = (unsigned long) &preempt_intercept;
1074 + timer_interrupt_intercept = (unsigned long) &preempt_intercept;
1077 +#endif /* CONFIG_PREEMPT */
1079 platform_init(r3, r4, r5, r6, r7);
1081 if (ppc_md.progress)
1082 diff -urN linux-2.4.20/arch/ppc/kernel/temp.c linux/arch/ppc/kernel/temp.c
1083 --- linux-2.4.20/arch/ppc/kernel/temp.c 2001-08-28 09:58:33.000000000 -0400
1084 +++ linux/arch/ppc/kernel/temp.c 2003-04-11 17:03:05.593019168 -0400
1087 static void tau_timeout(void * info)
1089 - unsigned long cpu = smp_processor_id();
1090 + unsigned long cpu;
1091 unsigned long flags;
1095 /* disabling interrupts *should* be okay */
1096 save_flags(flags); cli();
1098 + cpu = smp_processor_id();
1100 #ifndef CONFIG_TAU_INT
1103 @@ -191,13 +193,15 @@
1105 static void tau_timeout_smp(unsigned long unused)
1108 /* schedule ourselves to be run again */
1109 mod_timer(&tau_timer, jiffies + shrink_timer) ;
1111 + preempt_disable();
1113 smp_call_function(tau_timeout, NULL, 1, 0);
1120 diff -urN linux-2.4.20/arch/ppc/lib/dec_and_lock.c linux/arch/ppc/lib/dec_and_lock.c
1121 --- linux-2.4.20/arch/ppc/lib/dec_and_lock.c 2001-11-16 13:10:08.000000000 -0500
1122 +++ linux/arch/ppc/lib/dec_and_lock.c 2003-04-11 17:02:55.636532784 -0400
1124 #include <linux/module.h>
1125 +#include <linux/sched.h>
1126 #include <linux/spinlock.h>
1127 #include <asm/atomic.h>
1128 #include <asm/system.h>
1129 diff -urN linux-2.4.20/arch/ppc/mm/tlb.c linux/arch/ppc/mm/tlb.c
1130 --- linux-2.4.20/arch/ppc/mm/tlb.c 2001-08-28 09:58:33.000000000 -0400
1131 +++ linux/arch/ppc/mm/tlb.c 2003-04-11 17:03:05.630013544 -0400
1133 * we can and should dispense with flush_tlb_all().
1137 + preempt_disable();
1138 local_flush_tlb_range(&init_mm, TASK_SIZE, ~0UL);
1141 smp_send_tlb_invalidate(0);
1142 #endif /* CONFIG_SMP */
1149 local_flush_tlb_mm(struct mm_struct *mm)
1151 + preempt_disable();
1160 smp_send_tlb_invalidate(0);
1166 @@ -100,8 +106,10 @@
1170 + preempt_disable();
1176 mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
1179 smp_send_tlb_invalidate(0);
1185 @@ -130,13 +139,17 @@
1186 unsigned long pmd_end;
1187 unsigned int ctx = mm->context;
1189 + preempt_disable();
1197 + if (start >= end) {
1201 pmd = pmd_offset(pgd_offset(mm, start), start);
1203 pmd_end = (start + PGDIR_SIZE) & PGDIR_MASK;
1206 smp_send_tlb_invalidate(0);
1210 diff -urN linux-2.4.20/arch/sh/config.in linux/arch/sh/config.in
1211 --- linux-2.4.20/arch/sh/config.in 2002-11-28 18:53:11.000000000 -0500
1212 +++ linux/arch/sh/config.in 2003-04-11 17:02:55.637532632 -0400
1214 hex 'Physical memory start address' CONFIG_MEMORY_START 08000000
1215 hex 'Physical memory size' CONFIG_MEMORY_SIZE 00400000
1217 +bool 'Preemptible Kernel' CONFIG_PREEMPT
1220 if [ "$CONFIG_SH_HP690" = "y" ]; then
1221 diff -urN linux-2.4.20/arch/sh/kernel/entry.S linux/arch/sh/kernel/entry.S
1222 --- linux-2.4.20/arch/sh/kernel/entry.S 2002-08-02 20:39:43.000000000 -0400
1223 +++ linux/arch/sh/kernel/entry.S 2003-04-11 17:02:55.709521688 -0400
1226 * These are offsets into the task-struct.
1236 + * These offsets are into irq_stat.
1237 + * (Find irq_cpustat_t in asm-sh/hardirq.h)
1239 +local_irq_count = 8
1240 +local_bh_count = 12
1242 PT_TRACESYS = 0x00000002
1243 PF_USEDFPU = 0x00100000
1245 mov.l __INV_IMASK, r11; \
1248 - stc k_g_imask, r11; \
1249 + stc k_g_imask, r11; \
1254 mov.l @(tsk_ptrace,r0), r0 ! Is current PTRACE_SYSCALL'd?
1255 mov #PT_TRACESYS, r1
1257 - bt ret_from_syscall
1258 - bra syscall_ret_trace
1259 + bf syscall_ret_trace
1260 + bra ret_from_syscall
1265 .long syscall_ret_trace
1269 - .long 0xffffff0f ! ~(IMASK)
1273 @@ -518,7 +524,84 @@
1275 1: .long SYMBOL_NAME(schedule)
1277 +#ifdef CONFIG_PREEMPT
1279 + ! Returning from interrupt during kernel mode: check if
1280 + ! preempt_schedule should be called. If need_resched flag
1281 + ! is set, preempt_count is zero, and we're not currently
1282 + ! in an interrupt handler (local irq or bottom half) then
1283 + ! call preempt_schedule.
1285 + ! Increment preempt_count to prevent a nested interrupt
1286 + ! from reentering preempt_schedule, then decrement after
1287 + ! and drop through to regular interrupt return which will
1288 + ! jump back and check again in case such an interrupt did
1289 + ! come in (and didn't preempt due to preempt_count).
1291 + ! NOTE: because we just checked that preempt_count was
1292 + ! zero before getting to the call, can't we use immediate
1293 + ! values (1 and 0) rather than inc/dec? Also, rather than
1294 + ! drop through to ret_from_irq, we already know this thread
1295 + ! is kernel mode, can't we go direct to ret_from_kirq? In
1296 + ! fact, with proper interrupt nesting and so forth could
1297 + ! the loop simply be on the need_resched w/o checking the
1298 + ! other stuff again? Optimize later...
1302 + ! Nonzero preempt_count prevents scheduling
1304 + mov.l @(preempt_count,r1), r0
1307 + ! Zero need_resched prevents scheduling
1308 + mov.l @(need_resched,r1), r0
1311 + ! If in_interrupt(), don't schedule
1312 + mov.l __irq_stat, r1
1313 + mov.l @(local_irq_count,r1), r0
1314 + mov.l @(local_bh_count,r1), r1
1318 + ! Allow scheduling using preempt_schedule
1319 + ! Adjust preempt_count and SR as needed.
1321 + mov.l @(preempt_count,r1), r0 ! Could replace this ...
1322 + add #1, r0 ! ... and this w/mov #1?
1323 + mov.l r0, @(preempt_count,r1)
1325 + mov.l __preempt_schedule, r0
1334 + mov.l @(preempt_count,r1), r0 ! Could replace this ...
1335 + add #-1, r0 ! ... and this w/mov #0?
1336 + mov.l r0, @(preempt_count,r1)
1337 + ! Maybe should bra ret_from_kirq, or loop over need_resched?
1338 + ! For now, fall through to ret_from_irq again...
1339 +#endif /* CONFIG_PREEMPT */
1343 + mov.l @(r0,r15), r0 ! get status register
1345 + shll r0 ! kernel space?
1346 +#ifndef CONFIG_PREEMPT
1347 + bt restore_all ! Yes, it's from kernel, go back soon
1348 +#else /* CONFIG_PREEMPT */
1349 + bt ret_from_kirq ! From kernel: maybe preempt_schedule
1350 +#endif /* CONFIG_PREEMPT */
1352 + bra ret_from_syscall
1357 mov.l @(r0,r15), r0 ! get status register
1358 @@ -564,6 +647,13 @@
1359 .long SYMBOL_NAME(do_signal)
1361 .long SYMBOL_NAME(irq_stat)
1362 +#ifdef CONFIG_PREEMPT
1363 +__preempt_schedule:
1364 + .long SYMBOL_NAME(preempt_schedule)
1365 +#endif /* CONFIG_PREEMPT */
1367 + .long 0xffffff0f ! ~(IMASK)
1374 .long SYMBOL_NAME(fpu_prepare_fd)
1376 - .long SYMBOL_NAME(init_task_union)+4
1377 + .long SYMBOL_NAME(init_task_union)+flags
1381 diff -urN linux-2.4.20/arch/sh/kernel/irq.c linux/arch/sh/kernel/irq.c
1382 --- linux-2.4.20/arch/sh/kernel/irq.c 2001-09-08 15:29:09.000000000 -0400
1383 +++ linux/arch/sh/kernel/irq.c 2003-04-11 17:02:55.711521384 -0400
1384 @@ -229,6 +229,14 @@
1385 struct irqaction * action;
1386 unsigned int status;
1389 + * At this point we're now about to actually call handlers,
1390 + * and interrupts might get reenabled during them... bump
1391 + * preempt_count to prevent any preemption while the handler
1392 + * called here is pending...
1394 + preempt_disable();
1396 /* Get IRQ number */
1397 asm volatile("stc r2_bank, %0\n\t"
1399 @@ -298,8 +306,17 @@
1400 desc->handler->end(irq);
1401 spin_unlock(&desc->lock);
1404 if (softirq_pending(cpu))
1408 + * We're done with the handlers, interrupts should be
1409 + * currently disabled; decrement preempt_count now so
1410 + * as we return preemption may be allowed...
1412 + preempt_enable_no_resched();
1417 diff -urN linux-2.4.20/CREDITS linux/CREDITS
1418 --- linux-2.4.20/CREDITS 2002-11-28 18:53:08.000000000 -0500
1419 +++ linux/CREDITS 2003-04-11 17:02:55.789509528 -0400
1420 @@ -1001,8 +1001,8 @@
1425 D: Interrupt-driven printer driver
1426 +D: Preemptible kernel
1428 S: Mountain View, California 94040
1430 diff -urN linux-2.4.20/Documentation/Configure.help linux/Documentation/Configure.help
1431 --- linux-2.4.20/Documentation/Configure.help 2002-11-28 18:53:08.000000000 -0500
1432 +++ linux/Documentation/Configure.help 2003-04-11 17:02:55.883495240 -0400
1433 @@ -279,6 +279,17 @@
1434 If you have a system with several CPUs, you do not need to say Y
1435 here: the local APIC will be used automatically.
1439 + This option reduces the latency of the kernel when reacting to
1440 + real-time or interactive events by allowing a low priority process to
1441 + be preempted even if it is in kernel mode executing a system call.
1442 + This allows applications to run more reliably even when the system is
1445 + Say Y here if you are building a kernel for a desktop, embedded or
1446 + real-time system. Say N if you are unsure.
1448 Kernel math emulation
1449 CONFIG_MATH_EMULATION
1450 Linux can emulate a math coprocessor (used for floating point
1451 diff -urN linux-2.4.20/Documentation/preempt-locking.txt linux/Documentation/preempt-locking.txt
1452 --- linux-2.4.20/Documentation/preempt-locking.txt 1969-12-31 19:00:00.000000000 -0500
1453 +++ linux/Documentation/preempt-locking.txt 2003-04-11 17:02:55.940486576 -0400
1455 + Proper Locking Under a Preemptible Kernel:
1456 + Keeping Kernel Code Preempt-Safe
1457 + Robert Love <rml@tech9.net>
1458 + Last Updated: 22 Jan 2002
1464 +A preemptible kernel creates new locking issues. The issues are the same as
1465 +those under SMP: concurrency and reentrancy. Thankfully, the Linux preemptible
1466 +kernel model leverages existing SMP locking mechanisms. Thus, the kernel
1467 +requires explicit additional locking for very few additional situations.
1469 +This document is for all kernel hackers. Developing code in the kernel
1470 +requires protecting these situations.
1473 +RULE #1: Per-CPU data structures need explicit protection
1476 +Two similar problems arise. An example code snippet:
1478 + struct this_needs_locking tux[NR_CPUS];
1479 + tux[smp_processor_id()] = some_value;
1480 + /* task is preempted here... */
1481 + something = tux[smp_processor_id()];
1483 +First, since the data is per-CPU, it may not have explicit SMP locking, but
1484 +require it otherwise. Second, when a preempted task is finally rescheduled,
1485 +the previous value of smp_processor_id may not equal the current. You must
1486 +protect these situations by disabling preemption around them.
1489 +RULE #2: CPU state must be protected.
1492 +Under preemption, the state of the CPU must be protected. This is arch-
1493 +dependent, but includes CPU structures and state not preserved over a context
1494 +switch. For example, on x86, entering and exiting FPU mode is now a critical
1495 +section that must occur while preemption is disabled. Think what would happen
1496 +if the kernel is executing a floating-point instruction and is then preempted.
1497 +Remember, the kernel does not save FPU state except for user tasks. Therefore,
1498 +upon preemption, the FPU registers will be sold to the lowest bidder. Thus,
1499 +preemption must be disabled around such regions.
1501 +Note, some FPU functions are already explicitly preempt safe. For example,
1502 +kernel_fpu_begin and kernel_fpu_end will disable and enable preemption.
1503 +However, math_state_restore must be called with preemption disabled.
1506 +RULE #3: Lock acquire and release must be performed by same task
1509 +A lock acquired in one task must be released by the same task. This
1510 +means you can't do oddball things like acquire a lock and go off to
1511 +play while another task releases it. If you want to do something
1512 + like this, acquire and release the lock in the same code path and
1513 +have the caller wait on an event by the other task.
1519 +Data protection under preemption is achieved by disabling preemption for the
1520 +duration of the critical region.
1522 +preempt_enable() decrement the preempt counter
1523 +preempt_disable() increment the preempt counter
1524 +preempt_enable_no_resched() decrement, but do not immediately preempt
1525 +preempt_get_count() return the preempt counter
1527 +The functions are nestable. In other words, you can call preempt_disable
1528 +n-times in a code path, and preemption will not be reenabled until the n-th
1529 + call to preempt_enable. The preempt statements compile to nothing if
1530 +preemption is not enabled.
1532 +Note that you do not need to explicitly prevent preemption if you are holding
1533 +any locks or interrupts are disabled, since preemption is implicitly disabled
1538 + cpucache_t *cc; /* this is per-CPU */
1539 + preempt_disable();
1540 + cc = cc_data(searchp);
1541 + if (cc && cc->avail) {
1542 + __free_block(searchp, cc_entry(cc), cc->avail);
1548 +Notice how the preemption statements must encompass every reference to the
1549 +critical variables. Another example:
1553 + if (buf[smp_processor_id()] == -1) printf(KERN_INFO "wee!\n");
1554 + spin_lock(&buf_lock);
1557 +This code is not preempt-safe, but see how easily we can fix it by simply
1558 +moving the spin_lock up two lines.
1559 diff -urN linux-2.4.20/drivers/ieee1394/csr.c linux/drivers/ieee1394/csr.c
1560 --- linux-2.4.20/drivers/ieee1394/csr.c 2002-11-28 18:53:13.000000000 -0500
1561 +++ linux/drivers/ieee1394/csr.c 2003-04-11 17:02:55.941486424 -0400
1565 #include <linux/string.h>
1566 +#include <linux/sched.h>
1568 #include "ieee1394_types.h"
1570 diff -urN linux-2.4.20/drivers/sound/sound_core.c linux/drivers/sound/sound_core.c
1571 --- linux-2.4.20/drivers/sound/sound_core.c 2001-09-30 15:26:08.000000000 -0400
1572 +++ linux/drivers/sound/sound_core.c 2003-04-11 17:02:55.977480952 -0400
1574 #include <linux/config.h>
1575 #include <linux/module.h>
1576 #include <linux/init.h>
1577 +#include <linux/sched.h>
1578 #include <linux/slab.h>
1579 #include <linux/types.h>
1580 #include <linux/kernel.h>
1581 diff -urN linux-2.4.20/fs/adfs/map.c linux/fs/adfs/map.c
1582 --- linux-2.4.20/fs/adfs/map.c 2001-10-25 16:53:53.000000000 -0400
1583 +++ linux/fs/adfs/map.c 2003-04-11 17:02:56.014475328 -0400
1585 #include <linux/fs.h>
1586 #include <linux/adfs_fs.h>
1587 #include <linux/spinlock.h>
1588 +#include <linux/sched.h>
1592 diff -urN linux-2.4.20/fs/exec.c linux/fs/exec.c
1593 --- linux-2.4.20/fs/exec.c 2002-11-28 18:53:15.000000000 -0500
1594 +++ linux/fs/exec.c 2003-04-11 17:02:56.050469856 -0400
1596 active_mm = current->active_mm;
1598 current->active_mm = mm;
1599 - task_unlock(current);
1600 activate_mm(active_mm, mm);
1601 + task_unlock(current);
1604 if (active_mm != old_mm) BUG();
1605 diff -urN linux-2.4.20/fs/fat/cache.c linux/fs/fat/cache.c
1606 --- linux-2.4.20/fs/fat/cache.c 2001-10-12 16:48:42.000000000 -0400
1607 +++ linux/fs/fat/cache.c 2003-04-11 17:02:56.085464536 -0400
1609 #include <linux/string.h>
1610 #include <linux/stat.h>
1611 #include <linux/fat_cvf.h>
1612 +#include <linux/sched.h>
1615 # define PRINTK(x) printk x
1616 diff -urN linux-2.4.20/fs/nls/nls_base.c linux/fs/nls/nls_base.c
1617 --- linux-2.4.20/fs/nls/nls_base.c 2002-08-02 20:39:45.000000000 -0400
1618 +++ linux/fs/nls/nls_base.c 2003-04-11 17:02:56.121459064 -0400
1621 #include <linux/kmod.h>
1623 +#include <linux/sched.h>
1624 #include <linux/spinlock.h>
1626 static struct nls_table *tables;
1627 diff -urN linux-2.4.20/include/asm-arm/dma.h linux/include/asm-arm/dma.h
1628 --- linux-2.4.20/include/asm-arm/dma.h 2001-08-12 14:14:00.000000000 -0400
1629 +++ linux/include/asm-arm/dma.h 2003-04-11 17:02:56.155453896 -0400
1632 #include <linux/config.h>
1633 #include <linux/spinlock.h>
1634 +#include <linux/sched.h>
1635 #include <asm/system.h>
1636 #include <asm/memory.h>
1637 #include <asm/scatterlist.h>
1638 diff -urN linux-2.4.20/include/asm-arm/hardirq.h linux/include/asm-arm/hardirq.h
1639 --- linux-2.4.20/include/asm-arm/hardirq.h 2001-10-11 12:04:57.000000000 -0400
1640 +++ linux/include/asm-arm/hardirq.h 2003-04-11 17:02:56.156453744 -0400
1642 #define irq_exit(cpu,irq) (local_irq_count(cpu)--)
1644 #define synchronize_irq() do { } while (0)
1645 +#define release_irqlock(cpu) do { } while (0)
1648 #error SMP not supported
1649 diff -urN linux-2.4.20/include/asm-arm/pgalloc.h linux/include/asm-arm/pgalloc.h
1650 --- linux-2.4.20/include/asm-arm/pgalloc.h 2001-08-12 14:14:00.000000000 -0400
1651 +++ linux/include/asm-arm/pgalloc.h 2003-04-11 17:02:56.191448424 -0400
1656 + preempt_disable();
1657 if ((ret = pgd_quicklist) != NULL) {
1658 pgd_quicklist = (unsigned long *)__pgd_next(ret);
1660 clean_dcache_entry(ret + 1);
1661 pgtable_cache_size--;
1664 return (pgd_t *)ret;
1667 static inline void free_pgd_fast(pgd_t *pgd)
1669 + preempt_disable();
1670 __pgd_next(pgd) = (unsigned long) pgd_quicklist;
1671 pgd_quicklist = (unsigned long *) pgd;
1672 pgtable_cache_size++;
1676 static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
1680 + preempt_disable();
1681 if((ret = pte_quicklist) != NULL) {
1682 pte_quicklist = (unsigned long *)__pte_next(ret);
1684 clean_dcache_entry(ret);
1685 pgtable_cache_size--;
1688 return (pte_t *)ret;
1691 static inline void free_pte_fast(pte_t *pte)
1693 + preempt_disable();
1694 __pte_next(pte) = (unsigned long) pte_quicklist;
1695 pte_quicklist = (unsigned long *) pte;
1696 pgtable_cache_size++;
1700 #else /* CONFIG_NO_PGT_CACHE */
1701 diff -urN linux-2.4.20/include/asm-arm/smplock.h linux/include/asm-arm/smplock.h
1702 --- linux-2.4.20/include/asm-arm/smplock.h 2001-08-12 14:14:00.000000000 -0400
1703 +++ linux/include/asm-arm/smplock.h 2003-04-11 17:02:56.227442952 -0400
1706 * Default SMP lock implementation
1708 +#include <linux/config.h>
1709 #include <linux/interrupt.h>
1710 #include <linux/spinlock.h>
1712 extern spinlock_t kernel_flag;
1714 +#ifdef CONFIG_PREEMPT
1715 +#define kernel_locked() preempt_get_count()
1717 #define kernel_locked() spin_is_locked(&kernel_flag)
1721 * Release global kernel lock and global interrupt lock
1724 static inline void lock_kernel(void)
1726 +#ifdef CONFIG_PREEMPT
1727 + if (current->lock_depth == -1)
1728 + spin_lock(&kernel_flag);
1729 + ++current->lock_depth;
1731 if (!++current->lock_depth)
1732 spin_lock(&kernel_flag);
1736 static inline void unlock_kernel(void)
1737 diff -urN linux-2.4.20/include/asm-arm/softirq.h linux/include/asm-arm/softirq.h
1738 --- linux-2.4.20/include/asm-arm/softirq.h 2001-09-08 15:02:31.000000000 -0400
1739 +++ linux/include/asm-arm/softirq.h 2003-04-11 17:02:56.228442800 -0400
1741 #include <asm/hardirq.h>
1743 #define __cpu_bh_enable(cpu) \
1744 - do { barrier(); local_bh_count(cpu)--; } while (0)
1745 + do { barrier(); local_bh_count(cpu)--; preempt_enable(); } while (0)
1746 #define cpu_bh_disable(cpu) \
1747 - do { local_bh_count(cpu)++; barrier(); } while (0)
1748 + do { preempt_disable(); local_bh_count(cpu)++; barrier(); } while (0)
1750 #define local_bh_disable() cpu_bh_disable(smp_processor_id())
1751 #define __local_bh_enable() __cpu_bh_enable(smp_processor_id())
1753 #define in_softirq() (local_bh_count(smp_processor_id()) != 0)
1755 -#define local_bh_enable() \
1756 +#define _local_bh_enable() \
1758 unsigned int *ptr = &local_bh_count(smp_processor_id()); \
1759 if (!--*ptr && ptr[-2]) \
1760 __asm__("bl%? __do_softirq": : : "lr");/* out of line */\
1763 +#define local_bh_enable() do { _local_bh_enable(); preempt_enable(); } while (0)
1765 #endif /* __ASM_SOFTIRQ_H */
1766 diff -urN linux-2.4.20/include/asm-arm/system.h linux/include/asm-arm/system.h
1767 --- linux-2.4.20/include/asm-arm/system.h 2000-11-27 20:07:59.000000000 -0500
1768 +++ linux/include/asm-arm/system.h 2003-04-11 17:02:56.228442800 -0400
1770 #define local_irq_disable() __cli()
1771 #define local_irq_enable() __sti()
1773 +#define irqs_disabled() \
1775 + unsigned long cpsr_val; \
1776 + asm ("mrs %0, cpsr" : "=r" (cpsr_val)); \
1781 #error SMP not supported
1783 diff -urN linux-2.4.20/include/asm-i386/desc.h linux/include/asm-i386/desc.h
1784 --- linux-2.4.20/include/asm-i386/desc.h 2001-07-26 16:40:32.000000000 -0400
1785 +++ linux/include/asm-i386/desc.h 2003-04-11 17:03:05.667007920 -0400
1788 static inline void clear_LDT(void)
1790 - int cpu = smp_processor_id();
1792 + preempt_disable();
1793 + cpu = smp_processor_id();
1794 set_ldt_desc(cpu, &default_ldt[0], 5);
1800 diff -urN linux-2.4.20/include/asm-i386/hardirq.h linux/include/asm-i386/hardirq.h
1801 --- linux-2.4.20/include/asm-i386/hardirq.h 2001-11-22 14:46:19.000000000 -0500
1802 +++ linux/include/asm-i386/hardirq.h 2003-04-11 17:02:56.263437480 -0400
1806 * Are we in an interrupt context? Either doing bottom half
1807 - * or hardware interrupt processing?
1808 + * or hardware interrupt processing? Note the preempt check,
1809 + * this is both a bugfix and an optimization. If we are
1810 + * preemptible, we cannot be in an interrupt.
1812 -#define in_interrupt() ({ int __cpu = smp_processor_id(); \
1813 - (local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
1814 +#define in_interrupt() (preempt_is_disabled() && \
1815 + ({unsigned long __cpu = smp_processor_id(); \
1816 + (local_irq_count(__cpu) + local_bh_count(__cpu) != 0); }))
1818 -#define in_irq() (local_irq_count(smp_processor_id()) != 0)
1819 +#define in_irq() (preempt_is_disabled() && \
1820 + (local_irq_count(smp_processor_id()) != 0))
1826 #define synchronize_irq() barrier()
1828 +#define release_irqlock(cpu) do { } while (0)
1832 #include <asm/atomic.h>
1833 diff -urN linux-2.4.20/include/asm-i386/highmem.h linux/include/asm-i386/highmem.h
1834 --- linux-2.4.20/include/asm-i386/highmem.h 2002-08-02 20:39:45.000000000 -0400
1835 +++ linux/include/asm-i386/highmem.h 2003-04-11 17:02:56.297432312 -0400
1837 enum fixed_addresses idx;
1838 unsigned long vaddr;
1840 + preempt_disable();
1841 if (page < highmem_start_page)
1842 return page_address(page);
1844 @@ -109,8 +110,10 @@
1845 unsigned long vaddr = (unsigned long) kvaddr;
1846 enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
1848 - if (vaddr < FIXADDR_START) // FIXME
1849 + if (vaddr < FIXADDR_START) { // FIXME
1854 if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
1857 pte_clear(kmap_pte-idx);
1858 __flush_tlb_one(vaddr);
1864 #endif /* __KERNEL__ */
1865 diff -urN linux-2.4.20/include/asm-i386/hw_irq.h linux/include/asm-i386/hw_irq.h
1866 --- linux-2.4.20/include/asm-i386/hw_irq.h 2001-11-22 14:46:18.000000000 -0500
1867 +++ linux/include/asm-i386/hw_irq.h 2003-04-11 17:02:56.333426840 -0400
1870 #define STR(x) __STR(x)
1872 +#define GET_CURRENT \
1873 + "movl %esp, %ebx\n\t" \
1874 + "andl $-8192, %ebx\n\t"
1876 +#ifdef CONFIG_PREEMPT
1877 +#define BUMP_LOCK_COUNT \
1879 + "incl 4(%ebx)\n\t"
1881 +#define BUMP_LOCK_COUNT
1887 @@ -108,15 +120,12 @@
1889 "movl $" STR(__KERNEL_DS) ",%edx\n\t" \
1890 "movl %edx,%ds\n\t" \
1891 - "movl %edx,%es\n\t"
1892 + "movl %edx,%es\n\t" \
1895 #define IRQ_NAME2(nr) nr##_interrupt(void)
1896 #define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
1898 -#define GET_CURRENT \
1899 - "movl %esp, %ebx\n\t" \
1900 - "andl $-8192, %ebx\n\t"
1903 * SMP has a few special interrupts for IPI messages
1905 diff -urN linux-2.4.20/include/asm-i386/i387.h linux/include/asm-i386/i387.h
1906 --- linux-2.4.20/include/asm-i386/i387.h 2002-08-02 20:39:45.000000000 -0400
1907 +++ linux/include/asm-i386/i387.h 2003-04-11 17:02:56.333426840 -0400
1909 #define __ASM_I386_I387_H
1911 #include <linux/sched.h>
1912 +#include <linux/spinlock.h>
1913 #include <asm/processor.h>
1914 #include <asm/sigcontext.h>
1915 #include <asm/user.h>
1917 extern void restore_fpu( struct task_struct *tsk );
1919 extern void kernel_fpu_begin(void);
1920 -#define kernel_fpu_end() stts()
1921 +#define kernel_fpu_end() do { stts(); preempt_enable(); } while(0)
1924 #define unlazy_fpu( tsk ) do { \
1925 diff -urN linux-2.4.20/include/asm-i386/pgalloc.h linux/include/asm-i386/pgalloc.h
1926 --- linux-2.4.20/include/asm-i386/pgalloc.h 2002-08-02 20:39:45.000000000 -0400
1927 +++ linux/include/asm-i386/pgalloc.h 2003-04-11 17:02:56.334426688 -0400
1932 + preempt_disable();
1933 if ((ret = pgd_quicklist) != NULL) {
1934 pgd_quicklist = (unsigned long *)(*ret);
1936 pgtable_cache_size--;
1941 ret = (unsigned long *)get_pgd_slow();
1943 return (pgd_t *)ret;
1946 static inline void free_pgd_fast(pgd_t *pgd)
1948 + preempt_disable();
1949 *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
1950 pgd_quicklist = (unsigned long *) pgd;
1951 pgtable_cache_size++;
1955 static inline void free_pgd_slow(pgd_t *pgd)
1956 @@ -119,19 +125,23 @@
1960 + preempt_disable();
1961 if ((ret = (unsigned long *)pte_quicklist) != NULL) {
1962 pte_quicklist = (unsigned long *)(*ret);
1964 pgtable_cache_size--;
1967 return (pte_t *)ret;
1970 static inline void pte_free_fast(pte_t *pte)
1972 + preempt_disable();
1973 *(unsigned long *)pte = (unsigned long) pte_quicklist;
1974 pte_quicklist = (unsigned long *) pte;
1975 pgtable_cache_size++;
1979 static __inline__ void pte_free_slow(pte_t *pte)
1980 diff -urN linux-2.4.20/include/asm-i386/smplock.h linux/include/asm-i386/smplock.h
1981 --- linux-2.4.20/include/asm-i386/smplock.h 2002-08-02 20:39:45.000000000 -0400
1982 +++ linux/include/asm-i386/smplock.h 2003-04-11 17:02:56.369421368 -0400
1984 extern spinlock_cacheline_t kernel_flag_cacheline;
1985 #define kernel_flag kernel_flag_cacheline.lock
1988 #define kernel_locked() spin_is_locked(&kernel_flag)
1990 +#ifdef CONFIG_PREEMPT
1991 +#define kernel_locked() preempt_get_count()
1993 +#define kernel_locked() 1
1998 * Release global kernel lock and global interrupt lock
2001 static __inline__ void lock_kernel(void)
2003 +#ifdef CONFIG_PREEMPT
2004 + if (current->lock_depth == -1)
2005 + spin_lock(&kernel_flag);
2006 + ++current->lock_depth;
2009 if (!++current->lock_depth)
2010 spin_lock(&kernel_flag);
2012 :"=m" (__dummy_lock(&kernel_flag)),
2013 "=m" (current->lock_depth));
2018 static __inline__ void unlock_kernel(void)
2019 diff -urN linux-2.4.20/include/asm-i386/softirq.h linux/include/asm-i386/softirq.h
2020 --- linux-2.4.20/include/asm-i386/softirq.h 2002-08-02 20:39:45.000000000 -0400
2021 +++ linux/include/asm-i386/softirq.h 2003-04-11 17:03:05.668007768 -0400
2023 #include <asm/hardirq.h>
2025 #define __cpu_bh_enable(cpu) \
2026 - do { barrier(); local_bh_count(cpu)--; } while (0)
2027 + do { barrier(); local_bh_count(cpu)--; preempt_enable(); } while (0)
2028 #define cpu_bh_disable(cpu) \
2029 - do { local_bh_count(cpu)++; barrier(); } while (0)
2030 + do { preempt_disable(); local_bh_count(cpu)++; barrier(); } while (0)
2032 #define local_bh_disable() cpu_bh_disable(smp_processor_id())
2033 #define __local_bh_enable() __cpu_bh_enable(smp_processor_id())
2035 -#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
2036 +#define in_softirq() ( preempt_is_disabled() & \
2037 + (local_bh_count(smp_processor_id()) != 0))
2040 * NOTE: this assembly code assumes:
2042 * If you change the offsets in irq_stat then you have to
2043 * update this code as well.
2045 -#define local_bh_enable() \
2046 +#define _local_bh_enable() \
2048 unsigned int *ptr = &local_bh_count(smp_processor_id()); \
2051 /* no registers clobbered */ ); \
2054 +#define local_bh_enable() do { _local_bh_enable(); preempt_enable(); } while (0)
2056 #endif /* __ASM_SOFTIRQ_H */
2057 diff -urN linux-2.4.20/include/asm-i386/spinlock.h linux/include/asm-i386/spinlock.h
2058 --- linux-2.4.20/include/asm-i386/spinlock.h 2002-11-28 18:53:15.000000000 -0500
2059 +++ linux/include/asm-i386/spinlock.h 2003-04-11 17:02:56.406415744 -0400
2061 :"=m" (lock->lock) : : "memory"
2064 -static inline void spin_unlock(spinlock_t *lock)
2065 +static inline void _raw_spin_unlock(spinlock_t *lock)
2068 if (lock->magic != SPINLOCK_MAGIC)
2070 :"=q" (oldval), "=m" (lock->lock) \
2071 :"0" (oldval) : "memory"
2073 -static inline void spin_unlock(spinlock_t *lock)
2074 +static inline void _raw_spin_unlock(spinlock_t *lock)
2082 -static inline int spin_trylock(spinlock_t *lock)
2083 +static inline int _raw_spin_trylock(spinlock_t *lock)
2086 __asm__ __volatile__(
2091 -static inline void spin_lock(spinlock_t *lock)
2092 +static inline void _raw_spin_lock(spinlock_t *lock)
2098 /* the spinlock helpers are in arch/i386/kernel/semaphore.c */
2100 -static inline void read_lock(rwlock_t *rw)
2101 +static inline void _raw_read_lock(rwlock_t *rw)
2104 if (rw->magic != RWLOCK_MAGIC)
2106 __build_read_lock(rw, "__read_lock_failed");
2109 -static inline void write_lock(rwlock_t *rw)
2110 +static inline void _raw_write_lock(rwlock_t *rw)
2113 if (rw->magic != RWLOCK_MAGIC)
2114 @@ -197,10 +197,10 @@
2115 __build_write_lock(rw, "__write_lock_failed");
2118 -#define read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
2119 -#define write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
2120 +#define _raw_read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
2121 +#define _raw_write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
2123 -static inline int write_trylock(rwlock_t *lock)
2124 +static inline int _raw_write_trylock(rwlock_t *lock)
2126 atomic_t *count = (atomic_t *)lock;
2127 if (atomic_sub_and_test(RW_LOCK_BIAS, count))
2128 diff -urN linux-2.4.20/include/asm-i386/system.h linux/include/asm-i386/system.h
2129 --- linux-2.4.20/include/asm-i386/system.h 2002-11-28 18:53:15.000000000 -0500
2130 +++ linux/include/asm-i386/system.h 2003-04-11 17:02:56.441410424 -0400
2131 @@ -322,6 +322,13 @@
2132 /* used in the idle loop; sti takes one instruction cycle to complete */
2133 #define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
2135 +#define irqs_disabled() \
2137 + unsigned long flags; \
2138 + __save_flags(flags); \
2139 + !(flags & (1<<9)); \
2142 /* For spinlocks etc */
2143 #define local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
2144 #define local_irq_restore(x) __restore_flags(x)
2145 diff -urN linux-2.4.20/include/asm-mips/smplock.h linux/include/asm-mips/smplock.h
2146 --- linux-2.4.20/include/asm-mips/smplock.h 2002-08-02 20:39:45.000000000 -0400
2147 +++ linux/include/asm-mips/smplock.h 2003-04-11 17:02:56.476405104 -0400
2150 * Default SMP lock implementation
2152 +#include <linux/config.h>
2153 #include <linux/interrupt.h>
2154 #include <linux/spinlock.h>
2156 extern spinlock_t kernel_flag;
2159 #define kernel_locked() spin_is_locked(&kernel_flag)
2161 +#ifdef CONFIG_PREEMPT
2162 +#define kernel_locked() preempt_get_count()
2164 +#define kernel_locked() 1
2169 * Release global kernel lock and global interrupt lock
2172 extern __inline__ void lock_kernel(void)
2174 +#ifdef CONFIG_PREEMPT
2175 + if (current->lock_depth == -1)
2176 + spin_lock(&kernel_flag);
2177 + ++current->lock_depth;
2179 if (!++current->lock_depth)
2180 spin_lock(&kernel_flag);
2184 extern __inline__ void unlock_kernel(void)
2185 diff -urN linux-2.4.20/include/asm-mips/softirq.h linux/include/asm-mips/softirq.h
2186 --- linux-2.4.20/include/asm-mips/softirq.h 2002-11-28 18:53:15.000000000 -0500
2187 +++ linux/include/asm-mips/softirq.h 2003-04-11 17:02:56.512399632 -0400
2190 static inline void cpu_bh_disable(int cpu)
2192 + preempt_disable();
2193 local_bh_count(cpu)++;
2199 local_bh_count(cpu)--;
2205 cpu = smp_processor_id(); \
2206 if (!--local_bh_count(cpu) && softirq_pending(cpu)) \
2208 + preempt_enable(); \
2211 #define in_softirq() (local_bh_count(smp_processor_id()) != 0)
2212 diff -urN linux-2.4.20/include/asm-mips/system.h linux/include/asm-mips/system.h
2213 --- linux-2.4.20/include/asm-mips/system.h 2002-11-28 18:53:15.000000000 -0500
2214 +++ linux/include/asm-mips/system.h 2003-04-11 17:02:56.513399480 -0400
2215 @@ -322,4 +322,18 @@
2216 #define die_if_kernel(msg, regs) \
2217 __die_if_kernel(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)
2219 +extern __inline__ int intr_on(void)
2221 + unsigned long flags;
2222 + save_flags(flags);
2226 +extern __inline__ int intr_off(void)
2228 + return ! intr_on();
2231 +#define irqs_disabled() intr_off()
2233 #endif /* _ASM_SYSTEM_H */
2234 diff -urN linux-2.4.20/include/asm-ppc/dma.h linux/include/asm-ppc/dma.h
2235 --- linux-2.4.20/include/asm-ppc/dma.h 2001-05-21 18:02:06.000000000 -0400
2236 +++ linux/include/asm-ppc/dma.h 2003-04-11 17:02:56.550393856 -0400
2238 #include <linux/config.h>
2240 #include <linux/spinlock.h>
2241 +#include <linux/sched.h>
2242 #include <asm/system.h>
2245 diff -urN linux-2.4.20/include/asm-ppc/hardirq.h linux/include/asm-ppc/hardirq.h
2246 --- linux-2.4.20/include/asm-ppc/hardirq.h 2002-11-28 18:53:15.000000000 -0500
2247 +++ linux/include/asm-ppc/hardirq.h 2003-04-11 17:03:05.705002144 -0400
2249 * Are we in an interrupt context? Either doing bottom half
2250 * or hardware interrupt processing?
2252 -#define in_interrupt() ({ int __cpu = smp_processor_id(); \
2253 - (local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
2254 +#define in_interrupt() (preempt_is_disabled() && \
2255 + ({ unsigned long __cpu = smp_processor_id(); \
2256 + (local_irq_count(__cpu) + local_bh_count(__cpu) != 0); }))
2258 -#define in_irq() (local_irq_count(smp_processor_id()) != 0)
2259 +#define in_irq() (preempt_is_disabled() && \
2260 + (local_irq_count(smp_processor_id()) != 0))
2265 #define hardirq_exit(cpu) (local_irq_count(cpu)--)
2267 #define synchronize_irq() do { } while (0)
2268 +#define release_irqlock(cpu) do { } while (0)
2270 #else /* CONFIG_SMP */
2272 diff -urN linux-2.4.20/include/asm-ppc/highmem.h linux/include/asm-ppc/highmem.h
2273 --- linux-2.4.20/include/asm-ppc/highmem.h 2001-07-02 17:34:57.000000000 -0400
2274 +++ linux/include/asm-ppc/highmem.h 2003-04-11 17:02:56.587388232 -0400
2277 unsigned long vaddr;
2279 + preempt_disable();
2280 if (page < highmem_start_page)
2281 return page_address(page);
2283 @@ -105,8 +106,10 @@
2284 unsigned long vaddr = (unsigned long) kvaddr;
2285 unsigned int idx = type + KM_TYPE_NR*smp_processor_id();
2287 - if (vaddr < KMAP_FIX_BEGIN) // FIXME
2288 + if (vaddr < KMAP_FIX_BEGIN) { // FIXME
2293 if (vaddr != KMAP_FIX_BEGIN + idx * PAGE_SIZE)
2296 pte_clear(kmap_pte+idx);
2297 flush_tlb_page(0, vaddr);
2302 #endif /* __KERNEL__ */
2303 diff -urN linux-2.4.20/include/asm-ppc/hw_irq.h linux/include/asm-ppc/hw_irq.h
2304 --- linux-2.4.20/include/asm-ppc/hw_irq.h 2002-11-28 18:53:15.000000000 -0500
2305 +++ linux/include/asm-ppc/hw_irq.h 2003-04-11 17:02:56.623382760 -0400
2307 #define __save_flags(flags) __save_flags_ptr((unsigned long *)&flags)
2308 #define __save_and_cli(flags) ({__save_flags(flags);__cli();})
2310 +#define mfmsr() ({unsigned int rval; \
2311 + asm volatile("mfmsr %0" : "=r" (rval)); rval;})
2312 +#define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v))
2314 +#define irqs_disabled() ((mfmsr() & MSR_EE) == 0)
2316 extern void do_lost_interrupts(unsigned long);
2318 #define mask_irq(irq) ({if (irq_desc[irq].handler && irq_desc[irq].handler->disable) irq_desc[irq].handler->disable(irq);})
2319 diff -urN linux-2.4.20/include/asm-ppc/mmu_context.h linux/include/asm-ppc/mmu_context.h
2320 --- linux-2.4.20/include/asm-ppc/mmu_context.h 2001-10-02 12:12:44.000000000 -0400
2321 +++ linux/include/asm-ppc/mmu_context.h 2003-04-11 17:02:56.624382608 -0400
2322 @@ -158,6 +158,10 @@
2323 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
2324 struct task_struct *tsk, int cpu)
2326 +#ifdef CONFIG_PREEMPT
2327 + if (preempt_get_count() == 0)
2330 tsk->thread.pgdir = next->pgd;
2331 get_mmu_context(next);
2332 set_context(next->context, next->pgd);
2333 diff -urN linux-2.4.20/include/asm-ppc/pgalloc.h linux/include/asm-ppc/pgalloc.h
2334 --- linux-2.4.20/include/asm-ppc/pgalloc.h 2001-05-21 18:02:06.000000000 -0400
2335 +++ linux/include/asm-ppc/pgalloc.h 2003-04-11 17:02:56.662376832 -0400
2340 + preempt_disable();
2341 if ((ret = pgd_quicklist) != NULL) {
2342 pgd_quicklist = (unsigned long *)(*ret);
2344 pgtable_cache_size--;
2348 ret = (unsigned long *)get_pgd_slow();
2349 return (pgd_t *)ret;
2352 extern __inline__ void free_pgd_fast(pgd_t *pgd)
2354 + preempt_disable();
2355 *(unsigned long **)pgd = pgd_quicklist;
2356 pgd_quicklist = (unsigned long *) pgd;
2357 pgtable_cache_size++;
2361 extern __inline__ void free_pgd_slow(pgd_t *pgd)
2362 @@ -120,19 +125,23 @@
2366 + preempt_disable();
2367 if ((ret = pte_quicklist) != NULL) {
2368 pte_quicklist = (unsigned long *)(*ret);
2370 pgtable_cache_size--;
2373 return (pte_t *)ret;
2376 extern __inline__ void pte_free_fast(pte_t *pte)
2378 + preempt_disable();
2379 *(unsigned long **)pte = pte_quicklist;
2380 pte_quicklist = (unsigned long *) pte;
2381 pgtable_cache_size++;
2385 extern __inline__ void pte_free_slow(pte_t *pte)
2386 diff -urN linux-2.4.20/include/asm-ppc/smplock.h linux/include/asm-ppc/smplock.h
2387 --- linux-2.4.20/include/asm-ppc/smplock.h 2001-11-02 20:43:54.000000000 -0500
2388 +++ linux/include/asm-ppc/smplock.h 2003-04-11 17:02:56.698371360 -0400
2391 extern spinlock_t kernel_flag;
2394 #define kernel_locked() spin_is_locked(&kernel_flag)
2396 +#ifdef CONFIG_PREEMPT
2397 +#define kernel_locked() preempt_get_count()
2399 +#define kernel_locked() 1
2404 * Release global kernel lock and global interrupt lock
2407 static __inline__ void lock_kernel(void)
2409 +#ifdef CONFIG_PREEMPT
2410 + if (current->lock_depth == -1)
2411 + spin_lock(&kernel_flag);
2412 + ++current->lock_depth;
2414 if (!++current->lock_depth)
2415 spin_lock(&kernel_flag);
2419 static __inline__ void unlock_kernel(void)
2420 diff -urN linux-2.4.20/include/asm-ppc/softirq.h linux/include/asm-ppc/softirq.h
2421 --- linux-2.4.20/include/asm-ppc/softirq.h 2001-09-08 15:02:31.000000000 -0400
2422 +++ linux/include/asm-ppc/softirq.h 2003-04-11 17:03:05.741996520 -0400
2425 #define local_bh_disable() \
2427 + preempt_disable(); \
2428 local_bh_count(smp_processor_id())++; \
2434 local_bh_count(smp_processor_id())--; \
2435 + preempt_enable(); \
2438 -#define local_bh_enable() \
2439 +#define _local_bh_enable() \
2441 if (!--local_bh_count(smp_processor_id()) \
2442 && softirq_pending(smp_processor_id())) { \
2447 -#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
2448 +#define local_bh_enable() \
2450 + _local_bh_enable(); \
2451 + preempt_enable(); \
2454 +#define in_softirq() (preempt_is_disabled() && \
2455 + (local_bh_count(smp_processor_id()) != 0))
2457 #endif /* __ASM_SOFTIRQ_H */
2458 #endif /* __KERNEL__ */
2459 diff -urN linux-2.4.20/include/asm-sh/hardirq.h linux/include/asm-sh/hardirq.h
2460 --- linux-2.4.20/include/asm-sh/hardirq.h 2001-09-08 15:29:09.000000000 -0400
2461 +++ linux/include/asm-sh/hardirq.h 2003-04-11 17:02:56.737365432 -0400
2464 #define synchronize_irq() barrier()
2466 +#define release_irqlock(cpu) do { } while (0)
2470 #error Super-H SMP is not available
2471 diff -urN linux-2.4.20/include/asm-sh/smplock.h linux/include/asm-sh/smplock.h
2472 --- linux-2.4.20/include/asm-sh/smplock.h 2001-09-08 15:29:09.000000000 -0400
2473 +++ linux/include/asm-sh/smplock.h 2003-04-11 17:02:56.738365280 -0400
2476 #include <linux/config.h>
2480 +#if !defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT)
2482 + * Should never happen, since linux/smp_lock.h catches this case;
2483 + * but in case this file is included directly with neither SMP nor
2484 + * PREEMPT configuration, provide same dummys as linux/smp_lock.h
2486 #define lock_kernel() do { } while(0)
2487 #define unlock_kernel() do { } while(0)
2488 -#define release_kernel_lock(task, cpu, depth) ((depth) = 1)
2489 -#define reacquire_kernel_lock(task, cpu, depth) do { } while(0)
2490 +#define release_kernel_lock(task, cpu) do { } while(0)
2491 +#define reacquire_kernel_lock(task) do { } while(0)
2492 +#define kernel_locked() 1
2494 +#else /* CONFIG_SMP || CONFIG_PREEMPT */
2497 +#error "We do not support SMP on SH yet"
2500 + * Default SMP lock implementation (i.e. the i386 version)
2503 +#include <linux/interrupt.h>
2504 +#include <linux/spinlock.h>
2506 +extern spinlock_t kernel_flag;
2507 +#define lock_bkl() spin_lock(&kernel_flag)
2508 +#define unlock_bkl() spin_unlock(&kernel_flag)
2511 +#define kernel_locked() spin_is_locked(&kernel_flag)
2512 +#elif CONFIG_PREEMPT
2513 +#define kernel_locked() preempt_get_count()
2514 +#else /* neither */
2515 +#define kernel_locked() 1
2519 + * Release global kernel lock and global interrupt lock
2521 +#define release_kernel_lock(task, cpu) \
2523 + if (task->lock_depth >= 0) \
2524 + spin_unlock(&kernel_flag); \
2525 + release_irqlock(cpu); \
2530 + * Re-acquire the kernel lock
2532 +#define reacquire_kernel_lock(task) \
2534 + if (task->lock_depth >= 0) \
2535 + spin_lock(&kernel_flag); \
2539 + * Getting the big kernel lock.
2541 + * This cannot happen asynchronously,
2542 + * so we only need to worry about other
2545 +static __inline__ void lock_kernel(void)
2547 +#ifdef CONFIG_PREEMPT
2548 + if (current->lock_depth == -1)
2549 + spin_lock(&kernel_flag);
2550 + ++current->lock_depth;
2552 -#error "We do not support SMP on SH"
2553 -#endif /* CONFIG_SMP */
2554 + if (!++current->lock_depth)
2555 + spin_lock(&kernel_flag);
2559 +static __inline__ void unlock_kernel(void)
2561 + if (current->lock_depth < 0)
2563 + if (--current->lock_depth < 0)
2564 + spin_unlock(&kernel_flag);
2566 +#endif /* CONFIG_SMP || CONFIG_PREEMPT */
2568 #endif /* __ASM_SH_SMPLOCK_H */
2569 diff -urN linux-2.4.20/include/asm-sh/softirq.h linux/include/asm-sh/softirq.h
2570 --- linux-2.4.20/include/asm-sh/softirq.h 2001-09-08 15:29:09.000000000 -0400
2571 +++ linux/include/asm-sh/softirq.h 2003-04-11 17:02:56.775359656 -0400
2574 #define local_bh_disable() \
2576 + preempt_disable(); \
2577 local_bh_count(smp_processor_id())++; \
2583 local_bh_count(smp_processor_id())--; \
2584 + preempt_enable(); \
2587 #define local_bh_enable() \
2589 && softirq_pending(smp_processor_id())) { \
2592 + preempt_enable(); \
2595 #define in_softirq() (local_bh_count(smp_processor_id()) != 0)
2596 diff -urN linux-2.4.20/include/asm-sh/system.h linux/include/asm-sh/system.h
2597 --- linux-2.4.20/include/asm-sh/system.h 2001-09-08 15:29:09.000000000 -0400
2598 +++ linux/include/asm-sh/system.h 2003-04-11 17:02:56.776359504 -0400
2599 @@ -285,4 +285,17 @@
2600 void disable_hlt(void);
2601 void enable_hlt(void);
2604 + * irqs_disabled - are interrupts disabled?
2606 +static inline int irqs_disabled(void)
2608 + unsigned long flags;
2610 + __save_flags(flags);
2611 + if (flags & 0x000000f0)
2617 diff -urN linux-2.4.20/include/linux/brlock.h linux/include/linux/brlock.h
2618 --- linux-2.4.20/include/linux/brlock.h 2002-11-28 18:53:15.000000000 -0500
2619 +++ linux/include/linux/brlock.h 2003-04-11 17:02:56.812354032 -0400
2620 @@ -171,11 +171,11 @@
2624 -# define br_read_lock(idx) ((void)(idx))
2625 -# define br_read_unlock(idx) ((void)(idx))
2626 -# define br_write_lock(idx) ((void)(idx))
2627 -# define br_write_unlock(idx) ((void)(idx))
2629 +# define br_read_lock(idx) ({ (void)(idx); preempt_disable(); })
2630 +# define br_read_unlock(idx) ({ (void)(idx); preempt_enable(); })
2631 +# define br_write_lock(idx) ({ (void)(idx); preempt_disable(); })
2632 +# define br_write_unlock(idx) ({ (void)(idx); preempt_enable(); })
2633 +#endif /* CONFIG_SMP */
2636 * Now enumerate all of the possible sw/hw IRQ protected
2637 diff -urN linux-2.4.20/include/linux/dcache.h linux/include/linux/dcache.h
2638 --- linux-2.4.20/include/linux/dcache.h 2002-11-28 18:53:15.000000000 -0500
2639 +++ linux/include/linux/dcache.h 2003-04-11 17:02:56.849348408 -0400
2640 @@ -127,31 +127,6 @@
2642 extern spinlock_t dcache_lock;
2645 - * d_drop - drop a dentry
2646 - * @dentry: dentry to drop
2648 - * d_drop() unhashes the entry from the parent
2649 - * dentry hashes, so that it won't be found through
2650 - * a VFS lookup any more. Note that this is different
2651 - * from deleting the dentry - d_delete will try to
2652 - * mark the dentry negative if possible, giving a
2653 - * successful _negative_ lookup, while d_drop will
2654 - * just make the cache lookup fail.
2656 - * d_drop() is used mainly for stuff that wants
2657 - * to invalidate a dentry for some reason (NFS
2658 - * timeouts or autofs deletes).
2661 -static __inline__ void d_drop(struct dentry * dentry)
2663 - spin_lock(&dcache_lock);
2664 - list_del(&dentry->d_hash);
2665 - INIT_LIST_HEAD(&dentry->d_hash);
2666 - spin_unlock(&dcache_lock);
2669 static __inline__ int dname_external(struct dentry *d)
2671 return d->d_name.name != d->d_iname;
2672 @@ -276,3 +251,34 @@
2673 #endif /* __KERNEL__ */
2675 #endif /* __LINUX_DCACHE_H */
2677 +#if !defined(__LINUX_DCACHE_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
2678 +#define __LINUX_DCACHE_H_INLINES
2682 + * d_drop - drop a dentry
2683 + * @dentry: dentry to drop
2685 + * d_drop() unhashes the entry from the parent
2686 + * dentry hashes, so that it won't be found through
2687 + * a VFS lookup any more. Note that this is different
2688 + * from deleting the dentry - d_delete will try to
2689 + * mark the dentry negative if possible, giving a
2690 + * successful _negative_ lookup, while d_drop will
2691 + * just make the cache lookup fail.
2693 + * d_drop() is used mainly for stuff that wants
2694 + * to invalidate a dentry for some reason (NFS
2695 + * timeouts or autofs deletes).
2698 +static __inline__ void d_drop(struct dentry * dentry)
2700 + spin_lock(&dcache_lock);
2701 + list_del(&dentry->d_hash);
2702 + INIT_LIST_HEAD(&dentry->d_hash);
2703 + spin_unlock(&dcache_lock);
2707 diff -urN linux-2.4.20/include/linux/fs_struct.h linux/include/linux/fs_struct.h
2708 --- linux-2.4.20/include/linux/fs_struct.h 2001-07-13 18:10:44.000000000 -0400
2709 +++ linux/include/linux/fs_struct.h 2003-04-11 17:02:56.852347952 -0400
2711 extern void exit_fs(struct task_struct *);
2712 extern void set_fs_altroot(void);
2714 +struct fs_struct *copy_fs_struct(struct fs_struct *old);
2715 +void put_fs_struct(struct fs_struct *fs);
2720 +#if !defined(_LINUX_FS_STRUCT_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
2721 +#define _LINUX_FS_STRUCT_H_INLINES
2724 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
2725 * It can block. Requires the big lock held.
2731 -struct fs_struct *copy_fs_struct(struct fs_struct *old);
2732 -void put_fs_struct(struct fs_struct *fs);
2736 diff -urN linux-2.4.20/include/linux/sched.h linux/include/linux/sched.h
2737 --- linux-2.4.20/include/linux/sched.h 2002-11-28 18:53:15.000000000 -0500
2738 +++ linux/include/linux/sched.h 2003-04-11 17:02:56.908339440 -0400
2740 #define TASK_UNINTERRUPTIBLE 2
2741 #define TASK_ZOMBIE 4
2742 #define TASK_STOPPED 8
2743 +#define PREEMPT_ACTIVE 0x4000000
2745 #define __set_task_state(tsk, state_value) \
2746 do { (tsk)->state = (state_value); } while (0)
2748 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
2749 extern signed long FASTCALL(schedule_timeout(signed long timeout));
2750 asmlinkage void schedule(void);
2751 +#ifdef CONFIG_PREEMPT
2752 +asmlinkage void preempt_schedule(void);
2755 extern int schedule_task(struct tq_struct *task);
2756 extern void flush_scheduled_tasks(void);
2758 * offsets of these are hardcoded elsewhere - touch with care
2760 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
2761 - unsigned long flags; /* per process flags, defined below */
2762 + int preempt_count; /* 0 => preemptable, <0 => BUG */
2764 mm_segment_t addr_limit; /* thread address space:
2765 0-0xBFFFFFFF for user-thead
2767 struct mm_struct *active_mm;
2768 struct list_head local_pages;
2769 unsigned int allocation_order, nr_local_pages;
2770 + unsigned long flags;
2773 struct linux_binfmt *binfmt;
2774 @@ -955,5 +960,10 @@
2778 +#define _TASK_STRUCT_DEFINED
2779 +#include <linux/dcache.h>
2780 +#include <linux/tqueue.h>
2781 +#include <linux/fs_struct.h>
2783 #endif /* __KERNEL__ */
2785 diff -urN linux-2.4.20/include/linux/smp_lock.h linux/include/linux/smp_lock.h
2786 --- linux-2.4.20/include/linux/smp_lock.h 2001-11-22 14:46:27.000000000 -0500
2787 +++ linux/include/linux/smp_lock.h 2003-04-11 17:02:56.944333968 -0400
2790 #include <linux/config.h>
2793 +#if !defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT)
2795 #define lock_kernel() do { } while(0)
2796 #define unlock_kernel() do { } while(0)
2797 diff -urN linux-2.4.20/include/linux/spinlock.h linux/include/linux/spinlock.h
2798 --- linux-2.4.20/include/linux/spinlock.h 2002-11-28 18:53:15.000000000 -0500
2799 +++ linux/include/linux/spinlock.h 2003-04-11 17:02:56.981328344 -0400
2801 #define __LINUX_SPINLOCK_H
2803 #include <linux/config.h>
2804 +#include <linux/compiler.h>
2807 * These are the generic versions of the spinlocks and read-write
2810 #if (DEBUG_SPINLOCKS < 1)
2812 +#ifndef CONFIG_PREEMPT
2813 #define atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
2814 #define ATOMIC_DEC_AND_LOCK
2818 * Your basic spinlocks, allowing only a single CPU anywhere
2822 #define spin_lock_init(lock) do { } while(0)
2823 -#define spin_lock(lock) (void)(lock) /* Not "unused variable". */
2824 +#define _raw_spin_lock(lock) (void)(lock) /* Not "unused variable". */
2825 #define spin_is_locked(lock) (0)
2826 -#define spin_trylock(lock) ({1; })
2827 +#define _raw_spin_trylock(lock) ({1; })
2828 #define spin_unlock_wait(lock) do { } while(0)
2829 -#define spin_unlock(lock) do { } while(0)
2830 +#define _raw_spin_unlock(lock) do { } while(0)
2832 #elif (DEBUG_SPINLOCKS < 2)
2834 @@ -144,13 +147,78 @@
2837 #define rwlock_init(lock) do { } while(0)
2838 -#define read_lock(lock) (void)(lock) /* Not "unused variable". */
2839 -#define read_unlock(lock) do { } while(0)
2840 -#define write_lock(lock) (void)(lock) /* Not "unused variable". */
2841 -#define write_unlock(lock) do { } while(0)
2842 +#define _raw_read_lock(lock) (void)(lock) /* Not "unused variable". */
2843 +#define _raw_read_unlock(lock) do { } while(0)
2844 +#define _raw_write_lock(lock) (void)(lock) /* Not "unused variable". */
2845 +#define _raw_write_unlock(lock) do { } while(0)
2849 +#ifdef CONFIG_PREEMPT
2851 +#define preempt_get_count() (current->preempt_count)
2852 +#define preempt_is_disabled() (preempt_get_count() != 0)
2854 +#define preempt_disable() \
2856 + ++current->preempt_count; \
2860 +#define preempt_enable_no_resched() \
2862 + --current->preempt_count; \
2866 +#define preempt_enable() \
2868 + --current->preempt_count; \
2870 + if (unlikely(current->preempt_count < current->need_resched)) \
2871 + preempt_schedule(); \
2874 +#define spin_lock(lock) \
2876 + preempt_disable(); \
2877 + _raw_spin_lock(lock); \
2880 +#define spin_trylock(lock) ({preempt_disable(); _raw_spin_trylock(lock) ? \
2881 + 1 : ({preempt_enable(); 0;});})
2882 +#define spin_unlock(lock) \
2884 + _raw_spin_unlock(lock); \
2885 + preempt_enable(); \
2888 +#define read_lock(lock) ({preempt_disable(); _raw_read_lock(lock);})
2889 +#define read_unlock(lock) ({_raw_read_unlock(lock); preempt_enable();})
2890 +#define write_lock(lock) ({preempt_disable(); _raw_write_lock(lock);})
2891 +#define write_unlock(lock) ({_raw_write_unlock(lock); preempt_enable();})
2892 +#define write_trylock(lock) ({preempt_disable();_raw_write_trylock(lock) ? \
2893 + 1 : ({preempt_enable(); 0;});})
2897 +#define preempt_get_count() (0)
2898 +#define preempt_is_disabled() (1)
2899 +#define preempt_disable() do { } while (0)
2900 +#define preempt_enable_no_resched() do {} while(0)
2901 +#define preempt_enable() do { } while (0)
2903 +#define spin_lock(lock) _raw_spin_lock(lock)
2904 +#define spin_trylock(lock) _raw_spin_trylock(lock)
2905 +#define spin_unlock(lock) _raw_spin_unlock(lock)
2907 +#define read_lock(lock) _raw_read_lock(lock)
2908 +#define read_unlock(lock) _raw_read_unlock(lock)
2909 +#define write_lock(lock) _raw_write_lock(lock)
2910 +#define write_unlock(lock) _raw_write_unlock(lock)
2911 +#define write_trylock(lock) _raw_write_trylock(lock)
2914 /* "lock on reference count zero" */
2915 #ifndef ATOMIC_DEC_AND_LOCK
2916 #include <asm/atomic.h>
2917 diff -urN linux-2.4.20/include/linux/tqueue.h linux/include/linux/tqueue.h
2918 --- linux-2.4.20/include/linux/tqueue.h 2001-11-22 14:46:19.000000000 -0500
2919 +++ linux/include/linux/tqueue.h 2003-04-11 17:02:56.989327128 -0400
2921 extern spinlock_t tqueue_lock;
2924 + * Call all "bottom halfs" on a given list.
2927 +extern void __run_task_queue(task_queue *list);
2929 +static inline void run_task_queue(task_queue *list)
2931 + if (TQ_ACTIVE(*list))
2932 + __run_task_queue(list);
2935 +#endif /* _LINUX_TQUEUE_H */
2937 +#if !defined(_LINUX_TQUEUE_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
2938 +#define _LINUX_TQUEUE_H_INLINES
2940 * Queue a task on a tq. Return non-zero if it was successfully
2943 @@ -109,17 +125,4 @@
2949 - * Call all "bottom halfs" on a given list.
2952 -extern void __run_task_queue(task_queue *list);
2954 -static inline void run_task_queue(task_queue *list)
2956 - if (TQ_ACTIVE(*list))
2957 - __run_task_queue(list);
2960 -#endif /* _LINUX_TQUEUE_H */
2962 diff -urN linux-2.4.20/kernel/exit.c linux/kernel/exit.c
2963 --- linux-2.4.20/kernel/exit.c 2002-11-28 18:53:15.000000000 -0500
2964 +++ linux/kernel/exit.c 2003-04-11 17:03:05.778990896 -0400
2967 /* active_mm is still 'mm' */
2968 atomic_inc(&mm->mm_count);
2969 + preempt_disable();
2970 enter_lazy_tlb(mm, current, smp_processor_id());
2976 /* more a memory barrier than a real lock */
2980 enter_lazy_tlb(mm, current, smp_processor_id());
2985 @@ -435,6 +437,11 @@
2986 tsk->flags |= PF_EXITING;
2987 del_timer_sync(&tsk->real_timer);
2989 + if (unlikely(preempt_get_count()))
2990 + printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
2991 + current->comm, current->pid,
2992 + preempt_get_count());
2995 #ifdef CONFIG_BSD_PROCESS_ACCT
2997 diff -urN linux-2.4.20/kernel/fork.c linux/kernel/fork.c
2998 --- linux-2.4.20/kernel/fork.c 2002-11-28 18:53:15.000000000 -0500
2999 +++ linux/kernel/fork.c 2003-04-11 17:02:57.063315880 -0400
3000 @@ -629,6 +629,13 @@
3001 if (p->binfmt && p->binfmt->module)
3002 __MOD_INC_USE_COUNT(p->binfmt->module);
3004 +#ifdef CONFIG_PREEMPT
3006 + * Continue with preemption disabled as part of the context
3007 + * switch, so start with preempt_count set to 1.
3009 + p->preempt_count = 1;
3013 p->state = TASK_UNINTERRUPTIBLE;
3014 diff -urN linux-2.4.20/kernel/ksyms.c linux/kernel/ksyms.c
3015 --- linux-2.4.20/kernel/ksyms.c 2002-11-28 18:53:15.000000000 -0500
3016 +++ linux/kernel/ksyms.c 2003-04-11 17:02:57.098310560 -0400
3018 EXPORT_SYMBOL(interruptible_sleep_on);
3019 EXPORT_SYMBOL(interruptible_sleep_on_timeout);
3020 EXPORT_SYMBOL(schedule);
3021 +#ifdef CONFIG_PREEMPT
3022 +EXPORT_SYMBOL(preempt_schedule);
3024 EXPORT_SYMBOL(schedule_timeout);
3025 EXPORT_SYMBOL(yield);
3026 EXPORT_SYMBOL(__cond_resched);
3027 diff -urN linux-2.4.20/kernel/sched.c linux/kernel/sched.c
3028 --- linux-2.4.20/kernel/sched.c 2002-11-28 18:53:15.000000000 -0500
3029 +++ linux/kernel/sched.c 2003-04-11 17:02:57.135304936 -0400
3032 task_release_cpu(prev);
3034 - if (prev->state == TASK_RUNNING)
3035 + if (task_on_runqueue(prev))
3042 spin_lock_irqsave(&runqueue_lock, flags);
3043 - if ((prev->state == TASK_RUNNING) && !task_has_cpu(prev))
3044 + if (task_on_runqueue(prev) && !task_has_cpu(prev))
3045 reschedule_idle(prev);
3046 spin_unlock_irqrestore(&runqueue_lock, flags);
3049 asmlinkage void schedule_tail(struct task_struct *prev)
3051 __schedule_tail(prev);
3056 @@ -551,9 +552,10 @@
3057 struct list_head *tmp;
3061 spin_lock_prefetch(&runqueue_lock);
3063 + preempt_disable();
3065 BUG_ON(!current->active_mm);
3068 @@ -581,6 +583,14 @@
3069 move_last_runqueue(prev);
3072 +#ifdef CONFIG_PREEMPT
3074 + * entering from preempt_schedule, off a kernel preemption,
3075 + * go straight to picking the next task.
3077 + if (unlikely(preempt_get_count() & PREEMPT_ACTIVE))
3078 + goto treat_like_run;
3080 switch (prev->state) {
3081 case TASK_INTERRUPTIBLE:
3082 if (signal_pending(prev)) {
3084 del_from_runqueue(prev);
3087 +#ifdef CONFIG_PREEMPT
3090 prev->need_resched = 0;
3093 @@ -699,9 +712,31 @@
3094 reacquire_kernel_lock(current);
3095 if (current->need_resched)
3096 goto need_resched_back;
3097 + preempt_enable_no_resched();
3101 +#ifdef CONFIG_PREEMPT
3103 + * this is is the entry point to schedule() from in-kernel preemption
3105 +asmlinkage void preempt_schedule(void)
3107 + if (unlikely(irqs_disabled()))
3111 + current->preempt_count += PREEMPT_ACTIVE;
3113 + current->preempt_count -= PREEMPT_ACTIVE;
3115 + /* we could miss a preemption opportunity between schedule and now */
3117 + if (unlikely(current->need_resched))
3118 + goto need_resched;
3120 +#endif /* CONFIG_PREEMPT */
3123 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just wake everything
3124 * up. If it's an exclusive wakeup (nr_exclusive == small +ve number) then we wake all the
3125 @@ -1327,6 +1362,13 @@
3126 sched_data->curr = current;
3127 sched_data->last_schedule = get_cycles();
3128 clear_bit(current->processor, &wait_init_idle);
3129 +#ifdef CONFIG_PREEMPT
3131 + * fix up the preempt_count for non-CPU0 idle threads
3133 + if (current->processor)
3134 + current->preempt_count = 0;
3138 extern void init_timervecs (void);
3139 diff -urN linux-2.4.20/kernel/softirq.c linux/kernel/softirq.c
3140 --- linux-2.4.20/kernel/softirq.c 2002-11-28 18:53:15.000000000 -0500
3141 +++ linux/kernel/softirq.c 2003-04-11 17:03:05.853979496 -0400
3144 asmlinkage void do_softirq()
3146 - int cpu = smp_processor_id();
3149 unsigned long flags;
3153 local_irq_save(flags);
3155 + cpu = smp_processor_id();
3157 pending = softirq_pending(cpu);
3160 @@ -151,10 +153,11 @@
3162 void __tasklet_schedule(struct tasklet_struct *t)
3164 - int cpu = smp_processor_id();
3166 unsigned long flags;
3168 local_irq_save(flags);
3169 + cpu = smp_processor_id();
3170 t->next = tasklet_vec[cpu].list;
3171 tasklet_vec[cpu].list = t;
3172 cpu_raise_softirq(cpu, TASKLET_SOFTIRQ);
3173 @@ -175,10 +178,11 @@
3175 static void tasklet_action(struct softirq_action *a)
3177 - int cpu = smp_processor_id();
3179 struct tasklet_struct *list;
3181 local_irq_disable();
3182 + cpu = smp_processor_id();
3183 list = tasklet_vec[cpu].list;
3184 tasklet_vec[cpu].list = NULL;
3186 @@ -209,10 +213,11 @@
3188 static void tasklet_hi_action(struct softirq_action *a)
3190 - int cpu = smp_processor_id();
3192 struct tasklet_struct *list;
3194 local_irq_disable();
3195 + cpu = smp_processor_id();
3196 list = tasklet_hi_vec[cpu].list;
3197 tasklet_hi_vec[cpu].list = NULL;
3199 diff -urN linux-2.4.20/lib/dec_and_lock.c linux/lib/dec_and_lock.c
3200 --- linux-2.4.20/lib/dec_and_lock.c 2001-10-03 12:11:26.000000000 -0400
3201 +++ linux/lib/dec_and_lock.c 2003-04-11 17:02:57.173299160 -0400
3203 #include <linux/module.h>
3204 #include <linux/spinlock.h>
3205 +#include <linux/sched.h>
3206 #include <asm/atomic.h>
3209 diff -urN linux-2.4.20/MAINTAINERS linux/MAINTAINERS
3210 --- linux-2.4.20/MAINTAINERS 2002-11-28 18:53:08.000000000 -0500
3211 +++ linux/MAINTAINERS 2003-04-11 17:02:57.244288368 -0400
3212 @@ -1310,6 +1310,14 @@
3213 M: mostrows@styx.uwaterloo.ca
3219 +L: linux-kernel@vger.kernel.org
3220 +L: kpreempt-tech@lists.sourceforge.net
3221 +W: http://tech9.net/rml/linux
3224 PROMISE DC4030 CACHING DISK CONTROLLER DRIVER
3226 M: promise@pnd-pc.demon.co.uk
3227 diff -urN linux-2.4.20/mm/slab.c linux/mm/slab.c
3228 --- linux-2.4.20/mm/slab.c 2002-11-28 18:53:15.000000000 -0500
3229 +++ linux/mm/slab.c 2003-04-11 17:03:05.946965360 -0400
3231 * constructors and destructors are called without any locking.
3232 * Several members in kmem_cache_t and slab_t never change, they
3233 * are accessed without any locking.
3234 - * The per-cpu arrays are never accessed from the wrong cpu, no locking.
3235 + * The per-cpu arrays are never accessed from the wrong cpu, no locking,
3236 + * and local interrupts are disabled so slab code is preempt-safe.
3237 * The non-constant members are protected with a per-cache irq spinlock.
3239 * Further notes from the original documentation:
3240 @@ -858,12 +859,14 @@
3242 static void smp_call_function_all_cpus(void (*func) (void *arg), void *arg)
3244 + preempt_disable();
3245 local_irq_disable();
3249 if (smp_call_function(func, arg, 1, 1))
3253 typedef struct ccupdate_struct_s
3255 diff -urN linux-2.4.20/net/core/dev.c linux/net/core/dev.c
3256 --- linux-2.4.20/net/core/dev.c 2002-11-28 18:53:15.000000000 -0500
3257 +++ linux/net/core/dev.c 2003-04-11 17:03:06.026953200 -0400
3258 @@ -1049,9 +1049,15 @@
3259 int cpu = smp_processor_id();
3261 if (dev->xmit_lock_owner != cpu) {
3263 + * The spin_lock effectivly does a preempt lock, but
3264 + * we are about to drop that...
3266 + preempt_disable();
3267 spin_unlock(&dev->queue_lock);
3268 spin_lock(&dev->xmit_lock);
3269 dev->xmit_lock_owner = cpu;
3272 if (!netif_queue_stopped(dev)) {
3274 @@ -1230,7 +1236,7 @@
3276 int netif_rx(struct sk_buff *skb)
3278 - int this_cpu = smp_processor_id();
3280 struct softnet_data *queue;
3281 unsigned long flags;
3283 @@ -1240,9 +1246,10 @@
3284 /* The code is rearranged so that the path is the most
3285 short when CPU is congested, but is still operating.
3287 - queue = &softnet_data[this_cpu];
3289 local_irq_save(flags);
3290 + this_cpu = smp_processor_id();
3291 + queue = &softnet_data[this_cpu];
3293 netdev_rx_stat[this_cpu].total++;
3294 if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
3295 diff -urN linux-2.4.20/net/core/skbuff.c linux/net/core/skbuff.c
3296 --- linux-2.4.20/net/core/skbuff.c 2002-08-02 20:39:46.000000000 -0400
3297 +++ linux/net/core/skbuff.c 2003-04-11 17:02:57.333274840 -0400
3298 @@ -111,33 +111,37 @@
3300 static __inline__ struct sk_buff *skb_head_from_pool(void)
3302 - struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;
3303 + struct sk_buff_head *list;
3304 + struct sk_buff *skb = NULL;
3305 + unsigned long flags;
3307 - if (skb_queue_len(list)) {
3308 - struct sk_buff *skb;
3309 - unsigned long flags;
3310 + local_irq_save(flags);
3312 - local_irq_save(flags);
3313 + list = &skb_head_pool[smp_processor_id()].list;
3315 + if (skb_queue_len(list))
3316 skb = __skb_dequeue(list);
3317 - local_irq_restore(flags);
3322 + local_irq_restore(flags);
3326 static __inline__ void skb_head_to_pool(struct sk_buff *skb)
3328 - struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;
3329 + struct sk_buff_head *list;
3330 + unsigned long flags;
3332 - if (skb_queue_len(list) < sysctl_hot_list_len) {
3333 - unsigned long flags;
3334 + local_irq_save(flags);
3335 + list = &skb_head_pool[smp_processor_id()].list;
3337 - local_irq_save(flags);
3338 + if (skb_queue_len(list) < sysctl_hot_list_len) {
3339 __skb_queue_head(list, skb);
3340 local_irq_restore(flags);
3345 + local_irq_restore(flags);
3346 kmem_cache_free(skbuff_head_cache, skb);
3349 diff -urN linux-2.4.20/net/socket.c linux/net/socket.c
3350 --- linux-2.4.20/net/socket.c 2002-11-28 18:53:16.000000000 -0500
3351 +++ linux/net/socket.c 2003-04-11 17:02:57.374268608 -0400
3354 static struct net_proto_family *net_families[NPROTO];
3357 +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
3358 static atomic_t net_family_lockct = ATOMIC_INIT(0);
3359 static spinlock_t net_family_lock = SPIN_LOCK_UNLOCKED;
3361 diff -urN linux-2.4.20/net/sunrpc/pmap_clnt.c linux/net/sunrpc/pmap_clnt.c
3362 --- linux-2.4.20/net/sunrpc/pmap_clnt.c 2002-08-02 20:39:46.000000000 -0400
3363 +++ linux/net/sunrpc/pmap_clnt.c 2003-04-11 17:02:57.409263288 -0400
3365 #include <linux/config.h>
3366 #include <linux/types.h>
3367 #include <linux/socket.h>
3368 +#include <linux/sched.h>
3369 #include <linux/kernel.h>
3370 #include <linux/errno.h>
3371 #include <linux/uio.h>