1 diff -urN linux-2.4.19-ac5/CREDITS linux/CREDITS
2 --- linux-2.4.19-ac5/CREDITS Thu Aug 1 15:54:34 2002
3 +++ linux/CREDITS Fri Aug 2 10:28:17 2002
9 D: Interrupt-driven printer driver
10 +D: Preemptible kernel
12 S: Mountain View, California 94040
14 diff -urN linux-2.4.19-ac5/Documentation/Configure.help linux/Documentation/Configure.help
15 --- linux-2.4.19-ac5/Documentation/Configure.help Thu Aug 1 15:55:22 2002
16 +++ linux/Documentation/Configure.help Fri Aug 2 10:28:17 2002
18 If you have a system with several CPUs, you do not need to say Y
19 here: the local APIC will be used automatically.
23 + This option reduces the latency of the kernel when reacting to
24 + real-time or interactive events by allowing a low priority process to
25 + be preempted even if it is in kernel mode executing a system call.
26 + This allows applications to run more reliably even when the system is
29 + Say Y here if you are building a kernel for a desktop, embedded or
30 + real-time system. Say N if you are unsure.
34 Linux can emulate a math coprocessor (used for floating point
35 diff -urN linux-2.4.19-ac5/Documentation/preempt-locking.txt linux/Documentation/preempt-locking.txt
36 --- linux-2.4.19-ac5/Documentation/preempt-locking.txt Wed Dec 31 16:00:00 1969
37 +++ linux/Documentation/preempt-locking.txt Fri Aug 2 10:28:18 2002
39 + Proper Locking Under a Preemptible Kernel:
40 + Keeping Kernel Code Preempt-Safe
41 + Robert Love <rml@tech9.net>
42 + Last Updated: 22 Jan 2002
48 +A preemptible kernel creates new locking issues. The issues are the same as
49 +those under SMP: concurrency and reentrancy. Thankfully, the Linux preemptible
50 +kernel model leverages existing SMP locking mechanisms. Thus, the kernel
51 +requires explicit additional locking for very few additional situations.
53 +This document is for all kernel hackers. Developing code in the kernel
54 +requires protecting these situations.
57 +RULE #1: Per-CPU data structures need explicit protection
60 +Two similar problems arise. An example code snippet:
62 + struct this_needs_locking tux[NR_CPUS];
63 + tux[smp_processor_id()] = some_value;
64 + /* task is preempted here... */
65 + something = tux[smp_processor_id()];
67 +First, since the data is per-CPU, it may not have explicit SMP locking, but
68 +require it otherwise. Second, when a preempted task is finally rescheduled,
69 +the previous value of smp_processor_id may not equal the current. You must
70 +protect these situations by disabling preemption around them.
73 +RULE #2: CPU state must be protected.
76 +Under preemption, the state of the CPU must be protected. This is arch-
77 +dependent, but includes CPU structures and state not preserved over a context
78 +switch. For example, on x86, entering and exiting FPU mode is now a critical
79 +section that must occur while preemption is disabled. Think what would happen
80 +if the kernel is executing a floating-point instruction and is then preempted.
81 +Remember, the kernel does not save FPU state except for user tasks. Therefore,
82 +upon preemption, the FPU registers will be sold to the lowest bidder. Thus,
83 +preemption must be disabled around such regions.
85 +Note, some FPU functions are already explicitly preempt safe. For example,
86 +kernel_fpu_begin and kernel_fpu_end will disable and enable preemption.
87 +However, math_state_restore must be called with preemption disabled.
90 +RULE #3: Lock acquire and release must be performed by same task
93 +A lock acquired in one task must be released by the same task. This
94 +means you can't do oddball things like acquire a lock and go off to
95 +play while another task releases it. If you want to do something
96 +like this, acquire and release the lock in the same code path and
97 +have the caller wait on an event by the other task.
103 +Data protection under preemption is achieved by disabling preemption for the
104 +duration of the critical region.
106 +preempt_enable() decrement the preempt counter
107 +preempt_disable() increment the preempt counter
108 +preempt_enable_no_resched() decrement, but do not immediately preempt
109 +preempt_get_count() return the preempt counter
111 +The functions are nestable. In other words, you can call preempt_disable
112 +n-times in a code path, and preemption will not be reenabled until the n-th
113 +call to preempt_enable. The preempt statements compile to nothing if
114 +preemption is not enabled.
116 +Note that you do not need to explicitly prevent preemption if you are holding
117 +any locks or interrupts are disabled, since preemption is implicitly disabled
122 + cpucache_t *cc; /* this is per-CPU */
124 + cc = cc_data(searchp);
125 + if (cc && cc->avail) {
126 + __free_block(searchp, cc_entry(cc), cc->avail);
132 +Notice how the preemption statements must encompass every reference of the
133 +critical variables. Another example:
137 +	if (buf[smp_processor_id()] == -1) printk(KERN_INFO "wee!\n");
138 + spin_lock(&buf_lock);
141 +This code is not preempt-safe, but see how easily we can fix it by simply
142 +moving the spin_lock up two lines.
143 diff -urN linux-2.4.19-ac5/MAINTAINERS linux/MAINTAINERS
144 --- linux-2.4.19-ac5/MAINTAINERS Thu Aug 1 15:54:42 2002
145 +++ linux/MAINTAINERS Fri Aug 2 10:28:18 2002
146 @@ -1285,6 +1285,14 @@
147 M: mostrows@styx.uwaterloo.ca
153 +L: linux-kernel@vger.kernel.org
154 +L: kpreempt-tech@lists.sourceforge.net
155 +W: http://tech9.net/rml/linux
158 PROMISE DC4030 CACHING DISK CONTROLLER DRIVER
160 M: promise@pnd-pc.demon.co.uk
161 diff -urN linux-2.4.19-ac5/arch/arm/config.in linux/arch/arm/config.in
162 --- linux-2.4.19-ac5/arch/arm/config.in Thu Aug 1 15:55:14 2002
163 +++ linux/arch/arm/config.in Fri Aug 2 10:28:18 2002
166 define_bool CONFIG_DISCONTIGMEM n
169 +dep_bool 'Preemptible Kernel' CONFIG_PREEMPT $CONFIG_CPU_32
172 mainmenu_option next_comment
173 diff -urN linux-2.4.19-ac5/arch/arm/kernel/entry-armv.S linux/arch/arm/kernel/entry-armv.S
174 --- linux-2.4.19-ac5/arch/arm/kernel/entry-armv.S Thu Aug 1 15:55:14 2002
175 +++ linux/arch/arm/kernel/entry-armv.S Fri Aug 2 10:28:18 2002
179 stmia r4, {r5, r6, r7, r8, r9} @ save sp_SVC, lr_SVC, pc, cpsr, old_ro
180 +#ifdef CONFIG_PREEMPT
181 + get_current_task r9
182 + ldr r8, [r9, #TSK_PREEMPT]
184 + str r8, [r9, #TSK_PREEMPT]
186 1: get_irqnr_and_base r0, r6, r5, lr
193 +#ifdef CONFIG_PREEMPT
194 +2: ldr r8, [r9, #TSK_PREEMPT]
197 + ldr r7, [r9, #TSK_NEED_RESCHED]
201 + ldr r0, [r6, #IRQSTAT_BH_COUNT]
205 + msr cpsr_c, r0 @ enable interrupts
206 + bl SYMBOL_NAME(preempt_schedule)
207 + mov r0, #I_BIT | MODE_SVC
208 + msr cpsr_c, r0 @ disable interrupts
210 +3: str r8, [r9, #TSK_PREEMPT]
212 ldr r0, [sp, #S_PSR] @ irqs are already disabled
214 ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
216 .LCprocfns: .word SYMBOL_NAME(processor)
218 .LCfp: .word SYMBOL_NAME(fp_enter)
219 +#ifdef CONFIG_PREEMPT
220 +.LCirqstat: .word SYMBOL_NAME(irq_stat)
227 alignment_trap r4, r7, __temp_irq
229 + get_current_task tsk
230 +#ifdef CONFIG_PREEMPT
231 + ldr r0, [tsk, #TSK_PREEMPT]
233 + str r0, [tsk, #TSK_PREEMPT]
235 1: get_irqnr_and_base r0, r6, r5, lr
239 @ routine called with r0 = irq number, r1 = struct pt_regs *
242 +#ifdef CONFIG_PREEMPT
243 + ldr r0, [tsk, #TSK_PREEMPT]
245 + str r0, [tsk, #TSK_PREEMPT]
248 - get_current_task tsk
252 diff -urN linux-2.4.19-ac5/arch/arm/tools/getconstants.c linux/arch/arm/tools/getconstants.c
253 --- linux-2.4.19-ac5/arch/arm/tools/getconstants.c Thu Aug 1 15:55:16 2002
254 +++ linux/arch/arm/tools/getconstants.c Fri Aug 2 10:28:18 2002
257 #include <asm/pgtable.h>
258 #include <asm/uaccess.h>
259 +#include <asm/hardirq.h>
262 * Make sure that the compiler and target are compatible.
264 DEFN("TSS_SAVE", OFF_TSK(thread.save));
265 DEFN("TSS_FPESAVE", OFF_TSK(thread.fpstate.soft.save));
267 +#ifdef CONFIG_PREEMPT
268 +DEFN("TSK_PREEMPT", OFF_TSK(preempt_count));
269 +DEFN("IRQSTAT_BH_COUNT", (unsigned long)&(((irq_cpustat_t *)0)->__local_bh_count));
273 DEFN("TSS_DOMAIN", OFF_TSK(thread.domain));
275 diff -urN linux-2.4.19-ac5/arch/i386/config.in linux/arch/i386/config.in
276 --- linux-2.4.19-ac5/arch/i386/config.in Thu Aug 1 15:55:07 2002
277 +++ linux/arch/i386/config.in Fri Aug 2 10:28:18 2002
279 bool 'Math emulation' CONFIG_MATH_EMULATION
280 bool 'MTRR (Memory Type Range Register) support' CONFIG_MTRR
281 bool 'Symmetric multi-processing support' CONFIG_SMP
282 +bool 'Preemptible Kernel' CONFIG_PREEMPT
283 if [ "$CONFIG_SMP" != "y" ]; then
284 bool 'Local APIC support on uniprocessors' CONFIG_X86_UP_APIC
285 dep_bool 'IO-APIC support on uniprocessors' CONFIG_X86_UP_IOAPIC $CONFIG_X86_UP_APIC
287 bool 'Multiquad NUMA system' CONFIG_MULTIQUAD
290 -if [ "$CONFIG_SMP" = "y" -a "$CONFIG_X86_CMPXCHG" = "y" ]; then
291 - define_bool CONFIG_HAVE_DEC_LOCK y
292 +if [ "$CONFIG_SMP" = "y" -o "$CONFIG_PREEMPT" = "y" ]; then
293 + if [ "$CONFIG_X86_CMPXCHG" = "y" ]; then
294 + define_bool CONFIG_HAVE_DEC_LOCK y
300 mainmenu_option next_comment
301 diff -urN linux-2.4.19-ac5/arch/i386/kernel/entry.S linux/arch/i386/kernel/entry.S
302 --- linux-2.4.19-ac5/arch/i386/kernel/entry.S Thu Aug 1 15:55:07 2002
303 +++ linux/arch/i386/kernel/entry.S Fri Aug 2 10:28:18 2002
305 * these are offsets into the task-struct.
317 +/* These are offsets into the irq_stat structure
318 + * There is one per cpu and it is aligned to 32
319 + * byte boundary (we put that here as a shift count)
321 +irq_array_shift = CONFIG_X86_L1_CACHE_SHIFT
323 +irq_stat_local_irq_count = 4
324 +irq_stat_local_bh_count = 8
329 +#define GET_CPU_INDX movl processor(%ebx),%eax; \
330 + shll $irq_array_shift,%eax
331 +#define GET_CURRENT_CPU_INDX GET_CURRENT(%ebx); \
333 +#define CPU_INDX (,%eax)
335 +#define GET_CPU_INDX
336 +#define GET_CURRENT_CPU_INDX GET_CURRENT(%ebx)
342 @@ -247,12 +267,30 @@
346 +#ifdef CONFIG_PREEMPT
348 + decl preempt_count(%ebx)
351 movl EFLAGS(%esp),%eax # mix EFLAGS and CS
353 testl $(VM_MASK | 3),%eax # return to VM86 mode or non-supervisor?
354 jne ret_from_sys_call
355 +#ifdef CONFIG_PREEMPT
356 + cmpl $0,preempt_count(%ebx)
358 + cmpl $0,need_resched(%ebx)
360 + movl SYMBOL_NAME(irq_stat)+irq_stat_local_bh_count CPU_INDX,%ecx
361 + addl SYMBOL_NAME(irq_stat)+irq_stat_local_irq_count CPU_INDX,%ecx
363 + incl preempt_count(%ebx)
365 + call SYMBOL_NAME(preempt_schedule)
377 +#ifdef CONFIG_PREEMPT
380 jmp ret_from_exception
382 ENTRY(coprocessor_error)
383 @@ -308,12 +349,18 @@
385 testl $0x4,%eax # EM (math emulation bit)
386 jne device_not_available_emulate
387 +#ifdef CONFIG_PREEMPT
390 call SYMBOL_NAME(math_state_restore)
391 jmp ret_from_exception
392 device_not_available_emulate:
393 pushl $0 # temporary storage for ORIG_EIP
394 call SYMBOL_NAME(math_emulate)
396 +#ifdef CONFIG_PREEMPT
399 jmp ret_from_exception
402 diff -urN linux-2.4.19-ac5/arch/i386/kernel/i387.c linux/arch/i386/kernel/i387.c
403 --- linux-2.4.19-ac5/arch/i386/kernel/i387.c Thu Aug 1 15:55:08 2002
404 +++ linux/arch/i386/kernel/i387.c Fri Aug 2 10:28:18 2002
407 #include <linux/config.h>
408 #include <linux/sched.h>
409 +#include <linux/spinlock.h>
410 #include <linux/init.h>
411 #include <asm/processor.h>
412 #include <asm/i387.h>
415 struct task_struct *tsk = current;
419 if (tsk->flags & PF_USEDFPU) {
420 __save_init_fpu(tsk);
422 diff -urN linux-2.4.19-ac5/arch/i386/kernel/smp.c linux/arch/i386/kernel/smp.c
423 --- linux-2.4.19-ac5/arch/i386/kernel/smp.c Thu Aug 1 15:55:08 2002
424 +++ linux/arch/i386/kernel/smp.c Fri Aug 2 10:28:18 2002
425 @@ -357,10 +357,13 @@
427 asmlinkage void smp_invalidate_interrupt (void)
429 - unsigned long cpu = smp_processor_id();
434 + cpu = smp_processor_id();
435 if (!test_bit(cpu, &flush_cpumask))
439 * This was a BUG() but until someone can quote me the
440 * line from the intel manual that guarantees an IPI to
444 clear_bit(cpu, &flush_cpumask);
449 static void flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
450 @@ -430,17 +435,22 @@
451 void flush_tlb_current_task(void)
453 struct mm_struct *mm = current->mm;
454 - unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
455 + unsigned long cpu_mask;
458 + cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
461 flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
465 void flush_tlb_mm (struct mm_struct * mm)
467 - unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
468 + unsigned long cpu_mask;
471 + cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
472 if (current->active_mm == mm) {
475 @@ -449,13 +459,16 @@
478 flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
482 void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
484 struct mm_struct *mm = vma->vm_mm;
485 - unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
486 + unsigned long cpu_mask;
489 + cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
490 if (current->active_mm == mm) {
496 flush_tlb_others(cpu_mask, mm, va);
500 static inline void do_flush_tlb_all_local(void)
501 diff -urN linux-2.4.19-ac5/arch/i386/kernel/traps.c linux/arch/i386/kernel/traps.c
502 --- linux-2.4.19-ac5/arch/i386/kernel/traps.c Thu Aug 1 15:55:07 2002
503 +++ linux/arch/i386/kernel/traps.c Fri Aug 2 10:28:18 2002
506 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
507 * Don't touch unless you *really* know how it works.
509 + * Must be called with kernel preemption disabled.
511 asmlinkage void math_state_restore(struct pt_regs regs)
513 diff -urN linux-2.4.19-ac5/arch/i386/lib/dec_and_lock.c linux/arch/i386/lib/dec_and_lock.c
514 --- linux-2.4.19-ac5/arch/i386/lib/dec_and_lock.c Thu Aug 1 15:55:07 2002
515 +++ linux/arch/i386/lib/dec_and_lock.c Fri Aug 2 10:28:18 2002
519 #include <linux/spinlock.h>
520 +#include <linux/sched.h>
521 #include <asm/atomic.h>
523 int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
524 diff -urN linux-2.4.19-ac5/arch/mips/config.in linux/arch/mips/config.in
525 --- linux-2.4.19-ac5/arch/mips/config.in Thu Aug 1 15:55:08 2002
526 +++ linux/arch/mips/config.in Fri Aug 2 10:28:43 2002
528 if [ "$CONFIG_SCSI" != "n" ]; then
529 source drivers/scsi/Config.in
531 +dep_bool 'Preemptible Kernel' CONFIG_PREEMPT $CONFIG_NEW_IRQ
534 if [ "$CONFIG_PCI" = "y" ]; then
535 diff -urN linux-2.4.19-ac5/arch/mips/kernel/i8259.c linux/arch/mips/kernel/i8259.c
536 --- linux-2.4.19-ac5/arch/mips/kernel/i8259.c Thu Aug 1 15:55:09 2002
537 +++ linux/arch/mips/kernel/i8259.c Fri Aug 2 10:28:43 2002
539 * Copyright (C) 1992 Linus Torvalds
540 * Copyright (C) 1994 - 2000 Ralf Baechle
542 +#include <linux/sched.h>
543 #include <linux/delay.h>
544 #include <linux/init.h>
545 #include <linux/ioport.h>
546 diff -urN linux-2.4.19-ac5/arch/mips/kernel/irq.c linux/arch/mips/kernel/irq.c
547 --- linux-2.4.19-ac5/arch/mips/kernel/irq.c Thu Aug 1 15:55:09 2002
548 +++ linux/arch/mips/kernel/irq.c Fri Aug 2 10:28:43 2002
550 * Copyright (C) 1992 Linus Torvalds
551 * Copyright (C) 1994 - 2000 Ralf Baechle
554 +#include <linux/sched.h>
555 #include <linux/config.h>
556 #include <linux/kernel.h>
557 #include <linux/delay.h>
559 #include <linux/slab.h>
560 #include <linux/mm.h>
561 #include <linux/random.h>
562 -#include <linux/sched.h>
563 +#include <linux/spinlock.h>
564 +#include <linux/ptrace.h>
566 #include <asm/atomic.h>
567 #include <asm/system.h>
568 #include <asm/uaccess.h>
569 +#include <asm/debug.h>
572 * Controller mappings for all interrupt sources:
574 struct irqaction * action;
579 kstat.irqs[cpu][irq]++;
580 spin_lock(&desc->lock);
581 desc->handler->ack(irq);
584 if (softirq_pending(cpu))
587 +#if defined(CONFIG_PREEMPT)
588 + while (--current->preempt_count == 0) {
589 + db_assert(intr_off());
590 + db_assert(!in_interrupt());
592 + if (current->need_resched == 0) {
596 + current->preempt_count ++;
598 + if (user_mode(regs)) {
601 + preempt_schedule();
610 diff -urN linux-2.4.19-ac5/arch/mips/mm/extable.c linux/arch/mips/mm/extable.c
611 --- linux-2.4.19-ac5/arch/mips/mm/extable.c Thu Aug 1 15:55:09 2002
612 +++ linux/arch/mips/mm/extable.c Fri Aug 2 10:28:43 2002
615 #include <linux/config.h>
616 #include <linux/module.h>
617 +#include <linux/sched.h>
618 #include <linux/spinlock.h>
619 #include <asm/uaccess.h>
621 diff -urN linux-2.4.19-ac5/arch/ppc/config.in linux/arch/ppc/config.in
622 --- linux-2.4.19-ac5/arch/ppc/config.in Thu Aug 1 15:55:12 2002
623 +++ linux/arch/ppc/config.in Fri Aug 2 10:29:00 2002
625 bool ' Distribute interrupts on all CPUs by default' CONFIG_IRQ_ALL_CPUS
628 +bool 'Preemptible kernel support' CONFIG_PREEMPT
630 if [ "$CONFIG_6xx" = "y" -a "$CONFIG_8260" = "n" ];then
631 bool 'AltiVec Support' CONFIG_ALTIVEC
632 bool 'Thermal Management Support' CONFIG_TAU
633 diff -urN linux-2.4.19-ac5/arch/ppc/kernel/entry.S linux/arch/ppc/kernel/entry.S
634 --- linux-2.4.19-ac5/arch/ppc/kernel/entry.S Thu Aug 1 15:55:12 2002
635 +++ linux/arch/ppc/kernel/entry.S Fri Aug 2 10:29:00 2002
640 +#ifdef CONFIG_PREEMPT
641 + lwz r3,PREEMPT_COUNT(r2)
643 + bge ret_from_except
647 + lwz r5,NEED_RESCHED(r2)
649 + beq ret_from_except
651 + ori r3,r3,irq_stat@l
656 + bne ret_from_except
657 + lwz r3,PREEMPT_COUNT(r2)
659 + stw r3,PREEMPT_COUNT(r2)
664 + bl preempt_schedule
666 + rlwinm r0,r0,0,17,15
669 + lwz r3,PREEMPT_COUNT(r2)
671 + stw r3,PREEMPT_COUNT(r2)
673 + b ret_from_intercept
674 +#endif /* CONFIG_PREEMPT */
675 .globl ret_from_except
677 lwz r3,_MSR(r1) /* Returning to user mode? */
678 diff -urN linux-2.4.19-ac5/arch/ppc/kernel/irq.c linux/arch/ppc/kernel/irq.c
679 --- linux-2.4.19-ac5/arch/ppc/kernel/irq.c Thu Aug 1 15:55:11 2002
680 +++ linux/arch/ppc/kernel/irq.c Fri Aug 2 10:29:00 2002
682 return 1; /* lets ret_from_int know we can do checks */
685 +#ifdef CONFIG_PREEMPT
687 +preempt_intercept(struct pt_regs *regs)
693 + switch(regs->trap) {
695 + ret = do_IRQ(regs);
702 + ret = timer_interrupt(regs);
711 +#endif /* CONFIG_PREEMPT */
713 unsigned long probe_irq_on (void)
716 diff -urN linux-2.4.19-ac5/arch/ppc/kernel/mk_defs.c linux/arch/ppc/kernel/mk_defs.c
717 --- linux-2.4.19-ac5/arch/ppc/kernel/mk_defs.c Thu Aug 1 15:55:11 2002
718 +++ linux/arch/ppc/kernel/mk_defs.c Fri Aug 2 10:29:00 2002
720 DEFINE(SIGPENDING, offsetof(struct task_struct, sigpending));
721 DEFINE(THREAD, offsetof(struct task_struct, thread));
722 DEFINE(MM, offsetof(struct task_struct, mm));
723 +#ifdef CONFIG_PREEMPT
724 + DEFINE(PREEMPT_COUNT, offsetof(struct task_struct, preempt_count));
726 DEFINE(ACTIVE_MM, offsetof(struct task_struct, active_mm));
727 DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
728 DEFINE(KSP, offsetof(struct thread_struct, ksp));
729 diff -urN linux-2.4.19-ac5/arch/ppc/kernel/setup.c linux/arch/ppc/kernel/setup.c
730 --- linux-2.4.19-ac5/arch/ppc/kernel/setup.c Thu Aug 1 15:55:11 2002
731 +++ linux/arch/ppc/kernel/setup.c Fri Aug 2 10:29:00 2002
736 +#ifdef CONFIG_PREEMPT
737 + /* Override the irq routines for external & timer interrupts here,
738 + * as the MMU has only been minimally setup at this point and
739 + * there are no protections on page zero.
742 + extern int preempt_intercept(struct pt_regs *);
744 + do_IRQ_intercept = (unsigned long) &preempt_intercept;
745 + timer_interrupt_intercept = (unsigned long) &preempt_intercept;
748 +#endif /* CONFIG_PREEMPT */
750 platform_init(r3, r4, r5, r6, r7);
753 diff -urN linux-2.4.19-ac5/arch/ppc/lib/dec_and_lock.c linux/arch/ppc/lib/dec_and_lock.c
754 --- linux-2.4.19-ac5/arch/ppc/lib/dec_and_lock.c Thu Aug 1 15:55:12 2002
755 +++ linux/arch/ppc/lib/dec_and_lock.c Fri Aug 2 10:29:00 2002
757 #include <linux/module.h>
758 +#include <linux/sched.h>
759 #include <linux/spinlock.h>
760 #include <asm/atomic.h>
761 #include <asm/system.h>
762 diff -urN linux-2.4.19-ac5/arch/sh/config.in linux/arch/sh/config.in
763 --- linux-2.4.19-ac5/arch/sh/config.in Thu Aug 1 15:55:17 2002
764 +++ linux/arch/sh/config.in Fri Aug 2 10:28:18 2002
766 hex 'Physical memory start address' CONFIG_MEMORY_START 08000000
767 hex 'Physical memory size' CONFIG_MEMORY_SIZE 00400000
769 +bool 'Preemptible Kernel' CONFIG_PREEMPT
772 if [ "$CONFIG_SH_HP690" = "y" ]; then
773 diff -urN linux-2.4.19-ac5/arch/sh/kernel/entry.S linux/arch/sh/kernel/entry.S
774 --- linux-2.4.19-ac5/arch/sh/kernel/entry.S Thu Aug 1 15:55:17 2002
775 +++ linux/arch/sh/kernel/entry.S Fri Aug 2 10:28:18 2002
778 * These are offsets into the task-struct.
788 + * These offsets are into irq_stat.
789 + * (Find irq_cpustat_t in asm-sh/hardirq.h)
794 PT_TRACESYS = 0x00000002
795 PF_USEDFPU = 0x00100000
797 mov.l __INV_IMASK, r11; \
800 - stc k_g_imask, r11; \
801 + stc k_g_imask, r11; \
806 mov.l @(tsk_ptrace,r0), r0 ! Is current PTRACE_SYSCALL'd?
809 - bt ret_from_syscall
810 - bra syscall_ret_trace
811 + bf syscall_ret_trace
812 + bra ret_from_syscall
817 .long syscall_ret_trace
821 - .long 0xffffff0f ! ~(IMASK)
827 1: .long SYMBOL_NAME(schedule)
829 +#ifdef CONFIG_PREEMPT
831 + ! Returning from interrupt during kernel mode: check if
832 + ! preempt_schedule should be called. If need_resched flag
833 + ! is set, preempt_count is zero, and we're not currently
834 + ! in an interrupt handler (local irq or bottom half) then
835 + ! call preempt_schedule.
837 + ! Increment preempt_count to prevent a nested interrupt
838 + ! from reentering preempt_schedule, then decrement after
839 + ! and drop through to regular interrupt return which will
840 + ! jump back and check again in case such an interrupt did
841 + ! come in (and didn't preempt due to preempt_count).
843 + ! NOTE: because we just checked that preempt_count was
844 + ! zero before getting to the call, can't we use immediate
845 + ! values (1 and 0) rather than inc/dec? Also, rather than
846 + ! drop through to ret_from_irq, we already know this thread
847 + ! is kernel mode, can't we go direct to ret_from_kirq? In
848 + ! fact, with proper interrupt nesting and so forth could
849 + ! the loop simply be on the need_resched w/o checking the
850 + ! other stuff again? Optimize later...
854 + ! Nonzero preempt_count prevents scheduling
856 + mov.l @(preempt_count,r1), r0
859 + ! Zero need_resched prevents scheduling
860 + mov.l @(need_resched,r1), r0
863 + ! If in_interrupt(), don't schedule
864 + mov.l __irq_stat, r1
865 + mov.l @(local_irq_count,r1), r0
866 + mov.l @(local_bh_count,r1), r1
870 + ! Allow scheduling using preempt_schedule
871 + ! Adjust preempt_count and SR as needed.
873 + mov.l @(preempt_count,r1), r0 ! Could replace this ...
874 + add #1, r0 ! ... and this w/mov #1?
875 + mov.l r0, @(preempt_count,r1)
877 + mov.l __preempt_schedule, r0
886 + mov.l @(preempt_count,r1), r0 ! Could replace this ...
887 + add #-1, r0 ! ... and this w/mov #0?
888 + mov.l r0, @(preempt_count,r1)
889 + ! Maybe should bra ret_from_kirq, or loop over need_resched?
890 + ! For now, fall through to ret_from_irq again...
891 +#endif /* CONFIG_PREEMPT */
895 + mov.l @(r0,r15), r0 ! get status register
897 + shll r0 ! kernel space?
898 +#ifndef CONFIG_PREEMPT
899 + bt restore_all ! Yes, it's from kernel, go back soon
900 +#else /* CONFIG_PREEMPT */
901 + bt ret_from_kirq ! From kernel: maybe preempt_schedule
902 +#endif /* CONFIG_PREEMPT */
904 + bra ret_from_syscall
909 mov.l @(r0,r15), r0 ! get status register
911 .long SYMBOL_NAME(do_signal)
913 .long SYMBOL_NAME(irq_stat)
914 +#ifdef CONFIG_PREEMPT
916 + .long SYMBOL_NAME(preempt_schedule)
917 +#endif /* CONFIG_PREEMPT */
919 + .long 0xffffff0f ! ~(IMASK)
926 .long SYMBOL_NAME(fpu_prepare_fd)
928 - .long SYMBOL_NAME(init_task_union)+4
929 + .long SYMBOL_NAME(init_task_union)+flags
933 diff -urN linux-2.4.19-ac5/arch/sh/kernel/irq.c linux/arch/sh/kernel/irq.c
934 --- linux-2.4.19-ac5/arch/sh/kernel/irq.c Thu Aug 1 15:55:17 2002
935 +++ linux/arch/sh/kernel/irq.c Fri Aug 2 10:28:18 2002
937 struct irqaction * action;
941 + * At this point we're now about to actually call handlers,
942 + * and interrupts might get reenabled during them... bump
943 + * preempt_count to prevent any preemption while the handler
944 + * called here is pending...
949 asm volatile("stc r2_bank, %0\n\t"
952 desc->handler->end(irq);
953 spin_unlock(&desc->lock);
956 if (softirq_pending(cpu))
960 + * We're done with the handlers, interrupts should be
961 + * currently disabled; decrement preempt_count now so
962 + * as we return preemption may be allowed...
964 + preempt_enable_no_resched();
969 diff -urN linux-2.4.19-ac5/drivers/ieee1394/csr.c linux/drivers/ieee1394/csr.c
970 --- linux-2.4.19-ac5/drivers/ieee1394/csr.c Thu Aug 1 15:55:03 2002
971 +++ linux/drivers/ieee1394/csr.c Fri Aug 2 10:28:18 2002
975 #include <linux/string.h>
976 +#include <linux/sched.h>
978 #include "ieee1394_types.h"
980 diff -urN linux-2.4.19-ac5/drivers/sound/sound_core.c linux/drivers/sound/sound_core.c
981 --- linux-2.4.19-ac5/drivers/sound/sound_core.c Thu Aug 1 15:54:53 2002
982 +++ linux/drivers/sound/sound_core.c Fri Aug 2 10:28:18 2002
984 #include <linux/config.h>
985 #include <linux/module.h>
986 #include <linux/init.h>
987 +#include <linux/sched.h>
988 #include <linux/slab.h>
989 #include <linux/types.h>
990 #include <linux/kernel.h>
991 diff -urN linux-2.4.19-ac5/fs/adfs/map.c linux/fs/adfs/map.c
992 --- linux-2.4.19-ac5/fs/adfs/map.c Thu Aug 1 15:54:33 2002
993 +++ linux/fs/adfs/map.c Fri Aug 2 10:28:19 2002
995 #include <linux/fs.h>
996 #include <linux/adfs_fs.h>
997 #include <linux/spinlock.h>
998 +#include <linux/sched.h>
1002 diff -urN linux-2.4.19-ac5/fs/exec.c linux/fs/exec.c
1003 --- linux-2.4.19-ac5/fs/exec.c Thu Aug 1 15:54:33 2002
1004 +++ linux/fs/exec.c Fri Aug 2 10:28:19 2002
1006 active_mm = current->active_mm;
1008 current->active_mm = mm;
1009 - task_unlock(current);
1010 activate_mm(active_mm, mm);
1011 + task_unlock(current);
1014 if (active_mm != old_mm) BUG();
1015 diff -urN linux-2.4.19-ac5/fs/fat/cache.c linux/fs/fat/cache.c
1016 --- linux-2.4.19-ac5/fs/fat/cache.c Thu Aug 1 15:54:33 2002
1017 +++ linux/fs/fat/cache.c Fri Aug 2 10:28:19 2002
1019 #include <linux/string.h>
1020 #include <linux/stat.h>
1021 #include <linux/fat_cvf.h>
1022 +#include <linux/sched.h>
1025 # define PRINTK(x) printk x
1026 diff -urN linux-2.4.19-ac5/fs/nls/nls_base.c linux/fs/nls/nls_base.c
1027 --- linux-2.4.19-ac5/fs/nls/nls_base.c Thu Aug 1 15:54:33 2002
1028 +++ linux/fs/nls/nls_base.c Fri Aug 2 10:28:19 2002
1031 #include <linux/kmod.h>
1033 +#include <linux/sched.h>
1034 #include <linux/spinlock.h>
1036 static struct nls_table *tables;
1037 diff -urN linux-2.4.19-ac5/include/asm-arm/dma.h linux/include/asm-arm/dma.h
1038 --- linux-2.4.19-ac5/include/asm-arm/dma.h Thu Aug 1 15:54:39 2002
1039 +++ linux/include/asm-arm/dma.h Fri Aug 2 10:28:19 2002
1042 #include <linux/config.h>
1043 #include <linux/spinlock.h>
1044 +#include <linux/sched.h>
1045 #include <asm/system.h>
1046 #include <asm/memory.h>
1047 #include <asm/scatterlist.h>
1048 diff -urN linux-2.4.19-ac5/include/asm-arm/hardirq.h linux/include/asm-arm/hardirq.h
1049 --- linux-2.4.19-ac5/include/asm-arm/hardirq.h Thu Aug 1 15:54:39 2002
1050 +++ linux/include/asm-arm/hardirq.h Fri Aug 2 10:28:19 2002
1052 #define irq_exit(cpu,irq) (local_irq_count(cpu)--)
1054 #define synchronize_irq() do { } while (0)
1055 +#define release_irqlock(cpu) do { } while (0)
1058 #error SMP not supported
1059 diff -urN linux-2.4.19-ac5/include/asm-arm/pgalloc.h linux/include/asm-arm/pgalloc.h
1060 --- linux-2.4.19-ac5/include/asm-arm/pgalloc.h Thu Aug 1 15:54:39 2002
1061 +++ linux/include/asm-arm/pgalloc.h Fri Aug 2 10:28:19 2002
1066 + preempt_disable();
1067 if ((ret = pgd_quicklist) != NULL) {
1068 pgd_quicklist = (unsigned long *)__pgd_next(ret);
1070 clean_dcache_entry(ret + 1);
1071 pgtable_cache_size--;
1074 return (pgd_t *)ret;
1077 static inline void free_pgd_fast(pgd_t *pgd)
1079 + preempt_disable();
1080 __pgd_next(pgd) = (unsigned long) pgd_quicklist;
1081 pgd_quicklist = (unsigned long *) pgd;
1082 pgtable_cache_size++;
1086 static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
1090 + preempt_disable();
1091 if((ret = pte_quicklist) != NULL) {
1092 pte_quicklist = (unsigned long *)__pte_next(ret);
1094 clean_dcache_entry(ret);
1095 pgtable_cache_size--;
1098 return (pte_t *)ret;
1101 static inline void free_pte_fast(pte_t *pte)
1103 + preempt_disable();
1104 __pte_next(pte) = (unsigned long) pte_quicklist;
1105 pte_quicklist = (unsigned long *) pte;
1106 pgtable_cache_size++;
1110 #else /* CONFIG_NO_PGT_CACHE */
1111 diff -urN linux-2.4.19-ac5/include/asm-arm/smplock.h linux/include/asm-arm/smplock.h
1112 --- linux-2.4.19-ac5/include/asm-arm/smplock.h Thu Aug 1 15:54:39 2002
1113 +++ linux/include/asm-arm/smplock.h Fri Aug 2 10:28:19 2002
1116 * Default SMP lock implementation
1118 +#include <linux/config.h>
1119 #include <linux/interrupt.h>
1120 #include <linux/spinlock.h>
1122 extern spinlock_t kernel_flag;
1124 +#ifdef CONFIG_PREEMPT
1125 +#define kernel_locked() preempt_get_count()
1127 #define kernel_locked() spin_is_locked(&kernel_flag)
1131 * Release global kernel lock and global interrupt lock
1134 static inline void lock_kernel(void)
1136 +#ifdef CONFIG_PREEMPT
1137 + if (current->lock_depth == -1)
1138 + spin_lock(&kernel_flag);
1139 + ++current->lock_depth;
1141 if (!++current->lock_depth)
1142 spin_lock(&kernel_flag);
1146 static inline void unlock_kernel(void)
1147 diff -urN linux-2.4.19-ac5/include/asm-arm/softirq.h linux/include/asm-arm/softirq.h
1148 --- linux-2.4.19-ac5/include/asm-arm/softirq.h Thu Aug 1 15:54:39 2002
1149 +++ linux/include/asm-arm/softirq.h Fri Aug 2 10:28:19 2002
1151 #include <asm/hardirq.h>
1153 #define __cpu_bh_enable(cpu) \
1154 - do { barrier(); local_bh_count(cpu)--; } while (0)
1155 + do { barrier(); local_bh_count(cpu)--; preempt_enable(); } while (0)
1156 #define cpu_bh_disable(cpu) \
1157 - do { local_bh_count(cpu)++; barrier(); } while (0)
1158 + do { preempt_disable(); local_bh_count(cpu)++; barrier(); } while (0)
1160 #define local_bh_disable() cpu_bh_disable(smp_processor_id())
1161 #define __local_bh_enable() __cpu_bh_enable(smp_processor_id())
1163 #define in_softirq() (local_bh_count(smp_processor_id()) != 0)
1165 -#define local_bh_enable() \
1166 +#define _local_bh_enable() \
1168 unsigned int *ptr = &local_bh_count(smp_processor_id()); \
1169 if (!--*ptr && ptr[-2]) \
1170 __asm__("bl%? __do_softirq": : : "lr");/* out of line */\
1173 +#define local_bh_enable() do { _local_bh_enable(); preempt_enable(); } while (0)
1175 #endif /* __ASM_SOFTIRQ_H */
1176 diff -urN linux-2.4.19-ac5/include/asm-arm/system.h linux/include/asm-arm/system.h
1177 --- linux-2.4.19-ac5/include/asm-arm/system.h Thu Aug 1 15:54:39 2002
1178 +++ linux/include/asm-arm/system.h Fri Aug 2 10:32:41 2002
1180 #define local_irq_disable() __cli()
1181 #define local_irq_enable() __sti()
1183 +#define irqs_disabled() \
1185 + unsigned long cpsr_val; \
1186 + asm ("mrs %0, cpsr" : "=r" (cpsr_val)); \
1191 #error SMP not supported
1193 diff -urN linux-2.4.19-ac5/include/asm-i386/hardirq.h linux/include/asm-i386/hardirq.h
1194 --- linux-2.4.19-ac5/include/asm-i386/hardirq.h Thu Aug 1 15:54:34 2002
1195 +++ linux/include/asm-i386/hardirq.h Fri Aug 2 10:28:19 2002
1199 * Are we in an interrupt context? Either doing bottom half
1200 - * or hardware interrupt processing?
1201 + * or hardware interrupt processing? Note the preempt check,
1202 + * this is both a bugfix and an optimization. If we are
1203 + * preemptible, we cannot be in an interrupt.
1205 -#define in_interrupt() ({ int __cpu = smp_processor_id(); \
1206 - (local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
1207 +#define in_interrupt() (preempt_is_disabled() && \
1208 + ({unsigned long __cpu = smp_processor_id(); \
1209 + (local_irq_count(__cpu) + local_bh_count(__cpu) != 0); }))
1211 -#define in_irq() (local_irq_count(smp_processor_id()) != 0)
1212 +#define in_irq() (preempt_is_disabled() && \
1213 + (local_irq_count(smp_processor_id()) != 0))
1219 #define synchronize_irq() barrier()
1221 +#define release_irqlock(cpu) do { } while (0)
1225 #include <asm/atomic.h>
1226 diff -urN linux-2.4.19-ac5/include/asm-i386/highmem.h linux/include/asm-i386/highmem.h
1227 --- linux-2.4.19-ac5/include/asm-i386/highmem.h Thu Aug 1 15:54:35 2002
1228 +++ linux/include/asm-i386/highmem.h Fri Aug 2 10:28:19 2002
1230 enum fixed_addresses idx;
1231 unsigned long vaddr;
1233 + preempt_disable();
1234 if (page < highmem_start_page)
1235 return page_address(page);
1237 @@ -109,8 +110,10 @@
1238 unsigned long vaddr = (unsigned long) kvaddr;
1239 enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
1241 - if (vaddr < FIXADDR_START) // FIXME
1242 + if (vaddr < FIXADDR_START) { // FIXME
1247 if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
1250 pte_clear(kmap_pte-idx);
1251 __flush_tlb_one(vaddr);
1257 #endif /* __KERNEL__ */
1258 diff -urN linux-2.4.19-ac5/include/asm-i386/hw_irq.h linux/include/asm-i386/hw_irq.h
1259 --- linux-2.4.19-ac5/include/asm-i386/hw_irq.h Thu Aug 1 15:54:34 2002
1260 +++ linux/include/asm-i386/hw_irq.h Fri Aug 2 10:28:19 2002
1263 #define STR(x) __STR(x)
1265 +#define GET_CURRENT \
1266 + "movl %esp, %ebx\n\t" \
1267 + "andl $-8192, %ebx\n\t"
1269 +#ifdef CONFIG_PREEMPT
1270 +#define BUMP_LOCK_COUNT \
1272 + "incl 4(%ebx)\n\t"
1274 +#define BUMP_LOCK_COUNT
1280 @@ -108,15 +120,12 @@
1282 "movl $" STR(__KERNEL_DS) ",%edx\n\t" \
1283 "movl %edx,%ds\n\t" \
1284 - "movl %edx,%es\n\t"
1285 + "movl %edx,%es\n\t" \
1288 #define IRQ_NAME2(nr) nr##_interrupt(void)
1289 #define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
1291 -#define GET_CURRENT \
1292 - "movl %esp, %ebx\n\t" \
1293 - "andl $-8192, %ebx\n\t"
1296 * SMP has a few special interrupts for IPI messages
1298 diff -urN linux-2.4.19-ac5/include/asm-i386/i387.h linux/include/asm-i386/i387.h
1299 --- linux-2.4.19-ac5/include/asm-i386/i387.h Thu Aug 1 15:54:35 2002
1300 +++ linux/include/asm-i386/i387.h Fri Aug 2 10:28:19 2002
1302 #define __ASM_I386_I387_H
1304 #include <linux/sched.h>
1305 +#include <linux/spinlock.h>
1306 #include <asm/processor.h>
1307 #include <asm/sigcontext.h>
1308 #include <asm/user.h>
1310 extern void restore_fpu( struct task_struct *tsk );
1312 extern void kernel_fpu_begin(void);
1313 -#define kernel_fpu_end() stts()
1314 +#define kernel_fpu_end() do { stts(); preempt_enable(); } while(0)
1317 #define unlazy_fpu( tsk ) do { \
1318 diff -urN linux-2.4.19-ac5/include/asm-i386/pgalloc.h linux/include/asm-i386/pgalloc.h
1319 --- linux-2.4.19-ac5/include/asm-i386/pgalloc.h Thu Aug 1 15:54:35 2002
1320 +++ linux/include/asm-i386/pgalloc.h Fri Aug 2 10:28:19 2002
1325 + preempt_disable();
1326 if ((ret = pgd_quicklist) != NULL) {
1327 pgd_quicklist = (unsigned long *)(*ret);
1329 pgtable_cache_size--;
1334 ret = (unsigned long *)get_pgd_slow();
1336 return (pgd_t *)ret;
1339 static inline void free_pgd_fast(pgd_t *pgd)
1341 + preempt_disable();
1342 *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
1343 pgd_quicklist = (unsigned long *) pgd;
1344 pgtable_cache_size++;
1348 static inline void free_pgd_slow(pgd_t *pgd)
1349 @@ -119,19 +125,23 @@
1353 + preempt_disable();
1354 if ((ret = (unsigned long *)pte_quicklist) != NULL) {
1355 pte_quicklist = (unsigned long *)(*ret);
1357 pgtable_cache_size--;
1360 return (pte_t *)ret;
1363 static inline void pte_free_fast(pte_t *pte)
1365 + preempt_disable();
1366 *(unsigned long *)pte = (unsigned long) pte_quicklist;
1367 pte_quicklist = (unsigned long *) pte;
1368 pgtable_cache_size++;
1372 static __inline__ void pte_free_slow(pte_t *pte)
1373 diff -urN linux-2.4.19-ac5/include/asm-i386/smplock.h linux/include/asm-i386/smplock.h
1374 --- linux-2.4.19-ac5/include/asm-i386/smplock.h Thu Aug 1 15:54:34 2002
1375 +++ linux/include/asm-i386/smplock.h Fri Aug 2 10:28:19 2002
1377 extern spinlock_cacheline_t kernel_flag_cacheline;
1378 #define kernel_flag kernel_flag_cacheline.lock
1381 #define kernel_locked() spin_is_locked(&kernel_flag)
1383 +#ifdef CONFIG_PREEMPT
1384 +#define kernel_locked() preempt_get_count()
1386 +#define kernel_locked() 1
1391 * Release global kernel lock and global interrupt lock
1394 static __inline__ void lock_kernel(void)
1396 +#ifdef CONFIG_PREEMPT
1397 + if (current->lock_depth == -1)
1398 + spin_lock(&kernel_flag);
1399 + ++current->lock_depth;
1402 if (!++current->lock_depth)
1403 spin_lock(&kernel_flag);
1405 :"=m" (__dummy_lock(&kernel_flag)),
1406 "=m" (current->lock_depth));
1411 static __inline__ void unlock_kernel(void)
1412 diff -urN linux-2.4.19-ac5/include/asm-i386/softirq.h linux/include/asm-i386/softirq.h
1413 --- linux-2.4.19-ac5/include/asm-i386/softirq.h Thu Aug 1 15:54:34 2002
1414 +++ linux/include/asm-i386/softirq.h Fri Aug 2 10:28:19 2002
1416 #include <asm/hardirq.h>
1418 #define __cpu_bh_enable(cpu) \
1419 - do { barrier(); local_bh_count(cpu)--; } while (0)
1420 + do { barrier(); local_bh_count(cpu)--; preempt_enable(); } while (0)
1421 #define cpu_bh_disable(cpu) \
1422 - do { local_bh_count(cpu)++; barrier(); } while (0)
1423 + do { preempt_disable(); local_bh_count(cpu)++; barrier(); } while (0)
1425 #define local_bh_disable() cpu_bh_disable(smp_processor_id())
1426 #define __local_bh_enable() __cpu_bh_enable(smp_processor_id())
1428 * If you change the offsets in irq_stat then you have to
1429 * update this code as well.
1431 -#define local_bh_enable() \
1432 +#define _local_bh_enable() \
1434 unsigned int *ptr = &local_bh_count(smp_processor_id()); \
1437 /* no registers clobbered */ ); \
1440 +#define local_bh_enable() do { _local_bh_enable(); preempt_enable(); } while (0)
1442 #endif /* __ASM_SOFTIRQ_H */
1443 diff -urN linux-2.4.19-ac5/include/asm-i386/spinlock.h linux/include/asm-i386/spinlock.h
1444 --- linux-2.4.19-ac5/include/asm-i386/spinlock.h Thu Aug 1 15:54:34 2002
1445 +++ linux/include/asm-i386/spinlock.h Fri Aug 2 10:28:19 2002
1447 :"=m" (lock->lock) : : "memory"
1450 -static inline void spin_unlock(spinlock_t *lock)
1451 +static inline void _raw_spin_unlock(spinlock_t *lock)
1454 if (lock->magic != SPINLOCK_MAGIC)
1456 :"=q" (oldval), "=m" (lock->lock) \
1457 :"0" (oldval) : "memory"
1459 -static inline void spin_unlock(spinlock_t *lock)
1460 +static inline void _raw_spin_unlock(spinlock_t *lock)
1468 -static inline int spin_trylock(spinlock_t *lock)
1469 +static inline int _raw_spin_trylock(spinlock_t *lock)
1472 __asm__ __volatile__(
1477 -static inline void spin_lock(spinlock_t *lock)
1478 +static inline void _raw_spin_lock(spinlock_t *lock)
1484 /* the spinlock helpers are in arch/i386/kernel/semaphore.c */
1486 -static inline void read_lock(rwlock_t *rw)
1487 +static inline void _raw_read_lock(rwlock_t *rw)
1490 if (rw->magic != RWLOCK_MAGIC)
1492 __build_read_lock(rw, "__read_lock_failed");
1495 -static inline void write_lock(rwlock_t *rw)
1496 +static inline void _raw_write_lock(rwlock_t *rw)
1499 if (rw->magic != RWLOCK_MAGIC)
1500 @@ -197,10 +197,10 @@
1501 __build_write_lock(rw, "__write_lock_failed");
1504 -#define read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
1505 -#define write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
1506 +#define _raw_read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
1507 +#define _raw_write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
1509 -static inline int write_trylock(rwlock_t *lock)
1510 +static inline int _raw_write_trylock(rwlock_t *lock)
1512 atomic_t *count = (atomic_t *)lock;
1513 if (atomic_sub_and_test(RW_LOCK_BIAS, count))
1514 diff -urN linux-2.4.19-ac5/include/asm-i386/system.h linux/include/asm-i386/system.h
1515 --- linux-2.4.19-ac5/include/asm-i386/system.h Thu Aug 1 15:54:34 2002
1516 +++ linux/include/asm-i386/system.h Fri Aug 2 10:33:09 2002
1517 @@ -317,6 +317,13 @@
1518 /* used in the idle loop; sti takes one instruction cycle to complete */
1519 #define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
1521 +#define irqs_disabled() \
1523 + unsigned long flags; \
1524 + __save_flags(flags); \
1525 + !(flags & (1<<9)); \
1528 /* For spinlocks etc */
1529 #define local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
1530 #define local_irq_restore(x) __restore_flags(x)
1531 diff -urN linux-2.4.19-ac5/include/asm-mips/smplock.h linux/include/asm-mips/smplock.h
1532 --- linux-2.4.19-ac5/include/asm-mips/smplock.h Thu Aug 1 15:54:36 2002
1533 +++ linux/include/asm-mips/smplock.h Fri Aug 2 10:28:44 2002
1536 * Default SMP lock implementation
1538 +#include <linux/config.h>
1539 #include <linux/interrupt.h>
1540 #include <linux/spinlock.h>
1542 extern spinlock_t kernel_flag;
1545 #define kernel_locked() spin_is_locked(&kernel_flag)
1547 +#ifdef CONFIG_PREEMPT
1548 +#define kernel_locked() preempt_get_count()
1550 +#define kernel_locked() 1
1555 * Release global kernel lock and global interrupt lock
1558 extern __inline__ void lock_kernel(void)
1560 +#ifdef CONFIG_PREEMPT
1561 + if (current->lock_depth == -1)
1562 + spin_lock(&kernel_flag);
1563 + ++current->lock_depth;
1565 if (!++current->lock_depth)
1566 spin_lock(&kernel_flag);
1570 extern __inline__ void unlock_kernel(void)
1571 diff -urN linux-2.4.19-ac5/include/asm-mips/softirq.h linux/include/asm-mips/softirq.h
1572 --- linux-2.4.19-ac5/include/asm-mips/softirq.h Thu Aug 1 15:54:36 2002
1573 +++ linux/include/asm-mips/softirq.h Fri Aug 2 10:28:44 2002
1576 static inline void cpu_bh_disable(int cpu)
1578 + preempt_disable();
1579 local_bh_count(cpu)++;
1585 local_bh_count(cpu)--;
1591 cpu = smp_processor_id(); \
1592 if (!--local_bh_count(cpu) && softirq_pending(cpu)) \
1594 + preempt_enable(); \
1597 #define in_softirq() (local_bh_count(smp_processor_id()) != 0)
1598 diff -urN linux-2.4.19-ac5/include/asm-mips/system.h linux/include/asm-mips/system.h
1599 --- linux-2.4.19-ac5/include/asm-mips/system.h Thu Aug 1 15:54:35 2002
1600 +++ linux/include/asm-mips/system.h Fri Aug 2 10:33:43 2002
1601 @@ -285,4 +285,18 @@
1602 #define die_if_kernel(msg, regs) \
1603 __die_if_kernel(msg, regs, __FILE__ ":"__FUNCTION__, __LINE__)
1605 +extern __inline__ int intr_on(void)
1607 + unsigned long flags;
1608 + save_flags(flags);
1612 +extern __inline__ int intr_off(void)
1614 + return ! intr_on();
1617 +#define irqs_disabled() intr_off()
1619 #endif /* _ASM_SYSTEM_H */
1620 diff -urN linux-2.4.19-ac5/include/asm-ppc/dma.h linux/include/asm-ppc/dma.h
1621 --- linux-2.4.19-ac5/include/asm-ppc/dma.h Thu Aug 1 15:54:37 2002
1622 +++ linux/include/asm-ppc/dma.h Fri Aug 2 10:29:00 2002
1624 #include <linux/config.h>
1626 #include <linux/spinlock.h>
1627 +#include <linux/sched.h>
1628 #include <asm/system.h>
1631 diff -urN linux-2.4.19-ac5/include/asm-ppc/hardirq.h linux/include/asm-ppc/hardirq.h
1632 --- linux-2.4.19-ac5/include/asm-ppc/hardirq.h Thu Aug 1 15:54:37 2002
1633 +++ linux/include/asm-ppc/hardirq.h Fri Aug 2 10:29:00 2002
1635 #define hardirq_exit(cpu) (local_irq_count(cpu)--)
1637 #define synchronize_irq() do { } while (0)
1638 +#define release_irqlock(cpu) do { } while (0)
1640 #else /* CONFIG_SMP */
1642 diff -urN linux-2.4.19-ac5/include/asm-ppc/highmem.h linux/include/asm-ppc/highmem.h
1643 --- linux-2.4.19-ac5/include/asm-ppc/highmem.h Thu Aug 1 15:54:38 2002
1644 +++ linux/include/asm-ppc/highmem.h Fri Aug 2 10:29:00 2002
1647 unsigned long vaddr;
1649 + preempt_disable();
1650 if (page < highmem_start_page)
1651 return page_address(page);
1653 @@ -105,8 +106,10 @@
1654 unsigned long vaddr = (unsigned long) kvaddr;
1655 unsigned int idx = type + KM_TYPE_NR*smp_processor_id();
1657 - if (vaddr < KMAP_FIX_BEGIN) // FIXME
1658 + if (vaddr < KMAP_FIX_BEGIN) { // FIXME
1663 if (vaddr != KMAP_FIX_BEGIN + idx * PAGE_SIZE)
1666 pte_clear(kmap_pte+idx);
1667 flush_tlb_page(0, vaddr);
1672 #endif /* __KERNEL__ */
1673 diff -urN linux-2.4.19-ac5/include/asm-ppc/hw_irq.h linux/include/asm-ppc/hw_irq.h
1674 --- linux-2.4.19-ac5/include/asm-ppc/hw_irq.h Thu Aug 1 15:54:38 2002
1675 +++ linux/include/asm-ppc/hw_irq.h Fri Aug 2 10:34:12 2002
1677 #define __save_flags(flags) __save_flags_ptr((unsigned long *)&flags)
1678 #define __save_and_cli(flags) ({__save_flags(flags);__cli();})
1680 +#define mfmsr() ({unsigned int rval; \
1681 + asm volatile("mfmsr %0" : "=r" (rval)); rval;})
1682 +#define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v))
1684 +#define irqs_disabled() ((mfmsr() & MSR_EE) == 0)
1686 extern void do_lost_interrupts(unsigned long);
1688 #define mask_irq(irq) ({if (irq_desc[irq].handler && irq_desc[irq].handler->disable) irq_desc[irq].handler->disable(irq);})
1689 diff -urN linux-2.4.19-ac5/include/asm-ppc/mmu_context.h linux/include/asm-ppc/mmu_context.h
1690 --- linux-2.4.19-ac5/include/asm-ppc/mmu_context.h Thu Aug 1 15:54:37 2002
1691 +++ linux/include/asm-ppc/mmu_context.h Fri Aug 2 10:29:00 2002
1692 @@ -158,6 +158,10 @@
1693 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
1694 struct task_struct *tsk, int cpu)
1696 +#ifdef CONFIG_PREEMPT
1697 + if (preempt_get_count() == 0)
1700 tsk->thread.pgdir = next->pgd;
1701 get_mmu_context(next);
1702 set_context(next->context, next->pgd);
1703 diff -urN linux-2.4.19-ac5/include/asm-ppc/pgalloc.h linux/include/asm-ppc/pgalloc.h
1704 --- linux-2.4.19-ac5/include/asm-ppc/pgalloc.h Thu Aug 1 15:54:38 2002
1705 +++ linux/include/asm-ppc/pgalloc.h Fri Aug 2 10:29:00 2002
1710 + preempt_disable();
1711 if ((ret = pgd_quicklist) != NULL) {
1712 pgd_quicklist = (unsigned long *)(*ret);
1714 pgtable_cache_size--;
1718 ret = (unsigned long *)get_pgd_slow();
1719 return (pgd_t *)ret;
1722 extern __inline__ void free_pgd_fast(pgd_t *pgd)
1724 + preempt_disable();
1725 *(unsigned long **)pgd = pgd_quicklist;
1726 pgd_quicklist = (unsigned long *) pgd;
1727 pgtable_cache_size++;
1731 extern __inline__ void free_pgd_slow(pgd_t *pgd)
1732 @@ -120,19 +125,23 @@
1736 + preempt_disable();
1737 if ((ret = pte_quicklist) != NULL) {
1738 pte_quicklist = (unsigned long *)(*ret);
1740 pgtable_cache_size--;
1743 return (pte_t *)ret;
1746 extern __inline__ void pte_free_fast(pte_t *pte)
1748 + preempt_disable();
1749 *(unsigned long **)pte = pte_quicklist;
1750 pte_quicklist = (unsigned long *) pte;
1751 pgtable_cache_size++;
1755 extern __inline__ void pte_free_slow(pte_t *pte)
1756 diff -urN linux-2.4.19-ac5/include/asm-ppc/smplock.h linux/include/asm-ppc/smplock.h
1757 --- linux-2.4.19-ac5/include/asm-ppc/smplock.h Thu Aug 1 15:54:37 2002
1758 +++ linux/include/asm-ppc/smplock.h Fri Aug 2 10:29:00 2002
1761 extern spinlock_t kernel_flag;
1764 #define kernel_locked() spin_is_locked(&kernel_flag)
1766 +#ifdef CONFIG_PREEMPT
1767 +#define kernel_locked() preempt_get_count()
1769 +#define kernel_locked() 1
1774 * Release global kernel lock and global interrupt lock
1777 static __inline__ void lock_kernel(void)
1779 +#ifdef CONFIG_PREEMPT
1780 + if (current->lock_depth == -1)
1781 + spin_lock(&kernel_flag);
1782 + ++current->lock_depth;
1784 if (!++current->lock_depth)
1785 spin_lock(&kernel_flag);
1789 static __inline__ void unlock_kernel(void)
1790 diff -urN linux-2.4.19-ac5/include/asm-ppc/softirq.h linux/include/asm-ppc/softirq.h
1791 --- linux-2.4.19-ac5/include/asm-ppc/softirq.h Thu Aug 1 15:54:37 2002
1792 +++ linux/include/asm-ppc/softirq.h Fri Aug 2 10:29:00 2002
1795 #define local_bh_disable() \
1797 + preempt_disable(); \
1798 local_bh_count(smp_processor_id())++; \
1804 local_bh_count(smp_processor_id())--; \
1805 + preempt_enable(); \
1808 -#define local_bh_enable() \
1809 +#define _local_bh_enable() \
1811 if (!--local_bh_count(smp_processor_id()) \
1812 && softirq_pending(smp_processor_id())) { \
1817 +#define local_bh_enable() \
1819 + _local_bh_enable(); \
1820 + preempt_enable(); \
1823 #define in_softirq() (local_bh_count(smp_processor_id()) != 0)
1825 #endif /* __ASM_SOFTIRQ_H */
1826 diff -urN linux-2.4.19-ac5/include/asm-sh/hardirq.h linux/include/asm-sh/hardirq.h
1827 --- linux-2.4.19-ac5/include/asm-sh/hardirq.h Thu Aug 1 15:54:40 2002
1828 +++ linux/include/asm-sh/hardirq.h Fri Aug 2 10:28:19 2002
1831 #define synchronize_irq() barrier()
1833 +#define release_irqlock(cpu) do { } while (0)
1837 #error Super-H SMP is not available
1838 diff -urN linux-2.4.19-ac5/include/asm-sh/smplock.h linux/include/asm-sh/smplock.h
1839 --- linux-2.4.19-ac5/include/asm-sh/smplock.h Thu Aug 1 15:54:40 2002
1840 +++ linux/include/asm-sh/smplock.h Fri Aug 2 10:28:20 2002
1843 #include <linux/config.h>
1847 +#if !defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT)
1849 + * Should never happen, since linux/smp_lock.h catches this case;
1850 + * but in case this file is included directly with neither SMP nor
1851 + * PREEMPT configuration, provide same dummies as linux/smp_lock.h
1853 #define lock_kernel() do { } while(0)
1854 #define unlock_kernel() do { } while(0)
1855 -#define release_kernel_lock(task, cpu, depth) ((depth) = 1)
1856 -#define reacquire_kernel_lock(task, cpu, depth) do { } while(0)
1857 +#define release_kernel_lock(task, cpu) do { } while(0)
1858 +#define reacquire_kernel_lock(task) do { } while(0)
1859 +#define kernel_locked() 1
1861 +#else /* CONFIG_SMP || CONFIG_PREEMPT */
1864 +#error "We do not support SMP on SH yet"
1867 + * Default SMP lock implementation (i.e. the i386 version)
1870 +#include <linux/interrupt.h>
1871 +#include <linux/spinlock.h>
1873 +extern spinlock_t kernel_flag;
1874 +#define lock_bkl() spin_lock(&kernel_flag)
1875 +#define unlock_bkl() spin_unlock(&kernel_flag)
1878 +#define kernel_locked() spin_is_locked(&kernel_flag)
1879 +#elif CONFIG_PREEMPT
1880 +#define kernel_locked() preempt_get_count()
1881 +#else /* neither */
1882 +#define kernel_locked() 1
1886 + * Release global kernel lock and global interrupt lock
1888 +#define release_kernel_lock(task, cpu) \
1890 + if (task->lock_depth >= 0) \
1891 + spin_unlock(&kernel_flag); \
1892 + release_irqlock(cpu); \
1897 + * Re-acquire the kernel lock
1899 +#define reacquire_kernel_lock(task) \
1901 + if (task->lock_depth >= 0) \
1902 + spin_lock(&kernel_flag); \
1906 + * Getting the big kernel lock.
1908 + * This cannot happen asynchronously,
1909 + * so we only need to worry about other
1912 +static __inline__ void lock_kernel(void)
1914 +#ifdef CONFIG_PREEMPT
1915 + if (current->lock_depth == -1)
1916 + spin_lock(&kernel_flag);
1917 + ++current->lock_depth;
1919 -#error "We do not support SMP on SH"
1920 -#endif /* CONFIG_SMP */
1921 + if (!++current->lock_depth)
1922 + spin_lock(&kernel_flag);
1926 +static __inline__ void unlock_kernel(void)
1928 + if (current->lock_depth < 0)
1930 + if (--current->lock_depth < 0)
1931 + spin_unlock(&kernel_flag);
1933 +#endif /* CONFIG_SMP || CONFIG_PREEMPT */
1935 #endif /* __ASM_SH_SMPLOCK_H */
1936 diff -urN linux-2.4.19-ac5/include/asm-sh/softirq.h linux/include/asm-sh/softirq.h
1937 --- linux-2.4.19-ac5/include/asm-sh/softirq.h Thu Aug 1 15:54:40 2002
1938 +++ linux/include/asm-sh/softirq.h Fri Aug 2 10:28:20 2002
1941 #define local_bh_disable() \
1943 + preempt_disable(); \
1944 local_bh_count(smp_processor_id())++; \
1950 local_bh_count(smp_processor_id())--; \
1951 + preempt_enable(); \
1954 #define local_bh_enable() \
1956 && softirq_pending(smp_processor_id())) { \
1959 + preempt_enable(); \
1962 #define in_softirq() (local_bh_count(smp_processor_id()) != 0)
1963 diff -urN linux-2.4.19-ac5/include/linux/brlock.h linux/include/linux/brlock.h
1964 --- linux-2.4.19-ac5/include/linux/brlock.h Thu Aug 1 15:54:34 2002
1965 +++ linux/include/linux/brlock.h Fri Aug 2 10:28:20 2002
1966 @@ -171,11 +171,11 @@
1970 -# define br_read_lock(idx) ((void)(idx))
1971 -# define br_read_unlock(idx) ((void)(idx))
1972 -# define br_write_lock(idx) ((void)(idx))
1973 -# define br_write_unlock(idx) ((void)(idx))
1975 +# define br_read_lock(idx) ({ (void)(idx); preempt_disable(); })
1976 +# define br_read_unlock(idx) ({ (void)(idx); preempt_enable(); })
1977 +# define br_write_lock(idx) ({ (void)(idx); preempt_disable(); })
1978 +# define br_write_unlock(idx) ({ (void)(idx); preempt_enable(); })
1979 +#endif /* CONFIG_SMP */
1982 * Now enumerate all of the possible sw/hw IRQ protected
1983 diff -urN linux-2.4.19-ac5/include/linux/dcache.h linux/include/linux/dcache.h
1984 --- linux-2.4.19-ac5/include/linux/dcache.h Thu Aug 1 15:54:34 2002
1985 +++ linux/include/linux/dcache.h Fri Aug 2 10:28:20 2002
1986 @@ -126,31 +126,6 @@
1988 extern spinlock_t dcache_lock;
1991 - * d_drop - drop a dentry
1992 - * @dentry: dentry to drop
1994 - * d_drop() unhashes the entry from the parent
1995 - * dentry hashes, so that it won't be found through
1996 - * a VFS lookup any more. Note that this is different
1997 - * from deleting the dentry - d_delete will try to
1998 - * mark the dentry negative if possible, giving a
1999 - * successful _negative_ lookup, while d_drop will
2000 - * just make the cache lookup fail.
2002 - * d_drop() is used mainly for stuff that wants
2003 - * to invalidate a dentry for some reason (NFS
2004 - * timeouts or autofs deletes).
2007 -static __inline__ void d_drop(struct dentry * dentry)
2009 - spin_lock(&dcache_lock);
2010 - list_del(&dentry->d_hash);
2011 - INIT_LIST_HEAD(&dentry->d_hash);
2012 - spin_unlock(&dcache_lock);
2015 static __inline__ int dname_external(struct dentry *d)
2017 return d->d_name.name != d->d_iname;
2018 @@ -275,3 +250,34 @@
2019 #endif /* __KERNEL__ */
2021 #endif /* __LINUX_DCACHE_H */
2023 +#if !defined(__LINUX_DCACHE_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
2024 +#define __LINUX_DCACHE_H_INLINES
2028 + * d_drop - drop a dentry
2029 + * @dentry: dentry to drop
2031 + * d_drop() unhashes the entry from the parent
2032 + * dentry hashes, so that it won't be found through
2033 + * a VFS lookup any more. Note that this is different
2034 + * from deleting the dentry - d_delete will try to
2035 + * mark the dentry negative if possible, giving a
2036 + * successful _negative_ lookup, while d_drop will
2037 + * just make the cache lookup fail.
2039 + * d_drop() is used mainly for stuff that wants
2040 + * to invalidate a dentry for some reason (NFS
2041 + * timeouts or autofs deletes).
2044 +static __inline__ void d_drop(struct dentry * dentry)
2046 + spin_lock(&dcache_lock);
2047 + list_del(&dentry->d_hash);
2048 + INIT_LIST_HEAD(&dentry->d_hash);
2049 + spin_unlock(&dcache_lock);
2053 diff -urN linux-2.4.19-ac5/include/linux/fs_struct.h linux/include/linux/fs_struct.h
2054 --- linux-2.4.19-ac5/include/linux/fs_struct.h Thu Aug 1 15:54:34 2002
2055 +++ linux/include/linux/fs_struct.h Fri Aug 2 10:28:20 2002
2057 extern void exit_fs(struct task_struct *);
2058 extern void set_fs_altroot(void);
2060 +struct fs_struct *copy_fs_struct(struct fs_struct *old);
2061 +void put_fs_struct(struct fs_struct *fs);
2066 +#if !defined(_LINUX_FS_STRUCT_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
2067 +#define _LINUX_FS_STRUCT_H_INLINES
2070 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
2071 * It can block. Requires the big lock held.
2077 -struct fs_struct *copy_fs_struct(struct fs_struct *old);
2078 -void put_fs_struct(struct fs_struct *fs);
2082 diff -urN linux-2.4.19-ac5/include/linux/sched.h linux/include/linux/sched.h
2083 --- linux-2.4.19-ac5/include/linux/sched.h Thu Aug 1 15:54:34 2002
2084 +++ linux/include/linux/sched.h Fri Aug 2 10:28:20 2002
2086 #define TASK_UNINTERRUPTIBLE 2
2087 #define TASK_ZOMBIE 4
2088 #define TASK_STOPPED 8
2089 +#define PREEMPT_ACTIVE 0x4000000
2091 #define __set_task_state(tsk, state_value) \
2092 do { (tsk)->state = (state_value); } while (0)
2094 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
2095 extern signed long FASTCALL(schedule_timeout(signed long timeout));
2096 asmlinkage void schedule(void);
2097 +#ifdef CONFIG_PREEMPT
2098 +asmlinkage void preempt_schedule(void);
2101 extern int schedule_task(struct tq_struct *task);
2102 extern void flush_scheduled_tasks(void);
2104 * offsets of these are hardcoded elsewhere - touch with care
2106 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
2107 - unsigned long flags; /* per process flags, defined below */
2108 + int preempt_count; /* 0 => preemptable, <0 => BUG */
2110 mm_segment_t addr_limit; /* thread address space:
2111 0-0xBFFFFFFF for user-thead
2113 struct mm_struct *active_mm;
2114 struct list_head local_pages;
2115 unsigned int allocation_order, nr_local_pages;
2116 + unsigned long flags;
2119 struct linux_binfmt *binfmt;
2120 @@ -944,6 +949,11 @@
2124 +#define _TASK_STRUCT_DEFINED
2125 +#include <linux/dcache.h>
2126 +#include <linux/tqueue.h>
2127 +#include <linux/fs_struct.h>
2129 #endif /* __KERNEL__ */
2132 diff -urN linux-2.4.19-ac5/include/linux/smp_lock.h linux/include/linux/smp_lock.h
2133 --- linux-2.4.19-ac5/include/linux/smp_lock.h Thu Aug 1 15:54:34 2002
2134 +++ linux/include/linux/smp_lock.h Fri Aug 2 10:28:20 2002
2137 #include <linux/config.h>
2140 +#if !defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT)
2142 #define lock_kernel() do { } while(0)
2143 #define unlock_kernel() do { } while(0)
2144 diff -urN linux-2.4.19-ac5/include/linux/spinlock.h linux/include/linux/spinlock.h
2145 --- linux-2.4.19-ac5/include/linux/spinlock.h Thu Aug 1 15:54:34 2002
2146 +++ linux/include/linux/spinlock.h Fri Aug 2 10:28:20 2002
2148 #define __LINUX_SPINLOCK_H
2150 #include <linux/config.h>
2151 +#include <linux/compiler.h>
2154 * These are the generic versions of the spinlocks and read-write
2157 #if (DEBUG_SPINLOCKS < 1)
2159 +#ifndef CONFIG_PREEMPT
2160 #define atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
2161 #define ATOMIC_DEC_AND_LOCK
2165 * Your basic spinlocks, allowing only a single CPU anywhere
2169 #define spin_lock_init(lock) do { } while(0)
2170 -#define spin_lock(lock) (void)(lock) /* Not "unused variable". */
2171 +#define _raw_spin_lock(lock) (void)(lock) /* Not "unused variable". */
2172 #define spin_is_locked(lock) (0)
2173 -#define spin_trylock(lock) ({1; })
2174 +#define _raw_spin_trylock(lock) ({1; })
2175 #define spin_unlock_wait(lock) do { } while(0)
2176 -#define spin_unlock(lock) do { } while(0)
2177 +#define _raw_spin_unlock(lock) do { } while(0)
2179 #elif (DEBUG_SPINLOCKS < 2)
2181 @@ -142,13 +145,78 @@
2184 #define rwlock_init(lock) do { } while(0)
2185 -#define read_lock(lock) (void)(lock) /* Not "unused variable". */
2186 -#define read_unlock(lock) do { } while(0)
2187 -#define write_lock(lock) (void)(lock) /* Not "unused variable". */
2188 -#define write_unlock(lock) do { } while(0)
2189 +#define _raw_read_lock(lock) (void)(lock) /* Not "unused variable". */
2190 +#define _raw_read_unlock(lock) do { } while(0)
2191 +#define _raw_write_lock(lock) (void)(lock) /* Not "unused variable". */
2192 +#define _raw_write_unlock(lock) do { } while(0)
2196 +#ifdef CONFIG_PREEMPT
2198 +#define preempt_get_count() (current->preempt_count)
2199 +#define preempt_is_disabled() (preempt_get_count() != 0)
2201 +#define preempt_disable() \
2203 + ++current->preempt_count; \
2207 +#define preempt_enable_no_resched() \
2209 + --current->preempt_count; \
2213 +#define preempt_enable() \
2215 + --current->preempt_count; \
2217 + if (unlikely(current->preempt_count < current->need_resched)) \
2218 + preempt_schedule(); \
2221 +#define spin_lock(lock) \
2223 + preempt_disable(); \
2224 + _raw_spin_lock(lock); \
2227 +#define spin_trylock(lock) ({preempt_disable(); _raw_spin_trylock(lock) ? \
2228 + 1 : ({preempt_enable(); 0;});})
2229 +#define spin_unlock(lock) \
2231 + _raw_spin_unlock(lock); \
2232 + preempt_enable(); \
2235 +#define read_lock(lock) ({preempt_disable(); _raw_read_lock(lock);})
2236 +#define read_unlock(lock) ({_raw_read_unlock(lock); preempt_enable();})
2237 +#define write_lock(lock) ({preempt_disable(); _raw_write_lock(lock);})
2238 +#define write_unlock(lock) ({_raw_write_unlock(lock); preempt_enable();})
2239 +#define write_trylock(lock) ({preempt_disable();_raw_write_trylock(lock) ? \
2240 + 1 : ({preempt_enable(); 0;});})
2244 +#define preempt_get_count() (0)
2245 +#define preempt_is_disabled() (1)
2246 +#define preempt_disable() do { } while (0)
2247 +#define preempt_enable_no_resched() do {} while(0)
2248 +#define preempt_enable() do { } while (0)
2250 +#define spin_lock(lock) _raw_spin_lock(lock)
2251 +#define spin_trylock(lock) _raw_spin_trylock(lock)
2252 +#define spin_unlock(lock) _raw_spin_unlock(lock)
2254 +#define read_lock(lock) _raw_read_lock(lock)
2255 +#define read_unlock(lock) _raw_read_unlock(lock)
2256 +#define write_lock(lock) _raw_write_lock(lock)
2257 +#define write_unlock(lock) _raw_write_unlock(lock)
2258 +#define write_trylock(lock) _raw_write_trylock(lock)
2261 /* "lock on reference count zero" */
2262 #ifndef ATOMIC_DEC_AND_LOCK
2263 #include <asm/atomic.h>
2264 diff -urN linux-2.4.19-ac5/include/linux/tqueue.h linux/include/linux/tqueue.h
2265 --- linux-2.4.19-ac5/include/linux/tqueue.h Thu Aug 1 15:54:34 2002
2266 +++ linux/include/linux/tqueue.h Fri Aug 2 10:28:20 2002
2268 extern spinlock_t tqueue_lock;
2271 + * Call all "bottom halves" on a given list.
2274 +extern void __run_task_queue(task_queue *list);
2276 +static inline void run_task_queue(task_queue *list)
2278 + if (TQ_ACTIVE(*list))
2279 + __run_task_queue(list);
2282 +#endif /* _LINUX_TQUEUE_H */
2284 +#if !defined(_LINUX_TQUEUE_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
2285 +#define _LINUX_TQUEUE_H_INLINES
2287 * Queue a task on a tq. Return non-zero if it was successfully
2290 @@ -109,17 +125,4 @@
2296 - * Call all "bottom halfs" on a given list.
2299 -extern void __run_task_queue(task_queue *list);
2301 -static inline void run_task_queue(task_queue *list)
2303 - if (TQ_ACTIVE(*list))
2304 - __run_task_queue(list);
2307 -#endif /* _LINUX_TQUEUE_H */
2309 diff -urN linux-2.4.19-ac5/kernel/exit.c linux/kernel/exit.c
2310 --- linux-2.4.19-ac5/kernel/exit.c Thu Aug 1 15:54:34 2002
2311 +++ linux/kernel/exit.c Fri Aug 2 10:28:20 2002
2313 /* more a memory barrier than a real lock */
2317 enter_lazy_tlb(mm, current, smp_processor_id());
2322 @@ -449,6 +449,11 @@
2323 tsk->flags |= PF_EXITING;
2324 del_timer_sync(&tsk->real_timer);
2326 + if (unlikely(preempt_get_count()))
2327 + printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
2328 + current->comm, current->pid,
2329 + preempt_get_count());
2332 #ifdef CONFIG_BSD_PROCESS_ACCT
2334 diff -urN linux-2.4.19-ac5/kernel/fork.c linux/kernel/fork.c
2335 --- linux-2.4.19-ac5/kernel/fork.c Thu Aug 1 15:54:34 2002
2336 +++ linux/kernel/fork.c Fri Aug 2 10:28:20 2002
2337 @@ -629,6 +629,13 @@
2338 if (p->binfmt && p->binfmt->module)
2339 __MOD_INC_USE_COUNT(p->binfmt->module);
2341 +#ifdef CONFIG_PREEMPT
2343 + * Continue with preemption disabled as part of the context
2344 + * switch, so start with preempt_count set to 1.
2346 + p->preempt_count = 1;
2350 p->state = TASK_UNINTERRUPTIBLE;
2351 diff -urN linux-2.4.19-ac5/kernel/ksyms.c linux/kernel/ksyms.c
2352 --- linux-2.4.19-ac5/kernel/ksyms.c Thu Aug 1 15:54:34 2002
2353 +++ linux/kernel/ksyms.c Fri Aug 2 10:28:20 2002
2355 EXPORT_SYMBOL(interruptible_sleep_on);
2356 EXPORT_SYMBOL(interruptible_sleep_on_timeout);
2357 EXPORT_SYMBOL(schedule);
2358 +#ifdef CONFIG_PREEMPT
2359 +EXPORT_SYMBOL(preempt_schedule);
2361 EXPORT_SYMBOL(schedule_timeout);
2362 EXPORT_SYMBOL(sys_sched_yield);
2363 EXPORT_SYMBOL(jiffies);
2364 diff -urN linux-2.4.19-ac5/kernel/sched.c linux/kernel/sched.c
2365 --- linux-2.4.19-ac5/kernel/sched.c Thu Aug 1 15:54:34 2002
2366 +++ linux/kernel/sched.c Fri Aug 2 10:31:01 2002
2369 task_release_cpu(prev);
2371 - if (prev->state == TASK_RUNNING)
2372 + if (task_on_runqueue(prev))
2379 spin_lock_irqsave(&runqueue_lock, flags);
2380 - if ((prev->state == TASK_RUNNING) && !task_has_cpu(prev))
2381 + if (task_on_runqueue(prev) && !task_has_cpu(prev))
2382 reschedule_idle(prev);
2383 spin_unlock_irqrestore(&runqueue_lock, flags);
2386 asmlinkage void schedule_tail(struct task_struct *prev)
2388 __schedule_tail(prev);
2393 @@ -553,9 +554,10 @@
2394 struct list_head *tmp;
2398 spin_lock_prefetch(&runqueue_lock);
2400 + preempt_disable();
2402 BUG_ON(!current->active_mm);
2405 @@ -583,6 +585,14 @@
2406 move_last_runqueue(prev);
2409 +#ifdef CONFIG_PREEMPT
2411 + * entering from preempt_schedule, off a kernel preemption,
2412 + * go straight to picking the next task.
2414 + if (unlikely(preempt_get_count() & PREEMPT_ACTIVE))
2415 + goto treat_like_run;
2417 switch (prev->state) {
2418 case TASK_INTERRUPTIBLE:
2419 if (signal_pending(prev)) {
2421 del_from_runqueue(prev);
2424 +#ifdef CONFIG_PREEMPT
2427 prev->need_resched = 0;
2430 @@ -701,9 +714,31 @@
2431 reacquire_kernel_lock(current);
2432 if (current->need_resched)
2433 goto need_resched_back;
2434 + preempt_enable_no_resched();
2438 +#ifdef CONFIG_PREEMPT
2440 + * this is the entry point to schedule() from in-kernel preemption
2442 +asmlinkage void preempt_schedule(void)
2444 + if (unlikely(irqs_disabled()))
2448 + current->preempt_count += PREEMPT_ACTIVE;
2450 + current->preempt_count -= PREEMPT_ACTIVE;
2452 + /* we could miss a preemption opportunity between schedule and now */
2454 + if (unlikely(current->need_resched))
2455 + goto need_resched;
2457 +#endif /* CONFIG_PREEMPT */
2460 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just wake everything
2461 * up. If it's an exclusive wakeup (nr_exclusive == small +ve number) then we wake all the
2462 @@ -1312,6 +1347,13 @@
2463 sched_data->curr = current;
2464 sched_data->last_schedule = get_cycles();
2465 clear_bit(current->processor, &wait_init_idle);
2466 +#ifdef CONFIG_PREEMPT
2468 + * fix up the preempt_count for non-CPU0 idle threads
2470 + if (current->processor)
2471 + current->preempt_count = 0;
2475 extern void init_timervecs (void);
2476 diff -urN linux-2.4.19-ac5/lib/dec_and_lock.c linux/lib/dec_and_lock.c
2477 --- linux-2.4.19-ac5/lib/dec_and_lock.c Thu Aug 1 15:54:34 2002
2478 +++ linux/lib/dec_and_lock.c Fri Aug 2 10:28:20 2002
2480 #include <linux/module.h>
2481 #include <linux/spinlock.h>
2482 +#include <linux/sched.h>
2483 #include <asm/atomic.h>
2486 diff -urN linux-2.4.19-ac5/mm/slab.c linux/mm/slab.c
2487 --- linux-2.4.19-ac5/mm/slab.c Thu Aug 1 15:54:34 2002
2488 +++ linux/mm/slab.c Fri Aug 2 10:28:21 2002
2490 * constructors and destructors are called without any locking.
2491 * Several members in kmem_cache_t and slab_t never change, they
2492 * are accessed without any locking.
2493 - * The per-cpu arrays are never accessed from the wrong cpu, no locking.
2494 + * The per-cpu arrays are never accessed from the wrong cpu, no locking,
2495 + * and local interrupts are disabled so slab code is preempt-safe.
2496 * The non-constant members are protected with a per-cache irq spinlock.
2498 * Further notes from the original documentation:
2499 diff -urN linux-2.4.19-ac5/net/core/dev.c linux/net/core/dev.c
2500 --- linux-2.4.19-ac5/net/core/dev.c Thu Aug 1 15:54:41 2002
2501 +++ linux/net/core/dev.c Fri Aug 2 10:28:21 2002
2502 @@ -1034,9 +1034,15 @@
2503 int cpu = smp_processor_id();
2505 if (dev->xmit_lock_owner != cpu) {
2507 + * The spin_lock effectively does a preempt lock, but
2508 + * we are about to drop that...
2510 + preempt_disable();
2511 spin_unlock(&dev->queue_lock);
2512 spin_lock(&dev->xmit_lock);
2513 dev->xmit_lock_owner = cpu;
2516 if (!netif_queue_stopped(dev)) {
2518 diff -urN linux-2.4.19-ac5/net/core/skbuff.c linux/net/core/skbuff.c
2519 --- linux-2.4.19-ac5/net/core/skbuff.c Thu Aug 1 15:54:41 2002
2520 +++ linux/net/core/skbuff.c Fri Aug 2 10:28:21 2002
2521 @@ -111,33 +111,37 @@
2523 static __inline__ struct sk_buff *skb_head_from_pool(void)
2525 - struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;
2526 + struct sk_buff_head *list;
2527 + struct sk_buff *skb = NULL;
2528 + unsigned long flags;
2530 - if (skb_queue_len(list)) {
2531 - struct sk_buff *skb;
2532 - unsigned long flags;
2533 + local_irq_save(flags);
2535 - local_irq_save(flags);
2536 + list = &skb_head_pool[smp_processor_id()].list;
2538 + if (skb_queue_len(list))
2539 skb = __skb_dequeue(list);
2540 - local_irq_restore(flags);
2545 + local_irq_restore(flags);
2549 static __inline__ void skb_head_to_pool(struct sk_buff *skb)
2551 - struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;
2552 + struct sk_buff_head *list;
2553 + unsigned long flags;
2555 - if (skb_queue_len(list) < sysctl_hot_list_len) {
2556 - unsigned long flags;
2557 + local_irq_save(flags);
2558 + list = &skb_head_pool[smp_processor_id()].list;
2560 - local_irq_save(flags);
2561 + if (skb_queue_len(list) < sysctl_hot_list_len) {
2562 __skb_queue_head(list, skb);
2563 local_irq_restore(flags);
2568 + local_irq_restore(flags);
2569 kmem_cache_free(skbuff_head_cache, skb);
2572 diff -urN linux-2.4.19-ac5/net/socket.c linux/net/socket.c
2573 --- linux-2.4.19-ac5/net/socket.c Thu Aug 1 15:54:41 2002
2574 +++ linux/net/socket.c Fri Aug 2 10:28:21 2002
2577 static struct net_proto_family *net_families[NPROTO];
2580 +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
2581 static atomic_t net_family_lockct = ATOMIC_INIT(0);
2582 static spinlock_t net_family_lock = SPIN_LOCK_UNLOCKED;
2584 diff -urN linux-2.4.19-ac5/net/sunrpc/pmap_clnt.c linux/net/sunrpc/pmap_clnt.c
2585 --- linux-2.4.19-ac5/net/sunrpc/pmap_clnt.c Thu Aug 1 15:54:41 2002
2586 +++ linux/net/sunrpc/pmap_clnt.c Fri Aug 2 10:28:21 2002
2588 #include <linux/config.h>
2589 #include <linux/types.h>
2590 #include <linux/socket.h>
2591 +#include <linux/sched.h>
2592 #include <linux/kernel.h>
2593 #include <linux/errno.h>
2594 #include <linux/uio.h>