preempt-kernel-rml-2.4.19-rc5-3.patch

1diff -urN linux-2.4.19-ac5/CREDITS linux/CREDITS
2--- linux-2.4.19-ac5/CREDITS Thu Aug 1 15:54:34 2002
3+++ linux/CREDITS Fri Aug 2 10:28:17 2002
4@@ -996,8 +996,8 @@
5
6 N: Nigel Gamble
7 E: nigel@nrg.org
8-E: nigel@sgi.com
9 D: Interrupt-driven printer driver
10+D: Preemptible kernel
11 S: 120 Alley Way
12 S: Mountain View, California 94040
13 S: USA
14diff -urN linux-2.4.19-ac5/Documentation/Configure.help linux/Documentation/Configure.help
15--- linux-2.4.19-ac5/Documentation/Configure.help Thu Aug 1 15:55:22 2002
16+++ linux/Documentation/Configure.help Fri Aug 2 10:28:17 2002
17@@ -266,6 +266,17 @@
18 If you have a system with several CPUs, you do not need to say Y
19 here: the local APIC will be used automatically.
20
21+Preemptible Kernel
22+CONFIG_PREEMPT
23+ This option reduces the latency of the kernel when reacting to
24+ real-time or interactive events by allowing a low priority process to
25+ be preempted even if it is in kernel mode executing a system call.
26+ This allows applications to run more reliably even when the system is
27+ under load.
28+
29+ Say Y here if you are building a kernel for a desktop, embedded or
30+ real-time system. Say N if you are unsure.
31+
32 Kernel math emulation
33 CONFIG_MATH_EMULATION
34 Linux can emulate a math coprocessor (used for floating point
35diff -urN linux-2.4.19-ac5/Documentation/preempt-locking.txt linux/Documentation/preempt-locking.txt
36--- linux-2.4.19-ac5/Documentation/preempt-locking.txt Wed Dec 31 16:00:00 1969
37+++ linux/Documentation/preempt-locking.txt Fri Aug 2 10:28:18 2002
38@@ -0,0 +1,104 @@
39+ Proper Locking Under a Preemptible Kernel:
40+ Keeping Kernel Code Preempt-Safe
41+ Robert Love <rml@tech9.net>
42+ Last Updated: 22 Jan 2002
43+
44+
45+INTRODUCTION
46+
47+
48+A preemptible kernel creates new locking issues. The issues are the same as
49+those under SMP: concurrency and reentrancy. Thankfully, the Linux preemptible
50+kernel model leverages existing SMP locking mechanisms. Thus, the kernel
51+requires explicit additional locking for very few additional situations.
52+
53+This document is for all kernel hackers. Developing code in the kernel
54+requires protecting these situations.
55+
56+
57+RULE #1: Per-CPU data structures need explicit protection
58+
59+
60+Two similar problems arise. An example code snippet:
61+
62+ struct this_needs_locking tux[NR_CPUS];
63+ tux[smp_processor_id()] = some_value;
64+ /* task is preempted here... */
65+ something = tux[smp_processor_id()];
66+
67+First, since the data is per-CPU, it may not have explicit SMP locking, but
68+may require it nonetheless. Second, when a preempted task is finally rescheduled,
69+the previous value of smp_processor_id may not equal the current. You must
70+protect these situations by disabling preemption around them.
71+
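A minimal sketch of the fix for the snippet above, using the preempt
statements described later in this document (the struct and variable names
are simply the ones from the example):

    struct this_needs_locking tux[NR_CPUS];

    preempt_disable();
    tux[smp_processor_id()] = some_value;
    /* no preemption here, so the CPU cannot change under us */
    something = tux[smp_processor_id()];
    preempt_enable();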
72+
73+RULE #2: CPU state must be protected.
74+
75+
76+Under preemption, the state of the CPU must be protected. This is arch-
77+dependent, but includes CPU structures and state not preserved over a context
78+switch. For example, on x86, entering and exiting FPU mode is now a critical
79+section that must occur while preemption is disabled. Think what would happen
80+if the kernel is executing a floating-point instruction and is then preempted.
81+Remember, the kernel does not save FPU state except for user tasks. Therefore,
82+upon preemption, the FPU registers will be sold to the lowest bidder. Thus,
83+preemption must be disabled around such regions.
84+
85+Note, some FPU functions are already explicitly preempt safe. For example,
86+kernel_fpu_begin and kernel_fpu_end will disable and enable preemption.
87+However, math_state_restore must be called with preemption disabled.
88+
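For example, a sketch of kernel code that wants to use the FPU (the body is
illustrative; kernel_fpu_begin and kernel_fpu_end are the real helpers):

    kernel_fpu_begin();     /* saves user FPU state, disables preemption */
    /* ... use FPU/MMX/SSE registers here ... */
    kernel_fpu_end();       /* sets TS again and re-enables preemption */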
89+
90+RULE #3: Lock acquire and release must be performed by same task
91+
92+
93+A lock acquired in one task must be released by the same task. This
94+means you can't do oddball things like acquire a lock and go off to
95+play while another task releases it. If you want to do something
96+like this, acquire and release the lock in the same code path and
97+have the caller wait on an event by the other task.
98+
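A sketch of the allowed pattern: the locking and the work stay in one task,
and the other task merely waits for the result (the helper and wait queue
names are illustrative):

    spin_lock(&the_lock);
    do_locked_work();
    spin_unlock(&the_lock);         /* released by the task that took it */
    wake_up(&other_task_waitqueue); /* now let the waiting task proceed */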
99+
100+SOLUTION
101+
102+
103+Data protection under preemption is achieved by disabling preemption for the
104+duration of the critical region.
105+
106+preempt_enable() decrement the preempt counter
107+preempt_disable() increment the preempt counter
108+preempt_enable_no_resched() decrement, but do not immediately preempt
109+preempt_get_count() return the preempt counter
110+
111+The functions are nestable. In other words, you can call preempt_disable
112+n-times in a code path, and preemption will not be reenabled until the n-th
113+call to preempt_enable. The preempt statements expand to nothing if
114+preemption is not enabled.
115+
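For example, the following nests correctly, and preemption only becomes
possible again at the outermost preempt_enable:

    preempt_disable();      /* count: 1 */
    preempt_disable();      /* count: 2 */
    /* ... */
    preempt_enable();       /* count: 1, still not preemptible */
    preempt_enable();       /* count: 0, preemption possible again */
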
116+Note that you do not need to explicitly prevent preemption if you are holding
117+any locks or interrupts are disabled, since preemption is implicitly disabled
118+in those cases.
119+
120+Example:
121+
122+ cpucache_t *cc; /* this is per-CPU */
123+ preempt_disable();
124+ cc = cc_data(searchp);
125+ if (cc && cc->avail) {
126+ __free_block(searchp, cc_entry(cc), cc->avail);
127+ cc->avail = 0;
128+ }
129+ preempt_enable();
130+ return 0;
131+
132+Notice how the preemption statements must encompass every reference of the
133+critical variables. Another example:
134+
135+ int buf[NR_CPUS];
136+ set_cpu_val(buf);
137+ if (buf[smp_processor_id()] == -1) printk(KERN_INFO "wee!\n");
138+ spin_lock(&buf_lock);
139+ /* ... */
140+
141+This code is not preempt-safe, but see how easily we can fix it by simply
142+moving the spin_lock up two lines.
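For reference, the preempt-safe version, with the lock (which also disables
preemption) taken before the per-CPU accesses:

    int buf[NR_CPUS];
    spin_lock(&buf_lock);
    set_cpu_val(buf);
    if (buf[smp_processor_id()] == -1) printk(KERN_INFO "wee!\n");
    /* ... */
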
143diff -urN linux-2.4.19-ac5/MAINTAINERS linux/MAINTAINERS
144--- linux-2.4.19-ac5/MAINTAINERS Thu Aug 1 15:54:42 2002
145+++ linux/MAINTAINERS Fri Aug 2 10:28:18 2002
146@@ -1285,6 +1285,14 @@
147 M: mostrows@styx.uwaterloo.ca
148 S: Maintained
149
150+PREEMPTIBLE KERNEL
151+P: Robert M. Love
152+M: rml@tech9.net
153+L: linux-kernel@vger.kernel.org
154+L: kpreempt-tech@lists.sourceforge.net
155+W: http://tech9.net/rml/linux
156+S: Supported
157+
158 PROMISE DC4030 CACHING DISK CONTROLLER DRIVER
159 P: Peter Denison
160 M: promise@pnd-pc.demon.co.uk
161diff -urN linux-2.4.19-ac5/arch/arm/config.in linux/arch/arm/config.in
162--- linux-2.4.19-ac5/arch/arm/config.in Thu Aug 1 15:55:14 2002
163+++ linux/arch/arm/config.in Fri Aug 2 10:28:18 2002
164@@ -372,7 +372,7 @@
165 else
166 define_bool CONFIG_DISCONTIGMEM n
167 fi
168-
169+dep_bool 'Preemptible Kernel' CONFIG_PREEMPT $CONFIG_CPU_32
170 endmenu
171
172 mainmenu_option next_comment
173diff -urN linux-2.4.19-ac5/arch/arm/kernel/entry-armv.S linux/arch/arm/kernel/entry-armv.S
174--- linux-2.4.19-ac5/arch/arm/kernel/entry-armv.S Thu Aug 1 15:55:14 2002
175+++ linux/arch/arm/kernel/entry-armv.S Fri Aug 2 10:28:18 2002
176@@ -697,6 +697,12 @@
177 add r4, sp, #S_SP
178 mov r6, lr
179 stmia r4, {r5, r6, r7, r8, r9} @ save sp_SVC, lr_SVC, pc, cpsr, old_ro
180+#ifdef CONFIG_PREEMPT
181+ get_current_task r9
182+ ldr r8, [r9, #TSK_PREEMPT]
183+ add r8, r8, #1
184+ str r8, [r9, #TSK_PREEMPT]
185+#endif
186 1: get_irqnr_and_base r0, r6, r5, lr
187 movne r1, sp
188 @
189@@ -704,6 +710,25 @@
190 @
191 adrsvc ne, lr, 1b
192 bne do_IRQ
193+#ifdef CONFIG_PREEMPT
194+2: ldr r8, [r9, #TSK_PREEMPT]
195+ subs r8, r8, #1
196+ bne 3f
197+ ldr r7, [r9, #TSK_NEED_RESCHED]
198+ teq r7, #0
199+ beq 3f
200+ ldr r6, .LCirqstat
201+ ldr r0, [r6, #IRQSTAT_BH_COUNT]
202+ teq r0, #0
203+ bne 3f
204+ mov r0, #MODE_SVC
205+ msr cpsr_c, r0 @ enable interrupts
206+ bl SYMBOL_NAME(preempt_schedule)
207+ mov r0, #I_BIT | MODE_SVC
208+ msr cpsr_c, r0 @ disable interrupts
209+ b 2b
210+3: str r8, [r9, #TSK_PREEMPT]
211+#endif
212 ldr r0, [sp, #S_PSR] @ irqs are already disabled
213 msr spsr, r0
214 ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr
215@@ -761,6 +786,9 @@
216 .LCprocfns: .word SYMBOL_NAME(processor)
217 #endif
218 .LCfp: .word SYMBOL_NAME(fp_enter)
219+#ifdef CONFIG_PREEMPT
220+.LCirqstat: .word SYMBOL_NAME(irq_stat)
221+#endif
222
223 irq_prio_table
224
225@@ -801,6 +829,12 @@
226 stmdb r8, {sp, lr}^
227 alignment_trap r4, r7, __temp_irq
228 zero_fp
229+ get_current_task tsk
230+#ifdef CONFIG_PREEMPT
231+ ldr r0, [tsk, #TSK_PREEMPT]
232+ add r0, r0, #1
233+ str r0, [tsk, #TSK_PREEMPT]
234+#endif
235 1: get_irqnr_and_base r0, r6, r5, lr
236 movne r1, sp
237 adrsvc ne, lr, 1b
238@@ -808,8 +842,12 @@
239 @ routine called with r0 = irq number, r1 = struct pt_regs *
240 @
241 bne do_IRQ
242+#ifdef CONFIG_PREEMPT
243+ ldr r0, [tsk, #TSK_PREEMPT]
244+ sub r0, r0, #1
245+ str r0, [tsk, #TSK_PREEMPT]
246+#endif
247 mov why, #0
248- get_current_task tsk
249 b ret_to_user
250
251 .align 5
252diff -urN linux-2.4.19-ac5/arch/arm/tools/getconstants.c linux/arch/arm/tools/getconstants.c
253--- linux-2.4.19-ac5/arch/arm/tools/getconstants.c Thu Aug 1 15:55:16 2002
254+++ linux/arch/arm/tools/getconstants.c Fri Aug 2 10:28:18 2002
255@@ -13,6 +13,7 @@
256
257 #include <asm/pgtable.h>
258 #include <asm/uaccess.h>
259+#include <asm/hardirq.h>
260
261 /*
262 * Make sure that the compiler and target are compatible.
263@@ -39,6 +40,11 @@
264 DEFN("TSS_SAVE", OFF_TSK(thread.save));
265 DEFN("TSS_FPESAVE", OFF_TSK(thread.fpstate.soft.save));
266
267+#ifdef CONFIG_PREEMPT
268+DEFN("TSK_PREEMPT", OFF_TSK(preempt_count));
269+DEFN("IRQSTAT_BH_COUNT", (unsigned long)&(((irq_cpustat_t *)0)->__local_bh_count));
270+#endif
271+
272 #ifdef CONFIG_CPU_32
273 DEFN("TSS_DOMAIN", OFF_TSK(thread.domain));
274
275diff -urN linux-2.4.19-ac5/arch/i386/config.in linux/arch/i386/config.in
276--- linux-2.4.19-ac5/arch/i386/config.in Thu Aug 1 15:55:07 2002
277+++ linux/arch/i386/config.in Fri Aug 2 10:28:18 2002
278@@ -188,6 +188,7 @@
279 bool 'Math emulation' CONFIG_MATH_EMULATION
280 bool 'MTRR (Memory Type Range Register) support' CONFIG_MTRR
281 bool 'Symmetric multi-processing support' CONFIG_SMP
282+bool 'Preemptible Kernel' CONFIG_PREEMPT
283 if [ "$CONFIG_SMP" != "y" ]; then
284 bool 'Local APIC support on uniprocessors' CONFIG_X86_UP_APIC
285 dep_bool 'IO-APIC support on uniprocessors' CONFIG_X86_UP_IOAPIC $CONFIG_X86_UP_APIC
286@@ -201,9 +202,12 @@
287 bool 'Multiquad NUMA system' CONFIG_MULTIQUAD
288 fi
289
290-if [ "$CONFIG_SMP" = "y" -a "$CONFIG_X86_CMPXCHG" = "y" ]; then
291- define_bool CONFIG_HAVE_DEC_LOCK y
292+if [ "$CONFIG_SMP" = "y" -o "$CONFIG_PREEMPT" = "y" ]; then
293+ if [ "$CONFIG_X86_CMPXCHG" = "y" ]; then
294+ define_bool CONFIG_HAVE_DEC_LOCK y
295+ fi
296 fi
297+
298 endmenu
299
300 mainmenu_option next_comment
301diff -urN linux-2.4.19-ac5/arch/i386/kernel/entry.S linux/arch/i386/kernel/entry.S
302--- linux-2.4.19-ac5/arch/i386/kernel/entry.S Thu Aug 1 15:55:07 2002
303+++ linux/arch/i386/kernel/entry.S Fri Aug 2 10:28:18 2002
304@@ -71,7 +71,7 @@
305 * these are offsets into the task-struct.
306 */
307 state = 0
308-flags = 4
309+preempt_count = 4
310 sigpending = 8
311 addr_limit = 12
312 exec_domain = 16
313@@ -79,8 +79,28 @@
314 tsk_ptrace = 24
315 processor = 52
316
317+/* These are offsets into the irq_stat structure
318+ * There is one per cpu and it is aligned to 32
319+ * byte boundry (we put that here as a shift count)
320+ */
321+irq_array_shift = CONFIG_X86_L1_CACHE_SHIFT
322+
323+irq_stat_local_irq_count = 4
324+irq_stat_local_bh_count = 8
325+
326 ENOSYS = 38
327
328+#ifdef CONFIG_SMP
329+#define GET_CPU_INDX movl processor(%ebx),%eax; \
330+ shll $irq_array_shift,%eax
331+#define GET_CURRENT_CPU_INDX GET_CURRENT(%ebx); \
332+ GET_CPU_INDX
333+#define CPU_INDX (,%eax)
334+#else
335+#define GET_CPU_INDX
336+#define GET_CURRENT_CPU_INDX GET_CURRENT(%ebx)
337+#define CPU_INDX
338+#endif
339
340 #define SAVE_ALL \
341 cld; \
342@@ -247,12 +267,30 @@
343 ALIGN
344 ENTRY(ret_from_intr)
345 GET_CURRENT(%ebx)
346+#ifdef CONFIG_PREEMPT
347+ cli
348+ decl preempt_count(%ebx)
349+#endif
350 ret_from_exception:
351 movl EFLAGS(%esp),%eax # mix EFLAGS and CS
352 movb CS(%esp),%al
353 testl $(VM_MASK | 3),%eax # return to VM86 mode or non-supervisor?
354 jne ret_from_sys_call
355+#ifdef CONFIG_PREEMPT
356+ cmpl $0,preempt_count(%ebx)
357+ jnz restore_all
358+ cmpl $0,need_resched(%ebx)
359+ jz restore_all
360+ movl SYMBOL_NAME(irq_stat)+irq_stat_local_bh_count CPU_INDX,%ecx
361+ addl SYMBOL_NAME(irq_stat)+irq_stat_local_irq_count CPU_INDX,%ecx
362+ jnz restore_all
363+ incl preempt_count(%ebx)
364+ sti
365+ call SYMBOL_NAME(preempt_schedule)
366+ jmp ret_from_intr
367+#else
368 jmp restore_all
369+#endif
370
371 ALIGN
372 reschedule:
373@@ -289,6 +327,9 @@
374 GET_CURRENT(%ebx)
375 call *%edi
376 addl $8,%esp
377+#ifdef CONFIG_PREEMPT
378+ cli
379+#endif
380 jmp ret_from_exception
381
382 ENTRY(coprocessor_error)
383@@ -308,12 +349,18 @@
384 movl %cr0,%eax
385 testl $0x4,%eax # EM (math emulation bit)
386 jne device_not_available_emulate
387+#ifdef CONFIG_PREEMPT
388+ cli
389+#endif
390 call SYMBOL_NAME(math_state_restore)
391 jmp ret_from_exception
392 device_not_available_emulate:
393 pushl $0 # temporary storage for ORIG_EIP
394 call SYMBOL_NAME(math_emulate)
395 addl $4,%esp
396+#ifdef CONFIG_PREEMPT
397+ cli
398+#endif
399 jmp ret_from_exception
400
401 ENTRY(debug)
402diff -urN linux-2.4.19-ac5/arch/i386/kernel/i387.c linux/arch/i386/kernel/i387.c
403--- linux-2.4.19-ac5/arch/i386/kernel/i387.c Thu Aug 1 15:55:08 2002
404+++ linux/arch/i386/kernel/i387.c Fri Aug 2 10:28:18 2002
405@@ -10,6 +10,7 @@
406
407 #include <linux/config.h>
408 #include <linux/sched.h>
409+#include <linux/spinlock.h>
410 #include <linux/init.h>
411 #include <asm/processor.h>
412 #include <asm/i387.h>
413@@ -89,6 +90,8 @@
414 {
415 struct task_struct *tsk = current;
416
417+ preempt_disable();
418+
419 if (tsk->flags & PF_USEDFPU) {
420 __save_init_fpu(tsk);
421 return;
422diff -urN linux-2.4.19-ac5/arch/i386/kernel/smp.c linux/arch/i386/kernel/smp.c
423--- linux-2.4.19-ac5/arch/i386/kernel/smp.c Thu Aug 1 15:55:08 2002
424+++ linux/arch/i386/kernel/smp.c Fri Aug 2 10:28:18 2002
425@@ -357,10 +357,13 @@
426
427 asmlinkage void smp_invalidate_interrupt (void)
428 {
429- unsigned long cpu = smp_processor_id();
430+ unsigned long cpu;
431+
432+ preempt_disable();
433
434+ cpu = smp_processor_id();
435 if (!test_bit(cpu, &flush_cpumask))
436- return;
437+ goto out;
438 /*
439 * This was a BUG() but until someone can quote me the
440 * line from the intel manual that guarantees an IPI to
441@@ -381,6 +384,8 @@
442 }
443 ack_APIC_irq();
444 clear_bit(cpu, &flush_cpumask);
445+out:
446+ preempt_enable();
447 }
448
449 static void flush_tlb_others (unsigned long cpumask, struct mm_struct *mm,
450@@ -430,17 +435,22 @@
451 void flush_tlb_current_task(void)
452 {
453 struct mm_struct *mm = current->mm;
454- unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
455+ unsigned long cpu_mask;
456
457+ preempt_disable();
458+ cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
459 local_flush_tlb();
460 if (cpu_mask)
461 flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
462+ preempt_enable();
463 }
464
465 void flush_tlb_mm (struct mm_struct * mm)
466 {
467- unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
468+ unsigned long cpu_mask;
469
470+ preempt_disable();
471+ cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
472 if (current->active_mm == mm) {
473 if (current->mm)
474 local_flush_tlb();
475@@ -449,13 +459,16 @@
476 }
477 if (cpu_mask)
478 flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
479+ preempt_enable();
480 }
481
482 void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
483 {
484 struct mm_struct *mm = vma->vm_mm;
485- unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
486+ unsigned long cpu_mask;
487
488+ preempt_disable();
489+ cpu_mask = mm->cpu_vm_mask & ~(1UL << smp_processor_id());
490 if (current->active_mm == mm) {
491 if(current->mm)
492 __flush_tlb_one(va);
493@@ -465,6 +478,7 @@
494
495 if (cpu_mask)
496 flush_tlb_others(cpu_mask, mm, va);
497+ preempt_enable();
498 }
499
500 static inline void do_flush_tlb_all_local(void)
501diff -urN linux-2.4.19-ac5/arch/i386/kernel/traps.c linux/arch/i386/kernel/traps.c
502--- linux-2.4.19-ac5/arch/i386/kernel/traps.c Thu Aug 1 15:55:07 2002
503+++ linux/arch/i386/kernel/traps.c Fri Aug 2 10:28:18 2002
504@@ -733,6 +733,8 @@
505 *
506 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
507 * Don't touch unless you *really* know how it works.
508+ *
509+ * Must be called with kernel preemption disabled.
510 */
511 asmlinkage void math_state_restore(struct pt_regs regs)
512 {
513diff -urN linux-2.4.19-ac5/arch/i386/lib/dec_and_lock.c linux/arch/i386/lib/dec_and_lock.c
514--- linux-2.4.19-ac5/arch/i386/lib/dec_and_lock.c Thu Aug 1 15:55:07 2002
515+++ linux/arch/i386/lib/dec_and_lock.c Fri Aug 2 10:28:18 2002
516@@ -8,6 +8,7 @@
517 */
518
519 #include <linux/spinlock.h>
520+#include <linux/sched.h>
521 #include <asm/atomic.h>
522
523 int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
524diff -urN linux-2.4.19-ac5/arch/mips/config.in linux/arch/mips/config.in
525--- linux-2.4.19-ac5/arch/mips/config.in Thu Aug 1 15:55:08 2002
526+++ linux/arch/mips/config.in Fri Aug 2 10:28:43 2002
527@@ -508,6 +508,7 @@
528 if [ "$CONFIG_SCSI" != "n" ]; then
529 source drivers/scsi/Config.in
530 fi
531+dep_bool 'Preemptible Kernel' CONFIG_PREEMPT $CONFIG_NEW_IRQ
532 endmenu
533
534 if [ "$CONFIG_PCI" = "y" ]; then
535diff -urN linux-2.4.19-ac5/arch/mips/kernel/i8259.c linux/arch/mips/kernel/i8259.c
536--- linux-2.4.19-ac5/arch/mips/kernel/i8259.c Thu Aug 1 15:55:09 2002
537+++ linux/arch/mips/kernel/i8259.c Fri Aug 2 10:28:43 2002
538@@ -8,6 +8,7 @@
539 * Copyright (C) 1992 Linus Torvalds
540 * Copyright (C) 1994 - 2000 Ralf Baechle
541 */
542+#include <linux/sched.h>
543 #include <linux/delay.h>
544 #include <linux/init.h>
545 #include <linux/ioport.h>
546diff -urN linux-2.4.19-ac5/arch/mips/kernel/irq.c linux/arch/mips/kernel/irq.c
547--- linux-2.4.19-ac5/arch/mips/kernel/irq.c Thu Aug 1 15:55:09 2002
548+++ linux/arch/mips/kernel/irq.c Fri Aug 2 10:28:43 2002
549@@ -8,6 +8,8 @@
550 * Copyright (C) 1992 Linus Torvalds
551 * Copyright (C) 1994 - 2000 Ralf Baechle
552 */
553+
554+#include <linux/sched.h>
555 #include <linux/config.h>
556 #include <linux/kernel.h>
557 #include <linux/delay.h>
558@@ -19,11 +21,13 @@
559 #include <linux/slab.h>
560 #include <linux/mm.h>
561 #include <linux/random.h>
562-#include <linux/sched.h>
563+#include <linux/spinlock.h>
564+#include <linux/ptrace.h>
565
566 #include <asm/atomic.h>
567 #include <asm/system.h>
568 #include <asm/uaccess.h>
569+#include <asm/debug.h>
570
571 /*
572 * Controller mappings for all interrupt sources:
573@@ -427,6 +431,8 @@
574 struct irqaction * action;
575 unsigned int status;
576
577+ preempt_disable();
578+
579 kstat.irqs[cpu][irq]++;
580 spin_lock(&desc->lock);
581 desc->handler->ack(irq);
582@@ -488,6 +494,27 @@
583
584 if (softirq_pending(cpu))
585 do_softirq();
586+
587+#if defined(CONFIG_PREEMPT)
588+ while (--current->preempt_count == 0) {
589+ db_assert(intr_off());
590+ db_assert(!in_interrupt());
591+
592+ if (current->need_resched == 0) {
593+ break;
594+ }
595+
596+ current->preempt_count ++;
597+ sti();
598+ if (user_mode(regs)) {
599+ schedule();
600+ } else {
601+ preempt_schedule();
602+ }
603+ cli();
604+ }
605+#endif
606+
607 return 1;
608 }
609
610diff -urN linux-2.4.19-ac5/arch/mips/mm/extable.c linux/arch/mips/mm/extable.c
611--- linux-2.4.19-ac5/arch/mips/mm/extable.c Thu Aug 1 15:55:09 2002
612+++ linux/arch/mips/mm/extable.c Fri Aug 2 10:28:43 2002
613@@ -3,6 +3,7 @@
614 */
615 #include <linux/config.h>
616 #include <linux/module.h>
617+#include <linux/sched.h>
618 #include <linux/spinlock.h>
619 #include <asm/uaccess.h>
620
621diff -urN linux-2.4.19-ac5/arch/ppc/config.in linux/arch/ppc/config.in
622--- linux-2.4.19-ac5/arch/ppc/config.in Thu Aug 1 15:55:12 2002
623+++ linux/arch/ppc/config.in Fri Aug 2 10:29:00 2002
624@@ -109,6 +109,8 @@
625 bool ' Distribute interrupts on all CPUs by default' CONFIG_IRQ_ALL_CPUS
626 fi
627
628+bool 'Preemptible kernel support' CONFIG_PREEMPT
629+
630 if [ "$CONFIG_6xx" = "y" -a "$CONFIG_8260" = "n" ];then
631 bool 'AltiVec Support' CONFIG_ALTIVEC
632 bool 'Thermal Management Support' CONFIG_TAU
633diff -urN linux-2.4.19-ac5/arch/ppc/kernel/entry.S linux/arch/ppc/kernel/entry.S
634--- linux-2.4.19-ac5/arch/ppc/kernel/entry.S Thu Aug 1 15:55:12 2002
635+++ linux/arch/ppc/kernel/entry.S Fri Aug 2 10:29:00 2002
636@@ -277,6 +277,41 @@
637 */
638 cmpi 0,r3,0
639 beq restore
640+#ifdef CONFIG_PREEMPT
641+ lwz r3,PREEMPT_COUNT(r2)
642+ cmpi 0,r3,1
643+ bge ret_from_except
644+ lwz r5,_MSR(r1)
645+ andi. r5,r5,MSR_PR
646+ bne do_signal_ret
647+ lwz r5,NEED_RESCHED(r2)
648+ cmpi 0,r5,0
649+ beq ret_from_except
650+ lis r3,irq_stat@h
651+ ori r3,r3,irq_stat@l
652+ lwz r5,4(r3)
653+ lwz r3,8(r3)
654+ add r3,r3,r5
655+ cmpi 0,r3,0
656+ bne ret_from_except
657+ lwz r3,PREEMPT_COUNT(r2)
658+ addi r3,r3,1
659+ stw r3,PREEMPT_COUNT(r2)
660+ mfmsr r0
661+ ori r0,r0,MSR_EE
662+ mtmsr r0
663+ sync
664+ bl preempt_schedule
665+ mfmsr r0
666+ rlwinm r0,r0,0,17,15
667+ mtmsr r0
668+ sync
669+ lwz r3,PREEMPT_COUNT(r2)
670+ subi r3,r3,1
671+ stw r3,PREEMPT_COUNT(r2)
672+ li r3,1
673+ b ret_from_intercept
674+#endif /* CONFIG_PREEMPT */
675 .globl ret_from_except
676 ret_from_except:
677 lwz r3,_MSR(r1) /* Returning to user mode? */
678diff -urN linux-2.4.19-ac5/arch/ppc/kernel/irq.c linux/arch/ppc/kernel/irq.c
679--- linux-2.4.19-ac5/arch/ppc/kernel/irq.c Thu Aug 1 15:55:11 2002
680+++ linux/arch/ppc/kernel/irq.c Fri Aug 2 10:29:00 2002
681@@ -568,6 +568,34 @@
682 return 1; /* lets ret_from_int know we can do checks */
683 }
684
685+#ifdef CONFIG_PREEMPT
686+int
687+preempt_intercept(struct pt_regs *regs)
688+{
689+ int ret;
690+
691+ preempt_disable();
692+
693+ switch(regs->trap) {
694+ case 0x500:
695+ ret = do_IRQ(regs);
696+ break;
697+#ifndef CONFIG_4xx
698+ case 0x900:
699+#else
700+ case 0x1000:
701+#endif
702+ ret = timer_interrupt(regs);
703+ break;
704+ default:
705+ BUG();
706+ }
707+
708+ preempt_enable();
709+ return ret;
710+}
711+#endif /* CONFIG_PREEMPT */
712+
713 unsigned long probe_irq_on (void)
714 {
715 return 0;
716diff -urN linux-2.4.19-ac5/arch/ppc/kernel/mk_defs.c linux/arch/ppc/kernel/mk_defs.c
717--- linux-2.4.19-ac5/arch/ppc/kernel/mk_defs.c Thu Aug 1 15:55:11 2002
718+++ linux/arch/ppc/kernel/mk_defs.c Fri Aug 2 10:29:00 2002
719@@ -42,6 +42,9 @@
720 DEFINE(SIGPENDING, offsetof(struct task_struct, sigpending));
721 DEFINE(THREAD, offsetof(struct task_struct, thread));
722 DEFINE(MM, offsetof(struct task_struct, mm));
723+#ifdef CONFIG_PREEMPT
724+ DEFINE(PREEMPT_COUNT, offsetof(struct task_struct, preempt_count));
725+#endif
726 DEFINE(ACTIVE_MM, offsetof(struct task_struct, active_mm));
727 DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
728 DEFINE(KSP, offsetof(struct thread_struct, ksp));
729diff -urN linux-2.4.19-ac5/arch/ppc/kernel/setup.c linux/arch/ppc/kernel/setup.c
730--- linux-2.4.19-ac5/arch/ppc/kernel/setup.c Thu Aug 1 15:55:11 2002
731+++ linux/arch/ppc/kernel/setup.c Fri Aug 2 10:29:00 2002
732@@ -504,6 +504,20 @@
733
734 parse_bootinfo();
735
736+#ifdef CONFIG_PREEMPT
737+ /* Override the irq routines for external & timer interrupts here,
738+ * as the MMU has only been minimally setup at this point and
739+ * there are no protections on page zero.
740+ */
741+ {
742+ extern int preempt_intercept(struct pt_regs *);
743+
744+ do_IRQ_intercept = (unsigned long) &preempt_intercept;
745+ timer_interrupt_intercept = (unsigned long) &preempt_intercept;
746+
747+ }
748+#endif /* CONFIG_PREEMPT */
749+
750 platform_init(r3, r4, r5, r6, r7);
751
752 if (ppc_md.progress)
753diff -urN linux-2.4.19-ac5/arch/ppc/lib/dec_and_lock.c linux/arch/ppc/lib/dec_and_lock.c
754--- linux-2.4.19-ac5/arch/ppc/lib/dec_and_lock.c Thu Aug 1 15:55:12 2002
755+++ linux/arch/ppc/lib/dec_and_lock.c Fri Aug 2 10:29:00 2002
756@@ -1,4 +1,5 @@
757 #include <linux/module.h>
758+#include <linux/sched.h>
759 #include <linux/spinlock.h>
760 #include <asm/atomic.h>
761 #include <asm/system.h>
762diff -urN linux-2.4.19-ac5/arch/sh/config.in linux/arch/sh/config.in
763--- linux-2.4.19-ac5/arch/sh/config.in Thu Aug 1 15:55:17 2002
764+++ linux/arch/sh/config.in Fri Aug 2 10:28:18 2002
765@@ -124,6 +124,7 @@
766 hex 'Physical memory start address' CONFIG_MEMORY_START 08000000
767 hex 'Physical memory size' CONFIG_MEMORY_SIZE 00400000
768 fi
769+bool 'Preemptible Kernel' CONFIG_PREEMPT
770 endmenu
771
772 if [ "$CONFIG_SH_HP690" = "y" ]; then
773diff -urN linux-2.4.19-ac5/arch/sh/kernel/entry.S linux/arch/sh/kernel/entry.S
774--- linux-2.4.19-ac5/arch/sh/kernel/entry.S Thu Aug 1 15:55:17 2002
775+++ linux/arch/sh/kernel/entry.S Fri Aug 2 10:28:18 2002
776@@ -60,10 +60,18 @@
777 /*
778 * These are offsets into the task-struct.
779 */
780-flags = 4
781+preempt_count = 4
782 sigpending = 8
783 need_resched = 20
784 tsk_ptrace = 24
785+flags = 84
786+
787+/*
788+ * These offsets are into irq_stat.
789+ * (Find irq_cpustat_t in asm-sh/hardirq.h)
790+ */
791+local_irq_count = 8
792+local_bh_count = 12
793
794 PT_TRACESYS = 0x00000002
795 PF_USEDFPU = 0x00100000
796@@ -143,7 +151,7 @@
797 mov.l __INV_IMASK, r11; \
798 stc sr, r10; \
799 and r11, r10; \
800- stc k_g_imask, r11; \
801+ stc k_g_imask, r11; \
802 or r11, r10; \
803 ldc r10, sr
804
805@@ -304,8 +312,8 @@
806 mov.l @(tsk_ptrace,r0), r0 ! Is current PTRACE_SYSCALL'd?
807 mov #PT_TRACESYS, r1
808 tst r1, r0
809- bt ret_from_syscall
810- bra syscall_ret_trace
811+ bf syscall_ret_trace
812+ bra ret_from_syscall
813 nop
814
815 .align 2
816@@ -505,8 +513,6 @@
817 .long syscall_ret_trace
818 __syscall_ret:
819 .long syscall_ret
820-__INV_IMASK:
821- .long 0xffffff0f ! ~(IMASK)
822
823
824 .align 2
825@@ -518,7 +524,84 @@
826 .align 2
827 1: .long SYMBOL_NAME(schedule)
828
829+#ifdef CONFIG_PREEMPT
830+ !
831+ ! Returning from interrupt during kernel mode: check if
832+ ! preempt_schedule should be called. If need_resched flag
833+ ! is set, preempt_count is zero, and we're not currently
834+ ! in an interrupt handler (local irq or bottom half) then
835+ ! call preempt_schedule.
836+ !
837+ ! Increment preempt_count to prevent a nested interrupt
838+ ! from reentering preempt_schedule, then decrement after
839+ ! and drop through to regular interrupt return which will
840+ ! jump back and check again in case such an interrupt did
841+ ! come in (and didn't preempt due to preempt_count).
842+ !
843+ ! NOTE: because we just checked that preempt_count was
844+ ! zero before getting to the call, can't we use immediate
845+ ! values (1 and 0) rather than inc/dec? Also, rather than
846+ ! drop through to ret_from_irq, we already know this thread
847+ ! is kernel mode, can't we go direct to ret_from_kirq? In
848+ ! fact, with proper interrupt nesting and so forth could
849+ ! the loop simply be on the need_resched w/o checking the
850+ ! other stuff again? Optimize later...
851+ !
852+ .align 2
853+ret_from_kirq:
854+ ! Nonzero preempt_count prevents scheduling
855+ stc k_current, r1
856+ mov.l @(preempt_count,r1), r0
857+ cmp/eq #0, r0
858+ bf restore_all
859+ ! Zero need_resched prevents scheduling
860+ mov.l @(need_resched,r1), r0
861+ cmp/eq #0, r0
862+ bt restore_all
863+ ! If in_interrupt(), don't schedule
864+ mov.l __irq_stat, r1
865+ mov.l @(local_irq_count,r1), r0
866+ mov.l @(local_bh_count,r1), r1
867+ or r1, r0
868+ cmp/eq #0, r0
869+ bf restore_all
870+ ! Allow scheduling using preempt_schedule
871+ ! Adjust preempt_count and SR as needed.
872+ stc k_current, r1
873+ mov.l @(preempt_count,r1), r0 ! Could replace this ...
874+ add #1, r0 ! ... and this w/mov #1?
875+ mov.l r0, @(preempt_count,r1)
876+ STI()
877+ mov.l __preempt_schedule, r0
878+ jsr @r0
879+ nop
880+ /* CLI */
881+ stc sr, r0
882+ or #0xf0, r0
883+ ldc r0, sr
884+ !
885+ stc k_current, r1
886+ mov.l @(preempt_count,r1), r0 ! Could replace this ...
887+ add #-1, r0 ! ... and this w/mov #0?
888+ mov.l r0, @(preempt_count,r1)
889+ ! Maybe should bra ret_from_kirq, or loop over need_resched?
890+ ! For now, fall through to ret_from_irq again...
891+#endif /* CONFIG_PREEMPT */
892+
893 ret_from_irq:
894+ mov #OFF_SR, r0
895+ mov.l @(r0,r15), r0 ! get status register
896+ shll r0
897+ shll r0 ! kernel space?
898+#ifndef CONFIG_PREEMPT
899+ bt restore_all ! Yes, it's from kernel, go back soon
900+#else /* CONFIG_PREEMPT */
901+ bt ret_from_kirq ! From kernel: maybe preempt_schedule
902+#endif /* CONFIG_PREEMPT */
903+ !
904+ bra ret_from_syscall
905+ nop
906+
907 ret_from_exception:
908 mov #OFF_SR, r0
909 mov.l @(r0,r15), r0 ! get status register
910@@ -564,6 +647,13 @@
911 .long SYMBOL_NAME(do_signal)
912 __irq_stat:
913 .long SYMBOL_NAME(irq_stat)
914+#ifdef CONFIG_PREEMPT
915+__preempt_schedule:
916+ .long SYMBOL_NAME(preempt_schedule)
917+#endif /* CONFIG_PREEMPT */
918+__INV_IMASK:
919+ .long 0xffffff0f ! ~(IMASK)
920+
921
922 .align 2
923 restore_all:
924@@ -679,7 +769,7 @@
925 __fpu_prepare_fd:
926 .long SYMBOL_NAME(fpu_prepare_fd)
927 __init_task_flags:
928- .long SYMBOL_NAME(init_task_union)+4
929+ .long SYMBOL_NAME(init_task_union)+flags
930 __PF_USEDFPU:
931 .long PF_USEDFPU
932 #endif
933diff -urN linux-2.4.19-ac5/arch/sh/kernel/irq.c linux/arch/sh/kernel/irq.c
934--- linux-2.4.19-ac5/arch/sh/kernel/irq.c Thu Aug 1 15:55:17 2002
935+++ linux/arch/sh/kernel/irq.c Fri Aug 2 10:28:18 2002
936@@ -229,6 +229,14 @@
937 struct irqaction * action;
938 unsigned int status;
939
940+ /*
941+ * At this point we're now about to actually call handlers,
942+ * and interrupts might get reenabled during them... bump
943+ * preempt_count to prevent any preemption while the handler
944+ * called here is pending...
945+ */
946+ preempt_disable();
947+
948 /* Get IRQ number */
949 asm volatile("stc r2_bank, %0\n\t"
950 "shlr2 %0\n\t"
951@@ -298,8 +306,17 @@
952 desc->handler->end(irq);
953 spin_unlock(&desc->lock);
954
955+
956 if (softirq_pending(cpu))
957 do_softirq();
958+
959+ /*
960+ * We're done with the handlers, interrupts should be
961+ * currently disabled; decrement preempt_count now so
962+ * as we return preemption may be allowed...
963+ */
964+ preempt_enable_no_resched();
965+
966 return 1;
967 }
968
969diff -urN linux-2.4.19-ac5/drivers/ieee1394/csr.c linux/drivers/ieee1394/csr.c
970--- linux-2.4.19-ac5/drivers/ieee1394/csr.c Thu Aug 1 15:55:03 2002
971+++ linux/drivers/ieee1394/csr.c Fri Aug 2 10:28:18 2002
972@@ -10,6 +10,7 @@
973 */
974
975 #include <linux/string.h>
976+#include <linux/sched.h>
977
978 #include "ieee1394_types.h"
979 #include "hosts.h"
980diff -urN linux-2.4.19-ac5/drivers/sound/sound_core.c linux/drivers/sound/sound_core.c
981--- linux-2.4.19-ac5/drivers/sound/sound_core.c Thu Aug 1 15:54:53 2002
982+++ linux/drivers/sound/sound_core.c Fri Aug 2 10:28:18 2002
983@@ -37,6 +37,7 @@
984 #include <linux/config.h>
985 #include <linux/module.h>
986 #include <linux/init.h>
987+#include <linux/sched.h>
988 #include <linux/slab.h>
989 #include <linux/types.h>
990 #include <linux/kernel.h>
991diff -urN linux-2.4.19-ac5/fs/adfs/map.c linux/fs/adfs/map.c
992--- linux-2.4.19-ac5/fs/adfs/map.c Thu Aug 1 15:54:33 2002
993+++ linux/fs/adfs/map.c Fri Aug 2 10:28:19 2002
994@@ -12,6 +12,7 @@
995 #include <linux/fs.h>
996 #include <linux/adfs_fs.h>
997 #include <linux/spinlock.h>
998+#include <linux/sched.h>
999
1000 #include "adfs.h"
1001
1002diff -urN linux-2.4.19-ac5/fs/exec.c linux/fs/exec.c
1003--- linux-2.4.19-ac5/fs/exec.c Thu Aug 1 15:54:33 2002
1004+++ linux/fs/exec.c Fri Aug 2 10:28:19 2002
1005@@ -420,8 +420,8 @@
1006 active_mm = current->active_mm;
1007 current->mm = mm;
1008 current->active_mm = mm;
1009- task_unlock(current);
1010 activate_mm(active_mm, mm);
1011+ task_unlock(current);
1012 mm_release();
1013 if (old_mm) {
1014 if (active_mm != old_mm) BUG();
1015diff -urN linux-2.4.19-ac5/fs/fat/cache.c linux/fs/fat/cache.c
1016--- linux-2.4.19-ac5/fs/fat/cache.c Thu Aug 1 15:54:33 2002
1017+++ linux/fs/fat/cache.c Fri Aug 2 10:28:19 2002
1018@@ -14,6 +14,7 @@
1019 #include <linux/string.h>
1020 #include <linux/stat.h>
1021 #include <linux/fat_cvf.h>
1022+#include <linux/sched.h>
1023
1024 #if 0
1025 # define PRINTK(x) printk x
1026diff -urN linux-2.4.19-ac5/fs/nls/nls_base.c linux/fs/nls/nls_base.c
1027--- linux-2.4.19-ac5/fs/nls/nls_base.c Thu Aug 1 15:54:33 2002
1028+++ linux/fs/nls/nls_base.c Fri Aug 2 10:28:19 2002
1029@@ -18,6 +18,7 @@
1030 #ifdef CONFIG_KMOD
1031 #include <linux/kmod.h>
1032 #endif
1033+#include <linux/sched.h>
1034 #include <linux/spinlock.h>
1035
1036 static struct nls_table *tables;
1037diff -urN linux-2.4.19-ac5/include/asm-arm/dma.h linux/include/asm-arm/dma.h
1038--- linux-2.4.19-ac5/include/asm-arm/dma.h Thu Aug 1 15:54:39 2002
1039+++ linux/include/asm-arm/dma.h Fri Aug 2 10:28:19 2002
1040@@ -5,6 +5,7 @@
1041
1042 #include <linux/config.h>
1043 #include <linux/spinlock.h>
1044+#include <linux/sched.h>
1045 #include <asm/system.h>
1046 #include <asm/memory.h>
1047 #include <asm/scatterlist.h>
1048diff -urN linux-2.4.19-ac5/include/asm-arm/hardirq.h linux/include/asm-arm/hardirq.h
1049--- linux-2.4.19-ac5/include/asm-arm/hardirq.h Thu Aug 1 15:54:39 2002
1050+++ linux/include/asm-arm/hardirq.h Fri Aug 2 10:28:19 2002
1051@@ -34,6 +34,7 @@
1052 #define irq_exit(cpu,irq) (local_irq_count(cpu)--)
1053
1054 #define synchronize_irq() do { } while (0)
1055+#define release_irqlock(cpu) do { } while (0)
1056
1057 #else
1058 #error SMP not supported
1059diff -urN linux-2.4.19-ac5/include/asm-arm/pgalloc.h linux/include/asm-arm/pgalloc.h
1060--- linux-2.4.19-ac5/include/asm-arm/pgalloc.h Thu Aug 1 15:54:39 2002
1061+++ linux/include/asm-arm/pgalloc.h Fri Aug 2 10:28:19 2002
1062@@ -57,40 +57,48 @@
1063 {
1064 unsigned long *ret;
1065
1066+ preempt_disable();
1067 if ((ret = pgd_quicklist) != NULL) {
1068 pgd_quicklist = (unsigned long *)__pgd_next(ret);
1069 ret[1] = ret[2];
1070 clean_dcache_entry(ret + 1);
1071 pgtable_cache_size--;
1072 }
1073+ preempt_enable();
1074 return (pgd_t *)ret;
1075 }
1076
1077 static inline void free_pgd_fast(pgd_t *pgd)
1078 {
1079+ preempt_disable();
1080 __pgd_next(pgd) = (unsigned long) pgd_quicklist;
1081 pgd_quicklist = (unsigned long *) pgd;
1082 pgtable_cache_size++;
1083+ preempt_enable();
1084 }
1085
1086 static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
1087 {
1088 unsigned long *ret;
1089
1090+ preempt_disable();
1091 if((ret = pte_quicklist) != NULL) {
1092 pte_quicklist = (unsigned long *)__pte_next(ret);
1093 ret[0] = 0;
1094 clean_dcache_entry(ret);
1095 pgtable_cache_size--;
1096 }
1097+ preempt_enable();
1098 return (pte_t *)ret;
1099 }
1100
1101 static inline void free_pte_fast(pte_t *pte)
1102 {
1103+ preempt_disable();
1104 __pte_next(pte) = (unsigned long) pte_quicklist;
1105 pte_quicklist = (unsigned long *) pte;
1106 pgtable_cache_size++;
1107+ preempt_enable();
1108 }
1109
1110 #else /* CONFIG_NO_PGT_CACHE */
1111diff -urN linux-2.4.19-ac5/include/asm-arm/smplock.h linux/include/asm-arm/smplock.h
1112--- linux-2.4.19-ac5/include/asm-arm/smplock.h Thu Aug 1 15:54:39 2002
1113+++ linux/include/asm-arm/smplock.h Fri Aug 2 10:28:19 2002
1114@@ -3,12 +3,17 @@
1115 *
1116 * Default SMP lock implementation
1117 */
1118+#include <linux/config.h>
1119 #include <linux/interrupt.h>
1120 #include <linux/spinlock.h>
1121
1122 extern spinlock_t kernel_flag;
1123
1124+#ifdef CONFIG_PREEMPT
1125+#define kernel_locked() preempt_get_count()
1126+#else
1127 #define kernel_locked() spin_is_locked(&kernel_flag)
1128+#endif
1129
1130 /*
1131 * Release global kernel lock and global interrupt lock
1132@@ -40,8 +45,14 @@
1133 */
1134 static inline void lock_kernel(void)
1135 {
1136+#ifdef CONFIG_PREEMPT
1137+ if (current->lock_depth == -1)
1138+ spin_lock(&kernel_flag);
1139+ ++current->lock_depth;
1140+#else
1141 if (!++current->lock_depth)
1142 spin_lock(&kernel_flag);
1143+#endif
1144 }
1145
1146 static inline void unlock_kernel(void)
1147diff -urN linux-2.4.19-ac5/include/asm-arm/softirq.h linux/include/asm-arm/softirq.h
1148--- linux-2.4.19-ac5/include/asm-arm/softirq.h Thu Aug 1 15:54:39 2002
1149+++ linux/include/asm-arm/softirq.h Fri Aug 2 10:28:19 2002
1150@@ -5,20 +5,22 @@
1151 #include <asm/hardirq.h>
1152
1153 #define __cpu_bh_enable(cpu) \
1154- do { barrier(); local_bh_count(cpu)--; } while (0)
1155+ do { barrier(); local_bh_count(cpu)--; preempt_enable(); } while (0)
1156 #define cpu_bh_disable(cpu) \
1157- do { local_bh_count(cpu)++; barrier(); } while (0)
1158+ do { preempt_disable(); local_bh_count(cpu)++; barrier(); } while (0)
1159
1160 #define local_bh_disable() cpu_bh_disable(smp_processor_id())
1161 #define __local_bh_enable() __cpu_bh_enable(smp_processor_id())
1162
1163 #define in_softirq() (local_bh_count(smp_processor_id()) != 0)
1164
1165-#define local_bh_enable() \
1166+#define _local_bh_enable() \
1167 do { \
1168 unsigned int *ptr = &local_bh_count(smp_processor_id()); \
1169 if (!--*ptr && ptr[-2]) \
1170 __asm__("bl%? __do_softirq": : : "lr");/* out of line */\
1171 } while (0)
1172
1173+#define local_bh_enable() do { _local_bh_enable(); preempt_enable(); } while (0)
1174+
1175 #endif /* __ASM_SOFTIRQ_H */
1176diff -urN linux-2.4.19-ac5/include/asm-arm/system.h linux/include/asm-arm/system.h
1177--- linux-2.4.19-ac5/include/asm-arm/system.h Thu Aug 1 15:54:39 2002
1178+++ linux/include/asm-arm/system.h Fri Aug 2 10:32:41 2002
1179@@ -62,6 +62,13 @@
1180 #define local_irq_disable() __cli()
1181 #define local_irq_enable() __sti()
1182
1183+#define irqs_disabled() \
1184+({ \
1185+ unsigned long cpsr_val; \
1186+ asm ("mrs %0, cpsr" : "=r" (cpsr_val)); \
1187+ cpsr_val & 128; \
1188+})
1189+
1190 #ifdef CONFIG_SMP
1191 #error SMP not supported
1192
1193diff -urN linux-2.4.19-ac5/include/asm-i386/hardirq.h linux/include/asm-i386/hardirq.h
1194--- linux-2.4.19-ac5/include/asm-i386/hardirq.h Thu Aug 1 15:54:34 2002
1195+++ linux/include/asm-i386/hardirq.h Fri Aug 2 10:28:19 2002
1196@@ -19,12 +19,16 @@
1197
1198 /*
1199 * Are we in an interrupt context? Either doing bottom half
1200- * or hardware interrupt processing?
1201+ * or hardware interrupt processing? Note the preempt check,
1202+ * this is both a bugfix and an optimization. If we are
1203+ * preemptible, we cannot be in an interrupt.
1204 */
1205-#define in_interrupt() ({ int __cpu = smp_processor_id(); \
1206- (local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
1207+#define in_interrupt() (preempt_is_disabled() && \
1208+ ({unsigned long __cpu = smp_processor_id(); \
1209+ (local_irq_count(__cpu) + local_bh_count(__cpu) != 0); }))
1210
1211-#define in_irq() (local_irq_count(smp_processor_id()) != 0)
1212+#define in_irq() (preempt_is_disabled() && \
1213+ (local_irq_count(smp_processor_id()) != 0))
1214
1215 #ifndef CONFIG_SMP
1216
1217@@ -36,6 +40,8 @@
1218
1219 #define synchronize_irq() barrier()
1220
1221+#define release_irqlock(cpu) do { } while (0)
1222+
1223 #else
1224
1225 #include <asm/atomic.h>
1226diff -urN linux-2.4.19-ac5/include/asm-i386/highmem.h linux/include/asm-i386/highmem.h
1227--- linux-2.4.19-ac5/include/asm-i386/highmem.h Thu Aug 1 15:54:35 2002
1228+++ linux/include/asm-i386/highmem.h Fri Aug 2 10:28:19 2002
1229@@ -88,6 +88,7 @@
1230 enum fixed_addresses idx;
1231 unsigned long vaddr;
1232
1233+ preempt_disable();
1234 if (page < highmem_start_page)
1235 return page_address(page);
1236
1237@@ -109,8 +110,10 @@
1238 unsigned long vaddr = (unsigned long) kvaddr;
1239 enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
1240
1241- if (vaddr < FIXADDR_START) // FIXME
1242+ if (vaddr < FIXADDR_START) { // FIXME
1243+ preempt_enable();
1244 return;
1245+ }
1246
1247 if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
1248 out_of_line_bug();
1249@@ -122,6 +125,8 @@
1250 pte_clear(kmap_pte-idx);
1251 __flush_tlb_one(vaddr);
1252 #endif
1253+
1254+ preempt_enable();
1255 }
1256
1257 #endif /* __KERNEL__ */
1258diff -urN linux-2.4.19-ac5/include/asm-i386/hw_irq.h linux/include/asm-i386/hw_irq.h
1259--- linux-2.4.19-ac5/include/asm-i386/hw_irq.h Thu Aug 1 15:54:34 2002
1260+++ linux/include/asm-i386/hw_irq.h Fri Aug 2 10:28:19 2002
1261@@ -95,6 +95,18 @@
1262 #define __STR(x) #x
1263 #define STR(x) __STR(x)
1264
1265+#define GET_CURRENT \
1266+ "movl %esp, %ebx\n\t" \
1267+ "andl $-8192, %ebx\n\t"
1268+
1269+#ifdef CONFIG_PREEMPT
1270+#define BUMP_LOCK_COUNT \
1271+ GET_CURRENT \
1272+ "incl 4(%ebx)\n\t"
1273+#else
1274+#define BUMP_LOCK_COUNT
1275+#endif
1276+
1277 #define SAVE_ALL \
1278 "cld\n\t" \
1279 "pushl %es\n\t" \
1280@@ -108,15 +120,12 @@
1281 "pushl %ebx\n\t" \
1282 "movl $" STR(__KERNEL_DS) ",%edx\n\t" \
1283 "movl %edx,%ds\n\t" \
1284- "movl %edx,%es\n\t"
1285+ "movl %edx,%es\n\t" \
1286+ BUMP_LOCK_COUNT
1287
1288 #define IRQ_NAME2(nr) nr##_interrupt(void)
1289 #define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
1290
1291-#define GET_CURRENT \
1292- "movl %esp, %ebx\n\t" \
1293- "andl $-8192, %ebx\n\t"
1294-
1295 /*
1296 * SMP has a few special interrupts for IPI messages
1297 */
1298diff -urN linux-2.4.19-ac5/include/asm-i386/i387.h linux/include/asm-i386/i387.h
1299--- linux-2.4.19-ac5/include/asm-i386/i387.h Thu Aug 1 15:54:35 2002
1300+++ linux/include/asm-i386/i387.h Fri Aug 2 10:28:19 2002
1301@@ -12,6 +12,7 @@
1302 #define __ASM_I386_I387_H
1303
1304 #include <linux/sched.h>
1305+#include <linux/spinlock.h>
1306 #include <asm/processor.h>
1307 #include <asm/sigcontext.h>
1308 #include <asm/user.h>
1309@@ -24,7 +25,7 @@
1310 extern void restore_fpu( struct task_struct *tsk );
1311
1312 extern void kernel_fpu_begin(void);
1313-#define kernel_fpu_end() stts()
1314+#define kernel_fpu_end() do { stts(); preempt_enable(); } while(0)
1315
1316
1317 #define unlazy_fpu( tsk ) do { \
1318diff -urN linux-2.4.19-ac5/include/asm-i386/pgalloc.h linux/include/asm-i386/pgalloc.h
1319--- linux-2.4.19-ac5/include/asm-i386/pgalloc.h Thu Aug 1 15:54:35 2002
1320+++ linux/include/asm-i386/pgalloc.h Fri Aug 2 10:28:19 2002
1321@@ -75,20 +75,26 @@
1322 {
1323 unsigned long *ret;
1324
1325+ preempt_disable();
1326 if ((ret = pgd_quicklist) != NULL) {
1327 pgd_quicklist = (unsigned long *)(*ret);
1328 ret[0] = 0;
1329 pgtable_cache_size--;
1330- } else
1331+ preempt_enable();
1332+ } else {
1333+ preempt_enable();
1334 ret = (unsigned long *)get_pgd_slow();
1335+ }
1336 return (pgd_t *)ret;
1337 }
1338
1339 static inline void free_pgd_fast(pgd_t *pgd)
1340 {
1341+ preempt_disable();
1342 *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
1343 pgd_quicklist = (unsigned long *) pgd;
1344 pgtable_cache_size++;
1345+ preempt_enable();
1346 }
1347
1348 static inline void free_pgd_slow(pgd_t *pgd)
1349@@ -119,19 +125,23 @@
1350 {
1351 unsigned long *ret;
1352
1353+ preempt_disable();
1354 if ((ret = (unsigned long *)pte_quicklist) != NULL) {
1355 pte_quicklist = (unsigned long *)(*ret);
1356 ret[0] = ret[1];
1357 pgtable_cache_size--;
1358 }
1359+ preempt_enable();
1360 return (pte_t *)ret;
1361 }
1362
1363 static inline void pte_free_fast(pte_t *pte)
1364 {
1365+ preempt_disable();
1366 *(unsigned long *)pte = (unsigned long) pte_quicklist;
1367 pte_quicklist = (unsigned long *) pte;
1368 pgtable_cache_size++;
1369+ preempt_enable();
1370 }
1371
1372 static __inline__ void pte_free_slow(pte_t *pte)
1373diff -urN linux-2.4.19-ac5/include/asm-i386/smplock.h linux/include/asm-i386/smplock.h
1374--- linux-2.4.19-ac5/include/asm-i386/smplock.h Thu Aug 1 15:54:34 2002
1375+++ linux/include/asm-i386/smplock.h Fri Aug 2 10:28:19 2002
1376@@ -11,7 +11,15 @@
1377 extern spinlock_cacheline_t kernel_flag_cacheline;
1378 #define kernel_flag kernel_flag_cacheline.lock
1379
1380+#ifdef CONFIG_SMP
1381 #define kernel_locked() spin_is_locked(&kernel_flag)
1382+#else
1383+#ifdef CONFIG_PREEMPT
1384+#define kernel_locked() preempt_get_count()
1385+#else
1386+#define kernel_locked() 1
1387+#endif
1388+#endif
1389
1390 /*
1391 * Release global kernel lock and global interrupt lock
1392@@ -43,6 +51,11 @@
1393 */
1394 static __inline__ void lock_kernel(void)
1395 {
1396+#ifdef CONFIG_PREEMPT
1397+ if (current->lock_depth == -1)
1398+ spin_lock(&kernel_flag);
1399+ ++current->lock_depth;
1400+#else
1401 #if 1
1402 if (!++current->lock_depth)
1403 spin_lock(&kernel_flag);
1404@@ -55,6 +68,7 @@
1405 :"=m" (__dummy_lock(&kernel_flag)),
1406 "=m" (current->lock_depth));
1407 #endif
1408+#endif
1409 }
1410
1411 static __inline__ void unlock_kernel(void)
1412diff -urN linux-2.4.19-ac5/include/asm-i386/softirq.h linux/include/asm-i386/softirq.h
1413--- linux-2.4.19-ac5/include/asm-i386/softirq.h Thu Aug 1 15:54:34 2002
1414+++ linux/include/asm-i386/softirq.h Fri Aug 2 10:28:19 2002
1415@@ -5,9 +5,9 @@
1416 #include <asm/hardirq.h>
1417
1418 #define __cpu_bh_enable(cpu) \
1419- do { barrier(); local_bh_count(cpu)--; } while (0)
1420+ do { barrier(); local_bh_count(cpu)--; preempt_enable(); } while (0)
1421 #define cpu_bh_disable(cpu) \
1422- do { local_bh_count(cpu)++; barrier(); } while (0)
1423+ do { preempt_disable(); local_bh_count(cpu)++; barrier(); } while (0)
1424
1425 #define local_bh_disable() cpu_bh_disable(smp_processor_id())
1426 #define __local_bh_enable() __cpu_bh_enable(smp_processor_id())
1427@@ -22,7 +22,7 @@
1428 * If you change the offsets in irq_stat then you have to
1429 * update this code as well.
1430 */
1431-#define local_bh_enable() \
1432+#define _local_bh_enable() \
1433 do { \
1434 unsigned int *ptr = &local_bh_count(smp_processor_id()); \
1435 \
1436@@ -45,4 +45,6 @@
1437 /* no registers clobbered */ ); \
1438 } while (0)
1439
1440+#define local_bh_enable() do { _local_bh_enable(); preempt_enable(); } while (0)
1441+
1442 #endif /* __ASM_SOFTIRQ_H */
1443diff -urN linux-2.4.19-ac5/include/asm-i386/spinlock.h linux/include/asm-i386/spinlock.h
1444--- linux-2.4.19-ac5/include/asm-i386/spinlock.h Thu Aug 1 15:54:34 2002
1445+++ linux/include/asm-i386/spinlock.h Fri Aug 2 10:28:19 2002
1446@@ -77,7 +77,7 @@
1447 :"=m" (lock->lock) : : "memory"
1448
1449
1450-static inline void spin_unlock(spinlock_t *lock)
1451+static inline void _raw_spin_unlock(spinlock_t *lock)
1452 {
1453 #if SPINLOCK_DEBUG
1454 if (lock->magic != SPINLOCK_MAGIC)
1455@@ -97,7 +97,7 @@
1456 :"=q" (oldval), "=m" (lock->lock) \
1457 :"0" (oldval) : "memory"
1458
1459-static inline void spin_unlock(spinlock_t *lock)
1460+static inline void _raw_spin_unlock(spinlock_t *lock)
1461 {
1462 char oldval = 1;
1463 #if SPINLOCK_DEBUG
1464@@ -113,7 +113,7 @@
1465
1466 #endif
1467
1468-static inline int spin_trylock(spinlock_t *lock)
1469+static inline int _raw_spin_trylock(spinlock_t *lock)
1470 {
1471 char oldval;
1472 __asm__ __volatile__(
1473@@ -123,7 +123,7 @@
1474 return oldval > 0;
1475 }
1476
1477-static inline void spin_lock(spinlock_t *lock)
1478+static inline void _raw_spin_lock(spinlock_t *lock)
1479 {
1480 #if SPINLOCK_DEBUG
1481 __label__ here;
1482@@ -179,7 +179,7 @@
1483 */
1484 /* the spinlock helpers are in arch/i386/kernel/semaphore.c */
1485
1486-static inline void read_lock(rwlock_t *rw)
1487+static inline void _raw_read_lock(rwlock_t *rw)
1488 {
1489 #if SPINLOCK_DEBUG
1490 if (rw->magic != RWLOCK_MAGIC)
1491@@ -188,7 +188,7 @@
1492 __build_read_lock(rw, "__read_lock_failed");
1493 }
1494
1495-static inline void write_lock(rwlock_t *rw)
1496+static inline void _raw_write_lock(rwlock_t *rw)
1497 {
1498 #if SPINLOCK_DEBUG
1499 if (rw->magic != RWLOCK_MAGIC)
1500@@ -197,10 +197,10 @@
1501 __build_write_lock(rw, "__write_lock_failed");
1502 }
1503
1504-#define read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
1505-#define write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
1506+#define _raw_read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
1507+#define _raw_write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
1508
1509-static inline int write_trylock(rwlock_t *lock)
1510+static inline int _raw_write_trylock(rwlock_t *lock)
1511 {
1512 atomic_t *count = (atomic_t *)lock;
1513 if (atomic_sub_and_test(RW_LOCK_BIAS, count))
1514diff -urN linux-2.4.19-ac5/include/asm-i386/system.h linux/include/asm-i386/system.h
1515--- linux-2.4.19-ac5/include/asm-i386/system.h Thu Aug 1 15:54:34 2002
1516+++ linux/include/asm-i386/system.h Fri Aug 2 10:33:09 2002
1517@@ -317,6 +317,13 @@
1518 /* used in the idle loop; sti takes one instruction cycle to complete */
1519 #define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
1520
1521+#define irqs_disabled() \
1522+({ \
1523+ unsigned long flags; \
1524+ __save_flags(flags); \
1525+ !(flags & (1<<9)); \
1526+})
1527+
1528 /* For spinlocks etc */
1529 #define local_irq_save(x) __asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
1530 #define local_irq_restore(x) __restore_flags(x)
1531diff -urN linux-2.4.19-ac5/include/asm-mips/smplock.h linux/include/asm-mips/smplock.h
1532--- linux-2.4.19-ac5/include/asm-mips/smplock.h Thu Aug 1 15:54:36 2002
1533+++ linux/include/asm-mips/smplock.h Fri Aug 2 10:28:44 2002
1534@@ -5,12 +5,21 @@
1535 *
1536 * Default SMP lock implementation
1537 */
1538+#include <linux/config.h>
1539 #include <linux/interrupt.h>
1540 #include <linux/spinlock.h>
1541
1542 extern spinlock_t kernel_flag;
1543
1544+#ifdef CONFIG_SMP
1545 #define kernel_locked() spin_is_locked(&kernel_flag)
1546+#else
1547+#ifdef CONFIG_PREEMPT
1548+#define kernel_locked() preempt_get_count()
1549+#else
1550+#define kernel_locked() 1
1551+#endif
1552+#endif
1553
1554 /*
1555 * Release global kernel lock and global interrupt lock
1556@@ -42,8 +51,14 @@
1557 */
1558 extern __inline__ void lock_kernel(void)
1559 {
1560+#ifdef CONFIG_PREEMPT
1561+ if (current->lock_depth == -1)
1562+ spin_lock(&kernel_flag);
1563+ ++current->lock_depth;
1564+#else
1565 if (!++current->lock_depth)
1566 spin_lock(&kernel_flag);
1567+#endif
1568 }
1569
1570 extern __inline__ void unlock_kernel(void)
1571diff -urN linux-2.4.19-ac5/include/asm-mips/softirq.h linux/include/asm-mips/softirq.h
1572--- linux-2.4.19-ac5/include/asm-mips/softirq.h Thu Aug 1 15:54:36 2002
1573+++ linux/include/asm-mips/softirq.h Fri Aug 2 10:28:44 2002
1574@@ -15,6 +15,7 @@
1575
1576 static inline void cpu_bh_disable(int cpu)
1577 {
1578+ preempt_disable();
1579 local_bh_count(cpu)++;
1580 barrier();
1581 }
1582@@ -23,6 +24,7 @@
1583 {
1584 barrier();
1585 local_bh_count(cpu)--;
1586+ preempt_enable();
1587 }
1588
1589
1590@@ -36,6 +38,7 @@
1591 cpu = smp_processor_id(); \
1592 if (!--local_bh_count(cpu) && softirq_pending(cpu)) \
1593 do_softirq(); \
1594+ preempt_enable(); \
1595 } while (0)
1596
1597 #define in_softirq() (local_bh_count(smp_processor_id()) != 0)
1598diff -urN linux-2.4.19-ac5/include/asm-mips/system.h linux/include/asm-mips/system.h
1599--- linux-2.4.19-ac5/include/asm-mips/system.h Thu Aug 1 15:54:35 2002
1600+++ linux/include/asm-mips/system.h Fri Aug 2 10:33:43 2002
1601@@ -285,4 +285,18 @@
1602 #define die_if_kernel(msg, regs) \
1603 __die_if_kernel(msg, regs, __FILE__ ":"__FUNCTION__, __LINE__)
1604
1605+extern __inline__ int intr_on(void)
1606+{
1607+ unsigned long flags;
1608+ save_flags(flags);
1609+ return flags & 1;
1610+}
1611+
1612+extern __inline__ int intr_off(void)
1613+{
1614+ return ! intr_on();
1615+}
1616+
1617+#define irqs_disabled() intr_off()
1618+
1619 #endif /* _ASM_SYSTEM_H */
1620diff -urN linux-2.4.19-ac5/include/asm-ppc/dma.h linux/include/asm-ppc/dma.h
1621--- linux-2.4.19-ac5/include/asm-ppc/dma.h Thu Aug 1 15:54:37 2002
1622+++ linux/include/asm-ppc/dma.h Fri Aug 2 10:29:00 2002
1623@@ -14,6 +14,7 @@
1624 #include <linux/config.h>
1625 #include <asm/io.h>
1626 #include <linux/spinlock.h>
1627+#include <linux/sched.h>
1628 #include <asm/system.h>
1629
1630 /*
1631diff -urN linux-2.4.19-ac5/include/asm-ppc/hardirq.h linux/include/asm-ppc/hardirq.h
1632--- linux-2.4.19-ac5/include/asm-ppc/hardirq.h Thu Aug 1 15:54:37 2002
1633+++ linux/include/asm-ppc/hardirq.h Fri Aug 2 10:29:00 2002
1634@@ -44,6 +44,7 @@
1635 #define hardirq_exit(cpu) (local_irq_count(cpu)--)
1636
1637 #define synchronize_irq() do { } while (0)
1638+#define release_irqlock(cpu) do { } while (0)
1639
1640 #else /* CONFIG_SMP */
1641
1642diff -urN linux-2.4.19-ac5/include/asm-ppc/highmem.h linux/include/asm-ppc/highmem.h
1643--- linux-2.4.19-ac5/include/asm-ppc/highmem.h Thu Aug 1 15:54:38 2002
1644+++ linux/include/asm-ppc/highmem.h Fri Aug 2 10:29:00 2002
1645@@ -84,6 +84,7 @@
1646 unsigned int idx;
1647 unsigned long vaddr;
1648
1649+ preempt_disable();
1650 if (page < highmem_start_page)
1651 return page_address(page);
1652
1653@@ -105,8 +106,10 @@
1654 unsigned long vaddr = (unsigned long) kvaddr;
1655 unsigned int idx = type + KM_TYPE_NR*smp_processor_id();
1656
1657- if (vaddr < KMAP_FIX_BEGIN) // FIXME
1658+ if (vaddr < KMAP_FIX_BEGIN) { // FIXME
1659+ preempt_enable();
1660 return;
1661+ }
1662
1663 if (vaddr != KMAP_FIX_BEGIN + idx * PAGE_SIZE)
1664 BUG();
1665@@ -118,6 +121,7 @@
1666 pte_clear(kmap_pte+idx);
1667 flush_tlb_page(0, vaddr);
1668 #endif
1669+ preempt_enable();
1670 }
1671
1672 #endif /* __KERNEL__ */
1673diff -urN linux-2.4.19-ac5/include/asm-ppc/hw_irq.h linux/include/asm-ppc/hw_irq.h
1674--- linux-2.4.19-ac5/include/asm-ppc/hw_irq.h Thu Aug 1 15:54:38 2002
1675+++ linux/include/asm-ppc/hw_irq.h Fri Aug 2 10:34:12 2002
1676@@ -21,6 +21,12 @@
1677 #define __save_flags(flags) __save_flags_ptr((unsigned long *)&flags)
1678 #define __save_and_cli(flags) ({__save_flags(flags);__cli();})
1679
1680+#define mfmsr() ({unsigned int rval; \
1681+ asm volatile("mfmsr %0" : "=r" (rval)); rval;})
1682+#define mtmsr(v) asm volatile("mtmsr %0" : : "r" (v))
1683+
1684+#define irqs_disabled() ((mfmsr() & MSR_EE) == 0)
1685+
1686 extern void do_lost_interrupts(unsigned long);
1687
1688 #define mask_irq(irq) ({if (irq_desc[irq].handler && irq_desc[irq].handler->disable) irq_desc[irq].handler->disable(irq);})
1689diff -urN linux-2.4.19-ac5/include/asm-ppc/mmu_context.h linux/include/asm-ppc/mmu_context.h
1690--- linux-2.4.19-ac5/include/asm-ppc/mmu_context.h Thu Aug 1 15:54:37 2002
1691+++ linux/include/asm-ppc/mmu_context.h Fri Aug 2 10:29:00 2002
1692@@ -158,6 +158,10 @@
1693 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
1694 struct task_struct *tsk, int cpu)
1695 {
1696+#ifdef CONFIG_PREEMPT
1697+ if (preempt_get_count() == 0)
1698+ BUG();
1699+#endif
1700 tsk->thread.pgdir = next->pgd;
1701 get_mmu_context(next);
1702 set_context(next->context, next->pgd);
1703diff -urN linux-2.4.19-ac5/include/asm-ppc/pgalloc.h linux/include/asm-ppc/pgalloc.h
1704--- linux-2.4.19-ac5/include/asm-ppc/pgalloc.h Thu Aug 1 15:54:38 2002
1705+++ linux/include/asm-ppc/pgalloc.h Fri Aug 2 10:29:00 2002
1706@@ -68,20 +68,25 @@
1707 {
1708 unsigned long *ret;
1709
1710+ preempt_disable();
1711 if ((ret = pgd_quicklist) != NULL) {
1712 pgd_quicklist = (unsigned long *)(*ret);
1713 ret[0] = 0;
1714 pgtable_cache_size--;
1715+ preempt_enable();
1716 } else
1717+ preempt_enable();
1718 ret = (unsigned long *)get_pgd_slow();
1719 return (pgd_t *)ret;
1720 }
1721
1722 extern __inline__ void free_pgd_fast(pgd_t *pgd)
1723 {
1724+ preempt_disable();
1725 *(unsigned long **)pgd = pgd_quicklist;
1726 pgd_quicklist = (unsigned long *) pgd;
1727 pgtable_cache_size++;
1728+ preempt_enable();
1729 }
1730
1731 extern __inline__ void free_pgd_slow(pgd_t *pgd)
1732@@ -120,19 +125,23 @@
1733 {
1734 unsigned long *ret;
1735
1736+ preempt_disable();
1737 if ((ret = pte_quicklist) != NULL) {
1738 pte_quicklist = (unsigned long *)(*ret);
1739 ret[0] = 0;
1740 pgtable_cache_size--;
1741 }
1742+ preempt_enable();
1743 return (pte_t *)ret;
1744 }
1745
1746 extern __inline__ void pte_free_fast(pte_t *pte)
1747 {
1748+ preempt_disable();
1749 *(unsigned long **)pte = pte_quicklist;
1750 pte_quicklist = (unsigned long *) pte;
1751 pgtable_cache_size++;
1752+ preempt_enable();
1753 }
1754
1755 extern __inline__ void pte_free_slow(pte_t *pte)
1756diff -urN linux-2.4.19-ac5/include/asm-ppc/smplock.h linux/include/asm-ppc/smplock.h
1757--- linux-2.4.19-ac5/include/asm-ppc/smplock.h Thu Aug 1 15:54:37 2002
1758+++ linux/include/asm-ppc/smplock.h Fri Aug 2 10:29:00 2002
1759@@ -15,7 +15,15 @@
1760
1761 extern spinlock_t kernel_flag;
1762
1763+#ifdef CONFIG_SMP
1764 #define kernel_locked() spin_is_locked(&kernel_flag)
1765+#else
1766+#ifdef CONFIG_PREEMPT
1767+#define kernel_locked() preempt_get_count()
1768+#else
1769+#define kernel_locked() 1
1770+#endif
1771+#endif
1772
1773 /*
1774 * Release global kernel lock and global interrupt lock
1775@@ -47,8 +55,14 @@
1776 */
1777 static __inline__ void lock_kernel(void)
1778 {
1779+#ifdef CONFIG_PREEMPT
1780+ if (current->lock_depth == -1)
1781+ spin_lock(&kernel_flag);
1782+ ++current->lock_depth;
1783+#else
1784 if (!++current->lock_depth)
1785 spin_lock(&kernel_flag);
1786+#endif
1787 }
1788
1789 static __inline__ void unlock_kernel(void)
1790diff -urN linux-2.4.19-ac5/include/asm-ppc/softirq.h linux/include/asm-ppc/softirq.h
1791--- linux-2.4.19-ac5/include/asm-ppc/softirq.h Thu Aug 1 15:54:37 2002
1792+++ linux/include/asm-ppc/softirq.h Fri Aug 2 10:29:00 2002
1793@@ -10,6 +10,7 @@
1794
1795 #define local_bh_disable() \
1796 do { \
1797+ preempt_disable(); \
1798 local_bh_count(smp_processor_id())++; \
1799 barrier(); \
1800 } while (0)
1801@@ -18,9 +19,10 @@
1802 do { \
1803 barrier(); \
1804 local_bh_count(smp_processor_id())--; \
1805+ preempt_enable(); \
1806 } while (0)
1807
1808-#define local_bh_enable() \
1809+#define _local_bh_enable() \
1810 do { \
1811 if (!--local_bh_count(smp_processor_id()) \
1812 && softirq_pending(smp_processor_id())) { \
1813@@ -28,6 +30,12 @@
1814 } \
1815 } while (0)
1816
1817+#define local_bh_enable() \
1818+do { \
1819+ _local_bh_enable(); \
1820+ preempt_enable(); \
1821+} while (0)
1822+
1823 #define in_softirq() (local_bh_count(smp_processor_id()) != 0)
1824
1825 #endif /* __ASM_SOFTIRQ_H */
1826diff -urN linux-2.4.19-ac5/include/asm-sh/hardirq.h linux/include/asm-sh/hardirq.h
1827--- linux-2.4.19-ac5/include/asm-sh/hardirq.h Thu Aug 1 15:54:40 2002
1828+++ linux/include/asm-sh/hardirq.h Fri Aug 2 10:28:19 2002
1829@@ -34,6 +34,8 @@
1830
1831 #define synchronize_irq() barrier()
1832
1833+#define release_irqlock(cpu) do { } while (0)
1834+
1835 #else
1836
1837 #error Super-H SMP is not available
1838diff -urN linux-2.4.19-ac5/include/asm-sh/smplock.h linux/include/asm-sh/smplock.h
1839--- linux-2.4.19-ac5/include/asm-sh/smplock.h Thu Aug 1 15:54:40 2002
1840+++ linux/include/asm-sh/smplock.h Fri Aug 2 10:28:20 2002
1841@@ -9,15 +9,88 @@
1842
1843 #include <linux/config.h>
1844
1845-#ifndef CONFIG_SMP
1846-
1847+#if !defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT)
1848+/*
1849+ * Should never happen, since linux/smp_lock.h catches this case;
1850+ * but in case this file is included directly with neither SMP nor
 1851+ * PREEMPT configuration, provide the same dummies as linux/smp_lock.h
1852+ */
1853 #define lock_kernel() do { } while(0)
1854 #define unlock_kernel() do { } while(0)
1855-#define release_kernel_lock(task, cpu, depth) ((depth) = 1)
1856-#define reacquire_kernel_lock(task, cpu, depth) do { } while(0)
1857+#define release_kernel_lock(task, cpu) do { } while(0)
1858+#define reacquire_kernel_lock(task) do { } while(0)
1859+#define kernel_locked() 1
1860+
1861+#else /* CONFIG_SMP || CONFIG_PREEMPT */
1862+
1863+#if CONFIG_SMP
1864+#error "We do not support SMP on SH yet"
1865+#endif
1866+/*
1867+ * Default SMP lock implementation (i.e. the i386 version)
1868+ */
1869+
1870+#include <linux/interrupt.h>
1871+#include <linux/spinlock.h>
1872+
1873+extern spinlock_t kernel_flag;
1874+#define lock_bkl() spin_lock(&kernel_flag)
1875+#define unlock_bkl() spin_unlock(&kernel_flag)
1876
1877+#ifdef CONFIG_SMP
1878+#define kernel_locked() spin_is_locked(&kernel_flag)
1879+#elif CONFIG_PREEMPT
1880+#define kernel_locked() preempt_get_count()
1881+#else /* neither */
1882+#define kernel_locked() 1
1883+#endif
1884+
1885+/*
1886+ * Release global kernel lock and global interrupt lock
1887+ */
1888+#define release_kernel_lock(task, cpu) \
1889+do { \
1890+ if (task->lock_depth >= 0) \
1891+ spin_unlock(&kernel_flag); \
1892+ release_irqlock(cpu); \
1893+ __sti(); \
1894+} while (0)
1895+
1896+/*
1897+ * Re-acquire the kernel lock
1898+ */
1899+#define reacquire_kernel_lock(task) \
1900+do { \
1901+ if (task->lock_depth >= 0) \
1902+ spin_lock(&kernel_flag); \
1903+} while (0)
1904+
1905+/*
1906+ * Getting the big kernel lock.
1907+ *
1908+ * This cannot happen asynchronously,
1909+ * so we only need to worry about other
1910+ * CPU's.
1911+ */
1912+static __inline__ void lock_kernel(void)
1913+{
1914+#ifdef CONFIG_PREEMPT
1915+ if (current->lock_depth == -1)
1916+ spin_lock(&kernel_flag);
1917+ ++current->lock_depth;
1918 #else
1919-#error "We do not support SMP on SH"
1920-#endif /* CONFIG_SMP */
1921+ if (!++current->lock_depth)
1922+ spin_lock(&kernel_flag);
1923+#endif
1924+}
1925+
1926+static __inline__ void unlock_kernel(void)
1927+{
1928+ if (current->lock_depth < 0)
1929+ BUG();
1930+ if (--current->lock_depth < 0)
1931+ spin_unlock(&kernel_flag);
1932+}
1933+#endif /* CONFIG_SMP || CONFIG_PREEMPT */
1934
1935 #endif /* __ASM_SH_SMPLOCK_H */
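
The CONFIG_PREEMPT variant of lock_kernel() above changes the BKL bookkeeping: lock_depth starts at -1, only the transition away from -1 takes the kernel_flag spinlock (which itself now disables preemption), and nested calls merely bump the counter; unlock_kernel() drops the spinlock only when the depth falls back below zero. A minimal user-space model of that recursion — not part of the patch, all names are stand-ins:

#include <assert.h>

static int lock_depth = -1;        /* -1 => BKL not held by this task        */
static int kernel_flag_held;       /* stand-in for spin_lock(&kernel_flag)   */

static void lock_kernel(void)
{
        if (lock_depth == -1)      /* only the outermost call takes the lock */
                kernel_flag_held = 1;
        ++lock_depth;
}

static void unlock_kernel(void)
{
        assert(lock_depth >= 0);   /* the real code BUG()s here              */
        if (--lock_depth < 0)      /* only the outermost release drops it    */
                kernel_flag_held = 0;
}

int main(void)
{
        lock_kernel();
        lock_kernel();                       /* recursive acquisition is legal */
        unlock_kernel();
        assert(kernel_flag_held && lock_depth == 0);
        unlock_kernel();
        assert(!kernel_flag_held && lock_depth == -1);
        return 0;
}
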
1936diff -urN linux-2.4.19-ac5/include/asm-sh/softirq.h linux/include/asm-sh/softirq.h
1937--- linux-2.4.19-ac5/include/asm-sh/softirq.h Thu Aug 1 15:54:40 2002
1938+++ linux/include/asm-sh/softirq.h Fri Aug 2 10:28:20 2002
1939@@ -6,6 +6,7 @@
1940
1941 #define local_bh_disable() \
1942 do { \
1943+ preempt_disable(); \
1944 local_bh_count(smp_processor_id())++; \
1945 barrier(); \
1946 } while (0)
1947@@ -14,6 +15,7 @@
1948 do { \
1949 barrier(); \
1950 local_bh_count(smp_processor_id())--; \
1951+ preempt_enable(); \
1952 } while (0)
1953
1954 #define local_bh_enable() \
1955@@ -23,6 +25,7 @@
1956 && softirq_pending(smp_processor_id())) { \
1957 do_softirq(); \
1958 } \
1959+ preempt_enable(); \
1960 } while (0)
1961
1962 #define in_softirq() (local_bh_count(smp_processor_id()) != 0)
1963diff -urN linux-2.4.19-ac5/include/linux/brlock.h linux/include/linux/brlock.h
1964--- linux-2.4.19-ac5/include/linux/brlock.h Thu Aug 1 15:54:34 2002
1965+++ linux/include/linux/brlock.h Fri Aug 2 10:28:20 2002
1966@@ -171,11 +171,11 @@
1967 }
1968
1969 #else
1970-# define br_read_lock(idx) ((void)(idx))
1971-# define br_read_unlock(idx) ((void)(idx))
1972-# define br_write_lock(idx) ((void)(idx))
1973-# define br_write_unlock(idx) ((void)(idx))
1974-#endif
1975+# define br_read_lock(idx) ({ (void)(idx); preempt_disable(); })
1976+# define br_read_unlock(idx) ({ (void)(idx); preempt_enable(); })
1977+# define br_write_lock(idx) ({ (void)(idx); preempt_disable(); })
1978+# define br_write_unlock(idx) ({ (void)(idx); preempt_enable(); })
1979+#endif /* CONFIG_SMP */
1980
1981 /*
1982 * Now enumerate all of the possible sw/hw IRQ protected
1983diff -urN linux-2.4.19-ac5/include/linux/dcache.h linux/include/linux/dcache.h
1984--- linux-2.4.19-ac5/include/linux/dcache.h Thu Aug 1 15:54:34 2002
1985+++ linux/include/linux/dcache.h Fri Aug 2 10:28:20 2002
1986@@ -126,31 +126,6 @@
1987
1988 extern spinlock_t dcache_lock;
1989
1990-/**
1991- * d_drop - drop a dentry
1992- * @dentry: dentry to drop
1993- *
1994- * d_drop() unhashes the entry from the parent
1995- * dentry hashes, so that it won't be found through
1996- * a VFS lookup any more. Note that this is different
1997- * from deleting the dentry - d_delete will try to
1998- * mark the dentry negative if possible, giving a
1999- * successful _negative_ lookup, while d_drop will
2000- * just make the cache lookup fail.
2001- *
2002- * d_drop() is used mainly for stuff that wants
2003- * to invalidate a dentry for some reason (NFS
2004- * timeouts or autofs deletes).
2005- */
2006-
2007-static __inline__ void d_drop(struct dentry * dentry)
2008-{
2009- spin_lock(&dcache_lock);
2010- list_del(&dentry->d_hash);
2011- INIT_LIST_HEAD(&dentry->d_hash);
2012- spin_unlock(&dcache_lock);
2013-}
2014-
2015 static __inline__ int dname_external(struct dentry *d)
2016 {
2017 return d->d_name.name != d->d_iname;
2018@@ -275,3 +250,34 @@
2019 #endif /* __KERNEL__ */
2020
2021 #endif /* __LINUX_DCACHE_H */
2022+
2023+#if !defined(__LINUX_DCACHE_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
2024+#define __LINUX_DCACHE_H_INLINES
2025+
2026+#ifdef __KERNEL__
2027+/**
2028+ * d_drop - drop a dentry
2029+ * @dentry: dentry to drop
2030+ *
2031+ * d_drop() unhashes the entry from the parent
2032+ * dentry hashes, so that it won't be found through
2033+ * a VFS lookup any more. Note that this is different
2034+ * from deleting the dentry - d_delete will try to
2035+ * mark the dentry negative if possible, giving a
2036+ * successful _negative_ lookup, while d_drop will
2037+ * just make the cache lookup fail.
2038+ *
2039+ * d_drop() is used mainly for stuff that wants
2040+ * to invalidate a dentry for some reason (NFS
2041+ * timeouts or autofs deletes).
2042+ */
2043+
2044+static __inline__ void d_drop(struct dentry * dentry)
2045+{
2046+ spin_lock(&dcache_lock);
2047+ list_del(&dentry->d_hash);
2048+ INIT_LIST_HEAD(&dentry->d_hash);
2049+ spin_unlock(&dcache_lock);
2050+}
2051+#endif
2052+#endif
2053diff -urN linux-2.4.19-ac5/include/linux/fs_struct.h linux/include/linux/fs_struct.h
2054--- linux-2.4.19-ac5/include/linux/fs_struct.h Thu Aug 1 15:54:34 2002
2055+++ linux/include/linux/fs_struct.h Fri Aug 2 10:28:20 2002
2056@@ -20,6 +20,15 @@
2057 extern void exit_fs(struct task_struct *);
2058 extern void set_fs_altroot(void);
2059
2060+struct fs_struct *copy_fs_struct(struct fs_struct *old);
2061+void put_fs_struct(struct fs_struct *fs);
2062+
2063+#endif
2064+#endif
2065+
2066+#if !defined(_LINUX_FS_STRUCT_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
2067+#define _LINUX_FS_STRUCT_H_INLINES
2068+#ifdef __KERNEL__
2069 /*
2070 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
2071 * It can block. Requires the big lock held.
2072@@ -65,9 +74,5 @@
2073 mntput(old_pwdmnt);
2074 }
2075 }
2076-
2077-struct fs_struct *copy_fs_struct(struct fs_struct *old);
2078-void put_fs_struct(struct fs_struct *fs);
2079-
2080 #endif
2081 #endif
2082diff -urN linux-2.4.19-ac5/include/linux/sched.h linux/include/linux/sched.h
2083--- linux-2.4.19-ac5/include/linux/sched.h Thu Aug 1 15:54:34 2002
2084+++ linux/include/linux/sched.h Fri Aug 2 10:28:20 2002
2085@@ -91,6 +91,7 @@
2086 #define TASK_UNINTERRUPTIBLE 2
2087 #define TASK_ZOMBIE 4
2088 #define TASK_STOPPED 8
2089+#define PREEMPT_ACTIVE 0x4000000
2090
2091 #define __set_task_state(tsk, state_value) \
2092 do { (tsk)->state = (state_value); } while (0)
2093@@ -157,6 +158,9 @@
2094 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
2095 extern signed long FASTCALL(schedule_timeout(signed long timeout));
2096 asmlinkage void schedule(void);
2097+#ifdef CONFIG_PREEMPT
2098+asmlinkage void preempt_schedule(void);
2099+#endif
2100
2101 extern int schedule_task(struct tq_struct *task);
2102 extern void flush_scheduled_tasks(void);
2103@@ -289,7 +293,7 @@
2104 * offsets of these are hardcoded elsewhere - touch with care
2105 */
2106 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
2107- unsigned long flags; /* per process flags, defined below */
2108+ int preempt_count; /* 0 => preemptable, <0 => BUG */
2109 int sigpending;
2110 mm_segment_t addr_limit; /* thread address space:
2111 0-0xBFFFFFFF for user-thead
2112@@ -331,6 +335,7 @@
2113 struct mm_struct *active_mm;
2114 struct list_head local_pages;
2115 unsigned int allocation_order, nr_local_pages;
2116+ unsigned long flags;
2117
2118 /* task state */
2119 struct linux_binfmt *binfmt;
2120@@ -944,6 +949,11 @@
2121 return res;
2122 }
2123
2124+#define _TASK_STRUCT_DEFINED
2125+#include <linux/dcache.h>
2126+#include <linux/tqueue.h>
2127+#include <linux/fs_struct.h>
2128+
2129 #endif /* __KERNEL__ */
2130
2131 #endif
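
The inlines relocated in dcache.h, fs_struct.h and tqueue.h all call spin_lock(), which under CONFIG_PREEMPT manipulates current->preempt_count and therefore needs the full task_struct definition; sched.h resolves the resulting circular include by defining _TASK_STRUCT_DEFINED and re-including those headers, whose inline bodies sit behind a second guard. A self-contained sketch of that two-pass header pattern (illustrative names only, not kernel code; the two "passes" are simulated in one file):

#include <assert.h>

/* --- first pass over the helper header: the task struct is not known yet --- */
#ifndef HELPER_H
#define HELPER_H
int helper_slow(void);                      /* prototypes only at this point   */
#endif /* HELPER_H */

/* --- the struct header, playing the role of linux/sched.h --- */
struct task { int preempt_count; };
#define TASK_STRUCT_DEFINED                 /* analogous to _TASK_STRUCT_DEFINED */

/* --- second pass: the inline body may now dereference the struct --- */
#if !defined(HELPER_H_INLINES) && defined(TASK_STRUCT_DEFINED)
#define HELPER_H_INLINES
static inline int helper_fast(struct task *t)
{
        return t->preempt_count;            /* in the kernel: preempt_disable() etc. */
}
#endif

int helper_slow(void) { return 0; }

int main(void)
{
        struct task t = { 1 };
        assert(helper_fast(&t) == 1 && helper_slow() == 0);
        return 0;
}
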
2132diff -urN linux-2.4.19-ac5/include/linux/smp_lock.h linux/include/linux/smp_lock.h
2133--- linux-2.4.19-ac5/include/linux/smp_lock.h Thu Aug 1 15:54:34 2002
2134+++ linux/include/linux/smp_lock.h Fri Aug 2 10:28:20 2002
2135@@ -3,7 +3,7 @@
2136
2137 #include <linux/config.h>
2138
2139-#ifndef CONFIG_SMP
2140+#if !defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT)
2141
2142 #define lock_kernel() do { } while(0)
2143 #define unlock_kernel() do { } while(0)
2144diff -urN linux-2.4.19-ac5/include/linux/spinlock.h linux/include/linux/spinlock.h
2145--- linux-2.4.19-ac5/include/linux/spinlock.h Thu Aug 1 15:54:34 2002
2146+++ linux/include/linux/spinlock.h Fri Aug 2 10:28:20 2002
2147@@ -2,6 +2,7 @@
2148 #define __LINUX_SPINLOCK_H
2149
2150 #include <linux/config.h>
2151+#include <linux/compiler.h>
2152
2153 /*
2154 * These are the generic versions of the spinlocks and read-write
2155@@ -62,8 +63,10 @@
2156
2157 #if (DEBUG_SPINLOCKS < 1)
2158
2159+#ifndef CONFIG_PREEMPT
2160 #define atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
2161 #define ATOMIC_DEC_AND_LOCK
2162+#endif
2163
2164 /*
2165 * Your basic spinlocks, allowing only a single CPU anywhere
2166@@ -79,11 +82,11 @@
2167 #endif
2168
2169 #define spin_lock_init(lock) do { } while(0)
2170-#define spin_lock(lock) (void)(lock) /* Not "unused variable". */
2171+#define _raw_spin_lock(lock) (void)(lock) /* Not "unused variable". */
2172 #define spin_is_locked(lock) (0)
2173-#define spin_trylock(lock) ({1; })
2174+#define _raw_spin_trylock(lock) ({1; })
2175 #define spin_unlock_wait(lock) do { } while(0)
2176-#define spin_unlock(lock) do { } while(0)
2177+#define _raw_spin_unlock(lock) do { } while(0)
2178
2179 #elif (DEBUG_SPINLOCKS < 2)
2180
2181@@ -142,13 +145,78 @@
2182 #endif
2183
2184 #define rwlock_init(lock) do { } while(0)
2185-#define read_lock(lock) (void)(lock) /* Not "unused variable". */
2186-#define read_unlock(lock) do { } while(0)
2187-#define write_lock(lock) (void)(lock) /* Not "unused variable". */
2188-#define write_unlock(lock) do { } while(0)
2189+#define _raw_read_lock(lock) (void)(lock) /* Not "unused variable". */
2190+#define _raw_read_unlock(lock) do { } while(0)
2191+#define _raw_write_lock(lock) (void)(lock) /* Not "unused variable". */
2192+#define _raw_write_unlock(lock) do { } while(0)
2193
2194 #endif /* !SMP */
2195
2196+#ifdef CONFIG_PREEMPT
2197+
2198+#define preempt_get_count() (current->preempt_count)
2199+#define preempt_is_disabled() (preempt_get_count() != 0)
2200+
2201+#define preempt_disable() \
2202+do { \
2203+ ++current->preempt_count; \
2204+ barrier(); \
2205+} while (0)
2206+
2207+#define preempt_enable_no_resched() \
2208+do { \
2209+ --current->preempt_count; \
2210+ barrier(); \
2211+} while (0)
2212+
2213+#define preempt_enable() \
2214+do { \
2215+ --current->preempt_count; \
2216+ barrier(); \
2217+ if (unlikely(current->preempt_count < current->need_resched)) \
2218+ preempt_schedule(); \
2219+} while (0)
2220+
2221+#define spin_lock(lock) \
2222+do { \
2223+ preempt_disable(); \
2224+ _raw_spin_lock(lock); \
2225+} while(0)
2226+
2227+#define spin_trylock(lock) ({preempt_disable(); _raw_spin_trylock(lock) ? \
2228+ 1 : ({preempt_enable(); 0;});})
2229+#define spin_unlock(lock) \
2230+do { \
2231+ _raw_spin_unlock(lock); \
2232+ preempt_enable(); \
2233+} while (0)
2234+
2235+#define read_lock(lock) ({preempt_disable(); _raw_read_lock(lock);})
2236+#define read_unlock(lock) ({_raw_read_unlock(lock); preempt_enable();})
2237+#define write_lock(lock) ({preempt_disable(); _raw_write_lock(lock);})
2238+#define write_unlock(lock) ({_raw_write_unlock(lock); preempt_enable();})
2239+#define write_trylock(lock) ({preempt_disable();_raw_write_trylock(lock) ? \
2240+ 1 : ({preempt_enable(); 0;});})
2241+
2242+#else
2243+
2244+#define preempt_get_count() (0)
2245+#define preempt_is_disabled() (1)
2246+#define preempt_disable() do { } while (0)
2247+#define preempt_enable_no_resched() do {} while(0)
2248+#define preempt_enable() do { } while (0)
2249+
2250+#define spin_lock(lock) _raw_spin_lock(lock)
2251+#define spin_trylock(lock) _raw_spin_trylock(lock)
2252+#define spin_unlock(lock) _raw_spin_unlock(lock)
2253+
2254+#define read_lock(lock) _raw_read_lock(lock)
2255+#define read_unlock(lock) _raw_read_unlock(lock)
2256+#define write_lock(lock) _raw_write_lock(lock)
2257+#define write_unlock(lock) _raw_write_unlock(lock)
2258+#define write_trylock(lock) _raw_write_trylock(lock)
2259+#endif
2260+
2261 /* "lock on reference count zero" */
2262 #ifndef ATOMIC_DEC_AND_LOCK
2263 #include <asm/atomic.h>
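
Two details of the preemption primitives defined above are easy to miss: the count nests, so only the outermost preempt_enable() may trigger a reschedule, and because need_resched is only ever 0 or 1 here, the test "preempt_count < need_resched" is true exactly when the count has just reached zero while a reschedule is pending. A small user-space model of the counter — not part of the patch, with preempt_schedule() stubbed out:

#include <assert.h>
#include <stdio.h>

/* Illustrative stand-ins for the per-task fields used by the patch. */
static int preempt_count;    /* 0 => preemption allowed                */
static int need_resched;     /* set by "wakeups"; only ever 0 or 1     */

static void preempt_disable(void)  { ++preempt_count; }

static void preempt_schedule(void)
{
        puts("would call schedule() here");
        need_resched = 0;
}

static void preempt_enable(void)
{
        --preempt_count;
        /* True only when the count just reached 0 AND need_resched == 1. */
        if (preempt_count < need_resched)
                preempt_schedule();
}

int main(void)
{
        preempt_disable();
        preempt_disable();           /* nesting is allowed                  */
        need_resched = 1;            /* a "wakeup" arrives in the meantime  */
        preempt_enable();            /* count 2 -> 1: no reschedule         */
        preempt_enable();            /* count 1 -> 0: reschedules           */
        assert(preempt_count == 0);
        return 0;
}
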
2264diff -urN linux-2.4.19-ac5/include/linux/tqueue.h linux/include/linux/tqueue.h
2265--- linux-2.4.19-ac5/include/linux/tqueue.h Thu Aug 1 15:54:34 2002
2266+++ linux/include/linux/tqueue.h Fri Aug 2 10:28:20 2002
2267@@ -94,6 +94,22 @@
2268 extern spinlock_t tqueue_lock;
2269
2270 /*
2271+ * Call all "bottom halfs" on a given list.
2272+ */
2273+
2274+extern void __run_task_queue(task_queue *list);
2275+
2276+static inline void run_task_queue(task_queue *list)
2277+{
2278+ if (TQ_ACTIVE(*list))
2279+ __run_task_queue(list);
2280+}
2281+
2282+#endif /* _LINUX_TQUEUE_H */
2283+
2284+#if !defined(_LINUX_TQUEUE_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
2285+#define _LINUX_TQUEUE_H_INLINES
2286+/*
2287 * Queue a task on a tq. Return non-zero if it was successfully
2288 * added.
2289 */
2290@@ -109,17 +125,4 @@
2291 }
2292 return ret;
2293 }
2294-
2295-/*
2296- * Call all "bottom halfs" on a given list.
2297- */
2298-
2299-extern void __run_task_queue(task_queue *list);
2300-
2301-static inline void run_task_queue(task_queue *list)
2302-{
2303- if (TQ_ACTIVE(*list))
2304- __run_task_queue(list);
2305-}
2306-
2307-#endif /* _LINUX_TQUEUE_H */
2308+#endif
2309diff -urN linux-2.4.19-ac5/kernel/exit.c linux/kernel/exit.c
2310--- linux-2.4.19-ac5/kernel/exit.c Thu Aug 1 15:54:34 2002
2311+++ linux/kernel/exit.c Fri Aug 2 10:28:20 2002
2312@@ -327,8 +327,8 @@
2313 /* more a memory barrier than a real lock */
2314 task_lock(tsk);
2315 tsk->mm = NULL;
2316- task_unlock(tsk);
2317 enter_lazy_tlb(mm, current, smp_processor_id());
2318+ task_unlock(tsk);
2319 mmput(mm);
2320 }
2321 }
2322@@ -449,6 +449,11 @@
2323 tsk->flags |= PF_EXITING;
2324 del_timer_sync(&tsk->real_timer);
2325
2326+ if (unlikely(preempt_get_count()))
2327+ printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
2328+ current->comm, current->pid,
2329+ preempt_get_count());
2330+
2331 fake_volatile:
2332 #ifdef CONFIG_BSD_PROCESS_ACCT
2333 acct_process(code);
2334diff -urN linux-2.4.19-ac5/kernel/fork.c linux/kernel/fork.c
2335--- linux-2.4.19-ac5/kernel/fork.c Thu Aug 1 15:54:34 2002
2336+++ linux/kernel/fork.c Fri Aug 2 10:28:20 2002
2337@@ -629,6 +629,13 @@
2338 if (p->binfmt && p->binfmt->module)
2339 __MOD_INC_USE_COUNT(p->binfmt->module);
2340
2341+#ifdef CONFIG_PREEMPT
2342+ /*
2343+ * Continue with preemption disabled as part of the context
2344+ * switch, so start with preempt_count set to 1.
2345+ */
2346+ p->preempt_count = 1;
2347+#endif
2348 p->did_exec = 0;
2349 p->swappable = 0;
2350 p->state = TASK_UNINTERRUPTIBLE;
2351diff -urN linux-2.4.19-ac5/kernel/ksyms.c linux/kernel/ksyms.c
2352--- linux-2.4.19-ac5/kernel/ksyms.c Thu Aug 1 15:54:34 2002
2353+++ linux/kernel/ksyms.c Fri Aug 2 10:28:20 2002
2354@@ -446,6 +446,9 @@
2355 EXPORT_SYMBOL(interruptible_sleep_on);
2356 EXPORT_SYMBOL(interruptible_sleep_on_timeout);
2357 EXPORT_SYMBOL(schedule);
2358+#ifdef CONFIG_PREEMPT
2359+EXPORT_SYMBOL(preempt_schedule);
2360+#endif
2361 EXPORT_SYMBOL(schedule_timeout);
2362 EXPORT_SYMBOL(sys_sched_yield);
2363 EXPORT_SYMBOL(jiffies);
2364diff -urN linux-2.4.19-ac5/kernel/sched.c linux/kernel/sched.c
2365--- linux-2.4.19-ac5/kernel/sched.c Thu Aug 1 15:54:34 2002
2366+++ linux/kernel/sched.c Fri Aug 2 10:31:01 2002
2367@@ -491,7 +491,7 @@
2368 task_lock(prev);
2369 task_release_cpu(prev);
2370 mb();
2371- if (prev->state == TASK_RUNNING)
2372+ if (task_on_runqueue(prev))
2373 goto needs_resched;
2374
2375 out_unlock:
2376@@ -521,7 +521,7 @@
2377 goto out_unlock;
2378
2379 spin_lock_irqsave(&runqueue_lock, flags);
2380- if ((prev->state == TASK_RUNNING) && !task_has_cpu(prev))
2381+ if (task_on_runqueue(prev) && !task_has_cpu(prev))
2382 reschedule_idle(prev);
2383 spin_unlock_irqrestore(&runqueue_lock, flags);
2384 goto out_unlock;
2385@@ -534,6 +534,7 @@
2386 asmlinkage void schedule_tail(struct task_struct *prev)
2387 {
2388 __schedule_tail(prev);
2389+ preempt_enable();
2390 }
2391
2392 /*
2393@@ -553,9 +554,10 @@
2394 struct list_head *tmp;
2395 int this_cpu, c;
2396
2397-
2398 spin_lock_prefetch(&runqueue_lock);
2399
2400+ preempt_disable();
2401+
2402 BUG_ON(!current->active_mm);
2403 need_resched_back:
2404 prev = current;
2405@@ -583,6 +585,14 @@
2406 move_last_runqueue(prev);
2407 }
2408
2409+#ifdef CONFIG_PREEMPT
2410+ /*
2411+ * entering from preempt_schedule, off a kernel preemption,
2412+ * go straight to picking the next task.
2413+ */
2414+ if (unlikely(preempt_get_count() & PREEMPT_ACTIVE))
2415+ goto treat_like_run;
2416+#endif
2417 switch (prev->state) {
2418 case TASK_INTERRUPTIBLE:
2419 if (signal_pending(prev)) {
2420@@ -593,6 +603,9 @@
2421 del_from_runqueue(prev);
2422 case TASK_RUNNING:;
2423 }
2424+#ifdef CONFIG_PREEMPT
2425+ treat_like_run:
2426+#endif
2427 prev->need_resched = 0;
2428
2429 /*
2430@@ -701,9 +714,31 @@
2431 reacquire_kernel_lock(current);
2432 if (current->need_resched)
2433 goto need_resched_back;
2434+ preempt_enable_no_resched();
2435 return;
2436 }
2437
2438+#ifdef CONFIG_PREEMPT
2439+/*
 2440+ * this is the entry point to schedule() from in-kernel preemption
2441+ */
2442+asmlinkage void preempt_schedule(void)
2443+{
2444+ if (unlikely(irqs_disabled()))
2445+ return;
2446+
2447+need_resched:
2448+ current->preempt_count += PREEMPT_ACTIVE;
2449+ schedule();
2450+ current->preempt_count -= PREEMPT_ACTIVE;
2451+
2452+ /* we could miss a preemption opportunity between schedule and now */
2453+ barrier();
2454+ if (unlikely(current->need_resched))
2455+ goto need_resched;
2456+}
2457+#endif /* CONFIG_PREEMPT */
2458+
2459 /*
2460 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just wake everything
2461 * up. If it's an exclusive wakeup (nr_exclusive == small +ve number) then we wake all the
2462@@ -1312,6 +1347,13 @@
2463 sched_data->curr = current;
2464 sched_data->last_schedule = get_cycles();
2465 clear_bit(current->processor, &wait_init_idle);
2466+#ifdef CONFIG_PREEMPT
2467+ /*
2468+ * fix up the preempt_count for non-CPU0 idle threads
2469+ */
2470+ if (current->processor)
2471+ current->preempt_count = 0;
2472+#endif
2473 }
2474
2475 extern void init_timervecs (void);
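
preempt_schedule() tags its entry by adding PREEMPT_ACTIVE to preempt_count, and the new test in schedule() uses that bit to jump straight to picking the next task, so a task preempted while marking itself TASK_INTERRUPTIBLE is not pulled off the runqueue before it has actually gone to sleep. A tiny stand-alone model of the flag-in-counter encoding (plain C, not kernel code):

#include <assert.h>

#define PREEMPT_ACTIVE 0x4000000        /* the bit the patch reserves */

static int preempt_count;

/* Model of the check added to schedule(): a kernel preemption is
 * recognised from the PREEMPT_ACTIVE bit alone, independently of the
 * low-order nesting bits. */
static int entered_via_preemption(void)
{
        return preempt_count & PREEMPT_ACTIVE;
}

int main(void)
{
        assert(!entered_via_preemption());
        preempt_count += PREEMPT_ACTIVE;        /* preempt_schedule() entry */
        assert(entered_via_preemption());
        preempt_count -= PREEMPT_ACTIVE;        /* back from schedule()     */
        assert(preempt_count == 0);
        return 0;
}
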
2476diff -urN linux-2.4.19-ac5/lib/dec_and_lock.c linux/lib/dec_and_lock.c
2477--- linux-2.4.19-ac5/lib/dec_and_lock.c Thu Aug 1 15:54:34 2002
2478+++ linux/lib/dec_and_lock.c Fri Aug 2 10:28:20 2002
2479@@ -1,5 +1,6 @@
2480 #include <linux/module.h>
2481 #include <linux/spinlock.h>
2482+#include <linux/sched.h>
2483 #include <asm/atomic.h>
2484
2485 /*
2486diff -urN linux-2.4.19-ac5/mm/slab.c linux/mm/slab.c
2487--- linux-2.4.19-ac5/mm/slab.c Thu Aug 1 15:54:34 2002
2488+++ linux/mm/slab.c Fri Aug 2 10:28:21 2002
2489@@ -49,7 +49,8 @@
2490 * constructors and destructors are called without any locking.
2491 * Several members in kmem_cache_t and slab_t never change, they
2492 * are accessed without any locking.
2493- * The per-cpu arrays are never accessed from the wrong cpu, no locking.
2494+ * The per-cpu arrays are never accessed from the wrong cpu, no locking,
2495+ * and local interrupts are disabled so slab code is preempt-safe.
2496 * The non-constant members are protected with a per-cache irq spinlock.
2497 *
2498 * Further notes from the original documentation:
2499diff -urN linux-2.4.19-ac5/net/core/dev.c linux/net/core/dev.c
2500--- linux-2.4.19-ac5/net/core/dev.c Thu Aug 1 15:54:41 2002
2501+++ linux/net/core/dev.c Fri Aug 2 10:28:21 2002
2502@@ -1034,9 +1034,15 @@
2503 int cpu = smp_processor_id();
2504
2505 if (dev->xmit_lock_owner != cpu) {
2506+ /*
 2507+ * The spin_lock effectively does a preempt lock, but
2508+ * we are about to drop that...
2509+ */
2510+ preempt_disable();
2511 spin_unlock(&dev->queue_lock);
2512 spin_lock(&dev->xmit_lock);
2513 dev->xmit_lock_owner = cpu;
2514+ preempt_enable();
2515
2516 if (!netif_queue_stopped(dev)) {
2517 if (netdev_nit)
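
The comment added to dev.c spells out the subtlety: queue_lock was providing implicit preemption protection, and the CPU number cached in cpu must still be correct when xmit_lock_owner is written after the lock handoff, so the unlocked gap is bracketed with preempt_disable()/preempt_enable(). A stand-alone sketch of that bracket pattern — all names are stand-ins, the kernel lock calls appear only as comments:

#include <assert.h>

static int preempt_count;                /* models current->preempt_count */
static int xmit_lock_owner = -1;

static void preempt_disable(void) { ++preempt_count; }
static void preempt_enable(void)  { --preempt_count; }

int main(void)
{
        int cpu = 3;                     /* value smp_processor_id() returned
                                            while queue_lock was still held  */
        preempt_disable();               /* keep that value meaningful ...   */
        /* ... across: spin_unlock(&dev->queue_lock);
                       spin_lock(&dev->xmit_lock);      */
        xmit_lock_owner = cpu;
        preempt_enable();

        assert(xmit_lock_owner == 3 && preempt_count == 0);
        return 0;
}
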
2518diff -urN linux-2.4.19-ac5/net/core/skbuff.c linux/net/core/skbuff.c
2519--- linux-2.4.19-ac5/net/core/skbuff.c Thu Aug 1 15:54:41 2002
2520+++ linux/net/core/skbuff.c Fri Aug 2 10:28:21 2002
2521@@ -111,33 +111,37 @@
2522
2523 static __inline__ struct sk_buff *skb_head_from_pool(void)
2524 {
2525- struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;
2526+ struct sk_buff_head *list;
2527+ struct sk_buff *skb = NULL;
2528+ unsigned long flags;
2529
2530- if (skb_queue_len(list)) {
2531- struct sk_buff *skb;
2532- unsigned long flags;
2533+ local_irq_save(flags);
2534
2535- local_irq_save(flags);
2536+ list = &skb_head_pool[smp_processor_id()].list;
2537+
2538+ if (skb_queue_len(list))
2539 skb = __skb_dequeue(list);
2540- local_irq_restore(flags);
2541- return skb;
2542- }
2543- return NULL;
2544+
2545+ local_irq_restore(flags);
2546+ return skb;
2547 }
2548
2549 static __inline__ void skb_head_to_pool(struct sk_buff *skb)
2550 {
2551- struct sk_buff_head *list = &skb_head_pool[smp_processor_id()].list;
2552+ struct sk_buff_head *list;
2553+ unsigned long flags;
2554
2555- if (skb_queue_len(list) < sysctl_hot_list_len) {
2556- unsigned long flags;
2557+ local_irq_save(flags);
2558+ list = &skb_head_pool[smp_processor_id()].list;
2559
2560- local_irq_save(flags);
2561+ if (skb_queue_len(list) < sysctl_hot_list_len) {
2562 __skb_queue_head(list, skb);
2563 local_irq_restore(flags);
2564
2565 return;
2566 }
2567+
2568+ local_irq_restore(flags);
2569 kmem_cache_free(skbuff_head_cache, skb);
2570 }
2571
2572diff -urN linux-2.4.19-ac5/net/socket.c linux/net/socket.c
2573--- linux-2.4.19-ac5/net/socket.c Thu Aug 1 15:54:41 2002
2574+++ linux/net/socket.c Fri Aug 2 10:28:21 2002
2575@@ -132,7 +132,7 @@
2576
2577 static struct net_proto_family *net_families[NPROTO];
2578
2579-#ifdef CONFIG_SMP
2580+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
2581 static atomic_t net_family_lockct = ATOMIC_INIT(0);
2582 static spinlock_t net_family_lock = SPIN_LOCK_UNLOCKED;
2583
2584diff -urN linux-2.4.19-ac5/net/sunrpc/pmap_clnt.c linux/net/sunrpc/pmap_clnt.c
2585--- linux-2.4.19-ac5/net/sunrpc/pmap_clnt.c Thu Aug 1 15:54:41 2002
2586+++ linux/net/sunrpc/pmap_clnt.c Fri Aug 2 10:28:21 2002
2587@@ -12,6 +12,7 @@
2588 #include <linux/config.h>
2589 #include <linux/types.h>
2590 #include <linux/socket.h>
2591+#include <linux/sched.h>
2592 #include <linux/kernel.h>
2593 #include <linux/errno.h>
2594 #include <linux/uio.h>