1 diff -urN linux-2.4.17/CREDITS linux/CREDITS
2 --- linux-2.4.17/CREDITS        Wed Dec 19 03:17:30 2001
3 +++ linux/CREDITS       Fri Dec 21 00:41:25 2001
4 @@ -971,8 +971,8 @@
5  
6  N: Nigel Gamble
7  E: nigel@nrg.org
8 -E: nigel@sgi.com
9  D: Interrupt-driven printer driver
10 +D: Preemptible kernel
11  S: 120 Alley Way
12  S: Mountain View, California 94040
13  S: USA
14 diff -urN linux-2.4.17/Documentation/Configure.help linux/Documentation/Configure.help
15 --- linux-2.4.17/Documentation/Configure.help   Wed Dec 19 03:18:34 2001
16 +++ linux/Documentation/Configure.help  Fri Dec 21 00:41:25 2001
17 @@ -266,6 +266,19 @@
18    If you have a system with several CPUs, you do not need to say Y
19    here: the local APIC will be used automatically.
20  
21 +Preemptible Kernel
22 +CONFIG_PREEMPT
23 +  This option reduces the latency of the kernel when reacting to
24 +  real-time or interactive events by allowing a low priority process to
25 +  be preempted even if it is in kernel mode executing a system call.
26 +  This allows applications to run more reliably even when the system is
27 +  under load due to other, lower priority, processes.
28 +
29 +  Say Y here if you are building a kernel for a desktop system, embedded
30 +  system or real-time system.  Say N if you are building a kernel for a
31 +  system where throughput is more important than interactive response,
32 +  such as a server system.  Say N if you are unsure.
33 +
34  Kernel math emulation
35  CONFIG_MATH_EMULATION
36    Linux can emulate a math coprocessor (used for floating point
37 diff -urN linux-2.4.17/Documentation/preempt-locking.txt linux/Documentation/preempt-locking.txt
38 --- linux-2.4.17/Documentation/preempt-locking.txt      Wed Dec 31 19:00:00 1969
39 +++ linux/Documentation/preempt-locking.txt     Fri Dec 21 00:41:25 2001
40 @@ -0,0 +1,94 @@
41 +                 Proper Locking Under a Preemptible Kernel:
42 +                      Keeping Kernel Code Preempt-Safe
43 +                         Robert Love <rml@tech9.net>
44 +                          Last Updated: 21 Oct 2001
45 +
46 +
47 +INTRODUCTION
48 +
49 +
50 +A preemptible kernel creates new locking issues.  The issues are the same as
51 +those under SMP: concurrency and reentrancy.  Thankfully, the Linux preemptible
52 +kernel model leverages existing SMP locking mechanisms.  Thus, the kernel
53 +requires explicit additional locking for very few additional situations.
54 +
55 +This document is for all kernel hackers.  Developing code in the kernel
56 +requires protecting these situations.  As you will see, these situations would 
57 +normally require a lock, were they not per-CPU.
58 +
59 +
60 +RULE #1: Per-CPU data structures need explicit protection
61 +
62 +
63 +Two similar problems arise. An example code snippet:
64 +
65 +       struct this_needs_locking tux[NR_CPUS];
66 +       tux[smp_processor_id()] = some_value;
67 +       /* task is preempted here... */
68 +       something = tux[smp_processor_id()];
69 +
70 +First, since the data is per-CPU, it may not have explicit SMP locking, but
71 +would require it otherwise.  Second, when a preempted task is finally rescheduled,
72 +the previous value of smp_processor_id may not equal the current.  You must
73 +protect these situations by disabling preemption around them.
74 +
75 +
76 +RULE #2: CPU state must be protected.
77 +
78 +
79 +Under preemption, the state of the CPU must be protected.  This is arch-
80 +dependent, but includes CPU structures and state not preserved over a context
81 +switch.  For example, on x86, entering and exiting FPU mode is now a critical
82 +section that must occur while preemption is disabled.  Think what would happen
83 +if the kernel is executing a floating-point instruction and is then preempted.
84 +Remember, the kernel does not save FPU state except for user tasks.  Therefore,
85 +upon preemption, the FPU registers will be sold to the lowest bidder.  Thus,
86 +preemption must be disabled around such regions.
87 +
88 +Note, some FPU functions are already explicitly preempt safe.  For example,
89 +kernel_fpu_begin and kernel_fpu_end will disable and enable preemption.
90 +However, math_state_restore must be called with preemption disabled.
91 +
92 +
93 +SOLUTION
94 +
95 +
96 +Data protection under preemption is achieved by disabling preemption for the
97 +duration of the critical region.
98 +
99 +preempt_enable()               decrement the preempt counter
100 +preempt_disable()              increment the preempt counter
101 +preempt_enable_no_resched()    decrement, but do not immediately preempt
102 +
103 +The functions are nestable.  In other words, you can call preempt_disable
104 +n-times in a code path, and preemption will not be reenabled until the n-th
105 +call to preempt_enable.  The preempt statements compile to nothing if
106 +preemption is not enabled.
107 +
108 +Note that you do not need to explicitly prevent preemption if you are holding
109 +any locks or interrupts are disabled, since preemption is implicitly disabled
110 +in those cases.
111 +
112 +Example:
113 +
114 +       cpucache_t *cc; /* this is per-CPU */
115 +       preempt_disable();
116 +       cc = cc_data(searchp);
117 +       if (cc && cc->avail) {
118 +               __free_block(searchp, cc_entry(cc), cc->avail);
119 +               cc->avail = 0;
120 +       }
121 +       preempt_enable();
122 +       return 0;
123 +
124 +Notice how the preemption statements must encompass every reference of the
125 +critical variables.  Another example:
126 +
127 +       int buf[NR_CPUS];
128 +       set_cpu_val(buf);
129 +       if (buf[smp_processor_id()] == -1) printk(KERN_INFO "wee!\n");
130 +       spin_lock(&buf_lock);
131 +       /* ... */
132 +
133 +This code is not preempt-safe, but see how easily we can fix it by simply
134 +moving the spin_lock up two lines.
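
   [Editor's note: a minimal sketch, not part of the patch, showing how the tux[]
   example from RULE #1 above can be made preempt-safe with the primitives this
   document introduces; this_needs_locking, tux and some_value are the hypothetical
   names from that example.]

	struct this_needs_locking tux[NR_CPUS];

	preempt_disable();                      /* pin this task to the current CPU */
	tux[smp_processor_id()] = some_value;
	/* no preemption can occur here, so smp_processor_id() cannot change under us */
	something = tux[smp_processor_id()];
	preempt_enable();                       /* may reschedule if need_resched was set */
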
135 diff -urN linux-2.4.17/MAINTAINERS linux/MAINTAINERS
136 --- linux-2.4.17/MAINTAINERS    Wed Dec 19 03:17:41 2001
137 +++ linux/MAINTAINERS   Fri Dec 21 00:41:25 2001
138 @@ -1242,6 +1242,14 @@
139  M:     mostrows@styx.uwaterloo.ca
140  S:     Maintained
141  
142 +PREEMPTIBLE KERNEL
143 +P:     Robert M. Love
144 +M:     rml@tech9.net
145 +L:     linux-kernel@vger.kernel.org
146 +L:     kpreempt-tech@lists.sourceforge.net
147 +W:     http://tech9.net/rml/linux
148 +S:     Maintained
149 +
150  PROMISE DC4030 CACHING DISK CONTROLLER DRIVER
151  P:     Peter Denison
152  M:     promise@pnd-pc.demon.co.uk
153 diff -urN linux-2.4.17/arch/arm/config.in linux/arch/arm/config.in
154 --- linux-2.4.17/arch/arm/config.in     Wed Dec 19 03:18:28 2001
155 +++ linux/arch/arm/config.in    Fri Dec 21 00:41:25 2001
156 @@ -437,6 +437,7 @@
157  if [ "$CONFIG_CPU_32" = "y" -a "$CONFIG_ARCH_EBSA110" != "y" ]; then
158     bool 'Kernel-mode alignment trap handler' CONFIG_ALIGNMENT_TRAP
159  fi
160 +dep_bool 'Preemptible Kernel (experimental)' CONFIG_PREEMPT $CONFIG_CPU_32 $CONFIG_EXPERIMENTAL
161  endmenu
162  
163  source drivers/parport/Config.in
164 diff -urN linux-2.4.17/arch/arm/kernel/entry-armv.S linux/arch/arm/kernel/entry-armv.S
165 --- linux-2.4.17/arch/arm/kernel/entry-armv.S   Wed Dec 19 03:18:28 2001
166 +++ linux/arch/arm/kernel/entry-armv.S  Fri Dec 21 00:41:25 2001
167 @@ -672,6 +672,12 @@
168                 add     r4, sp, #S_SP
169                 mov     r6, lr
170                 stmia   r4, {r5, r6, r7, r8, r9}        @ save sp_SVC, lr_SVC, pc, cpsr, old_ro
171 +#ifdef CONFIG_PREEMPT
172 +               get_current_task r9
173 +               ldr     r8, [r9, #TSK_PREEMPT]
174 +               add     r8, r8, #1
175 +               str     r8, [r9, #TSK_PREEMPT]
176 +#endif
177  1:             get_irqnr_and_base r0, r6, r5, lr
178                 movne   r1, sp
179                 @
180 @@ -679,6 +685,25 @@
181                 @
182                 adrsvc  ne, lr, 1b
183                 bne     do_IRQ
184 +#ifdef CONFIG_PREEMPT
185 +2:             ldr     r8, [r9, #TSK_PREEMPT]
186 +               subs    r8, r8, #1
187 +               bne     3f
188 +               ldr     r7, [r9, #TSK_NEED_RESCHED]
189 +               teq     r7, #0
190 +               beq     3f
191 +               ldr     r6, .LCirqstat
192 +               ldr     r0, [r6, #IRQSTAT_BH_COUNT]
193 +               teq     r0, #0
194 +               bne     3f
195 +               mov     r0, #MODE_SVC
196 +               msr     cpsr_c, r0              @ enable interrupts
197 +               bl      SYMBOL_NAME(preempt_schedule)
198 +               mov     r0, #I_BIT | MODE_SVC
199 +               msr     cpsr_c, r0              @ disable interrupts
200 +               b       2b
201 +3:             str     r8, [r9, #TSK_PREEMPT]
202 +#endif
203                 ldr     r0, [sp, #S_PSR]                @ irqs are already disabled
204                 msr     spsr, r0
205                 ldmia   sp, {r0 - pc}^                  @ load r0 - pc, cpsr
206 @@ -736,6 +761,9 @@
207  .LCprocfns:    .word   SYMBOL_NAME(processor)
208  #endif
209  .LCfp:         .word   SYMBOL_NAME(fp_enter)
210 +#ifdef CONFIG_PREEMPT
211 +.LCirqstat:    .word   SYMBOL_NAME(irq_stat)
212 +#endif
213  
214                 irq_prio_table
215  
216 @@ -775,6 +803,12 @@
217                 stmdb   r8, {sp, lr}^
218                 alignment_trap r4, r7, __temp_irq
219                 zero_fp
220 +               get_current_task tsk
221 +#ifdef CONFIG_PREEMPT
222 +               ldr     r0, [tsk, #TSK_PREEMPT]
223 +               add     r0, r0, #1
224 +               str     r0, [tsk, #TSK_PREEMPT]
225 +#endif
226  1:             get_irqnr_and_base r0, r6, r5, lr
227                 movne   r1, sp
228                 adrsvc  ne, lr, 1b
229 @@ -782,8 +816,12 @@
230                 @ routine called with r0 = irq number, r1 = struct pt_regs *
231                 @
232                 bne     do_IRQ
233 +#ifdef CONFIG_PREEMPT
234 +               ldr     r0, [tsk, #TSK_PREEMPT]
235 +               sub     r0, r0, #1
236 +               str     r0, [tsk, #TSK_PREEMPT]
237 +#endif
238                 mov     why, #0
239 -               get_current_task tsk
240                 b       ret_to_user
241  
242                 .align  5
243 diff -urN linux-2.4.17/arch/arm/tools/getconstants.c linux/arch/arm/tools/getconstants.c
244 --- linux-2.4.17/arch/arm/tools/getconstants.c  Wed Dec 19 03:18:29 2001
245 +++ linux/arch/arm/tools/getconstants.c Fri Dec 21 00:41:25 2001
246 @@ -13,6 +13,7 @@
247  
248  #include <asm/pgtable.h>
249  #include <asm/uaccess.h>
250 +#include <asm/hardirq.h>
251  
252  /*
253   * Make sure that the compiler and target are compatible.
254 @@ -39,6 +40,11 @@
255  DEFN("TSS_SAVE",               OFF_TSK(thread.save));
256  DEFN("TSS_FPESAVE",            OFF_TSK(thread.fpstate.soft.save));
257  
258 +#ifdef CONFIG_PREEMPT
259 +DEFN("TSK_PREEMPT",            OFF_TSK(preempt_count));
260 +DEFN("IRQSTAT_BH_COUNT",       (unsigned long)&(((irq_cpustat_t *)0)->__local_bh_count));
261 +#endif
262 +
263  #ifdef CONFIG_CPU_32
264  DEFN("TSS_DOMAIN",             OFF_TSK(thread.domain));
265  
266 diff -urN linux-2.4.17/arch/i386/config.in linux/arch/i386/config.in
267 --- linux-2.4.17/arch/i386/config.in    Wed Dec 19 03:18:08 2001
268 +++ linux/arch/i386/config.in   Fri Dec 21 00:41:25 2001
269 @@ -176,6 +176,7 @@
270  bool 'Math emulation' CONFIG_MATH_EMULATION
271  bool 'MTRR (Memory Type Range Register) support' CONFIG_MTRR
272  bool 'Symmetric multi-processing support' CONFIG_SMP
273 +bool 'Preemptible Kernel' CONFIG_PREEMPT
274  if [ "$CONFIG_SMP" != "y" ]; then
275     bool 'Local APIC support on uniprocessors' CONFIG_X86_UP_APIC
276     dep_bool 'IO-APIC support on uniprocessors' CONFIG_X86_UP_IOAPIC $CONFIG_X86_UP_APIC
277 @@ -189,9 +190,12 @@
278     bool 'Multiquad NUMA system' CONFIG_MULTIQUAD
279  fi
280  
281 -if [ "$CONFIG_SMP" = "y" -a "$CONFIG_X86_CMPXCHG" = "y" ]; then
282 -   define_bool CONFIG_HAVE_DEC_LOCK y
283 +if [ "$CONFIG_SMP" = "y" -o "$CONFIG_PREEMPT" = "y" ]; then
284 +   if [ "$CONFIG_X86_CMPXCHG" = "y" ]; then
285 +      define_bool CONFIG_HAVE_DEC_LOCK y
286 +   fi
287  fi
288 +
289  endmenu
290  
291  mainmenu_option next_comment
292 diff -urN linux-2.4.17/arch/i386/kernel/entry.S linux/arch/i386/kernel/entry.S
293 --- linux-2.4.17/arch/i386/kernel/entry.S       Wed Dec 19 03:18:09 2001
294 +++ linux/arch/i386/kernel/entry.S      Fri Dec 21 00:41:25 2001
295 @@ -71,7 +71,7 @@
296   * these are offsets into the task-struct.
297   */
298  state          =  0
299 -flags          =  4
300 +preempt_count  =  4
301  sigpending     =  8
302  addr_limit     = 12
303  exec_domain    = 16
304 @@ -79,8 +79,28 @@
305  tsk_ptrace     = 24
306  processor      = 52
307  
308 +        /* These are offsets into the irq_stat structure
309 +         * There is one per cpu and it is aligned to 32
310 +         * byte boundary (we put that here as a shift count)
311 +         */
312 +irq_array_shift                 = CONFIG_X86_L1_CACHE_SHIFT
313 +
314 +irq_stat_local_irq_count        = 4
315 +irq_stat_local_bh_count         = 8
316 +
317  ENOSYS = 38
318  
319 +#ifdef CONFIG_SMP
320 +#define GET_CPU_INDX   movl processor(%ebx),%eax;  \
321 +                        shll $irq_array_shift,%eax
322 +#define GET_CURRENT_CPU_INDX GET_CURRENT(%ebx); \
323 +                             GET_CPU_INDX
324 +#define CPU_INDX (,%eax)
325 +#else
326 +#define GET_CPU_INDX
327 +#define GET_CURRENT_CPU_INDX GET_CURRENT(%ebx)
328 +#define CPU_INDX
329 +#endif
330  
331  #define SAVE_ALL \
332         cld; \
333 @@ -247,12 +267,30 @@
334         ALIGN
335  ENTRY(ret_from_intr)
336         GET_CURRENT(%ebx)
337 +#ifdef CONFIG_PREEMPT
338 +       cli
339 +       decl preempt_count(%ebx)
340 +#endif
341  ret_from_exception:
342         movl EFLAGS(%esp),%eax          # mix EFLAGS and CS
343         movb CS(%esp),%al
344         testl $(VM_MASK | 3),%eax       # return to VM86 mode or non-supervisor?
345         jne ret_from_sys_call
346 +#ifdef CONFIG_PREEMPT
347 +       cmpl $0,preempt_count(%ebx)
348 +       jnz restore_all
349 +       cmpl $0,need_resched(%ebx)
350 +       jz restore_all
351 +       movl SYMBOL_NAME(irq_stat)+irq_stat_local_bh_count CPU_INDX,%ecx
352 +       addl SYMBOL_NAME(irq_stat)+irq_stat_local_irq_count CPU_INDX,%ecx
353 +       jnz restore_all
354 +       incl preempt_count(%ebx)
355 +       sti
356 +       call SYMBOL_NAME(preempt_schedule)
357 +       jmp ret_from_intr
358 +#else
359         jmp restore_all
360 +#endif
361  
362         ALIGN
363  reschedule:
364 @@ -289,6 +327,9 @@
365         GET_CURRENT(%ebx)
366         call *%edi
367         addl $8,%esp
368 +#ifdef CONFIG_PREEMPT
369 +       cli
370 +#endif
371         jmp ret_from_exception
372  
373  ENTRY(coprocessor_error)
374 @@ -308,12 +349,18 @@
375         movl %cr0,%eax
376         testl $0x4,%eax                 # EM (math emulation bit)
377         jne device_not_available_emulate
378 +#ifdef CONFIG_PREEMPT
379 +       cli
380 +#endif
381         call SYMBOL_NAME(math_state_restore)
382         jmp ret_from_exception
383  device_not_available_emulate:
384         pushl $0                # temporary storage for ORIG_EIP
385         call  SYMBOL_NAME(math_emulate)
386         addl $4,%esp
387 +#ifdef CONFIG_PREEMPT
388 +       cli
389 +#endif
390         jmp ret_from_exception
391  
392  ENTRY(debug)
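
   [Editor's note: the ret_from_intr changes above are the core of the patch: on
   return from an interrupt to kernel mode, preempt the interrupted task if it is
   safe to do so.  In C terms the assembly performs roughly the following check;
   this is an illustrative sketch only, written against the task_struct and irq_stat
   fields the patch uses, and the ARM and SH hunks below implement the same test.]

	/* reached with interrupts disabled, after the interrupt handler has run */
	if (current->preempt_count == 0 &&
	    current->need_resched &&
	    local_irq_count(cpu) + local_bh_count(cpu) == 0) {
		current->preempt_count++;       /* keep nested interrupts from re-entering */
		__sti();                        /* preempt_schedule() runs with irqs on */
		preempt_schedule();
		__cli();
		/* jump back to ret_from_intr: the count is dropped again and the
		   test repeats, in case need_resched was set in the meantime */
	}
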
393 diff -urN linux-2.4.17/arch/i386/kernel/i387.c linux/arch/i386/kernel/i387.c
394 --- linux-2.4.17/arch/i386/kernel/i387.c        Wed Dec 19 03:18:12 2001
395 +++ linux/arch/i386/kernel/i387.c       Fri Dec 21 00:41:25 2001
396 @@ -10,6 +10,7 @@
397  
398  #include <linux/config.h>
399  #include <linux/sched.h>
400 +#include <linux/spinlock.h>
401  #include <asm/processor.h>
402  #include <asm/i387.h>
403  #include <asm/math_emu.h>
404 @@ -65,6 +66,8 @@
405  {
406         struct task_struct *tsk = current;
407  
408 +       preempt_disable();
409 +       
410         if (tsk->flags & PF_USEDFPU) {
411                 __save_init_fpu(tsk);
412                 return;
413 diff -urN linux-2.4.17/arch/i386/kernel/traps.c linux/arch/i386/kernel/traps.c
414 --- linux-2.4.17/arch/i386/kernel/traps.c       Wed Dec 19 03:18:09 2001
415 +++ linux/arch/i386/kernel/traps.c      Fri Dec 21 00:41:25 2001
416 @@ -697,6 +697,11 @@
417   */
418  asmlinkage void math_state_restore(struct pt_regs regs)
419  {
420 +       /*
421 +        * CONFIG_PREEMPT
422 +        * Must be called with preemption disabled
423 +        */
424 +
425         __asm__ __volatile__("clts");           /* Allow maths ops (or we recurse) */
426  
427         if (current->used_math) {
428 diff -urN linux-2.4.17/arch/i386/lib/dec_and_lock.c linux/arch/i386/lib/dec_and_lock.c
429 --- linux-2.4.17/arch/i386/lib/dec_and_lock.c   Wed Dec 19 03:18:09 2001
430 +++ linux/arch/i386/lib/dec_and_lock.c  Fri Dec 21 00:41:25 2001
431 @@ -8,6 +8,7 @@
432   */
433  
434  #include <linux/spinlock.h>
435 +#include <linux/sched.h>
436  #include <asm/atomic.h>
437  
438  int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
439 diff -urN linux-2.4.17/arch/sh/config.in linux/arch/sh/config.in
440 --- linux-2.4.17/arch/sh/config.in      Wed Dec 19 03:18:31 2001
441 +++ linux/arch/sh/config.in     Fri Dec 21 00:41:25 2001
442 @@ -124,6 +124,8 @@
443     hex 'Physical memory start address' CONFIG_MEMORY_START 08000000
444     hex 'Physical memory size' CONFIG_MEMORY_SIZE 00400000
445  fi
446 +# Preemptible kernel feature
447 +bool 'Preemptible Kernel' CONFIG_PREEMPT
448  endmenu
449  
450  if [ "$CONFIG_SH_HP690" = "y" ]; then
451 diff -urN linux-2.4.17/arch/sh/kernel/entry.S linux/arch/sh/kernel/entry.S
452 --- linux-2.4.17/arch/sh/kernel/entry.S Wed Dec 19 03:18:31 2001
453 +++ linux/arch/sh/kernel/entry.S        Fri Dec 21 00:41:25 2001
454 @@ -60,10 +60,18 @@
455  /*
456   * These are offsets into the task-struct.
457   */
458 -flags          =  4
459 +preempt_count  =  4
460  sigpending     =  8
461  need_resched   = 20
462  tsk_ptrace     = 24
463 +flags          = 84
464 +
465 +/*
466 + * And these offsets are into irq_stat.
467 + * (Find irq_cpustat_t in asm-sh/hardirq.h)
468 + */
469 +local_irq_count =  8
470 +local_bh_count  = 12
471  
472  PT_TRACESYS  = 0x00000002
473  PF_USEDFPU   = 0x00100000
474 @@ -143,7 +151,7 @@
475         mov.l   __INV_IMASK, r11;       \
476         stc     sr, r10;                \
477         and     r11, r10;               \
478 -       stc     k_g_imask, r11; \
479 +       stc     k_g_imask, r11;         \
480         or      r11, r10;               \
481         ldc     r10, sr
482  
483 @@ -304,8 +312,8 @@
484         mov.l   @(tsk_ptrace,r0), r0    ! Is current PTRACE_SYSCALL'd?
485         mov     #PT_TRACESYS, r1
486         tst     r1, r0
487 -       bt      ret_from_syscall
488 -       bra     syscall_ret_trace
489 +       bf      syscall_ret_trace
490 +       bra     ret_from_syscall
491          nop     
492  
493         .align  2
494 @@ -505,8 +513,6 @@
495         .long   syscall_ret_trace
496  __syscall_ret:
497         .long   syscall_ret
498 -__INV_IMASK:
499 -       .long   0xffffff0f      ! ~(IMASK)
500  
501  
502         .align  2
503 @@ -518,7 +524,84 @@
504         .align  2
505  1:     .long   SYMBOL_NAME(schedule)
506  
507 +#ifdef CONFIG_PREEMPT  
508 +       !
509 +       ! Returning from interrupt during kernel mode: check if
510 +       ! preempt_schedule should be called. If need_resched flag
511 +       ! is set, preempt_count is zero, and we're not currently
512 +       ! in an interrupt handler (local irq or bottom half) then
513 +       ! call preempt_schedule. 
514 +       !
515 +       ! Increment preempt_count to prevent a nested interrupt
516 +       ! from reentering preempt_schedule, then decrement after
517 +       ! and drop through to regular interrupt return which will
518 +       ! jump back and check again in case such an interrupt did
519 +       ! come in (and didn't preempt due to preempt_count).
520 +       !
521 +       ! NOTE: because we just checked that preempt_count was
522 +       ! zero before getting to the call, can't we use immediate
523 +       ! values (1 and 0) rather than inc/dec? Also, rather than
524 +       ! drop through to ret_from_irq, we already know this thread
525 +       ! is kernel mode, can't we go direct to ret_from_kirq? In
526 +       ! fact, with proper interrupt nesting and so forth could
527 +       ! the loop simply be on the need_resched w/o checking the
528 +       ! other stuff again? Optimize later...
529 +       !
530 +       .align  2
531 +ret_from_kirq:
532 +       ! Nonzero preempt_count prevents scheduling
533 +       stc     k_current, r1
534 +       mov.l   @(preempt_count,r1), r0
535 +       cmp/eq  #0, r0
536 +       bf      restore_all
537 +       ! Zero need_resched prevents scheduling
538 +       mov.l   @(need_resched,r1), r0
539 +       cmp/eq  #0, r0
540 +       bt      restore_all
541 +       ! If in_interrupt(), don't schedule
542 +       mov.l   __irq_stat, r1
543 +       mov.l   @(local_irq_count,r1), r0
544 +       mov.l   @(local_bh_count,r1), r1
545 +       or      r1, r0
546 +       cmp/eq  #0, r0
547 +       bf      restore_all
548 +       ! Allow scheduling using preempt_schedule
549 +       ! Adjust preempt_count and SR as needed.
550 +       stc     k_current, r1
551 +       mov.l   @(preempt_count,r1), r0 ! Could replace this ...
552 +       add     #1, r0                  ! ... and this w/mov #1?
553 +       mov.l   r0, @(preempt_count,r1)
554 +       STI()
555 +       mov.l   __preempt_schedule, r0
556 +       jsr     @r0
557 +        nop    
558 +       /* CLI */
559 +       stc     sr, r0
560 +       or      #0xf0, r0
561 +       ldc     r0, sr
562 +       !
563 +       stc     k_current, r1
564 +       mov.l   @(preempt_count,r1), r0 ! Could replace this ...
565 +       add     #-1, r0                 ! ... and this w/mov #0?
566 +       mov.l   r0, @(preempt_count,r1)
567 +       ! Maybe should bra ret_from_kirq, or loop over need_resched?
568 +       ! For now, fall through to ret_from_irq again...
569 +#endif /* CONFIG_PREEMPT */
570 +       
571  ret_from_irq:
572 +       mov     #OFF_SR, r0
573 +       mov.l   @(r0,r15), r0   ! get status register
574 +       shll    r0
575 +       shll    r0              ! kernel space?
576 +#ifndef CONFIG_PREEMPT
577 +       bt      restore_all     ! Yes, it's from kernel, go back soon
578 +#else /* CONFIG_PREEMPT */
579 +       bt      ret_from_kirq   ! From kernel: maybe preempt_schedule
580 +#endif /* CONFIG_PREEMPT */
581 +       !
582 +       bra     ret_from_syscall
583 +        nop
584 +
585  ret_from_exception:
586         mov     #OFF_SR, r0
587         mov.l   @(r0,r15), r0   ! get status register
588 @@ -564,6 +647,13 @@
589         .long   SYMBOL_NAME(do_signal)
590  __irq_stat:
591         .long   SYMBOL_NAME(irq_stat)
592 +#ifdef CONFIG_PREEMPT
593 +__preempt_schedule:
594 +       .long   SYMBOL_NAME(preempt_schedule)
595 +#endif /* CONFIG_PREEMPT */    
596 +__INV_IMASK:
597 +       .long   0xffffff0f      ! ~(IMASK)
598 +
599  
600         .align 2
601  restore_all:
602 @@ -679,7 +769,7 @@
603  __fpu_prepare_fd:
604         .long   SYMBOL_NAME(fpu_prepare_fd)
605  __init_task_flags:
606 -       .long   SYMBOL_NAME(init_task_union)+4
607 +       .long   SYMBOL_NAME(init_task_union)+flags
608  __PF_USEDFPU:
609         .long   PF_USEDFPU
610  #endif
611 diff -urN linux-2.4.17/arch/sh/kernel/irq.c linux/arch/sh/kernel/irq.c
612 --- linux-2.4.17/arch/sh/kernel/irq.c   Wed Dec 19 03:18:31 2001
613 +++ linux/arch/sh/kernel/irq.c  Fri Dec 21 00:41:25 2001
614 @@ -229,6 +229,14 @@
615         struct irqaction * action;
616         unsigned int status;
617  
618 +       /*
619 +        * At this point we're now about to actually call handlers,
620 +        * and interrupts might get reenabled during them... bump
621 +        * preempt_count to prevent any preemption while the handler
622 +        * called here is pending...
623 +        */
624 +       preempt_disable();
625 +
626         /* Get IRQ number */
627         asm volatile("stc       r2_bank, %0\n\t"
628                      "shlr2     %0\n\t"
629 @@ -298,8 +306,17 @@
630         desc->handler->end(irq);
631         spin_unlock(&desc->lock);
632  
633 +
634         if (softirq_pending(cpu))
635                 do_softirq();
636 +
637 +       /*
638 +        * We're done with the handlers, interrupts should be
639 +        * currently disabled; decrement preempt_count now so
640 +        * as we return preemption may be allowed...
641 +        */
642 +       preempt_enable_no_resched();
643 +
644         return 1;
645  }
646  
647 diff -urN linux-2.4.17/drivers/ieee1394/csr.c linux/drivers/ieee1394/csr.c
648 --- linux-2.4.17/drivers/ieee1394/csr.c Wed Dec 19 03:18:03 2001
649 +++ linux/drivers/ieee1394/csr.c        Fri Dec 21 00:41:25 2001
650 @@ -10,6 +10,7 @@
651   */
652  
653  #include <linux/string.h>
654 +#include <linux/sched.h>
655  
656  #include "ieee1394_types.h"
657  #include "hosts.h"
658 diff -urN linux-2.4.17/fs/adfs/map.c linux/fs/adfs/map.c
659 --- linux-2.4.17/fs/adfs/map.c  Wed Dec 19 03:17:26 2001
660 +++ linux/fs/adfs/map.c Fri Dec 21 00:41:25 2001
661 @@ -12,6 +12,7 @@
662  #include <linux/fs.h>
663  #include <linux/adfs_fs.h>
664  #include <linux/spinlock.h>
665 +#include <linux/sched.h>
666  
667  #include "adfs.h"
668  
669 diff -urN linux-2.4.17/fs/exec.c linux/fs/exec.c
670 --- linux-2.4.17/fs/exec.c      Wed Dec 19 03:17:25 2001
671 +++ linux/fs/exec.c     Fri Dec 21 00:41:25 2001
672 @@ -420,8 +420,8 @@
673                 active_mm = current->active_mm;
674                 current->mm = mm;
675                 current->active_mm = mm;
676 -               task_unlock(current);
677                 activate_mm(active_mm, mm);
678 +               task_unlock(current);
679                 mm_release();
680                 if (old_mm) {
681                         if (active_mm != old_mm) BUG();
682 diff -urN linux-2.4.17/fs/fat/cache.c linux/fs/fat/cache.c
683 --- linux-2.4.17/fs/fat/cache.c Wed Dec 19 03:17:26 2001
684 +++ linux/fs/fat/cache.c        Fri Dec 21 00:41:25 2001
685 @@ -14,6 +14,7 @@
686  #include <linux/string.h>
687  #include <linux/stat.h>
688  #include <linux/fat_cvf.h>
689 +#include <linux/sched.h>
690  
691  #if 0
692  #  define PRINTK(x) printk x
693 diff -urN linux-2.4.17/include/asm-arm/dma.h linux/include/asm-arm/dma.h
694 --- linux-2.4.17/include/asm-arm/dma.h  Wed Dec 19 03:17:35 2001
695 +++ linux/include/asm-arm/dma.h Fri Dec 21 00:41:25 2001
696 @@ -5,6 +5,7 @@
697  
698  #include <linux/config.h>
699  #include <linux/spinlock.h>
700 +#include <linux/sched.h>
701  #include <asm/system.h>
702  #include <asm/memory.h>
703  #include <asm/scatterlist.h>
704 diff -urN linux-2.4.17/include/asm-arm/hardirq.h linux/include/asm-arm/hardirq.h
705 --- linux-2.4.17/include/asm-arm/hardirq.h      Wed Dec 19 03:17:35 2001
706 +++ linux/include/asm-arm/hardirq.h     Fri Dec 21 00:41:25 2001
707 @@ -34,6 +34,7 @@
708  #define irq_exit(cpu,irq)      (local_irq_count(cpu)--)
709  
710  #define synchronize_irq()      do { } while (0)
711 +#define release_irqlock(cpu)   do { } while (0)
712  
713  #else
714  #error SMP not supported
715 diff -urN linux-2.4.17/include/asm-arm/mmu_context.h linux/include/asm-arm/mmu_context.h
716 --- linux-2.4.17/include/asm-arm/mmu_context.h  Wed Dec 19 03:17:35 2001
717 +++ linux/include/asm-arm/mmu_context.h Fri Dec 21 00:41:25 2001
718 @@ -42,6 +42,10 @@
719  switch_mm(struct mm_struct *prev, struct mm_struct *next,
720           struct task_struct *tsk, unsigned int cpu)
721  {
722 +#ifdef CONFIG_PREEMPT
723 +       if (preempt_is_disabled() == 0)
724 +               BUG();
725 +#endif
726         if (prev != next) {
727                 cpu_switch_mm(next->pgd, tsk);
728                 clear_bit(cpu, &prev->cpu_vm_mask);
729 diff -urN linux-2.4.17/include/asm-arm/pgalloc.h linux/include/asm-arm/pgalloc.h
730 --- linux-2.4.17/include/asm-arm/pgalloc.h      Wed Dec 19 03:17:35 2001
731 +++ linux/include/asm-arm/pgalloc.h     Fri Dec 21 00:41:25 2001
732 @@ -57,40 +57,48 @@
733  {
734         unsigned long *ret;
735  
736 +       preempt_disable();
737         if ((ret = pgd_quicklist) != NULL) {
738                 pgd_quicklist = (unsigned long *)__pgd_next(ret);
739                 ret[1] = ret[2];
740                 clean_dcache_entry(ret + 1);
741                 pgtable_cache_size--;
742         }
743 +       preempt_enable();
744         return (pgd_t *)ret;
745  }
746  
747  static inline void free_pgd_fast(pgd_t *pgd)
748  {
749 +       preempt_disable();
750         __pgd_next(pgd) = (unsigned long) pgd_quicklist;
751         pgd_quicklist = (unsigned long *) pgd;
752         pgtable_cache_size++;
753 +       preempt_enable();
754  }
755  
756  static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
757  {
758         unsigned long *ret;
759  
760 +       preempt_disable();
761         if((ret = pte_quicklist) != NULL) {
762                 pte_quicklist = (unsigned long *)__pte_next(ret);
763                 ret[0] = 0;
764                 clean_dcache_entry(ret);
765                 pgtable_cache_size--;
766         }
767 +       preempt_enable();
768         return (pte_t *)ret;
769  }
770  
771  static inline void free_pte_fast(pte_t *pte)
772  {
773 +       preempt_disable();
774         __pte_next(pte) = (unsigned long) pte_quicklist;
775         pte_quicklist = (unsigned long *) pte;
776         pgtable_cache_size++;
777 +       preempt_enable();
778  }
779  
780  #else  /* CONFIG_NO_PGT_CACHE */
781 diff -urN linux-2.4.17/include/asm-arm/smplock.h linux/include/asm-arm/smplock.h
782 --- linux-2.4.17/include/asm-arm/smplock.h      Wed Dec 19 03:17:35 2001
783 +++ linux/include/asm-arm/smplock.h     Fri Dec 21 00:41:25 2001
784 @@ -3,12 +3,17 @@
785   *
786   * Default SMP lock implementation
787   */
788 +#include <linux/config.h>
789  #include <linux/interrupt.h>
790  #include <linux/spinlock.h>
791  
792  extern spinlock_t kernel_flag;
793  
794 +#ifdef CONFIG_PREEMPT
795 +#define kernel_locked()                preempt_is_disabled()
796 +#else
797  #define kernel_locked()                spin_is_locked(&kernel_flag)
798 +#endif
799  
800  /*
801   * Release global kernel lock and global interrupt lock
802 @@ -40,8 +45,14 @@
803   */
804  static inline void lock_kernel(void)
805  {
806 +#ifdef CONFIG_PREEMPT
807 +       if (current->lock_depth == -1)
808 +               spin_lock(&kernel_flag);
809 +       ++current->lock_depth;
810 +#else
811         if (!++current->lock_depth)
812                 spin_lock(&kernel_flag);
813 +#endif
814  }
815  
816  static inline void unlock_kernel(void)
817 diff -urN linux-2.4.17/include/asm-arm/softirq.h linux/include/asm-arm/softirq.h
818 --- linux-2.4.17/include/asm-arm/softirq.h      Wed Dec 19 03:17:35 2001
819 +++ linux/include/asm-arm/softirq.h     Fri Dec 21 00:41:25 2001
820 @@ -5,20 +5,22 @@
821  #include <asm/hardirq.h>
822  
823  #define __cpu_bh_enable(cpu) \
824 -               do { barrier(); local_bh_count(cpu)--; } while (0)
825 +               do { barrier(); local_bh_count(cpu)--; preempt_enable(); } while (0)
826  #define cpu_bh_disable(cpu) \
827 -               do { local_bh_count(cpu)++; barrier(); } while (0)
828 +               do { preempt_disable(); local_bh_count(cpu)++; barrier(); } while (0)
829  
830  #define local_bh_disable()     cpu_bh_disable(smp_processor_id())
831  #define __local_bh_enable()    __cpu_bh_enable(smp_processor_id())
832  
833  #define in_softirq()           (local_bh_count(smp_processor_id()) != 0)
834  
835 -#define local_bh_enable()                                              \
836 +#define _local_bh_enable()                                             \
837  do {                                                                   \
838         unsigned int *ptr = &local_bh_count(smp_processor_id());        \
839         if (!--*ptr && ptr[-2])                                         \
840                 __asm__("bl%? __do_softirq": : : "lr");/* out of line */\
841  } while (0)
842  
843 +#define local_bh_enable() do { _local_bh_enable(); preempt_enable(); } while (0)
844 +
845  #endif /* __ASM_SOFTIRQ_H */
846 diff -urN linux-2.4.17/include/asm-i386/hardirq.h linux/include/asm-i386/hardirq.h
847 --- linux-2.4.17/include/asm-i386/hardirq.h     Wed Dec 19 03:17:31 2001
848 +++ linux/include/asm-i386/hardirq.h    Fri Dec 21 00:41:25 2001
849 @@ -36,6 +36,8 @@
850  
851  #define synchronize_irq()      barrier()
852  
853 +#define release_irqlock(cpu)   do { } while (0)
854 +
855  #else
856  
857  #include <asm/atomic.h>
858 diff -urN linux-2.4.17/include/asm-i386/highmem.h linux/include/asm-i386/highmem.h
859 --- linux-2.4.17/include/asm-i386/highmem.h     Wed Dec 19 03:17:31 2001
860 +++ linux/include/asm-i386/highmem.h    Fri Dec 21 00:41:25 2001
861 @@ -88,6 +88,7 @@
862         enum fixed_addresses idx;
863         unsigned long vaddr;
864  
865 +       preempt_disable();
866         if (page < highmem_start_page)
867                 return page_address(page);
868  
869 @@ -109,8 +110,10 @@
870         unsigned long vaddr = (unsigned long) kvaddr;
871         enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
872  
873 -       if (vaddr < FIXADDR_START) // FIXME
874 +       if (vaddr < FIXADDR_START) { // FIXME
875 +               preempt_enable();
876                 return;
877 +       }
878  
879         if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
880                 BUG();
881 @@ -122,6 +125,8 @@
882         pte_clear(kmap_pte-idx);
883         __flush_tlb_one(vaddr);
884  #endif
885 +
886 +       preempt_enable();
887  }
888  
889  #endif /* __KERNEL__ */
890 diff -urN linux-2.4.17/include/asm-i386/hw_irq.h linux/include/asm-i386/hw_irq.h
891 --- linux-2.4.17/include/asm-i386/hw_irq.h      Wed Dec 19 03:17:31 2001
892 +++ linux/include/asm-i386/hw_irq.h     Fri Dec 21 00:41:25 2001
893 @@ -95,6 +95,18 @@
894  #define __STR(x) #x
895  #define STR(x) __STR(x)
896  
897 +#define GET_CURRENT \
898 +       "movl %esp, %ebx\n\t" \
899 +       "andl $-8192, %ebx\n\t"
900 +
901 +#ifdef CONFIG_PREEMPT
902 +#define BUMP_LOCK_COUNT \
903 +       GET_CURRENT \
904 +       "incl 4(%ebx)\n\t"
905 +#else
906 +#define BUMP_LOCK_COUNT
907 +#endif
908 +
909  #define SAVE_ALL \
910         "cld\n\t" \
911         "pushl %es\n\t" \
912 @@ -108,15 +120,12 @@
913         "pushl %ebx\n\t" \
914         "movl $" STR(__KERNEL_DS) ",%edx\n\t" \
915         "movl %edx,%ds\n\t" \
916 -       "movl %edx,%es\n\t"
917 +       "movl %edx,%es\n\t" \
918 +       BUMP_LOCK_COUNT
919  
920  #define IRQ_NAME2(nr) nr##_interrupt(void)
921  #define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
922  
923 -#define GET_CURRENT \
924 -       "movl %esp, %ebx\n\t" \
925 -       "andl $-8192, %ebx\n\t"
926 -
927  /*
928   *     SMP has a few special interrupts for IPI messages
929   */
930 diff -urN linux-2.4.17/include/asm-i386/i387.h linux/include/asm-i386/i387.h
931 --- linux-2.4.17/include/asm-i386/i387.h        Wed Dec 19 03:17:31 2001
932 +++ linux/include/asm-i386/i387.h       Fri Dec 21 00:41:25 2001
933 @@ -12,6 +12,7 @@
934  #define __ASM_I386_I387_H
935  
936  #include <linux/sched.h>
937 +#include <linux/spinlock.h>
938  #include <asm/processor.h>
939  #include <asm/sigcontext.h>
940  #include <asm/user.h>
941 @@ -24,7 +25,7 @@
942  extern void restore_fpu( struct task_struct *tsk );
943  
944  extern void kernel_fpu_begin(void);
945 -#define kernel_fpu_end() stts()
946 +#define kernel_fpu_end() do { stts(); preempt_enable(); } while(0)
947  
948  
949  #define unlazy_fpu( tsk ) do { \
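
   [Editor's note: with this change, kernel code that touches FPU/MMX state follows
   the pattern described in Documentation/preempt-locking.txt above: kernel_fpu_begin()
   now implies preempt_disable() (see the i387.c hunk) and kernel_fpu_end() re-enables
   preemption after stts().  An illustrative, non-patch usage sketch:]

	kernel_fpu_begin();     /* disables preemption, saves user FPU state if live */
	/* ... use FPU/MMX registers; the task cannot be preempted here ... */
	kernel_fpu_end();       /* stts(), then preempt_enable() */
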
950 diff -urN linux-2.4.17/include/asm-i386/mmu_context.h linux/include/asm-i386/mmu_context.h
951 --- linux-2.4.17/include/asm-i386/mmu_context.h Wed Dec 19 03:17:31 2001
952 +++ linux/include/asm-i386/mmu_context.h        Fri Dec 21 00:41:25 2001
953 @@ -27,6 +27,10 @@
954  
955  static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
956  {
957 +#ifdef CONFIG_PREEMPT
958 +       if (preempt_is_disabled() == 0)
959 +               BUG();
960 +#endif
961         if (prev != next) {
962                 /* stop flush ipis for the previous mm */
963                 clear_bit(cpu, &prev->cpu_vm_mask);
964 diff -urN linux-2.4.17/include/asm-i386/pgalloc.h linux/include/asm-i386/pgalloc.h
965 --- linux-2.4.17/include/asm-i386/pgalloc.h     Wed Dec 19 03:17:31 2001
966 +++ linux/include/asm-i386/pgalloc.h    Fri Dec 21 00:41:25 2001
967 @@ -75,20 +75,26 @@
968  {
969         unsigned long *ret;
970  
971 +       preempt_disable();
972         if ((ret = pgd_quicklist) != NULL) {
973                 pgd_quicklist = (unsigned long *)(*ret);
974                 ret[0] = 0;
975                 pgtable_cache_size--;
976 -       } else
977 +               preempt_enable();
978 +       } else {
979 +               preempt_enable();
980                 ret = (unsigned long *)get_pgd_slow();
981 +       }
982         return (pgd_t *)ret;
983  }
984  
985  static inline void free_pgd_fast(pgd_t *pgd)
986  {
987 +       preempt_disable();
988         *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
989         pgd_quicklist = (unsigned long *) pgd;
990         pgtable_cache_size++;
991 +       preempt_enable();
992  }
993  
994  static inline void free_pgd_slow(pgd_t *pgd)
995 @@ -119,19 +125,23 @@
996  {
997         unsigned long *ret;
998  
999 +       preempt_disable();
1000         if ((ret = (unsigned long *)pte_quicklist) != NULL) {
1001                 pte_quicklist = (unsigned long *)(*ret);
1002                 ret[0] = ret[1];
1003                 pgtable_cache_size--;
1004         }
1005 +       preempt_enable();
1006         return (pte_t *)ret;
1007  }
1008  
1009  static inline void pte_free_fast(pte_t *pte)
1010  {
1011 +       preempt_disable();
1012         *(unsigned long *)pte = (unsigned long) pte_quicklist;
1013         pte_quicklist = (unsigned long *) pte;
1014         pgtable_cache_size++;
1015 +       preempt_enable();
1016  }
1017  
1018  static __inline__ void pte_free_slow(pte_t *pte)
1019 diff -urN linux-2.4.17/include/asm-i386/processor.h linux/include/asm-i386/processor.h
1020 --- linux-2.4.17/include/asm-i386/processor.h   Wed Dec 19 03:17:31 2001
1021 +++ linux/include/asm-i386/processor.h  Fri Dec 21 00:41:25 2001
1022 @@ -502,7 +502,10 @@
1023  {
1024          __asm__ __volatile__ ("prefetchw (%0)" : : "r"(x));
1025  }
1026 -#define spin_lock_prefetch(x)  prefetchw(x)
1027 +#define spin_lock_prefetch(x) do {                             \
1028 +       prefetchw(x);                                           \
1029 +       preempt_prefetch(&current->preempt_count);              \
1030 +} while(0)
1031  
1032  #endif
1033  
1034 diff -urN linux-2.4.17/include/asm-i386/smplock.h linux/include/asm-i386/smplock.h
1035 --- linux-2.4.17/include/asm-i386/smplock.h     Wed Dec 19 03:17:31 2001
1036 +++ linux/include/asm-i386/smplock.h    Fri Dec 21 00:41:25 2001
1037 @@ -10,7 +10,15 @@
1038  
1039  extern spinlock_t kernel_flag;
1040  
1041 +#ifdef CONFIG_SMP
1042  #define kernel_locked()                spin_is_locked(&kernel_flag)
1043 +#else
1044 +#ifdef CONFIG_PREEMPT
1045 +#define kernel_locked()                preempt_is_disabled()
1046 +#else
1047 +#define kernel_locked()                1
1048 +#endif
1049 +#endif
1050  
1051  /*
1052   * Release global kernel lock and global interrupt lock
1053 @@ -42,6 +50,11 @@
1054   */
1055  static __inline__ void lock_kernel(void)
1056  {
1057 +#ifdef CONFIG_PREEMPT
1058 +       if (current->lock_depth == -1)
1059 +               spin_lock(&kernel_flag);
1060 +       ++current->lock_depth;
1061 +#else
1062  #if 1
1063         if (!++current->lock_depth)
1064                 spin_lock(&kernel_flag);
1065 @@ -54,6 +67,7 @@
1066                 :"=m" (__dummy_lock(&kernel_flag)),
1067                  "=m" (current->lock_depth));
1068  #endif
1069 +#endif
1070  }
1071  
1072  static __inline__ void unlock_kernel(void)
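
   [Editor's note: under CONFIG_PREEMPT, lock_kernel() takes the spinlock before
   raising lock_depth instead of using the old !++current->lock_depth idiom.
   Presumably this is because a preemption between the increment and the spin_lock()
   would let release_kernel_lock() (see the asm-sh/smplock.h hunk below) unlock a
   kernel_flag this task does not yet hold; once the spinlock is held, preemption is
   off and the increment is safe.  Nesting still works the same way, as this
   illustrative, non-patch sequence shows:]

	lock_kernel();          /* lock_depth: -1 -> 0, kernel_flag acquired  */
	lock_kernel();          /* lock_depth:  0 -> 1, already held          */
	unlock_kernel();        /* lock_depth:  1 -> 0, still held            */
	unlock_kernel();        /* lock_depth:  0 -> -1, kernel_flag released */
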
1073 diff -urN linux-2.4.17/include/asm-i386/softirq.h linux/include/asm-i386/softirq.h
1074 --- linux-2.4.17/include/asm-i386/softirq.h     Wed Dec 19 03:17:31 2001
1075 +++ linux/include/asm-i386/softirq.h    Fri Dec 21 00:41:25 2001
1076 @@ -5,9 +5,9 @@
1077  #include <asm/hardirq.h>
1078  
1079  #define __cpu_bh_enable(cpu) \
1080 -               do { barrier(); local_bh_count(cpu)--; } while (0)
1081 +               do { barrier(); local_bh_count(cpu)--; preempt_enable(); } while (0)
1082  #define cpu_bh_disable(cpu) \
1083 -               do { local_bh_count(cpu)++; barrier(); } while (0)
1084 +               do { preempt_disable(); local_bh_count(cpu)++; barrier(); } while (0)
1085  
1086  #define local_bh_disable()     cpu_bh_disable(smp_processor_id())
1087  #define __local_bh_enable()    __cpu_bh_enable(smp_processor_id())
1088 @@ -22,7 +22,7 @@
1089   * If you change the offsets in irq_stat then you have to
1090   * update this code as well.
1091   */
1092 -#define local_bh_enable()                                              \
1093 +#define _local_bh_enable()                                             \
1094  do {                                                                   \
1095         unsigned int *ptr = &local_bh_count(smp_processor_id());        \
1096                                                                         \
1097 @@ -45,4 +45,6 @@
1098                 /* no registers clobbered */ );                         \
1099  } while (0)
1100  
1101 +#define local_bh_enable() do { _local_bh_enable(); preempt_enable(); } while (0)
1102 +
1103  #endif /* __ASM_SOFTIRQ_H */
1104 diff -urN linux-2.4.17/include/asm-i386/spinlock.h linux/include/asm-i386/spinlock.h
1105 --- linux-2.4.17/include/asm-i386/spinlock.h    Wed Dec 19 03:17:31 2001
1106 +++ linux/include/asm-i386/spinlock.h   Fri Dec 21 00:41:25 2001
1107 @@ -77,7 +77,7 @@
1108                 :"=m" (lock->lock) : : "memory"
1109  
1110  
1111 -static inline void spin_unlock(spinlock_t *lock)
1112 +static inline void _raw_spin_unlock(spinlock_t *lock)
1113  {
1114  #if SPINLOCK_DEBUG
1115         if (lock->magic != SPINLOCK_MAGIC)
1116 @@ -97,7 +97,7 @@
1117                 :"=q" (oldval), "=m" (lock->lock) \
1118                 :"0" (oldval) : "memory"
1119  
1120 -static inline void spin_unlock(spinlock_t *lock)
1121 +static inline void _raw_spin_unlock(spinlock_t *lock)
1122  {
1123         char oldval = 1;
1124  #if SPINLOCK_DEBUG
1125 @@ -113,7 +113,7 @@
1126  
1127  #endif
1128  
1129 -static inline int spin_trylock(spinlock_t *lock)
1130 +static inline int _raw_spin_trylock(spinlock_t *lock)
1131  {
1132         char oldval;
1133         __asm__ __volatile__(
1134 @@ -123,7 +123,7 @@
1135         return oldval > 0;
1136  }
1137  
1138 -static inline void spin_lock(spinlock_t *lock)
1139 +static inline void _raw_spin_lock(spinlock_t *lock)
1140  {
1141  #if SPINLOCK_DEBUG
1142         __label__ here;
1143 @@ -179,7 +179,7 @@
1144   */
1145  /* the spinlock helpers are in arch/i386/kernel/semaphore.c */
1146  
1147 -static inline void read_lock(rwlock_t *rw)
1148 +static inline void _raw_read_lock(rwlock_t *rw)
1149  {
1150  #if SPINLOCK_DEBUG
1151         if (rw->magic != RWLOCK_MAGIC)
1152 @@ -188,7 +188,7 @@
1153         __build_read_lock(rw, "__read_lock_failed");
1154  }
1155  
1156 -static inline void write_lock(rwlock_t *rw)
1157 +static inline void _raw_write_lock(rwlock_t *rw)
1158  {
1159  #if SPINLOCK_DEBUG
1160         if (rw->magic != RWLOCK_MAGIC)
1161 @@ -197,10 +197,10 @@
1162         __build_write_lock(rw, "__write_lock_failed");
1163  }
1164  
1165 -#define read_unlock(rw)                asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
1166 -#define write_unlock(rw)       asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
1167 +#define _raw_read_unlock(rw)           asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
1168 +#define _raw_write_unlock(rw)  asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
1169  
1170 -static inline int write_trylock(rwlock_t *lock)
1171 +static inline int _raw_write_trylock(rwlock_t *lock)
1172  {
1173         atomic_t *count = (atomic_t *)lock;
1174         if (atomic_sub_and_test(RW_LOCK_BIAS, count))
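
   [Editor's note: the locking primitives above are renamed to _raw_* variants so
   that generic wrappers can fold preemption control around the existing SMP code.
   The real wrappers are supplied by the CONFIG_PREEMPT section of linux/spinlock.h
   near the end of this patch; the sketch below only shows the shape they take and
   is illustrative rather than a copy of the patch.]

	#define spin_lock(lock)			\
	do {					\
		preempt_disable();		\
		_raw_spin_lock(lock);		\
	} while (0)

	#define spin_unlock(lock)		\
	do {					\
		_raw_spin_unlock(lock);		\
		preempt_enable();		\
	} while (0)

   [Dropping the preempt count in spin_unlock() is what allows a kernel-mode task to
   be rescheduled as soon as it leaves its last critical section.]
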
1175 diff -urN linux-2.4.17/include/asm-sh/hardirq.h linux/include/asm-sh/hardirq.h
1176 --- linux-2.4.17/include/asm-sh/hardirq.h       Wed Dec 19 03:17:36 2001
1177 +++ linux/include/asm-sh/hardirq.h      Fri Dec 21 00:41:25 2001
1178 @@ -34,6 +34,8 @@
1179  
1180  #define synchronize_irq()      barrier()
1181  
1182 +#define release_irqlock(cpu)   do { } while (0)
1183 +
1184  #else
1185  
1186  #error Super-H SMP is not available
1187 diff -urN linux-2.4.17/include/asm-sh/mmu_context.h linux/include/asm-sh/mmu_context.h
1188 --- linux-2.4.17/include/asm-sh/mmu_context.h   Wed Dec 19 03:17:36 2001
1189 +++ linux/include/asm-sh/mmu_context.h  Fri Dec 21 00:41:25 2001
1190 @@ -166,6 +166,10 @@
1191                                  struct mm_struct *next,
1192                                  struct task_struct *tsk, unsigned int cpu)
1193  {
1194 +#ifdef CONFIG_PREEMPT
1195 +       if (preempt_is_disabled() == 0)
1196 +               BUG();
1197 +#endif
1198         if (prev != next) {
1199                 unsigned long __pgdir = (unsigned long)next->pgd;
1200  
1201 diff -urN linux-2.4.17/include/asm-sh/smplock.h linux/include/asm-sh/smplock.h
1202 --- linux-2.4.17/include/asm-sh/smplock.h       Wed Dec 19 03:17:36 2001
1203 +++ linux/include/asm-sh/smplock.h      Fri Dec 21 00:41:25 2001
1204 @@ -9,15 +9,88 @@
1205  
1206  #include <linux/config.h>
1207  
1208 -#ifndef CONFIG_SMP
1209 -
1210 +#if !defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT)
1211 +/*
1212 + * Should never happen, since linux/smp_lock.h catches this case;
1213 + * but in case this file is included directly with neither SMP nor
1214 + * PREEMPT configuration, provide same dummys as linux/smp_lock.h
1215 + */
1216  #define lock_kernel()                          do { } while(0)
1217  #define unlock_kernel()                                do { } while(0)
1218 -#define release_kernel_lock(task, cpu, depth)  ((depth) = 1)
1219 -#define reacquire_kernel_lock(task, cpu, depth)        do { } while(0)
1220 +#define release_kernel_lock(task, cpu)         do { } while(0)
1221 +#define reacquire_kernel_lock(task)            do { } while(0)
1222 +#define kernel_locked()                1
1223 +
1224 +#else /* CONFIG_SMP || CONFIG_PREEMPT */
1225 +
1226 +#if CONFIG_SMP
1227 +#error "We do not support SMP on SH yet"
1228 +#endif
1229 +/*
1230 + * Default SMP lock implementation (i.e. the i386 version)
1231 + */
1232 +
1233 +#include <linux/interrupt.h>
1234 +#include <linux/spinlock.h>
1235 +
1236 +extern spinlock_t kernel_flag;
1237 +#define lock_bkl() spin_lock(&kernel_flag)
1238 +#define unlock_bkl() spin_unlock(&kernel_flag)
1239  
1240 +#ifdef CONFIG_SMP
1241 +#define kernel_locked()                spin_is_locked(&kernel_flag)
1242 +#elif  CONFIG_PREEMPT
1243 +#define kernel_locked()                preempt_is_disabled()
1244 +#else  /* neither */
1245 +#define kernel_locked()                1
1246 +#endif
1247 +
1248 +/*
1249 + * Release global kernel lock and global interrupt lock
1250 + */
1251 +#define release_kernel_lock(task, cpu) \
1252 +do { \
1253 +       if (task->lock_depth >= 0) \
1254 +               spin_unlock(&kernel_flag); \
1255 +       release_irqlock(cpu); \
1256 +       __sti(); \
1257 +} while (0)
1258 +
1259 +/*
1260 + * Re-acquire the kernel lock
1261 + */
1262 +#define reacquire_kernel_lock(task) \
1263 +do { \
1264 +       if (task->lock_depth >= 0) \
1265 +               spin_lock(&kernel_flag); \
1266 +} while (0)
1267 +
1268 +/*
1269 + * Getting the big kernel lock.
1270 + *
1271 + * This cannot happen asynchronously,
1272 + * so we only need to worry about other
1273 + * CPU's.
1274 + */
1275 +static __inline__ void lock_kernel(void)
1276 +{
1277 +#ifdef CONFIG_PREEMPT
1278 +       if (current->lock_depth == -1)
1279 +               spin_lock(&kernel_flag);
1280 +       ++current->lock_depth;
1281  #else
1282 -#error "We do not support SMP on SH"
1283 -#endif /* CONFIG_SMP */
1284 +       if (!++current->lock_depth)
1285 +               spin_lock(&kernel_flag);
1286 +#endif
1287 +}
1288 +
1289 +static __inline__ void unlock_kernel(void)
1290 +{
1291 +       if (current->lock_depth < 0)
1292 +               BUG();
1293 +       if (--current->lock_depth < 0)
1294 +               spin_unlock(&kernel_flag);
1295 +}
1296 +#endif /* CONFIG_SMP || CONFIG_PREEMPT */
1297  
1298  #endif /* __ASM_SH_SMPLOCK_H */
1299 diff -urN linux-2.4.17/include/asm-sh/softirq.h linux/include/asm-sh/softirq.h
1300 --- linux-2.4.17/include/asm-sh/softirq.h       Wed Dec 19 03:17:36 2001
1301 +++ linux/include/asm-sh/softirq.h      Fri Dec 21 00:41:25 2001
1302 @@ -6,6 +6,7 @@
1303  
1304  #define local_bh_disable()                     \
1305  do {                                           \
1306 +       preempt_disable();                      \
1307         local_bh_count(smp_processor_id())++;   \
1308         barrier();                              \
1309  } while (0)
1310 @@ -14,6 +15,7 @@
1311  do {                                           \
1312         barrier();                              \
1313         local_bh_count(smp_processor_id())--;   \
1314 +       preempt_enable();                       \
1315  } while (0)
1316  
1317  #define local_bh_enable()                              \
1318 @@ -22,6 +24,7 @@
1319         if (!--local_bh_count(smp_processor_id())       \
1320             && softirq_pending(smp_processor_id())) {   \
1321                 do_softirq();                           \
1322 +       preempt_enable();                               \
1323         }                                               \
1324  } while (0)
1325  
1326 diff -urN linux-2.4.17/include/linux/brlock.h linux/include/linux/brlock.h
1327 --- linux-2.4.17/include/linux/brlock.h Wed Dec 19 03:17:31 2001
1328 +++ linux/include/linux/brlock.h        Fri Dec 21 00:41:25 2001
1329 @@ -171,11 +171,11 @@
1330  }
1331  
1332  #else
1333 -# define br_read_lock(idx)     ((void)(idx))
1334 -# define br_read_unlock(idx)   ((void)(idx))
1335 -# define br_write_lock(idx)    ((void)(idx))
1336 -# define br_write_unlock(idx)  ((void)(idx))
1337 -#endif
1338 +# define br_read_lock(idx)     ({ (void)(idx); preempt_disable(); })
1339 +# define br_read_unlock(idx)   ({ (void)(idx); preempt_enable(); })
1340 +# define br_write_lock(idx)    ({ (void)(idx); preempt_disable(); })
1341 +# define br_write_unlock(idx)  ({ (void)(idx); preempt_enable(); })
1342 +#endif /* CONFIG_SMP */
1343  
1344  /*
1345   * Now enumerate all of the possible sw/hw IRQ protected
1346 diff -urN linux-2.4.17/include/linux/dcache.h linux/include/linux/dcache.h
1347 --- linux-2.4.17/include/linux/dcache.h Wed Dec 19 03:17:31 2001
1348 +++ linux/include/linux/dcache.h        Fri Dec 21 00:41:25 2001
1349 @@ -126,31 +126,6 @@
1350  
1351  extern spinlock_t dcache_lock;
1352  
1353 -/**
1354 - * d_drop - drop a dentry
1355 - * @dentry: dentry to drop
1356 - *
1357 - * d_drop() unhashes the entry from the parent
1358 - * dentry hashes, so that it won't be found through
1359 - * a VFS lookup any more. Note that this is different
1360 - * from deleting the dentry - d_delete will try to
1361 - * mark the dentry negative if possible, giving a
1362 - * successful _negative_ lookup, while d_drop will
1363 - * just make the cache lookup fail.
1364 - *
1365 - * d_drop() is used mainly for stuff that wants
1366 - * to invalidate a dentry for some reason (NFS
1367 - * timeouts or autofs deletes).
1368 - */
1369 -
1370 -static __inline__ void d_drop(struct dentry * dentry)
1371 -{
1372 -       spin_lock(&dcache_lock);
1373 -       list_del(&dentry->d_hash);
1374 -       INIT_LIST_HEAD(&dentry->d_hash);
1375 -       spin_unlock(&dcache_lock);
1376 -}
1377 -
1378  static __inline__ int dname_external(struct dentry *d)
1379  {
1380         return d->d_name.name != d->d_iname; 
1381 @@ -275,3 +250,34 @@
1382  #endif /* __KERNEL__ */
1383  
1384  #endif /* __LINUX_DCACHE_H */
1385 +
1386 +#if !defined(__LINUX_DCACHE_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
1387 +#define __LINUX_DCACHE_H_INLINES
1388 +
1389 +#ifdef __KERNEL__
1390 +/**
1391 + * d_drop - drop a dentry
1392 + * @dentry: dentry to drop
1393 + *
1394 + * d_drop() unhashes the entry from the parent
1395 + * dentry hashes, so that it won't be found through
1396 + * a VFS lookup any more. Note that this is different
1397 + * from deleting the dentry - d_delete will try to
1398 + * mark the dentry negative if possible, giving a
1399 + * successful _negative_ lookup, while d_drop will
1400 + * just make the cache lookup fail.
1401 + *
1402 + * d_drop() is used mainly for stuff that wants
1403 + * to invalidate a dentry for some reason (NFS
1404 + * timeouts or autofs deletes).
1405 + */
1406 +
1407 +static __inline__ void d_drop(struct dentry * dentry)
1408 +{
1409 +       spin_lock(&dcache_lock);
1410 +       list_del(&dentry->d_hash);
1411 +       INIT_LIST_HEAD(&dentry->d_hash);
1412 +       spin_unlock(&dcache_lock);
1413 +}
1414 +#endif
1415 +#endif
1416 diff -urN linux-2.4.17/include/linux/fs_struct.h linux/include/linux/fs_struct.h
1417 --- linux-2.4.17/include/linux/fs_struct.h      Wed Dec 19 03:17:31 2001
1418 +++ linux/include/linux/fs_struct.h     Fri Dec 21 00:41:25 2001
1419 @@ -20,6 +20,15 @@
1420  extern void exit_fs(struct task_struct *);
1421  extern void set_fs_altroot(void);
1422  
1423 +struct fs_struct *copy_fs_struct(struct fs_struct *old);
1424 +void put_fs_struct(struct fs_struct *fs);
1425 +
1426 +#endif
1427 +#endif
1428 +
1429 +#if !defined(_LINUX_FS_STRUCT_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
1430 +#define _LINUX_FS_STRUCT_H_INLINES
1431 +#ifdef __KERNEL__
1432  /*
1433   * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
1434   * It can block. Requires the big lock held.
1435 @@ -65,9 +74,5 @@
1436                 mntput(old_pwdmnt);
1437         }
1438  }
1439 -
1440 -struct fs_struct *copy_fs_struct(struct fs_struct *old);
1441 -void put_fs_struct(struct fs_struct *fs);
1442 -
1443  #endif
1444  #endif
1445 diff -urN linux-2.4.17/include/linux/sched.h linux/include/linux/sched.h
1446 --- linux-2.4.17/include/linux/sched.h  Wed Dec 19 03:17:30 2001
1447 +++ linux/include/linux/sched.h Fri Dec 21 00:41:25 2001
1448 @@ -88,6 +88,7 @@
1449  #define TASK_UNINTERRUPTIBLE   2
1450  #define TASK_ZOMBIE            4
1451  #define TASK_STOPPED           8
1452 +#define PREEMPT_ACTIVE         0x40000000
1453  
1454  #define __set_task_state(tsk, state_value)             \
1455         do { (tsk)->state = (state_value); } while (0)
1456 @@ -154,6 +155,9 @@
1457  #define        MAX_SCHEDULE_TIMEOUT    LONG_MAX
1458  extern signed long FASTCALL(schedule_timeout(signed long timeout));
1459  asmlinkage void schedule(void);
1460 +#ifdef CONFIG_PREEMPT
1461 +asmlinkage void preempt_schedule(void);
1462 +#endif
1463  
1464  extern int schedule_task(struct tq_struct *task);
1465  extern void flush_scheduled_tasks(void);
1466 @@ -283,7 +287,17 @@
1467          * offsets of these are hardcoded elsewhere - touch with care
1468          */
1469         volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
1470 -       unsigned long flags;    /* per process flags, defined below */
1471 +        /*
1472 +         * We want the preempt_count in this cache line, but we
1473 +         * a) don't want to mess up the offsets in asm code, and
1474 +         * b) the alignment of the next line below,
1475 +         * so we move "flags" down
1476 +        *
1477 +        * Also note we don't make preempt_count volatile, but we do
1478 +        * need to make sure it is never hiding in a register when
1479 +        * we have an interrupt, so we need to use barrier()
1480 +         */
1481 +       int preempt_count;          /* 0=> preemptable, < 0 => BUG */
1482         int sigpending;
1483         mm_segment_t addr_limit;        /* thread address space:
1484                                                 0-0xBFFFFFFF for user-thead
1485 @@ -325,6 +339,7 @@
1486         struct mm_struct *active_mm;
1487         struct list_head local_pages;
1488         unsigned int allocation_order, nr_local_pages;
1489 +       unsigned long flags;
1490  
1491  /* task state */
1492         struct linux_binfmt *binfmt;
1493 @@ -927,6 +942,11 @@
1494         return res;
1495  }
1496  
1497 +#define _TASK_STRUCT_DEFINED
1498 +#include <linux/dcache.h>
1499 +#include <linux/tqueue.h>
1500 +#include <linux/fs_struct.h>
1501 +
1502  #endif /* __KERNEL__ */
1503  
1504  #endif
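
The preempt_count field added above is a per-task nesting counter: zero means the task may be preempted, every preempt_disable() increments it, and every preempt_enable() decrements it, so protected regions nest naturally and rescheduling is only considered when the count returns to zero. A small usage sketch (hypothetical function, using only the macros this patch defines in linux/spinlock.h):

        static void nested_critical_sections(void)
        {
                preempt_disable();      /* preempt_count: 0 -> 1 */
                /* ... cannot be preempted here ... */
                preempt_disable();      /* nested: 1 -> 2 */
                /* ... still protected ... */
                preempt_enable();       /* 2 -> 1, count non-zero, no preemption yet */
                preempt_enable();       /* 1 -> 0, preempt_schedule() may run if
                                         * need_resched is set */
        }
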
1505 diff -urN linux-2.4.17/include/linux/smp.h linux/include/linux/smp.h
1506 --- linux-2.4.17/include/linux/smp.h    Wed Dec 19 03:17:31 2001
1507 +++ linux/include/linux/smp.h   Fri Dec 21 00:41:25 2001
1508 @@ -81,7 +81,9 @@
1509  #define smp_processor_id()                     0
1510  #define hard_smp_processor_id()                        0
1511  #define smp_threads_ready                      1
1512 +#ifndef CONFIG_PREEMPT
1513  #define kernel_lock()
1514 +#endif
1515  #define cpu_logical_map(cpu)                   0
1516  #define cpu_number_map(cpu)                    0
1517  #define smp_call_function(func,info,retry,wait)        ({ 0; })
1518 diff -urN linux-2.4.17/include/linux/smp_lock.h linux/include/linux/smp_lock.h
1519 --- linux-2.4.17/include/linux/smp_lock.h       Wed Dec 19 03:17:31 2001
1520 +++ linux/include/linux/smp_lock.h      Fri Dec 21 00:41:25 2001
1521 @@ -3,7 +3,7 @@
1522  
1523  #include <linux/config.h>
1524  
1525 -#ifndef CONFIG_SMP
1526 +#if !defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT)
1527  
1528  #define lock_kernel()                          do { } while(0)
1529  #define unlock_kernel()                                do { } while(0)
1530 diff -urN linux-2.4.17/include/linux/spinlock.h linux/include/linux/spinlock.h
1531 --- linux-2.4.17/include/linux/spinlock.h       Wed Dec 19 03:17:31 2001
1532 +++ linux/include/linux/spinlock.h      Fri Dec 21 00:41:25 2001
1533 @@ -2,6 +2,7 @@
1534  #define __LINUX_SPINLOCK_H
1535  
1536  #include <linux/config.h>
1537 +#include <linux/compiler.h>
1538  
1539  /*
1540   * These are the generic versions of the spinlocks and read-write
1541 @@ -45,8 +46,10 @@
1542  
1543  #if (DEBUG_SPINLOCKS < 1)
1544  
1545 +#ifndef CONFIG_PREEMPT
1546  #define atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
1547  #define ATOMIC_DEC_AND_LOCK
1548 +#endif
1549  
1550  /*
1551   * Your basic spinlocks, allowing only a single CPU anywhere
1552 @@ -62,11 +65,11 @@
1553  #endif
1554  
1555  #define spin_lock_init(lock)   do { } while(0)
1556 -#define spin_lock(lock)                (void)(lock) /* Not "unused variable". */
1557 +#define _raw_spin_lock(lock)   (void)(lock) /* Not "unused variable". */
1558  #define spin_is_locked(lock)   (0)
1559 -#define spin_trylock(lock)     ({1; })
1560 +#define _raw_spin_trylock(lock)        ({1; })
1561  #define spin_unlock_wait(lock) do { } while(0)
1562 -#define spin_unlock(lock)      do { } while(0)
1563 +#define _raw_spin_unlock(lock) do { } while(0)
1564  
1565  #elif (DEBUG_SPINLOCKS < 2)
1566  
1567 @@ -125,13 +128,77 @@
1568  #endif
1569  
1570  #define rwlock_init(lock)      do { } while(0)
1571 -#define read_lock(lock)                (void)(lock) /* Not "unused variable". */
1572 -#define read_unlock(lock)      do { } while(0)
1573 -#define write_lock(lock)       (void)(lock) /* Not "unused variable". */
1574 -#define write_unlock(lock)     do { } while(0)
1575 +#define _raw_read_lock(lock)   (void)(lock) /* Not "unused variable". */
1576 +#define _raw_read_unlock(lock) do { } while(0)
1577 +#define _raw_write_lock(lock)  (void)(lock) /* Not "unused variable". */
1578 +#define _raw_write_unlock(lock)        do { } while(0)
1579  
1580  #endif /* !SMP */
1581  
1582 +#ifdef CONFIG_PREEMPT
1583 +
1584 +#define preempt_is_disabled() (current->preempt_count)
1585 +#define preempt_prefetch(a) prefetchw(a)
1586 +
1587 +#define preempt_disable() \
1588 +do { \
1589 +       ++current->preempt_count; \
1590 +       barrier(); \
1591 +} while (0)
1592 +
1593 +#define preempt_enable_no_resched() \
1594 +do { \
1595 +       --current->preempt_count; \
1596 +       barrier(); \
1597 +} while (0)
1598 +
1599 +#define preempt_enable() \
1600 +do { \
1601 +       --current->preempt_count; \
1602 +       barrier(); \
1603 +       if (unlikely((current->preempt_count == 0) && current->need_resched)) \
1604 +               preempt_schedule(); \
1605 +} while (0)
1606 +
1607 +#define spin_lock(lock)        \
1608 +do { \
1609 +       preempt_disable(); \
1610 +       _raw_spin_lock(lock); \
1611 +} while(0)
1612 +#define spin_trylock(lock)     ({preempt_disable(); _raw_spin_trylock(lock) ? \
1613 +                                       1 : ({preempt_enable(); 0;});})
1614 +#define spin_unlock(lock) \
1615 +do { \
1616 +       _raw_spin_unlock(lock); \
1617 +       preempt_enable(); \
1618 +} while (0)
1619 +
1620 +#define read_lock(lock)                ({preempt_disable(); _raw_read_lock(lock);})
1621 +#define read_unlock(lock)      ({_raw_read_unlock(lock); preempt_enable();})
1622 +#define write_lock(lock)       ({preempt_disable(); _raw_write_lock(lock);})
1623 +#define write_unlock(lock)     ({_raw_write_unlock(lock); preempt_enable();})
1624 +#define write_trylock(lock)    ({preempt_disable(); _raw_write_trylock(lock) ? \
1625 +                                       1 : ({preempt_enable(); 0;});})
1626 +
1627 +#else
1628 +
1629 +#define preempt_is_disabled() do { } while (0)
1630 +#define preempt_disable()    do { } while (0)
1631 +#define preempt_enable_no_resched()
1632 +#define preempt_enable()     do { } while (0)
1633 +#define preempt_prefetch(a)
1634 +
1635 +#define spin_lock(lock)                _raw_spin_lock(lock)
1636 +#define spin_trylock(lock)     _raw_spin_trylock(lock)
1637 +#define spin_unlock(lock)      _raw_spin_unlock(lock)
1638 +
1639 +#define read_lock(lock)                _raw_read_lock(lock)
1640 +#define read_unlock(lock)      _raw_read_unlock(lock)
1641 +#define write_lock(lock)       _raw_write_lock(lock)
1642 +#define write_unlock(lock)     _raw_write_unlock(lock)
1643 +#define write_trylock(lock)    _raw_write_trylock(lock)
1644 +#endif
1645 +
1646  /* "lock on reference count zero" */
1647  #ifndef ATOMIC_DEC_AND_LOCK
1648  #include <asm/atomic.h>
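
With the definitions above, every spin_lock()/spin_unlock() pair under CONFIG_PREEMPT brackets the raw lock operation with preempt_disable()/preempt_enable(), and spin_trylock() re-enables preemption itself when the raw trylock fails, keeping the count balanced on both paths. A hedged sketch of what a caller sees, with hypothetical data and only the standard 2.4 spinlock API:

        static spinlock_t demo_lock = SPIN_LOCK_UNLOCKED;
        static int demo_counter;

        static void demo_update(void)
        {
                spin_lock(&demo_lock);          /* preempt_disable() + _raw_spin_lock()   */
                demo_counter++;
                spin_unlock(&demo_lock);        /* _raw_spin_unlock() + preempt_enable()  */
        }

        static int demo_try_update(void)
        {
                if (!spin_trylock(&demo_lock))  /* failure path already re-enabled preemption */
                        return 0;
                demo_counter++;
                spin_unlock(&demo_lock);
                return 1;
        }

On a !CONFIG_PREEMPT build the same code collapses back to the plain _raw_* forms, so existing callers keep their old behaviour.
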
1649 diff -urN linux-2.4.17/include/linux/tqueue.h linux/include/linux/tqueue.h
1650 --- linux-2.4.17/include/linux/tqueue.h Wed Dec 19 03:17:31 2001
1651 +++ linux/include/linux/tqueue.h        Fri Dec 21 00:41:25 2001
1652 @@ -94,6 +94,22 @@
1653  extern spinlock_t tqueue_lock;
1654  
1655  /*
1656 + * Call all "bottom halfs" on a given list.
1657 + */
1658 +
1659 +extern void __run_task_queue(task_queue *list);
1660 +
1661 +static inline void run_task_queue(task_queue *list)
1662 +{
1663 +       if (TQ_ACTIVE(*list))
1664 +               __run_task_queue(list);
1665 +}
1666 +
1667 +#endif /* _LINUX_TQUEUE_H */
1668 +
1669 +#if !defined(_LINUX_TQUEUE_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
1670 +#define _LINUX_TQUEUE_H_INLINES
1671 +/*
1672   * Queue a task on a tq.  Return non-zero if it was successfully
1673   * added.
1674   */
1675 @@ -109,17 +125,4 @@
1676         }
1677         return ret;
1678  }
1679 -
1680 -/*
1681 - * Call all "bottom halfs" on a given list.
1682 - */
1683 -
1684 -extern void __run_task_queue(task_queue *list);
1685 -
1686 -static inline void run_task_queue(task_queue *list)
1687 -{
1688 -       if (TQ_ACTIVE(*list))
1689 -               __run_task_queue(list);
1690 -}
1691 -
1692 -#endif /* _LINUX_TQUEUE_H */
1693 +#endif
1694 diff -urN linux-2.4.17/kernel/exit.c linux/kernel/exit.c
1695 --- linux-2.4.17/kernel/exit.c  Wed Dec 19 03:17:30 2001
1696 +++ linux/kernel/exit.c Fri Dec 21 00:41:25 2001
1697 @@ -273,6 +273,10 @@
1698  struct mm_struct * start_lazy_tlb(void)
1699  {
1700         struct mm_struct *mm = current->mm;
1701 +#ifdef CONFIG_PREEMPT
1702 +       if (preempt_is_disabled() == 0)
1703 +               BUG();
1704 +#endif
1705         current->mm = NULL;
1706         /* active_mm is still 'mm' */
1707         atomic_inc(&mm->mm_count);
1708 @@ -284,6 +288,10 @@
1709  {
1710         struct mm_struct *active_mm = current->active_mm;
1711  
1712 +#ifdef CONFIG_PREEMPT
1713 +       if (preempt_is_disabled() == 0)
1714 +               BUG();
1715 +#endif
1716         current->mm = mm;
1717         if (mm != active_mm) {
1718                 current->active_mm = mm;
1719 @@ -307,8 +315,8 @@
1720                 /* more a memory barrier than a real lock */
1721                 task_lock(tsk);
1722                 tsk->mm = NULL;
1723 -               task_unlock(tsk);
1724                 enter_lazy_tlb(mm, current, smp_processor_id());
1725 +               task_unlock(tsk);
1726                 mmput(mm);
1727         }
1728  }
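
The BUG() checks above assert an invariant rather than adding behaviour: the lazy-TLB helpers switch current->mm and current->active_mm and must not be scheduled away mid-update, so they may only run with preempt_count non-zero. Moving enter_lazy_tlb() ahead of task_unlock() gives exit_mm() that guarantee, because task_lock() is a spinlock and spinlocks now disable preemption. A minimal sketch of the same check packaged as an assertion macro (hypothetical name, built only from identifiers this patch introduces):

        #ifdef CONFIG_PREEMPT
        /* equivalent of the open-coded checks in start_/end_lazy_tlb() */
        #define ASSERT_PREEMPT_DISABLED()               \
                do {                                    \
                        if (preempt_is_disabled() == 0) \
                                BUG();                  \
                } while (0)
        #else
        #define ASSERT_PREEMPT_DISABLED()       do { } while (0)
        #endif
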
1729 diff -urN linux-2.4.17/kernel/fork.c linux/kernel/fork.c
1730 --- linux-2.4.17/kernel/fork.c  Wed Dec 19 03:17:30 2001
1731 +++ linux/kernel/fork.c Fri Dec 21 00:41:25 2001
1732 @@ -604,6 +604,12 @@
1733         if (p->binfmt && p->binfmt->module)
1734                 __MOD_INC_USE_COUNT(p->binfmt->module);
1735  
1736 +#ifdef CONFIG_PREEMPT
1737 +        /* Since we are keeping the context switch off state as part
1738 +         * of the context, make sure we start with it off.
1739 +         */
1740 +       p->preempt_count = 1;
1741 +#endif
1742         p->did_exec = 0;
1743         p->swappable = 0;
1744         p->state = TASK_UNINTERRUPTIBLE;
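
Setting p->preempt_count to 1 here pairs with the preempt_enable() this patch adds to schedule_tail() in kernel/sched.c: a new child is created inside what amounts to a preemption-disabled section and only becomes preemptible once its first switch-in has completed. The lifecycle, summarised as an annotated sketch rather than patch code:

        p->preempt_count = 1;           /* set in the fork path above                */
        /* ... child is later selected by schedule() on some CPU ...                 */
        schedule_tail(prev);            /* __schedule_tail(prev); preempt_enable()   */
        /* preempt_count is back to 0: the child now runs fully preemptible          */
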
1745 diff -urN linux-2.4.17/kernel/ksyms.c linux/kernel/ksyms.c
1746 --- linux-2.4.17/kernel/ksyms.c Wed Dec 19 03:17:30 2001
1747 +++ linux/kernel/ksyms.c        Fri Dec 21 00:41:25 2001
1748 @@ -436,6 +436,9 @@
1749  EXPORT_SYMBOL(interruptible_sleep_on);
1750  EXPORT_SYMBOL(interruptible_sleep_on_timeout);
1751  EXPORT_SYMBOL(schedule);
1752 +#ifdef CONFIG_PREEMPT
1753 +EXPORT_SYMBOL(preempt_schedule);
1754 +#endif
1755  EXPORT_SYMBOL(schedule_timeout);
1756  EXPORT_SYMBOL(jiffies);
1757  EXPORT_SYMBOL(xtime);
1758 diff -urN linux-2.4.17/kernel/sched.c linux/kernel/sched.c
1759 --- linux-2.4.17/kernel/sched.c Wed Dec 19 03:17:30 2001
1760 +++ linux/kernel/sched.c        Fri Dec 21 00:41:47 2001
1761 @@ -491,7 +491,7 @@
1762         task_lock(prev);
1763         task_release_cpu(prev);
1764         mb();
1765 -       if (prev->state == TASK_RUNNING)
1766 +       if (task_on_runqueue(prev))
1767                 goto needs_resched;
1768  
1769  out_unlock:
1770 @@ -521,7 +521,7 @@
1771                         goto out_unlock;
1772  
1773                 spin_lock_irqsave(&runqueue_lock, flags);
1774 -               if ((prev->state == TASK_RUNNING) && !task_has_cpu(prev))
1775 +               if (task_on_runqueue(prev) && !task_has_cpu(prev))
1776                         reschedule_idle(prev);
1777                 spin_unlock_irqrestore(&runqueue_lock, flags);
1778                 goto out_unlock;
1779 @@ -534,6 +534,7 @@
1780  asmlinkage void schedule_tail(struct task_struct *prev)
1781  {
1782         __schedule_tail(prev);
1783 +       preempt_enable();
1784  }
1785  
1786  /*
1787 @@ -556,6 +557,8 @@
1788  
1789         spin_lock_prefetch(&runqueue_lock);
1790  
1791 +       preempt_disable(); 
1792 +
1793         if (!current->active_mm) BUG();
1794  need_resched_back:
1795         prev = current;
1796 @@ -583,6 +586,9 @@
1797                         move_last_runqueue(prev);
1798                 }
1799  
1800 +#ifdef CONFIG_PREEMPT
1801 +       if (preempt_is_disabled() & PREEMPT_ACTIVE) goto treat_like_run;
1802 +#endif
1803         switch (prev->state) {
1804                 case TASK_INTERRUPTIBLE:
1805                         if (signal_pending(prev)) {
1806 @@ -593,6 +599,9 @@
1807                         del_from_runqueue(prev);
1808                 case TASK_RUNNING:;
1809         }
1810 +#ifdef CONFIG_PREEMPT
1811 +       treat_like_run:
1812 +#endif
1813         prev->need_resched = 0;
1814  
1815         /*
1816 @@ -701,6 +710,7 @@
1817         reacquire_kernel_lock(current);
1818         if (current->need_resched)
1819                 goto need_resched_back;
1820 +       preempt_enable_no_resched();
1821         return;
1822  }
1823  
1824 @@ -979,6 +989,19 @@
1825         return setscheduler(pid, -1, param);
1826  }
1827  
1828 +#ifdef CONFIG_PREEMPT
1829 +asmlinkage void preempt_schedule(void)
1830 +{
1831 +       while (current->need_resched) {
1832 +               current->preempt_count += PREEMPT_ACTIVE + 1;
1833 +               barrier();
1834 +               schedule();
1835 +               current->preempt_count -= PREEMPT_ACTIVE + 1;
1836 +               barrier();
1837 +       }
1838 +}
1839 +#endif /* CONFIG_PREEMPT */
1840 +
1841  asmlinkage long sys_sched_getscheduler(pid_t pid)
1842  {
1843         struct task_struct *p;
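
preempt_schedule() is the entry point used when a preemption point fires with need_resched set, typically from preempt_enable() and, in the full patch, from the architecture's interrupt-return code. It tags the task with PREEMPT_ACTIVE so that the test added to schedule() jumps to treat_like_run: and the task is not deleted from the runqueue merely because it happened to be in TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE when it was preempted. Annotated excerpt of the arithmetic, assuming preempt_count was back at zero when the function was entered:

        current->preempt_count += PREEMPT_ACTIVE + 1;   /* 0 -> 0x40000001           */
        barrier();
        schedule();                                     /* sees PREEMPT_ACTIVE and
                                                         * keeps prev on the runqueue */
        current->preempt_count -= PREEMPT_ACTIVE + 1;   /* back to 0                 */
        barrier();
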
1844 diff -urN linux-2.4.17/lib/dec_and_lock.c linux/lib/dec_and_lock.c
1845 --- linux-2.4.17/lib/dec_and_lock.c     Wed Dec 19 03:17:30 2001
1846 +++ linux/lib/dec_and_lock.c    Fri Dec 21 00:41:25 2001
1847 @@ -1,5 +1,6 @@
1848  #include <linux/module.h>
1849  #include <linux/spinlock.h>
1850 +#include <linux/sched.h>
1851  #include <asm/atomic.h>
1852  
1853  /*
1854 diff -urN linux-2.4.17/mm/slab.c linux/mm/slab.c
1855 --- linux-2.4.17/mm/slab.c      Wed Dec 19 03:17:30 2001
1856 +++ linux/mm/slab.c     Fri Dec 21 00:41:25 2001
1857 @@ -49,7 +49,9 @@
1858   *  constructors and destructors are called without any locking.
1859   *  Several members in kmem_cache_t and slab_t never change, they
1860   *     are accessed without any locking.
1861 - *  The per-cpu arrays are never accessed from the wrong cpu, no locking.
1862 + *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
1863 + *     they are however called with local interrupts disabled so no
1864 + *     preempt_disable needed.
1865   *  The non-constant members are protected with a per-cache irq spinlock.
1866   *
1867   * Further notes from the original documentation:
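
The amended comment records why mm/slab.c needs no explicit preempt_disable() calls: its per-CPU array entries are only touched with local interrupts off, and with interrupts disabled there is no interrupt return on that CPU, while the code in those regions calls no preemption-enabling primitives, so the task cannot lose the CPU before interrupts are restored. A minimal sketch of the idiom being relied on, with hypothetical per-CPU data and the standard 2.4 irq macros:

        static int per_cpu_stat[NR_CPUS];

        static void bump_local_stat(void)
        {
                unsigned long flags;

                local_irq_save(flags);                  /* no interrupts, hence no   */
                per_cpu_stat[smp_processor_id()]++;     /* preemption until restore  */
                local_irq_restore(flags);
        }
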
1868 diff -urN linux-2.4.17/net/socket.c linux/net/socket.c
1869 --- linux-2.4.17/net/socket.c   Wed Dec 19 03:17:37 2001
1870 +++ linux/net/socket.c  Fri Dec 21 00:41:25 2001
1871 @@ -133,7 +133,7 @@
1872  
1873  static struct net_proto_family *net_families[NPROTO];
1874  
1875 -#ifdef CONFIG_SMP
1876 +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
1877  static atomic_t net_family_lockct = ATOMIC_INIT(0);
1878  static spinlock_t net_family_lock = SPIN_LOCK_UNLOCKED;
1879  