[packages/kernel.git] / preempt-kernel-rml-2.4.14-2.patch
1 diff -urN linux-2.4.14/CREDITS linux/CREDITS
2 --- linux-2.4.14/CREDITS        Mon Nov  5 20:10:45 2001
3 +++ linux/CREDITS       Wed Nov  7 20:53:19 2001
4 @@ -971,8 +971,8 @@
5  
6  N: Nigel Gamble
7  E: nigel@nrg.org
8 -E: nigel@sgi.com
9  D: Interrupt-driven printer driver
10 +D: Preemptible kernel
11  S: 120 Alley Way
12  S: Mountain View, California 94040
13  S: USA
14 diff -urN linux-2.4.14/Documentation/Configure.help linux/Documentation/Configure.help
15 --- linux-2.4.14/Documentation/Configure.help   Mon Nov  5 20:11:27 2001
16 +++ linux/Documentation/Configure.help  Wed Nov  7 20:53:19 2001
17 @@ -153,6 +153,19 @@
18    If you have a system with several CPUs, you do not need to say Y
19    here: the local APIC will be used automatically.
20  
21 +Preemptible Kernel
22 +CONFIG_PREEMPT
23 +  This option reduces the latency of the kernel when reacting to
24 +  real-time or interactive events by allowing a low priority process to
25 +  be preempted even if it is in kernel mode executing a system call.
26 +  This allows applications to run more reliably even when the system is
27 +  under load due to other, lower priority, processes.
28 +
29 +  Say Y here if you are building a kernel for a desktop system, embedded
30 +  system or real-time system.  Say N if you are building a kernel for a
31 +  system where throughput is more important than interactive response,
32 +  such as a server system.  Say N if you are unsure.
33 +
34  Kernel math emulation
35  CONFIG_MATH_EMULATION
36    Linux can emulate a math coprocessor (used for floating point
37 diff -urN linux-2.4.14/Documentation/preempt-locking.txt linux/Documentation/preempt-locking.txt
38 --- linux-2.4.14/Documentation/preempt-locking.txt      Wed Dec 31 19:00:00 1969
39 +++ linux/Documentation/preempt-locking.txt     Wed Nov  7 20:53:19 2001
40 @@ -0,0 +1,94 @@
41 +                 Proper Locking Under a Preemptible Kernel:
42 +                      Keeping Kernel Code Preempt-Safe
43 +                         Robert Love <rml@tech9.net>
44 +                          Last Updated: 21 Oct 2001
45 +
46 +
47 +INTRODUCTION
48 +
49 +
50 +A preemptible kernel creates new locking issues.  The issues are the same as
51 +those under SMP: concurrency and reentrancy.  Thankfully, the Linux preemptible
52 +kernel model leverages existing SMP locking mechanisms.  Thus, the kernel
53 +requires explicit additional locking for very few additional situations.
54 +
55 +This document is for all kernel hackers.  Developing code in the kernel
56 +requires protecting these situations.  As you will see, these situations would 
57 +normally require a lock, were they not per-CPU.
58 +
59 +
60 +RULE #1: Per-CPU data structures need explicit protection
61 +
62 +
63 +Two similar problems arise. An example code snippet:
64 +
65 +       struct this_needs_locking tux[NR_CPUS];
66 +       tux[smp_processor_id()] = some_value;
67 +       /* task is preempted here... */
68 +       something = tux[smp_processor_id()];
69 +
70 +First, since the data is per-CPU, it may not have explicit SMP locking, but
71 +would require it otherwise.  Second, when a preempted task is finally rescheduled,
72 +the previous value of smp_processor_id may not equal the current.  You must
73 +protect these situations by disabling preemption around them.
74 +
75 +
76 +RULE #2: CPU state must be protected.
77 +
78 +
79 +Under preemption, the state of the CPU must be protected.  This is arch-
80 +dependent, but includes CPU structures and state not preserved over a context
81 +switch.  For example, on x86, entering and exiting FPU mode is now a critical
82 +section that must occur while preemption is disabled.  Think what would happen
83 +if the kernel is executing a floating-point instruction and is then preempted.
84 +Remember, the kernel does not save FPU state except for user tasks.  Therefore,
85 +upon preemption, the FPU registers will be sold to the lowest bidder.  Thus,
86 +preemption must be disabled around such regions.
87 +
88 +Note, some FPU functions are already explicitly preempt safe.  For example,
89 +kernel_fpu_begin and kernel_fpu_end will disable and enable preemption.
90 +However, math_state_restore must be called with preemption disabled.
91 +
92 +
93 +SOLUTION
94 +
95 +
96 +Data protection under preemption is achieved by disabling preemption for the
97 +duration of the critical region.
98 +
99 +preempt_enable()               decrement the preempt counter
100 +preempt_disable()              increment the preempt counter
101 +preempt_enable_no_resched()    decrement, but do not immediately preempt
102 +
103 +The functions are nestable.  In other words, you can call preempt_disable
104 +n times in a code path, and preemption will not be reenabled until the n-th
105 +call to preempt_enable.  The preempt statements compile to nothing if
106 +preemption is not enabled.
107 +
108 +Note that you do not need to explicitly prevent preemption if you are holding
109 +any locks or interrupts are disabled, since preemption is implicitly disabled
110 +in those cases.
111 +
112 +Example:
113 +
114 +       cpucache_t *cc; /* this is per-CPU */
115 +       preempt_disable();
116 +       cc = cc_data(searchp);
117 +       if (cc && cc->avail) {
118 +               __free_block(searchp, cc_entry(cc), cc->avail);
119 +               cc->avail = 0;
120 +       }
121 +       preempt_enable();
122 +       return 0;
123 +
124 +Notice how the preemption statements must encompass every reference to the
125 +critical variables.  Another example:
126 +
127 +       int buf[NR_CPUS];
128 +       set_cpu_val(buf);
129 +       if (buf[smp_processor_id()] == -1) printk(KERN_INFO "wee!\n");
130 +       spin_lock(&buf_lock);
131 +       /* ... */
132 +
133 +This code is not preempt-safe, but see how easily we can fix it by simply
134 +moving the spin_lock up two lines.
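
The two rules and the SOLUTION section above map directly onto code.  A
minimal sketch, assuming a hypothetical per-CPU counter `hit_count', of how
the preempt_disable()/preempt_enable() pair keeps smp_processor_id() stable
and how the calls nest; this is an illustration only, not part of the patch:

	static unsigned long hit_count[NR_CPUS];

	static void bump_hit_count(void)
	{
		preempt_disable();		/* pin the task to this CPU */
		hit_count[smp_processor_id()]++;
		preempt_enable();		/* may be preempted right here */
	}

	static void bump_twice(void)
	{
		preempt_disable();		/* count: 0 -> 1 */
		bump_hit_count();		/* nests: 1 -> 2 -> 1 */
		hit_count[smp_processor_id()]++;
		preempt_enable();		/* count: 1 -> 0, preemption possible */
	}
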
135 diff -urN linux-2.4.14/MAINTAINERS linux/MAINTAINERS
136 --- linux-2.4.14/MAINTAINERS    Mon Nov  5 20:10:55 2001
137 +++ linux/MAINTAINERS   Wed Nov  7 20:53:19 2001
138 @@ -1187,6 +1187,14 @@
139  M:     mostrows@styx.uwaterloo.ca
140  S:     Maintained
141  
142 +PREEMPTIBLE KERNEL
143 +P:     Robert M. Love
144 +M:     rml@tech9.net
145 +L:     linux-kernel@vger.kernel.org
146 +L:     kpreempt-tech@lists.sourceforge.net
147 +W:     http://tech9.net/rml/linux
148 +S:     Maintained
149 +
150  PROMISE DC4030 CACHING DISK CONTROLLER DRIVER
151  P:     Peter Denison
152  M:     promise@pnd-pc.demon.co.uk
153 diff -urN linux-2.4.14/arch/arm/config.in linux/arch/arm/config.in
154 --- linux-2.4.14/arch/arm/config.in     Mon Nov  5 20:11:22 2001
155 +++ linux/arch/arm/config.in    Wed Nov  7 20:53:24 2001
156 @@ -414,6 +414,7 @@
157  if [ "$CONFIG_CPU_32" = "y" -a "$CONFIG_ARCH_EBSA110" != "y" ]; then
158     bool 'Kernel-mode alignment trap handler' CONFIG_ALIGNMENT_TRAP
159  fi
160 +dep_bool 'Preemptible Kernel (experimental)' CONFIG_PREEMPT $CONFIG_CPU_32 $CONFIG_EXPERIMENTAL
161  endmenu
162  
163  source drivers/parport/Config.in
164 diff -urN linux-2.4.14/arch/arm/kernel/entry-armv.S linux/arch/arm/kernel/entry-armv.S
165 --- linux-2.4.14/arch/arm/kernel/entry-armv.S   Mon Nov  5 20:11:22 2001
166 +++ linux/arch/arm/kernel/entry-armv.S  Wed Nov  7 20:53:24 2001
167 @@ -672,6 +672,12 @@
168                 add     r4, sp, #S_SP
169                 mov     r6, lr
170                 stmia   r4, {r5, r6, r7, r8, r9}        @ save sp_SVC, lr_SVC, pc, cpsr, old_ro
171 +#ifdef CONFIG_PREEMPT
172 +               get_current_task r9
173 +               ldr     r8, [r9, #TSK_PREEMPT]
174 +               add     r8, r8, #1
175 +               str     r8, [r9, #TSK_PREEMPT]
176 +#endif
177  1:             get_irqnr_and_base r0, r6, r5, lr
178                 movne   r1, sp
179                 @
180 @@ -679,6 +685,25 @@
181                 @
182                 adrsvc  ne, lr, 1b
183                 bne     do_IRQ
184 +#ifdef CONFIG_PREEMPT
185 +2:             ldr     r8, [r9, #TSK_PREEMPT]
186 +               subs    r8, r8, #1
187 +               bne     3f
188 +               ldr     r7, [r9, #TSK_NEED_RESCHED]
189 +               teq     r7, #0
190 +               beq     3f
191 +               ldr     r6, .LCirqstat
192 +               ldr     r0, [r6, #IRQSTAT_BH_COUNT]
193 +               teq     r0, #0
194 +               bne     3f
195 +               mov     r0, #MODE_SVC
196 +               msr     cpsr_c, r0              @ enable interrupts
197 +               bl      SYMBOL_NAME(preempt_schedule)
198 +               mov     r0, #I_BIT | MODE_SVC
199 +               msr     cpsr_c, r0              @ disable interrupts
200 +               b       2b
201 +3:             str     r8, [r9, #TSK_PREEMPT]
202 +#endif
203                 ldr     r0, [sp, #S_PSR]                @ irqs are already disabled
204                 msr     spsr, r0
205                 ldmia   sp, {r0 - pc}^                  @ load r0 - pc, cpsr
206 @@ -736,6 +761,9 @@
207  .LCprocfns:    .word   SYMBOL_NAME(processor)
208  #endif
209  .LCfp:         .word   SYMBOL_NAME(fp_enter)
210 +#ifdef CONFIG_PREEMPT
211 +.LCirqstat:    .word   SYMBOL_NAME(irq_stat)
212 +#endif
213  
214                 irq_prio_table
215  
216 @@ -775,6 +803,12 @@
217                 stmdb   r8, {sp, lr}^
218                 alignment_trap r4, r7, __temp_irq
219                 zero_fp
220 +               get_current_task tsk
221 +#ifdef CONFIG_PREEMPT
222 +               ldr     r0, [tsk, #TSK_PREEMPT]
223 +               add     r0, r0, #1
224 +               str     r0, [tsk, #TSK_PREEMPT]
225 +#endif
226  1:             get_irqnr_and_base r0, r6, r5, lr
227                 movne   r1, sp
228                 adrsvc  ne, lr, 1b
229 @@ -782,8 +816,12 @@
230                 @ routine called with r0 = irq number, r1 = struct pt_regs *
231                 @
232                 bne     do_IRQ
233 +#ifdef CONFIG_PREEMPT
234 +               ldr     r0, [tsk, #TSK_PREEMPT]
235 +               sub     r0, r0, #1
236 +               str     r0, [tsk, #TSK_PREEMPT]
237 +#endif
238                 mov     why, #0
239 -               get_current_task tsk
240                 b       ret_to_user
241  
242                 .align  5
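
The CONFIG_PREEMPT block in the IRQ return path above is easier to follow in
C.  A rough hand-translated sketch (illustrative only, not code from the
patch; local_bh_count() is the counter the IRQSTAT_BH_COUNT offset points at):

	int count;

	for (;;) {
		count = tsk->preempt_count - 1;
		if (count != 0 || !tsk->need_resched ||
		    local_bh_count(smp_processor_id()) != 0)
			break;
		/* interrupts back on (MODE_SVC) */
		preempt_schedule();
		/* interrupts off again before re-testing */
	}
	tsk->preempt_count = count;
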
243 diff -urN linux-2.4.14/arch/arm/tools/getconstants.c linux/arch/arm/tools/getconstants.c
244 --- linux-2.4.14/arch/arm/tools/getconstants.c  Mon Nov  5 20:11:24 2001
245 +++ linux/arch/arm/tools/getconstants.c Wed Nov  7 20:53:24 2001
246 @@ -13,6 +13,7 @@
247  
248  #include <asm/pgtable.h>
249  #include <asm/uaccess.h>
250 +#include <asm/hardirq.h>
251  
252  /*
253   * Make sure that the compiler and target are compatible.
254 @@ -39,6 +40,11 @@
255  DEFN("TSS_SAVE",               OFF_TSK(thread.save));
256  DEFN("TSS_FPESAVE",            OFF_TSK(thread.fpstate.soft.save));
257  
258 +#ifdef CONFIG_PREEMPT
259 +DEFN("TSK_PREEMPT",            OFF_TSK(preempt_count));
260 +DEFN("IRQSTAT_BH_COUNT",       (unsigned long)&(((irq_cpustat_t *)0)->__local_bh_count));
261 +#endif
262 +
263  #ifdef CONFIG_CPU_32
264  DEFN("TSS_DOMAIN",             OFF_TSK(thread.domain));
265  
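
The IRQSTAT_BH_COUNT entry above uses the classic null-pointer offsetof idiom:
taking a member's address through a null struct pointer yields the member's
byte offset, which the assembly sources then use as a literal.  A short sketch
of the equivalence (illustrative only):

	#include <stddef.h>

	/* Both expressions evaluate to the byte offset of __local_bh_count
	 * within irq_cpustat_t.
	 */
	#define BH_OFF_MANUAL ((unsigned long)&(((irq_cpustat_t *)0)->__local_bh_count))
	#define BH_OFF_STD    offsetof(irq_cpustat_t, __local_bh_count)
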
266 diff -urN linux-2.4.14/arch/i386/config.in linux/arch/i386/config.in
267 --- linux-2.4.14/arch/i386/config.in    Mon Nov  5 20:11:16 2001
268 +++ linux/arch/i386/config.in   Wed Nov  7 20:53:19 2001
269 @@ -170,6 +170,7 @@
270  bool 'Math emulation' CONFIG_MATH_EMULATION
271  bool 'MTRR (Memory Type Range Register) support' CONFIG_MTRR
272  bool 'Symmetric multi-processing support' CONFIG_SMP
273 +bool 'Preemptible Kernel' CONFIG_PREEMPT
274  if [ "$CONFIG_SMP" != "y" ]; then
275     bool 'Local APIC support on uniprocessors' CONFIG_X86_UP_APIC
276     dep_bool 'IO-APIC support on uniprocessors' CONFIG_X86_UP_IOAPIC $CONFIG_X86_UP_APIC
277 @@ -183,9 +184,12 @@
278     bool 'Multiquad NUMA system' CONFIG_MULTIQUAD
279  fi
280  
281 -if [ "$CONFIG_SMP" = "y" -a "$CONFIG_X86_CMPXCHG" = "y" ]; then
282 -   define_bool CONFIG_HAVE_DEC_LOCK y
283 +if [ "$CONFIG_SMP" = "y" -o "$CONFIG_PREEMPT" = "y" ]; then
284 +   if [ "$CONFIG_X86_CMPXCHG" = "y" ]; then
285 +      define_bool CONFIG_HAVE_DEC_LOCK y
286 +   fi
287  fi
288 +
289  endmenu
290  
291  mainmenu_option next_comment
292 diff -urN linux-2.4.14/arch/i386/kernel/entry.S linux/arch/i386/kernel/entry.S
293 --- linux-2.4.14/arch/i386/kernel/entry.S       Mon Nov  5 20:11:16 2001
294 +++ linux/arch/i386/kernel/entry.S      Wed Nov  7 20:53:19 2001
295 @@ -71,7 +71,7 @@
296   * these are offsets into the task-struct.
297   */
298  state          =  0
299 -flags          =  4
300 +preempt_count  =  4
301  sigpending     =  8
302  addr_limit     = 12
303  exec_domain    = 16
304 @@ -79,8 +79,28 @@
305  tsk_ptrace     = 24
306  processor      = 52
307  
308 +        /* These are offsets into the irq_stat structure
309 +         * There is one per cpu and it is aligned to 32
310 +         * byte boundry (we put that here as a shift count)
311 +         * byte boundary (we put that here as a shift count)
312 +irq_array_shift                 = CONFIG_X86_L1_CACHE_SHIFT
313 +
314 +irq_stat_local_irq_count        = 4
315 +irq_stat_local_bh_count         = 8
316 +
317  ENOSYS = 38
318  
319 +#ifdef CONFIG_SMP
320 +#define GET_CPU_INDX   movl processor(%ebx),%eax;  \
321 +                        shll $irq_array_shift,%eax
322 +#define GET_CURRENT_CPU_INDX GET_CURRENT(%ebx); \
323 +                             GET_CPU_INDX
324 +#define CPU_INDX (,%eax)
325 +#else
326 +#define GET_CPU_INDX
327 +#define GET_CURRENT_CPU_INDX GET_CURRENT(%ebx)
328 +#define CPU_INDX
329 +#endif
330  
331  #define SAVE_ALL \
332         cld; \
333 @@ -247,12 +267,30 @@
334         ALIGN
335  ENTRY(ret_from_intr)
336         GET_CURRENT(%ebx)
337 +#ifdef CONFIG_PREEMPT
338 +       cli
339 +       decl preempt_count(%ebx)
340 +#endif
341  ret_from_exception:
342         movl EFLAGS(%esp),%eax          # mix EFLAGS and CS
343         movb CS(%esp),%al
344         testl $(VM_MASK | 3),%eax       # return to VM86 mode or non-supervisor?
345         jne ret_from_sys_call
346 +#ifdef CONFIG_PREEMPT
347 +       cmpl $0,preempt_count(%ebx)
348 +       jnz restore_all
349 +       cmpl $0,need_resched(%ebx)
350 +       jz restore_all
351 +       movl SYMBOL_NAME(irq_stat)+irq_stat_local_bh_count CPU_INDX,%ecx
352 +       addl SYMBOL_NAME(irq_stat)+irq_stat_local_irq_count CPU_INDX,%ecx
353 +       jnz restore_all
354 +       incl preempt_count(%ebx)
355 +       sti
356 +       call SYMBOL_NAME(preempt_schedule)
357 +       jmp ret_from_intr
358 +#else
359         jmp restore_all
360 +#endif
361  
362         ALIGN
363  reschedule:
364 @@ -289,6 +327,9 @@
365         GET_CURRENT(%ebx)
366         call *%edi
367         addl $8,%esp
368 +#ifdef CONFIG_PREEMPT
369 +       cli
370 +#endif
371         jmp ret_from_exception
372  
373  ENTRY(coprocessor_error)
374 @@ -308,12 +349,18 @@
375         movl %cr0,%eax
376         testl $0x4,%eax                 # EM (math emulation bit)
377         jne device_not_available_emulate
378 +#ifdef CONFIG_PREEMPT
379 +       cli
380 +#endif
381         call SYMBOL_NAME(math_state_restore)
382         jmp ret_from_exception
383  device_not_available_emulate:
384         pushl $0                # temporary storage for ORIG_EIP
385         call  SYMBOL_NAME(math_emulate)
386         addl $4,%esp
387 +#ifdef CONFIG_PREEMPT
388 +       cli
389 +#endif
390         jmp ret_from_exception
391  
392  ENTRY(debug)
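
The new ret_from_intr/ret_from_exception logic above amounts to the following
check, shown here as rough C (illustrative only; returning_to_kernel_mode() is
a made-up name for the EFLAGS/CS test the assembly performs):

	current->preempt_count--;			/* interrupts are off */
	if (returning_to_kernel_mode(regs) &&
	    current->preempt_count == 0 &&
	    current->need_resched &&
	    local_irq_count(smp_processor_id()) == 0 &&
	    local_bh_count(smp_processor_id()) == 0) {
		current->preempt_count++;		/* block recursive preemption */
		__sti();				/* interrupts back on */
		preempt_schedule();			/* may switch tasks */
		/* execution returns to ret_from_intr and the test repeats */
	}
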
393 diff -urN linux-2.4.14/arch/i386/kernel/i387.c linux/arch/i386/kernel/i387.c
394 --- linux-2.4.14/arch/i386/kernel/i387.c        Mon Nov  5 20:11:16 2001
395 +++ linux/arch/i386/kernel/i387.c       Wed Nov  7 20:53:19 2001
396 @@ -10,6 +10,7 @@
397  
398  #include <linux/config.h>
399  #include <linux/sched.h>
400 +#include <linux/spinlock.h>
401  #include <asm/processor.h>
402  #include <asm/i387.h>
403  #include <asm/math_emu.h>
404 @@ -65,6 +66,8 @@
405  {
406         struct task_struct *tsk = current;
407  
408 +       preempt_disable();
409 +       
410         if (tsk->flags & PF_USEDFPU) {
411                 __save_init_fpu(tsk);
412                 return;
413 diff -urN linux-2.4.14/arch/i386/kernel/traps.c linux/arch/i386/kernel/traps.c
414 --- linux-2.4.14/arch/i386/kernel/traps.c       Mon Nov  5 20:11:16 2001
415 +++ linux/arch/i386/kernel/traps.c      Wed Nov  7 20:53:19 2001
416 @@ -697,6 +697,11 @@
417   */
418  asmlinkage void math_state_restore(struct pt_regs regs)
419  {
420 +       /*
421 +        * CONFIG_PREEMPT
422 +        * Must be called with preemption disabled
423 +        */
424 +
425         __asm__ __volatile__("clts");           /* Allow maths ops (or we recurse) */
426  
427         if (current->used_math) {
428 diff -urN linux-2.4.14/arch/i386/lib/dec_and_lock.c linux/arch/i386/lib/dec_and_lock.c
429 --- linux-2.4.14/arch/i386/lib/dec_and_lock.c   Mon Nov  5 20:11:16 2001
430 +++ linux/arch/i386/lib/dec_and_lock.c  Wed Nov  7 20:53:19 2001
431 @@ -8,6 +8,7 @@
432   */
433  
434  #include <linux/spinlock.h>
435 +#include <linux/sched.h>
436  #include <asm/atomic.h>
437  
438  int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
439 diff -urN linux-2.4.14/drivers/char/console.c linux/drivers/char/console.c
440 --- linux-2.4.14/drivers/char/console.c Mon Nov  5 20:11:01 2001
441 +++ linux/drivers/char/console.c        Wed Nov  7 20:53:19 2001
442 @@ -2356,8 +2356,14 @@
443                 return;
444  
445         pm_access(pm_con);
446 +       
447 +       /*
448 +        * If we raced with con_close(), `vt' may be null.
449 +        * Hence this bandaid.   - akpm
450 +        */
451         acquire_console_sem();
452 -       set_cursor(vt->vc_num);
453 +       if (vt)
454 +               set_cursor(vt->vc_num);
455         release_console_sem();
456  }
457  
458 diff -urN linux-2.4.14/drivers/ieee1394/csr.c linux/drivers/ieee1394/csr.c
459 --- linux-2.4.14/drivers/ieee1394/csr.c Mon Nov  5 20:11:12 2001
460 +++ linux/drivers/ieee1394/csr.c        Wed Nov  7 20:53:19 2001
461 @@ -10,6 +10,7 @@
462   */
463  
464  #include <linux/string.h>
465 +#include <linux/sched.h>
466  
467  #include "ieee1394_types.h"
468  #include "hosts.h"
469 diff -urN linux-2.4.14/fs/adfs/map.c linux/fs/adfs/map.c
470 --- linux-2.4.14/fs/adfs/map.c  Mon Nov  5 20:10:45 2001
471 +++ linux/fs/adfs/map.c Wed Nov  7 20:53:19 2001
472 @@ -12,6 +12,7 @@
473  #include <linux/fs.h>
474  #include <linux/adfs_fs.h>
475  #include <linux/spinlock.h>
476 +#include <linux/sched.h>
477  
478  #include "adfs.h"
479  
480 diff -urN linux-2.4.14/fs/exec.c linux/fs/exec.c
481 --- linux-2.4.14/fs/exec.c      Mon Nov  5 20:10:44 2001
482 +++ linux/fs/exec.c     Wed Nov  7 20:53:19 2001
483 @@ -420,8 +420,8 @@
484                 active_mm = current->active_mm;
485                 current->mm = mm;
486                 current->active_mm = mm;
487 -               task_unlock(current);
488                 activate_mm(active_mm, mm);
489 +               task_unlock(current);
490                 mm_release();
491                 if (old_mm) {
492                         if (active_mm != old_mm) BUG();
493 diff -urN linux-2.4.14/fs/fat/cache.c linux/fs/fat/cache.c
494 --- linux-2.4.14/fs/fat/cache.c Mon Nov  5 20:10:44 2001
495 +++ linux/fs/fat/cache.c        Wed Nov  7 20:53:19 2001
496 @@ -14,6 +14,7 @@
497  #include <linux/string.h>
498  #include <linux/stat.h>
499  #include <linux/fat_cvf.h>
500 +#include <linux/sched.h>
501  
502  #if 0
503  #  define PRINTK(x) printk x
504 diff -urN linux-2.4.14/include/asm-arm/dma.h linux/include/asm-arm/dma.h
505 --- linux-2.4.14/include/asm-arm/dma.h  Mon Nov  5 20:10:51 2001
506 +++ linux/include/asm-arm/dma.h Wed Nov  7 20:53:28 2001
507 @@ -5,6 +5,7 @@
508  
509  #include <linux/config.h>
510  #include <linux/spinlock.h>
511 +#include <linux/sched.h>
512  #include <asm/system.h>
513  #include <asm/memory.h>
514  #include <asm/scatterlist.h>
515 diff -urN linux-2.4.14/include/asm-arm/hardirq.h linux/include/asm-arm/hardirq.h
516 --- linux-2.4.14/include/asm-arm/hardirq.h      Mon Nov  5 20:10:51 2001
517 +++ linux/include/asm-arm/hardirq.h     Wed Nov  7 20:53:28 2001
518 @@ -34,6 +34,7 @@
519  #define irq_exit(cpu,irq)      (local_irq_count(cpu)--)
520  
521  #define synchronize_irq()      do { } while (0)
522 +#define release_irqlock(cpu)   do { } while (0)
523  
524  #else
525  #error SMP not supported
526 diff -urN linux-2.4.14/include/asm-arm/mmu_context.h linux/include/asm-arm/mmu_context.h
527 --- linux-2.4.14/include/asm-arm/mmu_context.h  Mon Nov  5 20:10:51 2001
528 +++ linux/include/asm-arm/mmu_context.h Wed Nov  7 20:53:28 2001
529 @@ -42,6 +42,10 @@
530  switch_mm(struct mm_struct *prev, struct mm_struct *next,
531           struct task_struct *tsk, unsigned int cpu)
532  {
533 +#ifdef CONFIG_PREEMPT
534 +       if (preempt_is_disabled() == 0)
535 +               BUG();
536 +#endif
537         if (prev != next) {
538                 cpu_switch_mm(next->pgd, tsk);
539                 clear_bit(cpu, &prev->cpu_vm_mask);
540 diff -urN linux-2.4.14/include/asm-arm/pgalloc.h linux/include/asm-arm/pgalloc.h
541 --- linux-2.4.14/include/asm-arm/pgalloc.h      Mon Nov  5 20:10:51 2001
542 +++ linux/include/asm-arm/pgalloc.h     Wed Nov  7 20:53:28 2001
543 @@ -57,40 +57,48 @@
544  {
545         unsigned long *ret;
546  
547 +       preempt_disable();
548         if ((ret = pgd_quicklist) != NULL) {
549                 pgd_quicklist = (unsigned long *)__pgd_next(ret);
550                 ret[1] = ret[2];
551                 clean_dcache_entry(ret + 1);
552                 pgtable_cache_size--;
553         }
554 +       preempt_enable();
555         return (pgd_t *)ret;
556  }
557  
558  static inline void free_pgd_fast(pgd_t *pgd)
559  {
560 +       preempt_disable();
561         __pgd_next(pgd) = (unsigned long) pgd_quicklist;
562         pgd_quicklist = (unsigned long *) pgd;
563         pgtable_cache_size++;
564 +       preempt_enable();
565  }
566  
567  static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
568  {
569         unsigned long *ret;
570  
571 +       preempt_disable();
572         if((ret = pte_quicklist) != NULL) {
573                 pte_quicklist = (unsigned long *)__pte_next(ret);
574                 ret[0] = 0;
575                 clean_dcache_entry(ret);
576                 pgtable_cache_size--;
577         }
578 +       preempt_enable();
579         return (pte_t *)ret;
580  }
581  
582  static inline void free_pte_fast(pte_t *pte)
583  {
584 +       preempt_disable();
585         __pte_next(pte) = (unsigned long) pte_quicklist;
586         pte_quicklist = (unsigned long *) pte;
587         pgtable_cache_size++;
588 +       preempt_enable();
589  }
590  
591  #else  /* CONFIG_NO_PGT_CACHE */
592 diff -urN linux-2.4.14/include/asm-arm/smplock.h linux/include/asm-arm/smplock.h
593 --- linux-2.4.14/include/asm-arm/smplock.h      Mon Nov  5 20:10:51 2001
594 +++ linux/include/asm-arm/smplock.h     Wed Nov  7 20:53:28 2001
595 @@ -3,12 +3,17 @@
596   *
597   * Default SMP lock implementation
598   */
599 +#include <linux/config.h>
600  #include <linux/interrupt.h>
601  #include <linux/spinlock.h>
602  
603  extern spinlock_t kernel_flag;
604  
605 +#ifdef CONFIG_PREEMPT
606 +#define kernel_locked()                preempt_is_disabled()
607 +#else
608  #define kernel_locked()                spin_is_locked(&kernel_flag)
609 +#endif
610  
611  /*
612   * Release global kernel lock and global interrupt lock
613 @@ -40,8 +45,14 @@
614   */
615  static inline void lock_kernel(void)
616  {
617 +#ifdef CONFIG_PREEMPT
618 +       if (current->lock_depth == -1)
619 +               spin_lock(&kernel_flag);
620 +       ++current->lock_depth;
621 +#else
622         if (!++current->lock_depth)
623                 spin_lock(&kernel_flag);
624 +#endif
625  }
626  
627  static inline void unlock_kernel(void)
628 diff -urN linux-2.4.14/include/asm-arm/softirq.h linux/include/asm-arm/softirq.h
629 --- linux-2.4.14/include/asm-arm/softirq.h      Mon Nov  5 20:10:51 2001
630 +++ linux/include/asm-arm/softirq.h     Wed Nov  7 20:53:28 2001
631 @@ -5,20 +5,22 @@
632  #include <asm/hardirq.h>
633  
634  #define __cpu_bh_enable(cpu) \
635 -               do { barrier(); local_bh_count(cpu)--; } while (0)
636 +               do { barrier(); local_bh_count(cpu)--; preempt_enable(); } while (0)
637  #define cpu_bh_disable(cpu) \
638 -               do { local_bh_count(cpu)++; barrier(); } while (0)
639 +               do { preempt_disable(); local_bh_count(cpu)++; barrier(); } while (0)
640  
641  #define local_bh_disable()     cpu_bh_disable(smp_processor_id())
642  #define __local_bh_enable()    __cpu_bh_enable(smp_processor_id())
643  
644  #define in_softirq()           (local_bh_count(smp_processor_id()) != 0)
645  
646 -#define local_bh_enable()                                              \
647 +#define _local_bh_enable()                                             \
648  do {                                                                   \
649         unsigned int *ptr = &local_bh_count(smp_processor_id());        \
650         if (!--*ptr && ptr[-2])                                         \
651                 __asm__("bl%? __do_softirq": : : "lr");/* out of line */\
652  } while (0)
653  
654 +#define local_bh_enable() do { _local_bh_enable(); preempt_enable(); } while (0)
655 +
656  #endif /* __ASM_SOFTIRQ_H */
657 diff -urN linux-2.4.14/include/asm-i386/hardirq.h linux/include/asm-i386/hardirq.h
658 --- linux-2.4.14/include/asm-i386/hardirq.h     Mon Nov  5 20:10:46 2001
659 +++ linux/include/asm-i386/hardirq.h    Wed Nov  7 20:53:19 2001
660 @@ -36,6 +36,8 @@
661  
662  #define synchronize_irq()      barrier()
663  
664 +#define release_irqlock(cpu)   do { } while (0)
665 +
666  #else
667  
668  #include <asm/atomic.h>
669 diff -urN linux-2.4.14/include/asm-i386/highmem.h linux/include/asm-i386/highmem.h
670 --- linux-2.4.14/include/asm-i386/highmem.h     Mon Nov  5 20:10:46 2001
671 +++ linux/include/asm-i386/highmem.h    Wed Nov  7 20:53:19 2001
672 @@ -88,6 +88,7 @@
673         enum fixed_addresses idx;
674         unsigned long vaddr;
675  
676 +       preempt_disable();
677         if (page < highmem_start_page)
678                 return page_address(page);
679  
680 @@ -109,8 +110,10 @@
681         unsigned long vaddr = (unsigned long) kvaddr;
682         enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
683  
684 -       if (vaddr < FIXADDR_START) // FIXME
685 +       if (vaddr < FIXADDR_START) { // FIXME
686 +               preempt_enable();
687                 return;
688 +       }
689  
690         if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
691                 BUG();
692 @@ -122,6 +125,8 @@
693         pte_clear(kmap_pte-idx);
694         __flush_tlb_one(vaddr);
695  #endif
696 +
697 +       preempt_enable();
698  }
699  
700  #endif /* __KERNEL__ */
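
kmap_atomic() now keeps preemption disabled until the matching kunmap_atomic(),
since the fixmap slot is chosen with smp_processor_id().  A minimal usage
sketch, assuming hypothetical `page' and `buf' variables (illustrative only):

	void *vaddr;

	vaddr = kmap_atomic(page, KM_USER0);	/* disables preemption */
	memcpy(buf, vaddr, PAGE_SIZE);
	kunmap_atomic(vaddr, KM_USER0);		/* re-enables preemption */
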
701 diff -urN linux-2.4.14/include/asm-i386/hw_irq.h linux/include/asm-i386/hw_irq.h
702 --- linux-2.4.14/include/asm-i386/hw_irq.h      Mon Nov  5 20:10:46 2001
703 +++ linux/include/asm-i386/hw_irq.h     Wed Nov  7 20:53:19 2001
704 @@ -95,6 +95,18 @@
705  #define __STR(x) #x
706  #define STR(x) __STR(x)
707  
708 +#define GET_CURRENT \
709 +       "movl %esp, %ebx\n\t" \
710 +       "andl $-8192, %ebx\n\t"
711 +
712 +#ifdef CONFIG_PREEMPT
713 +#define BUMP_CONTEX_SWITCH_LOCK \
714 +       GET_CURRENT \
715 +       "incl 4(%ebx)\n\t"
716 +#else
717 +#define BUMP_CONTEX_SWITCH_LOCK
718 +#endif
719 +
720  #define SAVE_ALL \
721         "cld\n\t" \
722         "pushl %es\n\t" \
723 @@ -108,15 +120,12 @@
724         "pushl %ebx\n\t" \
725         "movl $" STR(__KERNEL_DS) ",%edx\n\t" \
726         "movl %edx,%ds\n\t" \
727 -       "movl %edx,%es\n\t"
728 +       "movl %edx,%es\n\t" \
729 +       BUMP_CONTEX_SWITCH_LOCK
730  
731  #define IRQ_NAME2(nr) nr##_interrupt(void)
732  #define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
733  
734 -#define GET_CURRENT \
735 -       "movl %esp, %ebx\n\t" \
736 -       "andl $-8192, %ebx\n\t"
737 -
738  /*
739   *     SMP has a few special interrupts for IPI messages
740   */
741 diff -urN linux-2.4.14/include/asm-i386/i387.h linux/include/asm-i386/i387.h
742 --- linux-2.4.14/include/asm-i386/i387.h        Mon Nov  5 20:10:46 2001
743 +++ linux/include/asm-i386/i387.h       Wed Nov  7 20:53:19 2001
744 @@ -12,6 +12,7 @@
745  #define __ASM_I386_I387_H
746  
747  #include <linux/sched.h>
748 +#include <linux/spinlock.h>
749  #include <asm/processor.h>
750  #include <asm/sigcontext.h>
751  #include <asm/user.h>
752 @@ -24,7 +25,7 @@
753  extern void restore_fpu( struct task_struct *tsk );
754  
755  extern void kernel_fpu_begin(void);
756 -#define kernel_fpu_end() stts()
757 +#define kernel_fpu_end() do { stts(); preempt_enable(); } while(0)
758  
759  
760  #define unlazy_fpu( tsk ) do { \
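
As the locking document notes, kernel_fpu_begin() already disables preemption
and, with this change, kernel_fpu_end() re-enables it after stts().  A minimal
sketch of how in-kernel FPU/MMX code is expected to be bracketed (illustrative
only; the actual work in the middle is elided):

	kernel_fpu_begin();	/* saves user FPU state, preemption off */
	/* ... MMX/SSE accelerated copy or checksum ... */
	kernel_fpu_end();	/* stts(); preemption back on */
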
761 diff -urN linux-2.4.14/include/asm-i386/mmu_context.h linux/include/asm-i386/mmu_context.h
762 --- linux-2.4.14/include/asm-i386/mmu_context.h Mon Nov  5 20:10:46 2001
763 +++ linux/include/asm-i386/mmu_context.h        Wed Nov  7 20:53:19 2001
764 @@ -27,6 +27,10 @@
765  
766  static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
767  {
768 +#ifdef CONFIG_PREEMPT
769 +       if (preempt_is_disabled() == 0)
770 +               BUG();
771 +#endif
772         if (prev != next) {
773                 /* stop flush ipis for the previous mm */
774                 clear_bit(cpu, &prev->cpu_vm_mask);
775 diff -urN linux-2.4.14/include/asm-i386/pgalloc.h linux/include/asm-i386/pgalloc.h
776 --- linux-2.4.14/include/asm-i386/pgalloc.h     Mon Nov  5 20:10:46 2001
777 +++ linux/include/asm-i386/pgalloc.h    Wed Nov  7 20:53:19 2001
778 @@ -65,20 +65,26 @@
779  {
780         unsigned long *ret;
781  
782 +       preempt_disable();
783         if ((ret = pgd_quicklist) != NULL) {
784                 pgd_quicklist = (unsigned long *)(*ret);
785                 ret[0] = 0;
786                 pgtable_cache_size--;
787 -       } else
788 +               preempt_enable();
789 +       } else {
790 +               preempt_enable();
791                 ret = (unsigned long *)get_pgd_slow();
792 +       }
793         return (pgd_t *)ret;
794  }
795  
796  static __inline__ void free_pgd_fast(pgd_t *pgd)
797  {
798 +       preempt_disable();
799         *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
800         pgd_quicklist = (unsigned long *) pgd;
801         pgtable_cache_size++;
802 +       preempt_enable();
803  }
804  
805  static __inline__ void free_pgd_slow(pgd_t *pgd)
806 @@ -108,19 +114,23 @@
807  {
808         unsigned long *ret;
809  
810 +       preempt_disable();
811         if ((ret = (unsigned long *)pte_quicklist) != NULL) {
812                 pte_quicklist = (unsigned long *)(*ret);
813                 ret[0] = ret[1];
814                 pgtable_cache_size--;
815         }
816 +       preempt_enable();
817         return (pte_t *)ret;
818  }
819  
820  static __inline__ void pte_free_fast(pte_t *pte)
821  {
822 +       preempt_disable();
823         *(unsigned long *)pte = (unsigned long) pte_quicklist;
824         pte_quicklist = (unsigned long *) pte;
825         pgtable_cache_size++;
826 +       preempt_enable();
827  }
828  
829  static __inline__ void pte_free_slow(pte_t *pte)
830 diff -urN linux-2.4.14/include/asm-i386/processor.h linux/include/asm-i386/processor.h
831 --- linux-2.4.14/include/asm-i386/processor.h   Mon Nov  5 20:10:46 2001
832 +++ linux/include/asm-i386/processor.h  Wed Nov  7 20:53:19 2001
833 @@ -502,7 +502,10 @@
834  {
835          __asm__ __volatile__ ("prefetchw (%0)" : : "r"(x));
836  }
837 -#define spin_lock_prefetch(x)  prefetchw(x)
838 +#define spin_lock_prefetch(x) do {                             \
839 +       prefetchw(x);                                           \
840 +       preempt_prefetch(&current->preempt_count);              \
841 +} while(0)
842  
843  #endif
844  
845 diff -urN linux-2.4.14/include/asm-i386/smplock.h linux/include/asm-i386/smplock.h
846 --- linux-2.4.14/include/asm-i386/smplock.h     Mon Nov  5 20:10:46 2001
847 +++ linux/include/asm-i386/smplock.h    Wed Nov  7 20:53:19 2001
848 @@ -10,7 +10,15 @@
849  
850  extern spinlock_t kernel_flag;
851  
852 +#ifdef CONFIG_SMP
853  #define kernel_locked()                spin_is_locked(&kernel_flag)
854 +#else
855 +#ifdef CONFIG_PREEMPT
856 +#define kernel_locked()                preempt_is_disabled()
857 +#else
858 +#define kernel_locked()                1
859 +#endif
860 +#endif
861  
862  /*
863   * Release global kernel lock and global interrupt lock
864 @@ -42,6 +50,11 @@
865   */
866  static __inline__ void lock_kernel(void)
867  {
868 +#ifdef CONFIG_PREEMPT
869 +       if (current->lock_depth == -1)
870 +               spin_lock(&kernel_flag);
871 +       ++current->lock_depth;
872 +#else
873  #if 1
874         if (!++current->lock_depth)
875                 spin_lock(&kernel_flag);
876 @@ -54,6 +67,7 @@
877                 :"=m" (__dummy_lock(&kernel_flag)),
878                  "=m" (current->lock_depth));
879  #endif
880 +#endif
881  }
882  
883  static __inline__ void unlock_kernel(void)
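
With CONFIG_PREEMPT the big kernel lock is taken with a plain spin_lock()
(which itself disables preemption) and current->lock_depth handles recursion.
A small sketch of how nested lock_kernel() calls behave, assuming the usual
unlock_kernel() counterpart (illustrative only):

	/* current->lock_depth starts at -1: BKL not held */
	lock_kernel();		/* depth -1 -> 0: spin_lock(&kernel_flag)    */
	lock_kernel();		/* depth  0 -> 1: already held, only counted */
	unlock_kernel();	/* depth  1 -> 0: still held                 */
	unlock_kernel();	/* depth  0 -> -1: spin_unlock(&kernel_flag)  */
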
884 diff -urN linux-2.4.14/include/asm-i386/softirq.h linux/include/asm-i386/softirq.h
885 --- linux-2.4.14/include/asm-i386/softirq.h     Mon Nov  5 20:10:46 2001
886 +++ linux/include/asm-i386/softirq.h    Wed Nov  7 20:53:19 2001
887 @@ -5,9 +5,9 @@
888  #include <asm/hardirq.h>
889  
890  #define __cpu_bh_enable(cpu) \
891 -               do { barrier(); local_bh_count(cpu)--; } while (0)
892 +               do { barrier(); local_bh_count(cpu)--; preempt_enable(); } while (0)
893  #define cpu_bh_disable(cpu) \
894 -               do { local_bh_count(cpu)++; barrier(); } while (0)
895 +               do { preempt_disable(); local_bh_count(cpu)++; barrier(); } while (0)
896  
897  #define local_bh_disable()     cpu_bh_disable(smp_processor_id())
898  #define __local_bh_enable()    __cpu_bh_enable(smp_processor_id())
899 @@ -22,7 +22,7 @@
900   * If you change the offsets in irq_stat then you have to
901   * update this code as well.
902   */
903 -#define local_bh_enable()                                              \
904 +#define _local_bh_enable()                                             \
905  do {                                                                   \
906         unsigned int *ptr = &local_bh_count(smp_processor_id());        \
907                                                                         \
908 @@ -45,4 +45,6 @@
909                 /* no registers clobbered */ );                         \
910  } while (0)
911  
912 +#define local_bh_enable() do { _local_bh_enable(); preempt_enable(); } while (0)
913 +
914  #endif /* __ASM_SOFTIRQ_H */
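
local_bh_disable()/local_bh_enable() now wrap the softirq count in a
preempt_disable()/preempt_enable() pair, so per-CPU data shared with softirqs
stays safe under preemption as well.  A minimal sketch, assuming a hypothetical
per-CPU `net_stats' array also updated from softirq context (illustrative only):

	local_bh_disable();	/* preempt_disable(); local_bh_count++ */
	net_stats[smp_processor_id()].packets++;
	local_bh_enable();	/* local_bh_count--; preempt_enable()  */
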
915 diff -urN linux-2.4.14/include/asm-i386/spinlock.h linux/include/asm-i386/spinlock.h
916 --- linux-2.4.14/include/asm-i386/spinlock.h    Mon Nov  5 20:10:46 2001
917 +++ linux/include/asm-i386/spinlock.h   Wed Nov  7 20:53:19 2001
918 @@ -77,7 +77,7 @@
919                 :"=m" (lock->lock) : : "memory"
920  
921  
922 -static inline void spin_unlock(spinlock_t *lock)
923 +static inline void _raw_spin_unlock(spinlock_t *lock)
924  {
925  #if SPINLOCK_DEBUG
926         if (lock->magic != SPINLOCK_MAGIC)
927 @@ -97,7 +97,7 @@
928                 :"=q" (oldval), "=m" (lock->lock) \
929                 :"0" (oldval) : "memory"
930  
931 -static inline void spin_unlock(spinlock_t *lock)
932 +static inline void _raw_spin_unlock(spinlock_t *lock)
933  {
934         char oldval = 1;
935  #if SPINLOCK_DEBUG
936 @@ -113,7 +113,7 @@
937  
938  #endif
939  
940 -static inline int spin_trylock(spinlock_t *lock)
941 +static inline int _raw_spin_trylock(spinlock_t *lock)
942  {
943         char oldval;
944         __asm__ __volatile__(
945 @@ -123,7 +123,7 @@
946         return oldval > 0;
947  }
948  
949 -static inline void spin_lock(spinlock_t *lock)
950 +static inline void _raw_spin_lock(spinlock_t *lock)
951  {
952  #if SPINLOCK_DEBUG
953         __label__ here;
954 @@ -179,7 +179,7 @@
955   */
956  /* the spinlock helpers are in arch/i386/kernel/semaphore.c */
957  
958 -static inline void read_lock(rwlock_t *rw)
959 +static inline void _raw_read_lock(rwlock_t *rw)
960  {
961  #if SPINLOCK_DEBUG
962         if (rw->magic != RWLOCK_MAGIC)
963 @@ -188,7 +188,7 @@
964         __build_read_lock(rw, "__read_lock_failed");
965  }
966  
967 -static inline void write_lock(rwlock_t *rw)
968 +static inline void _raw_write_lock(rwlock_t *rw)
969  {
970  #if SPINLOCK_DEBUG
971         if (rw->magic != RWLOCK_MAGIC)
972 @@ -197,10 +197,10 @@
973         __build_write_lock(rw, "__write_lock_failed");
974  }
975  
976 -#define read_unlock(rw)                asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
977 -#define write_unlock(rw)       asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
978 +#define _raw_read_unlock(rw)           asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
979 +#define _raw_write_unlock(rw)  asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
980  
981 -static inline int write_trylock(rwlock_t *lock)
982 +static inline int _raw_write_trylock(rwlock_t *lock)
983  {
984         atomic_t *count = (atomic_t *)lock;
985         if (atomic_sub_and_test(RW_LOCK_BIAS, count))
986 diff -urN linux-2.4.14/include/linux/brlock.h linux/include/linux/brlock.h
987 --- linux-2.4.14/include/linux/brlock.h Mon Nov  5 20:10:46 2001
988 +++ linux/include/linux/brlock.h        Wed Nov  7 20:53:19 2001
989 @@ -171,11 +171,11 @@
990  }
991  
992  #else
993 -# define br_read_lock(idx)     ((void)(idx))
994 -# define br_read_unlock(idx)   ((void)(idx))
995 -# define br_write_lock(idx)    ((void)(idx))
996 -# define br_write_unlock(idx)  ((void)(idx))
997 -#endif
998 +# define br_read_lock(idx)     ({ (void)(idx); preempt_disable(); })
999 +# define br_read_unlock(idx)   ({ (void)(idx); preempt_enable(); })
1000 +# define br_write_lock(idx)    ({ (void)(idx); preempt_disable(); })
1001 +# define br_write_unlock(idx)  ({ (void)(idx); preempt_enable(); })
1002 +#endif /* CONFIG_SMP */
1003  
1004  /*
1005   * Now enumerate all of the possible sw/hw IRQ protected
1006 diff -urN linux-2.4.14/include/linux/dcache.h linux/include/linux/dcache.h
1007 --- linux-2.4.14/include/linux/dcache.h Mon Nov  5 20:10:46 2001
1008 +++ linux/include/linux/dcache.h        Wed Nov  7 20:53:19 2001
1009 @@ -126,31 +126,6 @@
1010  
1011  extern spinlock_t dcache_lock;
1012  
1013 -/**
1014 - * d_drop - drop a dentry
1015 - * @dentry: dentry to drop
1016 - *
1017 - * d_drop() unhashes the entry from the parent
1018 - * dentry hashes, so that it won't be found through
1019 - * a VFS lookup any more. Note that this is different
1020 - * from deleting the dentry - d_delete will try to
1021 - * mark the dentry negative if possible, giving a
1022 - * successful _negative_ lookup, while d_drop will
1023 - * just make the cache lookup fail.
1024 - *
1025 - * d_drop() is used mainly for stuff that wants
1026 - * to invalidate a dentry for some reason (NFS
1027 - * timeouts or autofs deletes).
1028 - */
1029 -
1030 -static __inline__ void d_drop(struct dentry * dentry)
1031 -{
1032 -       spin_lock(&dcache_lock);
1033 -       list_del(&dentry->d_hash);
1034 -       INIT_LIST_HEAD(&dentry->d_hash);
1035 -       spin_unlock(&dcache_lock);
1036 -}
1037 -
1038  static __inline__ int dname_external(struct dentry *d)
1039  {
1040         return d->d_name.name != d->d_iname; 
1041 @@ -275,3 +250,34 @@
1042  #endif /* __KERNEL__ */
1043  
1044  #endif /* __LINUX_DCACHE_H */
1045 +
1046 +#if !defined(__LINUX_DCACHE_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
1047 +#define __LINUX_DCACHE_H_INLINES
1048 +
1049 +#ifdef __KERNEL__
1050 +/**
1051 + * d_drop - drop a dentry
1052 + * @dentry: dentry to drop
1053 + *
1054 + * d_drop() unhashes the entry from the parent
1055 + * dentry hashes, so that it won't be found through
1056 + * a VFS lookup any more. Note that this is different
1057 + * from deleting the dentry - d_delete will try to
1058 + * mark the dentry negative if possible, giving a
1059 + * successful _negative_ lookup, while d_drop will
1060 + * just make the cache lookup fail.
1061 + *
1062 + * d_drop() is used mainly for stuff that wants
1063 + * to invalidate a dentry for some reason (NFS
1064 + * timeouts or autofs deletes).
1065 + */
1066 +
1067 +static __inline__ void d_drop(struct dentry * dentry)
1068 +{
1069 +       spin_lock(&dcache_lock);
1070 +       list_del(&dentry->d_hash);
1071 +       INIT_LIST_HEAD(&dentry->d_hash);
1072 +       spin_unlock(&dcache_lock);
1073 +}
1074 +#endif
1075 +#endif
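
Moving the d_drop() inline below the _TASK_STRUCT_DEFINED guard is a header
ordering trick: the function uses spin_lock(), which under CONFIG_PREEMPT
touches current->preempt_count and therefore needs the task_struct definition
from sched.h.  A sketch of the general pattern with made-up names (illustrative
only):

	/* example.h */
	#ifndef _EXAMPLE_H
	#define _EXAMPLE_H
	extern spinlock_t example_lock;
	/* declarations that do not need task_struct go here */
	#endif /* _EXAMPLE_H */

	/* Inline bodies that need task_struct compile only after sched.h has
	 * defined it, set _TASK_STRUCT_DEFINED and re-included this header.
	 */
	#if !defined(_EXAMPLE_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
	#define _EXAMPLE_H_INLINES
	static inline void example_lock_something(void)
	{
		spin_lock(&example_lock);	/* may use current->preempt_count */
		spin_unlock(&example_lock);
	}
	#endif
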
1076 diff -urN linux-2.4.14/include/linux/fs_struct.h linux/include/linux/fs_struct.h
1077 --- linux-2.4.14/include/linux/fs_struct.h      Mon Nov  5 20:10:46 2001
1078 +++ linux/include/linux/fs_struct.h     Wed Nov  7 20:53:19 2001
1079 @@ -20,6 +20,15 @@
1080  extern void exit_fs(struct task_struct *);
1081  extern void set_fs_altroot(void);
1082  
1083 +struct fs_struct *copy_fs_struct(struct fs_struct *old);
1084 +void put_fs_struct(struct fs_struct *fs);
1085 +
1086 +#endif
1087 +#endif
1088 +
1089 +#if !defined(_LINUX_FS_STRUCT_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
1090 +#define _LINUX_FS_STRUCT_H_INLINES
1091 +#ifdef __KERNEL__
1092  /*
1093   * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
1094   * It can block. Requires the big lock held.
1095 @@ -65,9 +74,5 @@
1096                 mntput(old_pwdmnt);
1097         }
1098  }
1099 -
1100 -struct fs_struct *copy_fs_struct(struct fs_struct *old);
1101 -void put_fs_struct(struct fs_struct *fs);
1102 -
1103  #endif
1104  #endif
1105 diff -urN linux-2.4.14/include/linux/sched.h linux/include/linux/sched.h
1106 --- linux-2.4.14/include/linux/sched.h  Mon Nov  5 20:10:45 2001
1107 +++ linux/include/linux/sched.h Wed Nov  7 20:53:19 2001
1108 @@ -88,6 +88,7 @@
1109  #define TASK_UNINTERRUPTIBLE   2
1110  #define TASK_ZOMBIE            4
1111  #define TASK_STOPPED           8
1112 +#define PREEMPT_ACTIVE         0x40000000
1113  
1114  #define __set_task_state(tsk, state_value)             \
1115         do { (tsk)->state = (state_value); } while (0)
1116 @@ -154,6 +155,9 @@
1117  #define        MAX_SCHEDULE_TIMEOUT    LONG_MAX
1118  extern signed long FASTCALL(schedule_timeout(signed long timeout));
1119  asmlinkage void schedule(void);
1120 +#ifdef CONFIG_PREEMPT
1121 +asmlinkage void preempt_schedule(void);
1122 +#endif
1123  
1124  extern int schedule_task(struct tq_struct *task);
1125  extern void flush_scheduled_tasks(void);
1126 @@ -283,7 +287,17 @@
1127          * offsets of these are hardcoded elsewhere - touch with care
1128          */
1129         volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
1130 -       unsigned long flags;    /* per process flags, defined below */
1131 +        /*
1132 +         * We want the preempt_count in this cache line, but we
1133 +         * a) don't want to mess up the offsets in asm code, and
1134 +         * b) don't want to disturb the alignment of the line below,
1135 +         * so we move "flags" down.
1136 +         *
1137 +         * Also note we don't make preempt_count volatile, but we do
1138 +         * need to make sure it is never hiding in a register when
1139 +         * we take an interrupt, so we need to use barrier().
1140 +         */
1141 +       int preempt_count;          /* 0=> preemptable, < 0 => BUG */
1142         int sigpending;
1143         mm_segment_t addr_limit;        /* thread address space:
1144                                                 0-0xBFFFFFFF for user-thead
1145 @@ -317,6 +331,7 @@
1146         struct mm_struct *active_mm;
1147         struct list_head local_pages;
1148         unsigned int allocation_order, nr_local_pages;
1149 +       unsigned long flags;
1150  
1151  /* task state */
1152         struct linux_binfmt *binfmt;
1153 @@ -900,6 +915,11 @@
1154         return res;
1155  }
1156  
1157 +#define _TASK_STRUCT_DEFINED
1158 +#include <linux/dcache.h>
1159 +#include <linux/tqueue.h>
1160 +#include <linux/fs_struct.h>
1161 +
1162  #endif /* __KERNEL__ */
1163  
1164  #endif
1165 diff -urN linux-2.4.14/include/linux/smp.h linux/include/linux/smp.h
1166 --- linux-2.4.14/include/linux/smp.h    Mon Nov  5 20:10:46 2001
1167 +++ linux/include/linux/smp.h   Wed Nov  7 20:53:19 2001
1168 @@ -81,7 +81,9 @@
1169  #define smp_processor_id()                     0
1170  #define hard_smp_processor_id()                        0
1171  #define smp_threads_ready                      1
1172 +#ifndef CONFIG_PREEMPT
1173  #define kernel_lock()
1174 +#endif
1175  #define cpu_logical_map(cpu)                   0
1176  #define cpu_number_map(cpu)                    0
1177  #define smp_call_function(func,info,retry,wait)        ({ 0; })
1178 diff -urN linux-2.4.14/include/linux/smp_lock.h linux/include/linux/smp_lock.h
1179 --- linux-2.4.14/include/linux/smp_lock.h       Mon Nov  5 20:10:46 2001
1180 +++ linux/include/linux/smp_lock.h      Wed Nov  7 20:53:19 2001
1181 @@ -3,7 +3,7 @@
1182  
1183  #include <linux/config.h>
1184  
1185 -#ifndef CONFIG_SMP
1186 +#if !defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT)
1187  
1188  #define lock_kernel()                          do { } while(0)
1189  #define unlock_kernel()                                do { } while(0)
1190 diff -urN linux-2.4.14/include/linux/spinlock.h linux/include/linux/spinlock.h
1191 --- linux-2.4.14/include/linux/spinlock.h       Mon Nov  5 20:10:46 2001
1192 +++ linux/include/linux/spinlock.h      Wed Nov  7 20:53:19 2001
1193 @@ -45,8 +45,10 @@
1194  
1195  #if (DEBUG_SPINLOCKS < 1)
1196  
1197 +#ifndef CONFIG_PREEMPT
1198  #define atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
1199  #define ATOMIC_DEC_AND_LOCK
1200 +#endif
1201  
1202  /*
1203   * Your basic spinlocks, allowing only a single CPU anywhere
1204 @@ -62,11 +64,11 @@
1205  #endif
1206  
1207  #define spin_lock_init(lock)   do { } while(0)
1208 -#define spin_lock(lock)                (void)(lock) /* Not "unused variable". */
1209 +#define _raw_spin_lock(lock)   (void)(lock) /* Not "unused variable". */
1210  #define spin_is_locked(lock)   (0)
1211 -#define spin_trylock(lock)     ({1; })
1212 +#define _raw_spin_trylock(lock)        ({1; })
1213  #define spin_unlock_wait(lock) do { } while(0)
1214 -#define spin_unlock(lock)      do { } while(0)
1215 +#define _raw_spin_unlock(lock) do { } while(0)
1216  
1217  #elif (DEBUG_SPINLOCKS < 2)
1218  
1219 @@ -125,13 +127,77 @@
1220  #endif
1221  
1222  #define rwlock_init(lock)      do { } while(0)
1223 -#define read_lock(lock)                (void)(lock) /* Not "unused variable". */
1224 -#define read_unlock(lock)      do { } while(0)
1225 -#define write_lock(lock)       (void)(lock) /* Not "unused variable". */
1226 -#define write_unlock(lock)     do { } while(0)
1227 +#define _raw_read_lock(lock)   (void)(lock) /* Not "unused variable". */
1228 +#define _raw_read_unlock(lock) do { } while(0)
1229 +#define _raw_write_lock(lock)  (void)(lock) /* Not "unused variable". */
1230 +#define _raw_write_unlock(lock)        do { } while(0)
1231  
1232  #endif /* !SMP */
1233  
1234 +#ifdef CONFIG_PREEMPT
1235 +
1236 +#define preempt_is_disabled() (current->preempt_count)
1237 +#define preempt_prefetch(a) prefetchw(a)
1238 +
1239 +#define preempt_disable() \
1240 +do { \
1241 +       ++current->preempt_count; \
1242 +       barrier(); \
1243 +} while (0)
1244 +
1245 +#define preempt_enable_no_resched() \
1246 +do { \
1247 +       --current->preempt_count; \
1248 +       barrier(); \
1249 +} while (0)
1250 +
1251 +#define preempt_enable() \
1252 +do { \
1253 +       --current->preempt_count; \
1254 +       barrier(); \
1255 +       if ((current->preempt_count == 0) && current->need_resched) \
1256 +               preempt_schedule(); \
1257 +} while (0)
1258 +
1259 +#define spin_lock(lock)        \
1260 +do { \
1261 +       preempt_disable(); \
1262 +       _raw_spin_lock(lock); \
1263 +} while(0)
1264 +#define spin_trylock(lock)     ({preempt_disable(); _raw_spin_trylock(lock) ? \
1265 +                                       1 : ({preempt_enable(); 0;});})
1266 +#define spin_unlock(lock) \
1267 +do { \
1268 +       _raw_spin_unlock(lock); \
1269 +       preempt_enable(); \
1270 +} while (0)
1271 +
1272 +#define read_lock(lock)                ({preempt_disable(); _raw_read_lock(lock);})
1273 +#define read_unlock(lock)      ({_raw_read_unlock(lock); preempt_enable();})
1274 +#define write_lock(lock)       ({preempt_disable(); _raw_write_lock(lock);})
1275 +#define write_unlock(lock)     ({_raw_write_unlock(lock); preempt_enable();})
1276 +#define write_trylock(lock)    ({preempt_disable(); _raw_write_trylock(lock) ? \
1277 +                                       1 : ({preempt_enable(); 0;});})
1278 +
1279 +#else
1280 +
1281 +#define preempt_is_disabled() do { } while (0)
1282 +#define preempt_disable()    do { } while (0)
1283 +#define preempt_enable_no_resched()
1284 +#define preempt_enable()     do { } while (0)
1285 +#define preempt_prefetch(a)
1286 +
1287 +#define spin_lock(lock)                _raw_spin_lock(lock)
1288 +#define spin_trylock(lock)     _raw_spin_trylock(lock)
1289 +#define spin_unlock(lock)      _raw_spin_unlock(lock)
1290 +
1291 +#define read_lock(lock)                _raw_read_lock(lock)
1292 +#define read_unlock(lock)      _raw_read_unlock(lock)
1293 +#define write_lock(lock)       _raw_write_lock(lock)
1294 +#define write_unlock(lock)     _raw_write_unlock(lock)
1295 +#define write_trylock(lock)    _raw_write_trylock(lock)
1296 +#endif
1297 +
1298  /* "lock on reference count zero" */
1299  #ifndef ATOMIC_DEC_AND_LOCK
1300  #include <asm/atomic.h>
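
Under CONFIG_PREEMPT the generic lock macros above become thin wrappers:
preemption is disabled before the architecture's _raw_ operation and re-enabled
after the unlock, and spin_trylock() leaves preemption disabled only on
success.  A minimal usage sketch with hypothetical names (illustrative only):

	static spinlock_t stats_lock = SPIN_LOCK_UNLOCKED;
	static struct { unsigned long updates; } stats;

	static void update_stats(void)
	{
		spin_lock(&stats_lock);		/* preempt_disable(); _raw_spin_lock()   */
		stats.updates++;
		spin_unlock(&stats_lock);	/* _raw_spin_unlock(); preempt_enable()  */
	}

	static int try_update_stats(void)
	{
		if (!spin_trylock(&stats_lock))
			return 0;		/* lock busy; preemption already re-enabled */
		stats.updates++;
		spin_unlock(&stats_lock);
		return 1;
	}
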
1301 diff -urN linux-2.4.14/include/linux/tqueue.h linux/include/linux/tqueue.h
1302 --- linux-2.4.14/include/linux/tqueue.h Mon Nov  5 20:10:46 2001
1303 +++ linux/include/linux/tqueue.h        Wed Nov  7 20:53:19 2001
1304 @@ -94,6 +94,22 @@
1305  extern spinlock_t tqueue_lock;
1306  
1307  /*
1308 + * Call all "bottom halfs" on a given list.
1309 + */
1310 +
1311 +extern void __run_task_queue(task_queue *list);
1312 +
1313 +static inline void run_task_queue(task_queue *list)
1314 +{
1315 +       if (TQ_ACTIVE(*list))
1316 +               __run_task_queue(list);
1317 +}
1318 +
1319 +#endif /* _LINUX_TQUEUE_H */
1320 +
1321 +#if !defined(_LINUX_TQUEUE_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
1322 +#define _LINUX_TQUEUE_H_INLINES
1323 +/*
1324   * Queue a task on a tq.  Return non-zero if it was successfully
1325   * added.
1326   */
1327 @@ -109,17 +125,4 @@
1328         }
1329         return ret;
1330  }
1331 -
1332 -/*
1333 - * Call all "bottom halfs" on a given list.
1334 - */
1335 -
1336 -extern void __run_task_queue(task_queue *list);
1337 -
1338 -static inline void run_task_queue(task_queue *list)
1339 -{
1340 -       if (TQ_ACTIVE(*list))
1341 -               __run_task_queue(list);
1342 -}
1343 -
1344 -#endif /* _LINUX_TQUEUE_H */
1345 +#endif
1346 diff -urN linux-2.4.14/kernel/exit.c linux/kernel/exit.c
1347 --- linux-2.4.14/kernel/exit.c  Mon Nov  5 20:10:45 2001
1348 +++ linux/kernel/exit.c Wed Nov  7 20:53:19 2001
1349 @@ -272,6 +272,10 @@
1350  struct mm_struct * start_lazy_tlb(void)
1351  {
1352         struct mm_struct *mm = current->mm;
1353 +#ifdef CONFIG_PREEMPT
1354 +       if (preempt_is_disabled() == 0)
1355 +               BUG();
1356 +#endif
1357         current->mm = NULL;
1358         /* active_mm is still 'mm' */
1359         atomic_inc(&mm->mm_count);
1360 @@ -283,6 +287,10 @@
1361  {
1362         struct mm_struct *active_mm = current->active_mm;
1363  
1364 +#ifdef CONFIG_PREEMPT
1365 +       if (preempt_is_disabled() == 0)
1366 +               BUG();
1367 +#endif
1368         current->mm = mm;
1369         if (mm != active_mm) {
1370                 current->active_mm = mm;
1371 @@ -306,8 +314,8 @@
1372                 /* more a memory barrier than a real lock */
1373                 task_lock(tsk);
1374                 tsk->mm = NULL;
1375 -               task_unlock(tsk);
1376                 enter_lazy_tlb(mm, current, smp_processor_id());
1377 +               task_unlock(tsk);
1378                 mmput(mm);
1379         }
1380  }
1381 diff -urN linux-2.4.14/kernel/fork.c linux/kernel/fork.c
1382 --- linux-2.4.14/kernel/fork.c  Mon Nov  5 20:10:45 2001
1383 +++ linux/kernel/fork.c Wed Nov  7 20:53:19 2001
1384 @@ -604,6 +604,12 @@
1385         if (p->binfmt && p->binfmt->module)
1386                 __MOD_INC_USE_COUNT(p->binfmt->module);
1387  
1388 +#ifdef CONFIG_PREEMPT
1389 +        /* Since we are keeping the context switch off state as part
1390 +        /* The preempt count is kept as part of the task context, so
1391 +         * make sure a new task starts with preemption disabled.
1392 +       p->preempt_count = 1;
1393 +#endif
1394         p->did_exec = 0;
1395         p->swappable = 0;
1396         p->state = TASK_UNINTERRUPTIBLE;
1397 diff -urN linux-2.4.14/kernel/ksyms.c linux/kernel/ksyms.c
1398 --- linux-2.4.14/kernel/ksyms.c Mon Nov  5 20:10:45 2001
1399 +++ linux/kernel/ksyms.c        Wed Nov  7 20:53:19 2001
1400 @@ -434,6 +434,9 @@
1401  EXPORT_SYMBOL(interruptible_sleep_on);
1402  EXPORT_SYMBOL(interruptible_sleep_on_timeout);
1403  EXPORT_SYMBOL(schedule);
1404 +#ifdef CONFIG_PREEMPT
1405 +EXPORT_SYMBOL(preempt_schedule);
1406 +#endif
1407  EXPORT_SYMBOL(schedule_timeout);
1408  EXPORT_SYMBOL(jiffies);
1409  EXPORT_SYMBOL(xtime);
1410 diff -urN linux-2.4.14/kernel/sched.c linux/kernel/sched.c
1411 --- linux-2.4.14/kernel/sched.c Mon Nov  5 20:10:45 2001
1412 +++ linux/kernel/sched.c        Wed Nov  7 20:53:19 2001
1413 @@ -475,7 +475,7 @@
1414         task_lock(prev);
1415         prev->has_cpu = 0;
1416         mb();
1417 -       if (prev->state == TASK_RUNNING)
1418 +       if (task_on_runqueue(prev))
1419                 goto needs_resched;
1420  
1421  out_unlock:
1422 @@ -505,7 +505,7 @@
1423                         goto out_unlock;
1424  
1425                 spin_lock_irqsave(&runqueue_lock, flags);
1426 -               if ((prev->state == TASK_RUNNING) && !prev->has_cpu)
1427 +               if (task_on_runqueue(prev) && !prev->has_cpu)
1428                         reschedule_idle(prev);
1429                 spin_unlock_irqrestore(&runqueue_lock, flags);
1430                 goto out_unlock;
1431 @@ -518,6 +518,7 @@
1432  void schedule_tail(struct task_struct *prev)
1433  {
1434         __schedule_tail(prev);
1435 +       preempt_enable();
1436  }
1437  
1438  /*
1439 @@ -540,6 +541,8 @@
1440  
1441         spin_lock_prefetch(&runqueue_lock);
1442  
1443 +       preempt_disable(); 
1444 +
1445         if (!current->active_mm) BUG();
1446  need_resched_back:
1447         prev = current;
1448 @@ -563,6 +566,9 @@
1449                 goto move_rr_last;
1450  move_rr_back:
1451  
1452 +#ifdef CONFIG_PREEMPT
1453 +       if (preempt_is_disabled() & PREEMPT_ACTIVE) goto treat_like_run;
1454 +#endif
1455         switch (prev->state) {
1456                 case TASK_INTERRUPTIBLE:
1457                         if (signal_pending(prev)) {
1458 @@ -573,6 +579,9 @@
1459                         del_from_runqueue(prev);
1460                 case TASK_RUNNING:;
1461         }
1462 +#ifdef CONFIG_PREEMPT
1463 +       treat_like_run:
1464 +#endif
1465         prev->need_resched = 0;
1466  
1467         /*
1468 @@ -585,7 +594,7 @@
1469          */
1470         next = idle_task(this_cpu);
1471         c = -1000;
1472 -       if (prev->state == TASK_RUNNING)
1473 +       if (task_on_runqueue(prev))
1474                 goto still_running;
1475  
1476  still_running_back:
1477 @@ -678,7 +687,7 @@
1478         reacquire_kernel_lock(current);
1479         if (current->need_resched)
1480                 goto need_resched_back;
1481 -
1482 +       preempt_enable_no_resched(); 
1483         return;
1484  
1485  recalculate:
1486 @@ -988,6 +997,34 @@
1487         return setscheduler(pid, -1, param);
1488  }
1489  
1490 +#ifdef CONFIG_PREEMPT
1491 +
1492 +#ifdef CONFIG_SMP
1493 +#define lock_to_this_cpu()                             \
1494 +        unsigned long old_cpus_allowed = current->cpus_allowed;        \
1495 +        current->cpus_allowed = 1UL << smp_processor_id()
1496 +#define restore_cpus_allowed() current->cpus_allowed = old_cpus_allowed
1497 +#else
1498 +#define lock_to_this_cpu()
1499 +#define restore_cpus_allowed()
1500 +#endif /* !CONFIG_SMP */
1501 +
1502 +asmlinkage void preempt_schedule(void)
1503 +{
1504 +       while (current->need_resched) {
1505 +               /* it would be ideal not to lock tasks to their cpu here,
1506 +                * but only around the data that needs such locking */
1507 +               lock_to_this_cpu();
1508 +               current->preempt_count += PREEMPT_ACTIVE + 1;
1509 +               barrier();
1510 +               schedule();
1511 +               current->preempt_count -= PREEMPT_ACTIVE + 1;
1512 +               barrier();
1513 +               restore_cpus_allowed();
1514 +       }
1515 +}
1516 +#endif /* CONFIG_PREEMPT */
1517 +
1518  asmlinkage long sys_sched_getscheduler(pid_t pid)
1519  {
1520         struct task_struct *p;
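
The PREEMPT_ACTIVE bit added to preempt_count is what makes it safe for
preempt_schedule() to call schedule() at nearly any kernel point: schedule()
sees the bit and takes the treat_like_run path, so the preempted task stays on
the runqueue even if its ->state is not TASK_RUNNING at that instant.  A sketch
of the situation this protects, using the usual wait-queue pattern with a
hypothetical `wq' and `condition' (illustrative only):

	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&wq, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	/* An interrupt here may trigger preempt_schedule().  Because
	 * PREEMPT_ACTIVE is set, schedule() does not dequeue us despite
	 * ->state, so we still reach the schedule() call below.
	 */
	if (!condition)
		schedule();
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&wq, &wait);
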
1521 diff -urN linux-2.4.14/lib/dec_and_lock.c linux/lib/dec_and_lock.c
1522 --- linux-2.4.14/lib/dec_and_lock.c     Mon Nov  5 20:10:45 2001
1523 +++ linux/lib/dec_and_lock.c    Wed Nov  7 20:53:19 2001
1524 @@ -1,5 +1,6 @@
1525  #include <linux/module.h>
1526  #include <linux/spinlock.h>
1527 +#include <linux/sched.h>
1528  #include <asm/atomic.h>
1529  
1530  /*
1531 diff -urN linux-2.4.14/mm/slab.c linux/mm/slab.c
1532 --- linux-2.4.14/mm/slab.c      Mon Nov  5 20:10:45 2001
1533 +++ linux/mm/slab.c     Wed Nov  7 20:53:19 2001
1534 @@ -49,7 +49,9 @@
1535   *  constructors and destructors are called without any locking.
1536   *  Several members in kmem_cache_t and slab_t never change, they
1537   *     are accessed without any locking.
1538 - *  The per-cpu arrays are never accessed from the wrong cpu, no locking.
1539 + *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
1540 + *     they are however called with local interrupts disabled so no
1541 + *     preempt_disable needed.
1542   *  The non-constant members are protected with a per-cache irq spinlock.
1543   *
1544   * Further notes from the original documentation:
1545 diff -urN linux-2.4.14/net/socket.c linux/net/socket.c
1546 --- linux-2.4.14/net/socket.c   Mon Nov  5 20:10:52 2001
1547 +++ linux/net/socket.c  Wed Nov  7 20:53:19 2001
1548 @@ -135,7 +135,7 @@
1549  
1550  static struct net_proto_family *net_families[NPROTO];
1551  
1552 -#ifdef CONFIG_SMP
1553 +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
1554  static atomic_t net_family_lockct = ATOMIC_INIT(0);
1555  static spinlock_t net_family_lock = SPIN_LOCK_UNLOCKED;
1556  