preempt-kernel-rml-2.4.16-1.patch
1 diff -urN linux-2.4.16/CREDITS linux/CREDITS
2 --- linux-2.4.16/CREDITS        Thu Nov 22 12:49:30 2001
3 +++ linux/CREDITS       Thu Nov 22 12:52:07 2001
4 @@ -971,8 +971,8 @@
5  
6  N: Nigel Gamble
7  E: nigel@nrg.org
8 -E: nigel@sgi.com
9  D: Interrupt-driven printer driver
10 +D: Preemptible kernel
11  S: 120 Alley Way
12  S: Mountain View, California 94040
13  S: USA
14 diff -urN linux-2.4.16/Documentation/Configure.help linux/Documentation/Configure.help
15 --- linux-2.4.16/Documentation/Configure.help   Thu Nov 22 12:51:28 2001
16 +++ linux/Documentation/Configure.help  Thu Nov 22 12:52:07 2001
17 @@ -264,6 +264,19 @@
18    If you have a system with several CPUs, you do not need to say Y
19    here: the local APIC will be used automatically.
20  
21 +Preemptible Kernel
22 +CONFIG_PREEMPT
23 +  This option reduces the latency of the kernel when reacting to
24 +  real-time or interactive events by allowing a low priority process to
25 +  be preempted even if it is in kernel mode executing a system call.
26 +  This allows applications to run more reliably even when the system is
27 +  under load due to other, lower priority, processes.
28 +
29 +  Say Y here if you are building a kernel for a desktop system, embedded
30 +  system or real-time system.  Say N if you are building a kernel for a
31 +  system where throughput is more important than interactive response,
32 +  such as a server system.  Say N if you are unsure.
33 +
34  Kernel math emulation
35  CONFIG_MATH_EMULATION
36    Linux can emulate a math coprocessor (used for floating point
37 diff -urN linux-2.4.16/Documentation/preempt-locking.txt linux/Documentation/preempt-locking.txt
38 --- linux-2.4.16/Documentation/preempt-locking.txt      Wed Dec 31 19:00:00 1969
39 +++ linux/Documentation/preempt-locking.txt     Thu Nov 22 12:52:07 2001
40 @@ -0,0 +1,94 @@
41 +                 Proper Locking Under a Preemptible Kernel:
42 +                      Keeping Kernel Code Preempt-Safe
43 +                         Robert Love <rml@tech9.net>
44 +                          Last Updated: 21 Oct 2001
45 +
46 +
47 +INTRODUCTION
48 +
49 +
50 +A preemptible kernel creates new locking issues.  The issues are the same as
51 +those under SMP: concurrency and reentrancy.  Thankfully, the Linux preemptible
52 +kernel model leverages existing SMP locking mechanisms.  Thus, the kernel
53 +requires explicit additional locking for very few additional situations.
54 +
55 +This document is for all kernel hackers.  Developing code in the kernel
56 +requires protecting these situations.  As you will see, these situations would 
57 +normally require a lock, were they not per-CPU.
58 +
59 +
60 +RULE #1: Per-CPU data structures need explicit protection
61 +
62 +
63 +Two similar problems arise. An example code snippet:
64 +
65 +       struct this_needs_locking tux[NR_CPUS];
66 +       tux[smp_processor_id()] = some_value;
67 +       /* task is preempted here... */
68 +       something = tux[smp_processor_id()];
69 +
70 +First, since the data is per-CPU, it may not have explicit SMP locking, but
71 +require it otherwise.  Second, when a preempted task is finally rescheduled,
72 +the previous value of smp_processor_id may not equal the current.  You must
73 +protect these situations by disabling preemption around them.
74 +
75 +
76 +RULE #2: CPU state must be protected.
77 +
78 +
79 +Under preemption, the state of the CPU must be protected.  This is arch-
80 +dependent, but includes CPU structures and state not preserved over a context
81 +switch.  For example, on x86, entering and exiting FPU mode is now a critical
82 +section that must occur while preemption is disabled.  Think what would happen
83 +if the kernel is executing a floating-point instruction and is then preempted.
84 +Remember, the kernel does not save FPU state except for user tasks.  Therefore,
85 +upon preemption, the FPU registers will be sold to the lowest bidder.  Thus,
86 +preemption must be disabled around such regions.
87 +
88 +Note, some FPU functions are already explicitly preempt safe.  For example,
89 +kernel_fpu_begin and kernel_fpu_end will disable and enable preemption.
90 +However, math_state_restore must be called with preemption disabled.
91 +
92 +
93 +SOLUTION
94 +
95 +
96 +Data protection under preemption is achieved by disabling preemption for the
97 +duration of the critical region.
98 +
99 +preempt_enable()               decrement the preempt counter
100 +preempt_disable()              increment the preempt counter
101 +preempt_enable_no_resched()    decrement, but do not immediately preempt
102 +
103 +The functions are nestable.  In other words, you can call preempt_disable
104 +n-times in a code path, and preemption will not be reenabled until the n-th
105 +call to preempt_enable.  The preempt statements define to nothing if
106 +preemption is not enabled.
107 +
108 +Note that you do not need to explicitly prevent preemption if you are holding
109 +any locks or interrupts are disabled, since preemption is implicitly disabled
110 +in those cases.
111 +
112 +Example:
113 +
114 +       cpucache_t *cc; /* this is per-CPU */
115 +       preempt_disable();
116 +       cc = cc_data(searchp);
117 +       if (cc && cc->avail) {
118 +               __free_block(searchp, cc_entry(cc), cc->avail);
119 +               cc->avail = 0;
120 +       }
121 +       preempt_enable();
122 +       return 0;
123 +
124 +Notice how the preemption statements must encompass every reference of the
125 +critical variables.  Another example:
126 +
127 +       int buf[NR_CPUS];
128 +       set_cpu_val(buf);
129 +       if (buf[smp_processor_id()] == -1) printk(KERN_INFO "wee!\n");
130 +       spin_lock(&buf_lock);
131 +       /* ... */
132 +
133 +This code is not preempt-safe, but see how easily we can fix it by simply
134 +moving the spin_lock up two lines.
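
For reference, the fixed form of that last snippet looks like this (an illustrative sketch, not part of the patch; set_cpu_val() and buf_lock are the same placeholders used in the example above). Taking buf_lock before the first per-CPU access also disables preemption for the whole region under CONFIG_PREEMPT:

	int buf[NR_CPUS];
	spin_lock(&buf_lock);           /* preempt_disable() + _raw_spin_lock() */
	set_cpu_val(buf);
	if (buf[smp_processor_id()] == -1) printk(KERN_INFO "wee!\n");
	/* ... */
	spin_unlock(&buf_lock);         /* _raw_spin_unlock() + preempt_enable() */
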
135 diff -urN linux-2.4.16/MAINTAINERS linux/MAINTAINERS
136 --- linux-2.4.16/MAINTAINERS    Thu Nov 22 12:49:54 2001
137 +++ linux/MAINTAINERS   Thu Nov 22 12:52:07 2001
138 @@ -1222,6 +1222,14 @@
139  M:     mostrows@styx.uwaterloo.ca
140  S:     Maintained
141  
142 +PREEMPTIBLE KERNEL
143 +P:     Robert M. Love
144 +M:     rml@tech9.net
145 +L:     linux-kernel@vger.kernel.org
146 +L:     kpreempt-tech@lists.sourceforge.net
147 +W:     http://tech9.net/rml/linux
148 +S:     Maintained
149 +
150  PROMISE DC4030 CACHING DISK CONTROLLER DRIVER
151  P:     Peter Denison
152  M:     promise@pnd-pc.demon.co.uk
153 diff -urN linux-2.4.16/arch/arm/config.in linux/arch/arm/config.in
154 --- linux-2.4.16/arch/arm/config.in     Thu Nov 22 12:51:08 2001
155 +++ linux/arch/arm/config.in    Thu Nov 22 12:52:07 2001
156 @@ -437,6 +437,7 @@
157  if [ "$CONFIG_CPU_32" = "y" -a "$CONFIG_ARCH_EBSA110" != "y" ]; then
158     bool 'Kernel-mode alignment trap handler' CONFIG_ALIGNMENT_TRAP
159  fi
160 +dep_bool 'Preemptible Kernel (experimental)' CONFIG_PREEMPT $CONFIG_CPU_32 $CONFIG_EXPERIMENTAL
161  endmenu
162  
163  source drivers/parport/Config.in
164 diff -urN linux-2.4.16/arch/arm/kernel/entry-armv.S linux/arch/arm/kernel/entry-armv.S
165 --- linux-2.4.16/arch/arm/kernel/entry-armv.S   Thu Nov 22 12:51:08 2001
166 +++ linux/arch/arm/kernel/entry-armv.S  Thu Nov 22 12:52:07 2001
167 @@ -672,6 +672,12 @@
168                 add     r4, sp, #S_SP
169                 mov     r6, lr
170                 stmia   r4, {r5, r6, r7, r8, r9}        @ save sp_SVC, lr_SVC, pc, cpsr, old_ro
171 +#ifdef CONFIG_PREEMPT
172 +               get_current_task r9
173 +               ldr     r8, [r9, #TSK_PREEMPT]
174 +               add     r8, r8, #1
175 +               str     r8, [r9, #TSK_PREEMPT]
176 +#endif
177  1:             get_irqnr_and_base r0, r6, r5, lr
178                 movne   r1, sp
179                 @
180 @@ -679,6 +685,25 @@
181                 @
182                 adrsvc  ne, lr, 1b
183                 bne     do_IRQ
184 +#ifdef CONFIG_PREEMPT
185 +2:             ldr     r8, [r9, #TSK_PREEMPT]
186 +               subs    r8, r8, #1
187 +               bne     3f
188 +               ldr     r7, [r9, #TSK_NEED_RESCHED]
189 +               teq     r7, #0
190 +               beq     3f
191 +               ldr     r6, .LCirqstat
192 +               ldr     r0, [r6, #IRQSTAT_BH_COUNT]
193 +               teq     r0, #0
194 +               bne     3f
195 +               mov     r0, #MODE_SVC
196 +               msr     cpsr_c, r0              @ enable interrupts
197 +               bl      SYMBOL_NAME(preempt_schedule)
198 +               mov     r0, #I_BIT | MODE_SVC
199 +               msr     cpsr_c, r0              @ disable interrupts
200 +               b       2b
201 +3:             str     r8, [r9, #TSK_PREEMPT]
202 +#endif
203                 ldr     r0, [sp, #S_PSR]                @ irqs are already disabled
204                 msr     spsr, r0
205                 ldmia   sp, {r0 - pc}^                  @ load r0 - pc, cpsr
206 @@ -736,6 +761,9 @@
207  .LCprocfns:    .word   SYMBOL_NAME(processor)
208  #endif
209  .LCfp:         .word   SYMBOL_NAME(fp_enter)
210 +#ifdef CONFIG_PREEMPT
211 +.LCirqstat:    .word   SYMBOL_NAME(irq_stat)
212 +#endif
213  
214                 irq_prio_table
215  
216 @@ -775,6 +803,12 @@
217                 stmdb   r8, {sp, lr}^
218                 alignment_trap r4, r7, __temp_irq
219                 zero_fp
220 +               get_current_task tsk
221 +#ifdef CONFIG_PREEMPT
222 +               ldr     r0, [tsk, #TSK_PREEMPT]
223 +               add     r0, r0, #1
224 +               str     r0, [tsk, #TSK_PREEMPT]
225 +#endif
226  1:             get_irqnr_and_base r0, r6, r5, lr
227                 movne   r1, sp
228                 adrsvc  ne, lr, 1b
229 @@ -782,8 +816,12 @@
230                 @ routine called with r0 = irq number, r1 = struct pt_regs *
231                 @
232                 bne     do_IRQ
233 +#ifdef CONFIG_PREEMPT
234 +               ldr     r0, [tsk, #TSK_PREEMPT]
235 +               sub     r0, r0, #1
236 +               str     r0, [tsk, #TSK_PREEMPT]
237 +#endif
238                 mov     why, #0
239 -               get_current_task tsk
240                 b       ret_to_user
241  
242                 .align  5
243 diff -urN linux-2.4.16/arch/arm/tools/getconstants.c linux/arch/arm/tools/getconstants.c
244 --- linux-2.4.16/arch/arm/tools/getconstants.c  Thu Nov 22 12:51:09 2001
245 +++ linux/arch/arm/tools/getconstants.c Thu Nov 22 12:52:07 2001
246 @@ -13,6 +13,7 @@
247  
248  #include <asm/pgtable.h>
249  #include <asm/uaccess.h>
250 +#include <asm/hardirq.h>
251  
252  /*
253   * Make sure that the compiler and target are compatible.
254 @@ -39,6 +40,11 @@
255  DEFN("TSS_SAVE",               OFF_TSK(thread.save));
256  DEFN("TSS_FPESAVE",            OFF_TSK(thread.fpstate.soft.save));
257  
258 +#ifdef CONFIG_PREEMPT
259 +DEFN("TSK_PREEMPT",            OFF_TSK(preempt_count));
260 +DEFN("IRQSTAT_BH_COUNT",       (unsigned long)&(((irq_cpustat_t *)0)->__local_bh_count));
261 +#endif
262 +
263  #ifdef CONFIG_CPU_32
264  DEFN("TSS_DOMAIN",             OFF_TSK(thread.domain));
265  
266 diff -urN linux-2.4.16/arch/i386/config.in linux/arch/i386/config.in
267 --- linux-2.4.16/arch/i386/config.in    Thu Nov 22 12:50:52 2001
268 +++ linux/arch/i386/config.in   Thu Nov 22 12:52:07 2001
269 @@ -170,6 +170,7 @@
270  bool 'Math emulation' CONFIG_MATH_EMULATION
271  bool 'MTRR (Memory Type Range Register) support' CONFIG_MTRR
272  bool 'Symmetric multi-processing support' CONFIG_SMP
273 +bool 'Preemptible Kernel' CONFIG_PREEMPT
274  if [ "$CONFIG_SMP" != "y" ]; then
275     bool 'Local APIC support on uniprocessors' CONFIG_X86_UP_APIC
276     dep_bool 'IO-APIC support on uniprocessors' CONFIG_X86_UP_IOAPIC $CONFIG_X86_UP_APIC
277 @@ -183,9 +184,12 @@
278     bool 'Multiquad NUMA system' CONFIG_MULTIQUAD
279  fi
280  
281 -if [ "$CONFIG_SMP" = "y" -a "$CONFIG_X86_CMPXCHG" = "y" ]; then
282 -   define_bool CONFIG_HAVE_DEC_LOCK y
283 +if [ "$CONFIG_SMP" = "y" -o "$CONFIG_PREEMPT" = "y" ]; then
284 +   if [ "$CONFIG_X86_CMPXCHG" = "y" ]; then
285 +      define_bool CONFIG_HAVE_DEC_LOCK y
286 +   fi
287  fi
288 +
289  endmenu
290  
291  mainmenu_option next_comment
292 diff -urN linux-2.4.16/arch/i386/kernel/entry.S linux/arch/i386/kernel/entry.S
293 --- linux-2.4.16/arch/i386/kernel/entry.S       Thu Nov 22 12:50:53 2001
294 +++ linux/arch/i386/kernel/entry.S      Thu Nov 22 12:52:07 2001
295 @@ -71,7 +71,7 @@
296   * these are offsets into the task-struct.
297   */
298  state          =  0
299 -flags          =  4
300 +preempt_count  =  4
301  sigpending     =  8
302  addr_limit     = 12
303  exec_domain    = 16
304 @@ -79,8 +79,28 @@
305  tsk_ptrace     = 24
306  processor      = 52
307  
308 +        /* These are offsets into the irq_stat structure
309 +         * There is one per cpu and it is aligned to 32
310 +         * byte boundary (we put that here as a shift count)
311 +         */
312 +irq_array_shift                 = CONFIG_X86_L1_CACHE_SHIFT
313 +
314 +irq_stat_local_irq_count        = 4
315 +irq_stat_local_bh_count         = 8
316 +
317  ENOSYS = 38
318  
319 +#ifdef CONFIG_SMP
320 +#define GET_CPU_INDX   movl processor(%ebx),%eax;  \
321 +                        shll $irq_array_shift,%eax
322 +#define GET_CURRENT_CPU_INDX GET_CURRENT(%ebx); \
323 +                             GET_CPU_INDX
324 +#define CPU_INDX (,%eax)
325 +#else
326 +#define GET_CPU_INDX
327 +#define GET_CURRENT_CPU_INDX GET_CURRENT(%ebx)
328 +#define CPU_INDX
329 +#endif
330  
331  #define SAVE_ALL \
332         cld; \
333 @@ -247,12 +267,30 @@
334         ALIGN
335  ENTRY(ret_from_intr)
336         GET_CURRENT(%ebx)
337 +#ifdef CONFIG_PREEMPT
338 +       cli
339 +       decl preempt_count(%ebx)
340 +#endif
341  ret_from_exception:
342         movl EFLAGS(%esp),%eax          # mix EFLAGS and CS
343         movb CS(%esp),%al
344         testl $(VM_MASK | 3),%eax       # return to VM86 mode or non-supervisor?
345         jne ret_from_sys_call
346 +#ifdef CONFIG_PREEMPT
347 +       cmpl $0,preempt_count(%ebx)
348 +       jnz restore_all
349 +       cmpl $0,need_resched(%ebx)
350 +       jz restore_all
351 +       movl SYMBOL_NAME(irq_stat)+irq_stat_local_bh_count CPU_INDX,%ecx
352 +       addl SYMBOL_NAME(irq_stat)+irq_stat_local_irq_count CPU_INDX,%ecx
353 +       jnz restore_all
354 +       incl preempt_count(%ebx)
355 +       sti
356 +       call SYMBOL_NAME(preempt_schedule)
357 +       jmp ret_from_intr
358 +#else
359         jmp restore_all
360 +#endif
361  
362         ALIGN
363  reschedule:
364 @@ -289,6 +327,9 @@
365         GET_CURRENT(%ebx)
366         call *%edi
367         addl $8,%esp
368 +#ifdef CONFIG_PREEMPT
369 +       cli
370 +#endif
371         jmp ret_from_exception
372  
373  ENTRY(coprocessor_error)
374 @@ -308,12 +349,18 @@
375         movl %cr0,%eax
376         testl $0x4,%eax                 # EM (math emulation bit)
377         jne device_not_available_emulate
378 +#ifdef CONFIG_PREEMPT
379 +       cli
380 +#endif
381         call SYMBOL_NAME(math_state_restore)
382         jmp ret_from_exception
383  device_not_available_emulate:
384         pushl $0                # temporary storage for ORIG_EIP
385         call  SYMBOL_NAME(math_emulate)
386         addl $4,%esp
387 +#ifdef CONFIG_PREEMPT
388 +       cli
389 +#endif
390         jmp ret_from_exception
391  
392  ENTRY(debug)
393 diff -urN linux-2.4.16/arch/i386/kernel/i387.c linux/arch/i386/kernel/i387.c
394 --- linux-2.4.16/arch/i386/kernel/i387.c        Thu Nov 22 12:50:53 2001
395 +++ linux/arch/i386/kernel/i387.c       Thu Nov 22 12:52:07 2001
396 @@ -10,6 +10,7 @@
397  
398  #include <linux/config.h>
399  #include <linux/sched.h>
400 +#include <linux/spinlock.h>
401  #include <asm/processor.h>
402  #include <asm/i387.h>
403  #include <asm/math_emu.h>
404 @@ -65,6 +66,8 @@
405  {
406         struct task_struct *tsk = current;
407  
408 +       preempt_disable();
409 +       
410         if (tsk->flags & PF_USEDFPU) {
411                 __save_init_fpu(tsk);
412                 return;
413 diff -urN linux-2.4.16/arch/i386/kernel/traps.c linux/arch/i386/kernel/traps.c
414 --- linux-2.4.16/arch/i386/kernel/traps.c       Thu Nov 22 12:50:53 2001
415 +++ linux/arch/i386/kernel/traps.c      Thu Nov 22 12:52:08 2001
416 @@ -697,6 +697,11 @@
417   */
418  asmlinkage void math_state_restore(struct pt_regs regs)
419  {
420 +       /*
421 +        * CONFIG_PREEMPT
422 +        * Must be called with preemption disabled
423 +        */
424 +
425         __asm__ __volatile__("clts");           /* Allow maths ops (or we recurse) */
426  
427         if (current->used_math) {
428 diff -urN linux-2.4.16/arch/i386/lib/dec_and_lock.c linux/arch/i386/lib/dec_and_lock.c
429 --- linux-2.4.16/arch/i386/lib/dec_and_lock.c   Thu Nov 22 12:50:53 2001
430 +++ linux/arch/i386/lib/dec_and_lock.c  Thu Nov 22 12:52:08 2001
431 @@ -8,6 +8,7 @@
432   */
433  
434  #include <linux/spinlock.h>
435 +#include <linux/sched.h>
436  #include <asm/atomic.h>
437  
438  int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
439 diff -urN linux-2.4.16/drivers/char/console.c linux/drivers/char/console.c
440 --- linux-2.4.16/drivers/char/console.c Thu Nov 22 12:49:55 2001
441 +++ linux/drivers/char/console.c        Thu Nov 22 12:52:08 2001
442 @@ -2356,8 +2356,14 @@
443                 return;
444  
445         pm_access(pm_con);
446 +       
447 +       /*
448 +        * If we raced with con_close(), `vt' may be null.
449 +        * Hence this bandaid.   - akpm
450 +        */
451         acquire_console_sem();
452 -       set_cursor(vt->vc_num);
453 +       if (vt)
454 +               set_cursor(vt->vc_num);
455         release_console_sem();
456  }
457  
458 diff -urN linux-2.4.16/drivers/ieee1394/csr.c linux/drivers/ieee1394/csr.c
459 --- linux-2.4.16/drivers/ieee1394/csr.c Thu Nov 22 12:50:42 2001
460 +++ linux/drivers/ieee1394/csr.c        Thu Nov 22 12:52:08 2001
461 @@ -10,6 +10,7 @@
462   */
463  
464  #include <linux/string.h>
465 +#include <linux/sched.h>
466  
467  #include "ieee1394_types.h"
468  #include "hosts.h"
469 diff -urN linux-2.4.16/fs/adfs/map.c linux/fs/adfs/map.c
470 --- linux-2.4.16/fs/adfs/map.c  Thu Nov 22 12:49:30 2001
471 +++ linux/fs/adfs/map.c Thu Nov 22 12:52:08 2001
472 @@ -12,6 +12,7 @@
473  #include <linux/fs.h>
474  #include <linux/adfs_fs.h>
475  #include <linux/spinlock.h>
476 +#include <linux/sched.h>
477  
478  #include "adfs.h"
479  
480 diff -urN linux-2.4.16/fs/exec.c linux/fs/exec.c
481 --- linux-2.4.16/fs/exec.c      Thu Nov 22 12:49:29 2001
482 +++ linux/fs/exec.c     Thu Nov 22 12:52:08 2001
483 @@ -420,8 +420,8 @@
484                 active_mm = current->active_mm;
485                 current->mm = mm;
486                 current->active_mm = mm;
487 -               task_unlock(current);
488                 activate_mm(active_mm, mm);
489 +               task_unlock(current);
490                 mm_release();
491                 if (old_mm) {
492                         if (active_mm != old_mm) BUG();
493 diff -urN linux-2.4.16/fs/fat/cache.c linux/fs/fat/cache.c
494 --- linux-2.4.16/fs/fat/cache.c Thu Nov 22 12:49:29 2001
495 +++ linux/fs/fat/cache.c        Thu Nov 22 12:52:08 2001
496 @@ -14,6 +14,7 @@
497  #include <linux/string.h>
498  #include <linux/stat.h>
499  #include <linux/fat_cvf.h>
500 +#include <linux/sched.h>
501  
502  #if 0
503  #  define PRINTK(x) printk x
504 diff -urN linux-2.4.16/include/asm-arm/dma.h linux/include/asm-arm/dma.h
505 --- linux-2.4.16/include/asm-arm/dma.h  Thu Nov 22 12:49:32 2001
506 +++ linux/include/asm-arm/dma.h Thu Nov 22 12:52:08 2001
507 @@ -5,6 +5,7 @@
508  
509  #include <linux/config.h>
510  #include <linux/spinlock.h>
511 +#include <linux/sched.h>
512  #include <asm/system.h>
513  #include <asm/memory.h>
514  #include <asm/scatterlist.h>
515 diff -urN linux-2.4.16/include/asm-arm/hardirq.h linux/include/asm-arm/hardirq.h
516 --- linux-2.4.16/include/asm-arm/hardirq.h      Thu Nov 22 12:49:32 2001
517 +++ linux/include/asm-arm/hardirq.h     Thu Nov 22 12:52:08 2001
518 @@ -34,6 +34,7 @@
519  #define irq_exit(cpu,irq)      (local_irq_count(cpu)--)
520  
521  #define synchronize_irq()      do { } while (0)
522 +#define release_irqlock(cpu)   do { } while (0)
523  
524  #else
525  #error SMP not supported
526 diff -urN linux-2.4.16/include/asm-arm/mmu_context.h linux/include/asm-arm/mmu_context.h
527 --- linux-2.4.16/include/asm-arm/mmu_context.h  Thu Nov 22 12:49:32 2001
528 +++ linux/include/asm-arm/mmu_context.h Thu Nov 22 12:52:08 2001
529 @@ -42,6 +42,10 @@
530  switch_mm(struct mm_struct *prev, struct mm_struct *next,
531           struct task_struct *tsk, unsigned int cpu)
532  {
533 +#ifdef CONFIG_PREEMPT
534 +       if (preempt_is_disabled() == 0)
535 +               BUG();
536 +#endif
537         if (prev != next) {
538                 cpu_switch_mm(next->pgd, tsk);
539                 clear_bit(cpu, &prev->cpu_vm_mask);
540 diff -urN linux-2.4.16/include/asm-arm/pgalloc.h linux/include/asm-arm/pgalloc.h
541 --- linux-2.4.16/include/asm-arm/pgalloc.h      Thu Nov 22 12:49:32 2001
542 +++ linux/include/asm-arm/pgalloc.h     Thu Nov 22 12:52:08 2001
543 @@ -57,40 +57,48 @@
544  {
545         unsigned long *ret;
546  
547 +       preempt_disable();
548         if ((ret = pgd_quicklist) != NULL) {
549                 pgd_quicklist = (unsigned long *)__pgd_next(ret);
550                 ret[1] = ret[2];
551                 clean_dcache_entry(ret + 1);
552                 pgtable_cache_size--;
553         }
554 +       preempt_enable();
555         return (pgd_t *)ret;
556  }
557  
558  static inline void free_pgd_fast(pgd_t *pgd)
559  {
560 +       preempt_disable();
561         __pgd_next(pgd) = (unsigned long) pgd_quicklist;
562         pgd_quicklist = (unsigned long *) pgd;
563         pgtable_cache_size++;
564 +       preempt_enable();
565  }
566  
567  static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
568  {
569         unsigned long *ret;
570  
571 +       preempt_disable();
572         if((ret = pte_quicklist) != NULL) {
573                 pte_quicklist = (unsigned long *)__pte_next(ret);
574                 ret[0] = 0;
575                 clean_dcache_entry(ret);
576                 pgtable_cache_size--;
577         }
578 +       preempt_enable();
579         return (pte_t *)ret;
580  }
581  
582  static inline void free_pte_fast(pte_t *pte)
583  {
584 +       preempt_disable();
585         __pte_next(pte) = (unsigned long) pte_quicklist;
586         pte_quicklist = (unsigned long *) pte;
587         pgtable_cache_size++;
588 +       preempt_enable();
589  }
590  
591  #else  /* CONFIG_NO_PGT_CACHE */
592 diff -urN linux-2.4.16/include/asm-arm/smplock.h linux/include/asm-arm/smplock.h
593 --- linux-2.4.16/include/asm-arm/smplock.h      Thu Nov 22 12:49:32 2001
594 +++ linux/include/asm-arm/smplock.h     Thu Nov 22 12:52:08 2001
595 @@ -3,12 +3,17 @@
596   *
597   * Default SMP lock implementation
598   */
599 +#include <linux/config.h>
600  #include <linux/interrupt.h>
601  #include <linux/spinlock.h>
602  
603  extern spinlock_t kernel_flag;
604  
605 +#ifdef CONFIG_PREEMPT
606 +#define kernel_locked()                preempt_is_disabled()
607 +#else
608  #define kernel_locked()                spin_is_locked(&kernel_flag)
609 +#endif
610  
611  /*
612   * Release global kernel lock and global interrupt lock
613 @@ -40,8 +45,14 @@
614   */
615  static inline void lock_kernel(void)
616  {
617 +#ifdef CONFIG_PREEMPT
618 +       if (current->lock_depth == -1)
619 +               spin_lock(&kernel_flag);
620 +       ++current->lock_depth;
621 +#else
622         if (!++current->lock_depth)
623                 spin_lock(&kernel_flag);
624 +#endif
625  }
626  
627  static inline void unlock_kernel(void)
628 diff -urN linux-2.4.16/include/asm-arm/softirq.h linux/include/asm-arm/softirq.h
629 --- linux-2.4.16/include/asm-arm/softirq.h      Thu Nov 22 12:49:32 2001
630 +++ linux/include/asm-arm/softirq.h     Thu Nov 22 12:52:08 2001
631 @@ -5,20 +5,22 @@
632  #include <asm/hardirq.h>
633  
634  #define __cpu_bh_enable(cpu) \
635 -               do { barrier(); local_bh_count(cpu)--; } while (0)
636 +               do { barrier(); local_bh_count(cpu)--; preempt_enable(); } while (0)
637  #define cpu_bh_disable(cpu) \
638 -               do { local_bh_count(cpu)++; barrier(); } while (0)
639 +               do { preempt_disable(); local_bh_count(cpu)++; barrier(); } while (0)
640  
641  #define local_bh_disable()     cpu_bh_disable(smp_processor_id())
642  #define __local_bh_enable()    __cpu_bh_enable(smp_processor_id())
643  
644  #define in_softirq()           (local_bh_count(smp_processor_id()) != 0)
645  
646 -#define local_bh_enable()                                              \
647 +#define _local_bh_enable()                                             \
648  do {                                                                   \
649         unsigned int *ptr = &local_bh_count(smp_processor_id());        \
650         if (!--*ptr && ptr[-2])                                         \
651                 __asm__("bl%? __do_softirq": : : "lr");/* out of line */\
652  } while (0)
653  
654 +#define local_bh_enable() do { _local_bh_enable(); preempt_enable(); } while (0)
655 +
656  #endif /* __ASM_SOFTIRQ_H */
657 diff -urN linux-2.4.16/include/asm-i386/hardirq.h linux/include/asm-i386/hardirq.h
658 --- linux-2.4.16/include/asm-i386/hardirq.h     Thu Nov 22 12:49:31 2001
659 +++ linux/include/asm-i386/hardirq.h    Thu Nov 22 12:52:08 2001
660 @@ -36,6 +36,8 @@
661  
662  #define synchronize_irq()      barrier()
663  
664 +#define release_irqlock(cpu)   do { } while (0)
665 +
666  #else
667  
668  #include <asm/atomic.h>
669 diff -urN linux-2.4.16/include/asm-i386/highmem.h linux/include/asm-i386/highmem.h
670 --- linux-2.4.16/include/asm-i386/highmem.h     Thu Nov 22 12:49:31 2001
671 +++ linux/include/asm-i386/highmem.h    Thu Nov 22 12:52:08 2001
672 @@ -88,6 +88,7 @@
673         enum fixed_addresses idx;
674         unsigned long vaddr;
675  
676 +       preempt_disable();
677         if (page < highmem_start_page)
678                 return page_address(page);
679  
680 @@ -109,8 +110,10 @@
681         unsigned long vaddr = (unsigned long) kvaddr;
682         enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
683  
684 -       if (vaddr < FIXADDR_START) // FIXME
685 +       if (vaddr < FIXADDR_START) { // FIXME
686 +               preempt_enable();
687                 return;
688 +       }
689  
690         if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
691                 BUG();
692 @@ -122,6 +125,8 @@
693         pte_clear(kmap_pte-idx);
694         __flush_tlb_one(vaddr);
695  #endif
696 +
697 +       preempt_enable();
698  }
699  
700  #endif /* __KERNEL__ */
701 diff -urN linux-2.4.16/include/asm-i386/hw_irq.h linux/include/asm-i386/hw_irq.h
702 --- linux-2.4.16/include/asm-i386/hw_irq.h      Thu Nov 22 12:49:31 2001
703 +++ linux/include/asm-i386/hw_irq.h     Thu Nov 22 12:52:08 2001
704 @@ -95,6 +95,18 @@
705  #define __STR(x) #x
706  #define STR(x) __STR(x)
707  
708 +#define GET_CURRENT \
709 +       "movl %esp, %ebx\n\t" \
710 +       "andl $-8192, %ebx\n\t"
711 +
712 +#ifdef CONFIG_PREEMPT
713 +#define BUMP_LOCK_COUNT \
714 +       GET_CURRENT \
715 +       "incl 4(%ebx)\n\t"
716 +#else
717 +#define BUMP_LOCK_COUNT
718 +#endif
719 +
720  #define SAVE_ALL \
721         "cld\n\t" \
722         "pushl %es\n\t" \
723 @@ -108,15 +120,12 @@
724         "pushl %ebx\n\t" \
725         "movl $" STR(__KERNEL_DS) ",%edx\n\t" \
726         "movl %edx,%ds\n\t" \
727 -       "movl %edx,%es\n\t"
728 +       "movl %edx,%es\n\t" \
729 +       BUMP_LOCK_COUNT
730  
731  #define IRQ_NAME2(nr) nr##_interrupt(void)
732  #define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
733  
734 -#define GET_CURRENT \
735 -       "movl %esp, %ebx\n\t" \
736 -       "andl $-8192, %ebx\n\t"
737 -
738  /*
739   *     SMP has a few special interrupts for IPI messages
740   */
741 diff -urN linux-2.4.16/include/asm-i386/i387.h linux/include/asm-i386/i387.h
742 --- linux-2.4.16/include/asm-i386/i387.h        Thu Nov 22 12:49:31 2001
743 +++ linux/include/asm-i386/i387.h       Thu Nov 22 12:52:08 2001
744 @@ -12,6 +12,7 @@
745  #define __ASM_I386_I387_H
746  
747  #include <linux/sched.h>
748 +#include <linux/spinlock.h>
749  #include <asm/processor.h>
750  #include <asm/sigcontext.h>
751  #include <asm/user.h>
752 @@ -24,7 +25,7 @@
753  extern void restore_fpu( struct task_struct *tsk );
754  
755  extern void kernel_fpu_begin(void);
756 -#define kernel_fpu_end() stts()
757 +#define kernel_fpu_end() do { stts(); preempt_enable(); } while(0)
758  
759  
760  #define unlazy_fpu( tsk ) do { \
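
With this change, kernel_fpu_begin()/kernel_fpu_end() bracket an FPU-using region exactly as Documentation/preempt-locking.txt describes: begin saves any live user FPU state and disables preemption, end does stts() and re-enables it. A minimal usage sketch (mmx_copy_chunk() and its body are hypothetical, shown only to illustrate the bracketing):

	#include <asm/i387.h>

	static void mmx_copy_chunk(void *dst, const void *src, unsigned long len)
	{
		kernel_fpu_begin();     /* save user FPU state, preempt_disable() */
		/* ... MMX/SSE register work would go here ... */
		kernel_fpu_end();       /* stts() then preempt_enable() */
	}
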
761 diff -urN linux-2.4.16/include/asm-i386/mmu_context.h linux/include/asm-i386/mmu_context.h
762 --- linux-2.4.16/include/asm-i386/mmu_context.h Thu Nov 22 12:49:31 2001
763 +++ linux/include/asm-i386/mmu_context.h        Thu Nov 22 12:52:08 2001
764 @@ -27,6 +27,10 @@
765  
766  static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
767  {
768 +#ifdef CONFIG_PREEMPT
769 +       if (preempt_is_disabled() == 0)
770 +               BUG();
771 +#endif
772         if (prev != next) {
773                 /* stop flush ipis for the previous mm */
774                 clear_bit(cpu, &prev->cpu_vm_mask);
775 diff -urN linux-2.4.16/include/asm-i386/pgalloc.h linux/include/asm-i386/pgalloc.h
776 --- linux-2.4.16/include/asm-i386/pgalloc.h     Thu Nov 22 12:49:31 2001
777 +++ linux/include/asm-i386/pgalloc.h    Thu Nov 22 12:52:08 2001
778 @@ -65,20 +65,26 @@
779  {
780         unsigned long *ret;
781  
782 +       preempt_disable();
783         if ((ret = pgd_quicklist) != NULL) {
784                 pgd_quicklist = (unsigned long *)(*ret);
785                 ret[0] = 0;
786                 pgtable_cache_size--;
787 -       } else
788 +               preempt_enable();
789 +       } else {
790 +               preempt_enable();
791                 ret = (unsigned long *)get_pgd_slow();
792 +       }
793         return (pgd_t *)ret;
794  }
795  
796  static __inline__ void free_pgd_fast(pgd_t *pgd)
797  {
798 +       preempt_disable();
799         *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
800         pgd_quicklist = (unsigned long *) pgd;
801         pgtable_cache_size++;
802 +       preempt_enable();
803  }
804  
805  static __inline__ void free_pgd_slow(pgd_t *pgd)
806 @@ -108,19 +114,23 @@
807  {
808         unsigned long *ret;
809  
810 +       preempt_disable();
811         if ((ret = (unsigned long *)pte_quicklist) != NULL) {
812                 pte_quicklist = (unsigned long *)(*ret);
813                 ret[0] = ret[1];
814                 pgtable_cache_size--;
815         }
816 +       preempt_enable();
817         return (pte_t *)ret;
818  }
819  
820  static __inline__ void pte_free_fast(pte_t *pte)
821  {
822 +       preempt_disable();
823         *(unsigned long *)pte = (unsigned long) pte_quicklist;
824         pte_quicklist = (unsigned long *) pte;
825         pgtable_cache_size++;
826 +       preempt_enable();
827  }
828  
829  static __inline__ void pte_free_slow(pte_t *pte)
830 diff -urN linux-2.4.16/include/asm-i386/processor.h linux/include/asm-i386/processor.h
831 --- linux-2.4.16/include/asm-i386/processor.h   Thu Nov 22 12:49:31 2001
832 +++ linux/include/asm-i386/processor.h  Thu Nov 22 12:52:08 2001
833 @@ -502,7 +502,10 @@
834  {
835          __asm__ __volatile__ ("prefetchw (%0)" : : "r"(x));
836  }
837 -#define spin_lock_prefetch(x)  prefetchw(x)
838 +#define spin_lock_prefetch(x) do {                             \
839 +       prefetchw(x);                                           \
840 +       preempt_prefetch(&current->preempt_count);              \
841 +} while(0)
842  
843  #endif
844  
845 diff -urN linux-2.4.16/include/asm-i386/smplock.h linux/include/asm-i386/smplock.h
846 --- linux-2.4.16/include/asm-i386/smplock.h     Thu Nov 22 12:49:31 2001
847 +++ linux/include/asm-i386/smplock.h    Thu Nov 22 12:52:08 2001
848 @@ -10,7 +10,15 @@
849  
850  extern spinlock_t kernel_flag;
851  
852 +#ifdef CONFIG_SMP
853  #define kernel_locked()                spin_is_locked(&kernel_flag)
854 +#else
855 +#ifdef CONFIG_PREEMPT
856 +#define kernel_locked()                preempt_is_disabled()
857 +#else
858 +#define kernel_locked()                1
859 +#endif
860 +#endif
861  
862  /*
863   * Release global kernel lock and global interrupt lock
864 @@ -42,6 +50,11 @@
865   */
866  static __inline__ void lock_kernel(void)
867  {
868 +#ifdef CONFIG_PREEMPT
869 +       if (current->lock_depth == -1)
870 +               spin_lock(&kernel_flag);
871 +       ++current->lock_depth;
872 +#else
873  #if 1
874         if (!++current->lock_depth)
875                 spin_lock(&kernel_flag);
876 @@ -54,6 +67,7 @@
877                 :"=m" (__dummy_lock(&kernel_flag)),
878                  "=m" (current->lock_depth));
879  #endif
880 +#endif
881  }
882  
883  static __inline__ void unlock_kernel(void)
884 diff -urN linux-2.4.16/include/asm-i386/softirq.h linux/include/asm-i386/softirq.h
885 --- linux-2.4.16/include/asm-i386/softirq.h     Thu Nov 22 12:49:31 2001
886 +++ linux/include/asm-i386/softirq.h    Thu Nov 22 12:52:08 2001
887 @@ -5,9 +5,9 @@
888  #include <asm/hardirq.h>
889  
890  #define __cpu_bh_enable(cpu) \
891 -               do { barrier(); local_bh_count(cpu)--; } while (0)
892 +               do { barrier(); local_bh_count(cpu)--; preempt_enable(); } while (0)
893  #define cpu_bh_disable(cpu) \
894 -               do { local_bh_count(cpu)++; barrier(); } while (0)
895 +               do { preempt_disable(); local_bh_count(cpu)++; barrier(); } while (0)
896  
897  #define local_bh_disable()     cpu_bh_disable(smp_processor_id())
898  #define __local_bh_enable()    __cpu_bh_enable(smp_processor_id())
899 @@ -22,7 +22,7 @@
900   * If you change the offsets in irq_stat then you have to
901   * update this code as well.
902   */
903 -#define local_bh_enable()                                              \
904 +#define _local_bh_enable()                                             \
905  do {                                                                   \
906         unsigned int *ptr = &local_bh_count(smp_processor_id());        \
907                                                                         \
908 @@ -45,4 +45,6 @@
909                 /* no registers clobbered */ );                         \
910  } while (0)
911  
912 +#define local_bh_enable() do { _local_bh_enable(); preempt_enable(); } while (0)
913 +
914  #endif /* __ASM_SOFTIRQ_H */
915 diff -urN linux-2.4.16/include/asm-i386/spinlock.h linux/include/asm-i386/spinlock.h
916 --- linux-2.4.16/include/asm-i386/spinlock.h    Thu Nov 22 12:49:31 2001
917 +++ linux/include/asm-i386/spinlock.h   Thu Nov 22 12:52:08 2001
918 @@ -77,7 +77,7 @@
919                 :"=m" (lock->lock) : : "memory"
920  
921  
922 -static inline void spin_unlock(spinlock_t *lock)
923 +static inline void _raw_spin_unlock(spinlock_t *lock)
924  {
925  #if SPINLOCK_DEBUG
926         if (lock->magic != SPINLOCK_MAGIC)
927 @@ -97,7 +97,7 @@
928                 :"=q" (oldval), "=m" (lock->lock) \
929                 :"0" (oldval) : "memory"
930  
931 -static inline void spin_unlock(spinlock_t *lock)
932 +static inline void _raw_spin_unlock(spinlock_t *lock)
933  {
934         char oldval = 1;
935  #if SPINLOCK_DEBUG
936 @@ -113,7 +113,7 @@
937  
938  #endif
939  
940 -static inline int spin_trylock(spinlock_t *lock)
941 +static inline int _raw_spin_trylock(spinlock_t *lock)
942  {
943         char oldval;
944         __asm__ __volatile__(
945 @@ -123,7 +123,7 @@
946         return oldval > 0;
947  }
948  
949 -static inline void spin_lock(spinlock_t *lock)
950 +static inline void _raw_spin_lock(spinlock_t *lock)
951  {
952  #if SPINLOCK_DEBUG
953         __label__ here;
954 @@ -179,7 +179,7 @@
955   */
956  /* the spinlock helpers are in arch/i386/kernel/semaphore.c */
957  
958 -static inline void read_lock(rwlock_t *rw)
959 +static inline void _raw_read_lock(rwlock_t *rw)
960  {
961  #if SPINLOCK_DEBUG
962         if (rw->magic != RWLOCK_MAGIC)
963 @@ -188,7 +188,7 @@
964         __build_read_lock(rw, "__read_lock_failed");
965  }
966  
967 -static inline void write_lock(rwlock_t *rw)
968 +static inline void _raw_write_lock(rwlock_t *rw)
969  {
970  #if SPINLOCK_DEBUG
971         if (rw->magic != RWLOCK_MAGIC)
972 @@ -197,10 +197,10 @@
973         __build_write_lock(rw, "__write_lock_failed");
974  }
975  
976 -#define read_unlock(rw)                asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
977 -#define write_unlock(rw)       asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
978 +#define _raw_read_unlock(rw)           asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
979 +#define _raw_write_unlock(rw)  asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
980  
981 -static inline int write_trylock(rwlock_t *lock)
982 +static inline int _raw_write_trylock(rwlock_t *lock)
983  {
984         atomic_t *count = (atomic_t *)lock;
985         if (atomic_sub_and_test(RW_LOCK_BIAS, count))
986 diff -urN linux-2.4.16/include/linux/brlock.h linux/include/linux/brlock.h
987 --- linux-2.4.16/include/linux/brlock.h Thu Nov 22 12:49:30 2001
988 +++ linux/include/linux/brlock.h        Thu Nov 22 12:52:08 2001
989 @@ -171,11 +171,11 @@
990  }
991  
992  #else
993 -# define br_read_lock(idx)     ((void)(idx))
994 -# define br_read_unlock(idx)   ((void)(idx))
995 -# define br_write_lock(idx)    ((void)(idx))
996 -# define br_write_unlock(idx)  ((void)(idx))
997 -#endif
998 +# define br_read_lock(idx)     ({ (void)(idx); preempt_disable(); })
999 +# define br_read_unlock(idx)   ({ (void)(idx); preempt_enable(); })
1000 +# define br_write_lock(idx)    ({ (void)(idx); preempt_disable(); })
1001 +# define br_write_unlock(idx)  ({ (void)(idx); preempt_enable(); })
1002 +#endif /* CONFIG_SMP */
1003  
1004  /*
1005   * Now enumerate all of the possible sw/hw IRQ protected
1006 diff -urN linux-2.4.16/include/linux/dcache.h linux/include/linux/dcache.h
1007 --- linux-2.4.16/include/linux/dcache.h Thu Nov 22 12:49:30 2001
1008 +++ linux/include/linux/dcache.h        Thu Nov 22 12:52:08 2001
1009 @@ -126,31 +126,6 @@
1010  
1011  extern spinlock_t dcache_lock;
1012  
1013 -/**
1014 - * d_drop - drop a dentry
1015 - * @dentry: dentry to drop
1016 - *
1017 - * d_drop() unhashes the entry from the parent
1018 - * dentry hashes, so that it won't be found through
1019 - * a VFS lookup any more. Note that this is different
1020 - * from deleting the dentry - d_delete will try to
1021 - * mark the dentry negative if possible, giving a
1022 - * successful _negative_ lookup, while d_drop will
1023 - * just make the cache lookup fail.
1024 - *
1025 - * d_drop() is used mainly for stuff that wants
1026 - * to invalidate a dentry for some reason (NFS
1027 - * timeouts or autofs deletes).
1028 - */
1029 -
1030 -static __inline__ void d_drop(struct dentry * dentry)
1031 -{
1032 -       spin_lock(&dcache_lock);
1033 -       list_del(&dentry->d_hash);
1034 -       INIT_LIST_HEAD(&dentry->d_hash);
1035 -       spin_unlock(&dcache_lock);
1036 -}
1037 -
1038  static __inline__ int dname_external(struct dentry *d)
1039  {
1040         return d->d_name.name != d->d_iname; 
1041 @@ -275,3 +250,34 @@
1042  #endif /* __KERNEL__ */
1043  
1044  #endif /* __LINUX_DCACHE_H */
1045 +
1046 +#if !defined(__LINUX_DCACHE_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
1047 +#define __LINUX_DCACHE_H_INLINES
1048 +
1049 +#ifdef __KERNEL__
1050 +/**
1051 + * d_drop - drop a dentry
1052 + * @dentry: dentry to drop
1053 + *
1054 + * d_drop() unhashes the entry from the parent
1055 + * dentry hashes, so that it won't be found through
1056 + * a VFS lookup any more. Note that this is different
1057 + * from deleting the dentry - d_delete will try to
1058 + * mark the dentry negative if possible, giving a
1059 + * successful _negative_ lookup, while d_drop will
1060 + * just make the cache lookup fail.
1061 + *
1062 + * d_drop() is used mainly for stuff that wants
1063 + * to invalidate a dentry for some reason (NFS
1064 + * timeouts or autofs deletes).
1065 + */
1066 +
1067 +static __inline__ void d_drop(struct dentry * dentry)
1068 +{
1069 +       spin_lock(&dcache_lock);
1070 +       list_del(&dentry->d_hash);
1071 +       INIT_LIST_HEAD(&dentry->d_hash);
1072 +       spin_unlock(&dcache_lock);
1073 +}
1074 +#endif
1075 +#endif
1076 diff -urN linux-2.4.16/include/linux/fs_struct.h linux/include/linux/fs_struct.h
1077 --- linux-2.4.16/include/linux/fs_struct.h      Thu Nov 22 12:49:30 2001
1078 +++ linux/include/linux/fs_struct.h     Thu Nov 22 12:52:08 2001
1079 @@ -20,6 +20,15 @@
1080  extern void exit_fs(struct task_struct *);
1081  extern void set_fs_altroot(void);
1082  
1083 +struct fs_struct *copy_fs_struct(struct fs_struct *old);
1084 +void put_fs_struct(struct fs_struct *fs);
1085 +
1086 +#endif
1087 +#endif
1088 +
1089 +#if !defined(_LINUX_FS_STRUCT_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
1090 +#define _LINUX_FS_STRUCT_H_INLINES
1091 +#ifdef __KERNEL__
1092  /*
1093   * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
1094   * It can block. Requires the big lock held.
1095 @@ -65,9 +74,5 @@
1096                 mntput(old_pwdmnt);
1097         }
1098  }
1099 -
1100 -struct fs_struct *copy_fs_struct(struct fs_struct *old);
1101 -void put_fs_struct(struct fs_struct *fs);
1102 -
1103  #endif
1104  #endif
1105 diff -urN linux-2.4.16/include/linux/sched.h linux/include/linux/sched.h
1106 --- linux-2.4.16/include/linux/sched.h  Thu Nov 22 12:49:30 2001
1107 +++ linux/include/linux/sched.h Thu Nov 22 12:57:10 2001
1108 @@ -88,6 +88,7 @@
1109  #define TASK_UNINTERRUPTIBLE   2
1110  #define TASK_ZOMBIE            4
1111  #define TASK_STOPPED           8
1112 +#define PREEMPT_ACTIVE         0x40000000
1113  
1114  #define __set_task_state(tsk, state_value)             \
1115         do { (tsk)->state = (state_value); } while (0)
1116 @@ -154,6 +155,9 @@
1117  #define        MAX_SCHEDULE_TIMEOUT    LONG_MAX
1118  extern signed long FASTCALL(schedule_timeout(signed long timeout));
1119  asmlinkage void schedule(void);
1120 +#ifdef CONFIG_PREEMPT
1121 +asmlinkage void preempt_schedule(void);
1122 +#endif
1123  
1124  extern int schedule_task(struct tq_struct *task);
1125  extern void flush_scheduled_tasks(void);
1126 @@ -283,7 +287,17 @@
1127          * offsets of these are hardcoded elsewhere - touch with care
1128          */
1129         volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
1130 -       unsigned long flags;    /* per process flags, defined below */
1131 +        /*
1132 +         * We want the preempt_count in this cache line, but we
1133 +         * a) don't want to mess up the offsets in asm code, and
1134 +         * b) the alignment of the next line below,
1135 +         * so we move "flags" down
1136 +        *
1137 +        * Also note we don't make preempt_count volatile, but we do
1138 +        * need to make sure it is never hiding in a register when
1139 +        * we have an interrupt, so we need to use barrier()
1140 +         */
1141 +       int preempt_count;          /* 0=> preemptable, < 0 => BUG */
1142         int sigpending;
1143         mm_segment_t addr_limit;        /* thread address space:
1144                                                 0-0xBFFFFFFF for user-thead
1145 @@ -325,6 +339,7 @@
1146         struct mm_struct *active_mm;
1147         struct list_head local_pages;
1148         unsigned int allocation_order, nr_local_pages;
1149 +       unsigned long flags;
1150  
1151  /* task state */
1152         struct linux_binfmt *binfmt;
1153 @@ -926,6 +941,11 @@
1154         return res;
1155  }
1156  
1157 +#define _TASK_STRUCT_DEFINED
1158 +#include <linux/dcache.h>
1159 +#include <linux/tqueue.h>
1160 +#include <linux/fs_struct.h>
1161 +
1162  #endif /* __KERNEL__ */
1163  
1164  #endif
1165 diff -urN linux-2.4.16/include/linux/smp.h linux/include/linux/smp.h
1166 --- linux-2.4.16/include/linux/smp.h    Thu Nov 22 12:49:30 2001
1167 +++ linux/include/linux/smp.h   Thu Nov 22 12:52:08 2001
1168 @@ -81,7 +81,9 @@
1169  #define smp_processor_id()                     0
1170  #define hard_smp_processor_id()                        0
1171  #define smp_threads_ready                      1
1172 +#ifndef CONFIG_PREEMPT
1173  #define kernel_lock()
1174 +#endif
1175  #define cpu_logical_map(cpu)                   0
1176  #define cpu_number_map(cpu)                    0
1177  #define smp_call_function(func,info,retry,wait)        ({ 0; })
1178 diff -urN linux-2.4.16/include/linux/smp_lock.h linux/include/linux/smp_lock.h
1179 --- linux-2.4.16/include/linux/smp_lock.h       Thu Nov 22 12:49:30 2001
1180 +++ linux/include/linux/smp_lock.h      Thu Nov 22 12:52:08 2001
1181 @@ -3,7 +3,7 @@
1182  
1183  #include <linux/config.h>
1184  
1185 -#ifndef CONFIG_SMP
1186 +#if !defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT)
1187  
1188  #define lock_kernel()                          do { } while(0)
1189  #define unlock_kernel()                                do { } while(0)
1190 diff -urN linux-2.4.16/include/linux/spinlock.h linux/include/linux/spinlock.h
1191 --- linux-2.4.16/include/linux/spinlock.h       Thu Nov 22 12:49:30 2001
1192 +++ linux/include/linux/spinlock.h      Thu Nov 22 12:52:08 2001
1193 @@ -2,6 +2,7 @@
1194  #define __LINUX_SPINLOCK_H
1195  
1196  #include <linux/config.h>
1197 +#include <linux/compiler.h>
1198  
1199  /*
1200   * These are the generic versions of the spinlocks and read-write
1201 @@ -45,8 +46,10 @@
1202  
1203  #if (DEBUG_SPINLOCKS < 1)
1204  
1205 +#ifndef CONFIG_PREEMPT
1206  #define atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
1207  #define ATOMIC_DEC_AND_LOCK
1208 +#endif
1209  
1210  /*
1211   * Your basic spinlocks, allowing only a single CPU anywhere
1212 @@ -62,11 +65,11 @@
1213  #endif
1214  
1215  #define spin_lock_init(lock)   do { } while(0)
1216 -#define spin_lock(lock)                (void)(lock) /* Not "unused variable". */
1217 +#define _raw_spin_lock(lock)   (void)(lock) /* Not "unused variable". */
1218  #define spin_is_locked(lock)   (0)
1219 -#define spin_trylock(lock)     ({1; })
1220 +#define _raw_spin_trylock(lock)        ({1; })
1221  #define spin_unlock_wait(lock) do { } while(0)
1222 -#define spin_unlock(lock)      do { } while(0)
1223 +#define _raw_spin_unlock(lock) do { } while(0)
1224  
1225  #elif (DEBUG_SPINLOCKS < 2)
1226  
1227 @@ -125,13 +128,77 @@
1228  #endif
1229  
1230  #define rwlock_init(lock)      do { } while(0)
1231 -#define read_lock(lock)                (void)(lock) /* Not "unused variable". */
1232 -#define read_unlock(lock)      do { } while(0)
1233 -#define write_lock(lock)       (void)(lock) /* Not "unused variable". */
1234 -#define write_unlock(lock)     do { } while(0)
1235 +#define _raw_read_lock(lock)   (void)(lock) /* Not "unused variable". */
1236 +#define _raw_read_unlock(lock) do { } while(0)
1237 +#define _raw_write_lock(lock)  (void)(lock) /* Not "unused variable". */
1238 +#define _raw_write_unlock(lock)        do { } while(0)
1239  
1240  #endif /* !SMP */
1241  
1242 +#ifdef CONFIG_PREEMPT
1243 +
1244 +#define preempt_is_disabled() (current->preempt_count)
1245 +#define preempt_prefetch(a) prefetchw(a)
1246 +
1247 +#define preempt_disable() \
1248 +do { \
1249 +       ++current->preempt_count; \
1250 +       barrier(); \
1251 +} while (0)
1252 +
1253 +#define preempt_enable_no_resched() \
1254 +do { \
1255 +       --current->preempt_count; \
1256 +       barrier(); \
1257 +} while (0)
1258 +
1259 +#define preempt_enable() \
1260 +do { \
1261 +       --current->preempt_count; \
1262 +       barrier(); \
1263 +       if (unlikely((current->preempt_count == 0) && current->need_resched)) \
1264 +               preempt_schedule(); \
1265 +} while (0)
1266 +
1267 +#define spin_lock(lock)        \
1268 +do { \
1269 +       preempt_disable(); \
1270 +       _raw_spin_lock(lock); \
1271 +} while(0)
1272 +#define spin_trylock(lock)     ({preempt_disable(); _raw_spin_trylock(lock) ? \
1273 +                                       1 : ({preempt_enable(); 0;});})
1274 +#define spin_unlock(lock) \
1275 +do { \
1276 +       _raw_spin_unlock(lock); \
1277 +       preempt_enable(); \
1278 +} while (0)
1279 +
1280 +#define read_lock(lock)                ({preempt_disable(); _raw_read_lock(lock);})
1281 +#define read_unlock(lock)      ({_raw_read_unlock(lock); preempt_enable();})
1282 +#define write_lock(lock)       ({preempt_disable(); _raw_write_lock(lock);})
1283 +#define write_unlock(lock)     ({_raw_write_unlock(lock); preempt_enable();})
1284 +#define write_trylock(lock)    ({preempt_disable(); _raw_write_trylock(lock) ? \
1285 +                                       1 : ({preempt_enable(); 0;});})
1286 +
1287 +#else
1288 +
1289 +#define preempt_is_disabled() do { } while (0)
1290 +#define preempt_disable()    do { } while (0)
1291 +#define preempt_enable_no_resched()
1292 +#define preempt_enable()     do { } while (0)
1293 +#define preempt_prefetch(a)
1294 +
1295 +#define spin_lock(lock)                _raw_spin_lock(lock)
1296 +#define spin_trylock(lock)     _raw_spin_trylock(lock)
1297 +#define spin_unlock(lock)      _raw_spin_unlock(lock)
1298 +
1299 +#define read_lock(lock)                _raw_read_lock(lock)
1300 +#define read_unlock(lock)      _raw_read_unlock(lock)
1301 +#define write_lock(lock)       _raw_write_lock(lock)
1302 +#define write_unlock(lock)     _raw_write_unlock(lock)
1303 +#define write_trylock(lock)    _raw_write_trylock(lock)
1304 +#endif
1305 +
1306  /* "lock on reference count zero" */
1307  #ifndef ATOMIC_DEC_AND_LOCK
1308  #include <asm/atomic.h>
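
These wrappers are where the locking rules from Documentation/preempt-locking.txt come from: every spin_lock()/read_lock()/write_lock() now bumps current->preempt_count before taking the raw lock, and the matching unlock drops it, calling preempt_schedule() only when the count returns to zero with need_resched set. The counting nests, as in this sketch (counts in the comments assume a value of 0 on entry; some_lock is a placeholder):

	preempt_disable();              /* count 0 -> 1 */
	spin_lock(&some_lock);          /* count 1 -> 2, then _raw_spin_lock() */
	/* ... critical section: per-CPU data stays consistent here ... */
	spin_unlock(&some_lock);        /* _raw_spin_unlock(), count 2 -> 1 */
	preempt_enable();               /* count 1 -> 0, may call preempt_schedule() */
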
1309 diff -urN linux-2.4.16/include/linux/tqueue.h linux/include/linux/tqueue.h
1310 --- linux-2.4.16/include/linux/tqueue.h Thu Nov 22 12:49:30 2001
1311 +++ linux/include/linux/tqueue.h        Thu Nov 22 12:52:08 2001
1312 @@ -94,6 +94,22 @@
1313  extern spinlock_t tqueue_lock;
1314  
1315  /*
1316 + * Call all "bottom halfs" on a given list.
1317 + */
1318 +
1319 +extern void __run_task_queue(task_queue *list);
1320 +
1321 +static inline void run_task_queue(task_queue *list)
1322 +{
1323 +       if (TQ_ACTIVE(*list))
1324 +               __run_task_queue(list);
1325 +}
1326 +
1327 +#endif /* _LINUX_TQUEUE_H */
1328 +
1329 +#if !defined(_LINUX_TQUEUE_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
1330 +#define _LINUX_TQUEUE_H_INLINES
1331 +/*
1332   * Queue a task on a tq.  Return non-zero if it was successfully
1333   * added.
1334   */
1335 @@ -109,17 +125,4 @@
1336         }
1337         return ret;
1338  }
1339 -
1340 -/*
1341 - * Call all "bottom halfs" on a given list.
1342 - */
1343 -
1344 -extern void __run_task_queue(task_queue *list);
1345 -
1346 -static inline void run_task_queue(task_queue *list)
1347 -{
1348 -       if (TQ_ACTIVE(*list))
1349 -               __run_task_queue(list);
1350 -}
1351 -
1352 -#endif /* _LINUX_TQUEUE_H */
1353 +#endif
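
The dcache.h, fs_struct.h and tqueue.h hunks above all apply the same trick, because their inline functions now expand spin_lock() into something that touches current->preempt_count before task_struct has been defined. The pattern, as a sketch (foo.h and the guard names are illustrative, not actual kernel headers):

	/* foo.h */
	#ifndef _FOO_H
	#define _FOO_H
	/* declarations that do not need task_struct */
	#endif /* _FOO_H */

	#if !defined(_FOO_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
	#define _FOO_H_INLINES
	/* inlines that use spin_lock(), i.e. current->preempt_count */
	#endif

sched.h then defines _TASK_STRUCT_DEFINED once task_struct is complete and re-includes these headers, so the deferred inlines are finally compiled.
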
1354 diff -urN linux-2.4.16/kernel/exit.c linux/kernel/exit.c
1355 --- linux-2.4.16/kernel/exit.c  Thu Nov 22 12:49:30 2001
1356 +++ linux/kernel/exit.c Thu Nov 22 12:52:08 2001
1357 @@ -273,6 +273,10 @@
1358  struct mm_struct * start_lazy_tlb(void)
1359  {
1360         struct mm_struct *mm = current->mm;
1361 +#ifdef CONFIG_PREEMPT
1362 +       if (preempt_is_disabled() == 0)
1363 +               BUG();
1364 +#endif
1365         current->mm = NULL;
1366         /* active_mm is still 'mm' */
1367         atomic_inc(&mm->mm_count);
1368 @@ -284,6 +288,10 @@
1369  {
1370         struct mm_struct *active_mm = current->active_mm;
1371  
1372 +#ifdef CONFIG_PREEMPT
1373 +       if (preempt_is_disabled() == 0)
1374 +               BUG();
1375 +#endif
1376         current->mm = mm;
1377         if (mm != active_mm) {
1378                 current->active_mm = mm;
1379 @@ -307,8 +315,8 @@
1380                 /* more a memory barrier than a real lock */
1381                 task_lock(tsk);
1382                 tsk->mm = NULL;
1383 -               task_unlock(tsk);
1384                 enter_lazy_tlb(mm, current, smp_processor_id());
1385 +               task_unlock(tsk);
1386                 mmput(mm);
1387         }
1388  }
1389 diff -urN linux-2.4.16/kernel/fork.c linux/kernel/fork.c
1390 --- linux-2.4.16/kernel/fork.c  Thu Nov 22 12:49:30 2001
1391 +++ linux/kernel/fork.c Thu Nov 22 12:52:08 2001
1392 @@ -604,6 +604,12 @@
1393         if (p->binfmt && p->binfmt->module)
1394                 __MOD_INC_USE_COUNT(p->binfmt->module);
1395  
1396 +#ifdef CONFIG_PREEMPT
1397 +        /* Since we are keeping the context switch off state as part
1398 +         * of the context, make sure we start with it off.
1399 +         */
1400 +       p->preempt_count = 1;
1401 +#endif
1402         p->did_exec = 0;
1403         p->swappable = 0;
1404         p->state = TASK_UNINTERRUPTIBLE;
1405 diff -urN linux-2.4.16/kernel/ksyms.c linux/kernel/ksyms.c
1406 --- linux-2.4.16/kernel/ksyms.c Thu Nov 22 12:49:30 2001
1407 +++ linux/kernel/ksyms.c        Thu Nov 22 12:52:08 2001
1408 @@ -435,6 +435,9 @@
1409  EXPORT_SYMBOL(interruptible_sleep_on);
1410  EXPORT_SYMBOL(interruptible_sleep_on_timeout);
1411  EXPORT_SYMBOL(schedule);
1412 +#ifdef CONFIG_PREEMPT
1413 +EXPORT_SYMBOL(preempt_schedule);
1414 +#endif
1415  EXPORT_SYMBOL(schedule_timeout);
1416  EXPORT_SYMBOL(jiffies);
1417  EXPORT_SYMBOL(xtime);
1418 diff -urN linux-2.4.16/kernel/sched.c linux/kernel/sched.c
1419 --- linux-2.4.16/kernel/sched.c Thu Nov 22 12:49:30 2001
1420 +++ linux/kernel/sched.c        Thu Nov 22 12:56:30 2001
1421 @@ -476,7 +476,7 @@
1422         task_lock(prev);
1423         task_release_cpu(prev);
1424         mb();
1425 -       if (prev->state == TASK_RUNNING)
1426 +       if (task_on_runqueue(prev))
1427                 goto needs_resched;
1428  
1429  out_unlock:
1430 @@ -506,7 +506,7 @@
1431                         goto out_unlock;
1432  
1433                 spin_lock_irqsave(&runqueue_lock, flags);
1434 -               if ((prev->state == TASK_RUNNING) && !task_has_cpu(prev))
1435 +               if (task_on_runqueue(prev) && !task_has_cpu(prev))
1436                         reschedule_idle(prev);
1437                 spin_unlock_irqrestore(&runqueue_lock, flags);
1438                 goto out_unlock;
1439 @@ -519,6 +519,7 @@
1440  asmlinkage void schedule_tail(struct task_struct *prev)
1441  {
1442         __schedule_tail(prev);
1443 +       preempt_enable();
1444  }
1445  
1446  /*
1447 @@ -541,6 +542,8 @@
1448  
1449         spin_lock_prefetch(&runqueue_lock);
1450  
1451 +       preempt_disable(); 
1452 +
1453         if (!current->active_mm) BUG();
1454  need_resched_back:
1455         prev = current;
1456 @@ -568,6 +571,9 @@
1457                         move_last_runqueue(prev);
1458                 }
1459  
1460 +#ifdef CONFIG_PREEMPT
1461 +       if (preempt_is_disabled() & PREEMPT_ACTIVE) goto treat_like_run;
1462 +#endif
1463         switch (prev->state) {
1464                 case TASK_INTERRUPTIBLE:
1465                         if (signal_pending(prev)) {
1466 @@ -578,6 +584,9 @@
1467                         del_from_runqueue(prev);
1468                 case TASK_RUNNING:;
1469         }
1470 +#ifdef CONFIG_PREEMPT
1471 +       treat_like_run:
1472 +#endif
1473         prev->need_resched = 0;
1474  
1475         /*
1476 @@ -686,6 +695,7 @@
1477         reacquire_kernel_lock(current);
1478         if (current->need_resched)
1479                 goto need_resched_back;
1480 +       preempt_enable_no_resched();
1481         return;
1482  }
1483  
1484 @@ -964,6 +974,34 @@
1485         return setscheduler(pid, -1, param);
1486  }
1487  
1488 +#ifdef CONFIG_PREEMPT
1489 +
1490 +#ifdef CONFIG_SMP
1491 +#define lock_to_this_cpu()                             \
1492 +        unsigned long old_cpus_allowed = current->cpus_allowed;        \
1493 +        current->cpus_allowed = 1UL << smp_processor_id()
1494 +#define restore_cpus_allowed() current->cpus_allowed = old_cpus_allowed
1495 +#else
1496 +#define lock_to_this_cpu()
1497 +#define restore_cpus_allowed()
1498 +#endif /* !CONFIG_SMP */
1499 +
1500 +asmlinkage void preempt_schedule(void)
1501 +{
1502 +       while (current->need_resched) {
1503 +               /* it would be ideal not to lock tasks to their cpu here,
1504 +                * but only around the data that needs such locking */
1505 +               lock_to_this_cpu();
1506 +               current->preempt_count += PREEMPT_ACTIVE + 1;
1507 +               barrier();
1508 +               schedule();
1509 +               current->preempt_count -= PREEMPT_ACTIVE + 1;
1510 +               barrier();
1511 +               restore_cpus_allowed();
1512 +       }
1513 +}
1514 +#endif /* CONFIG_PREEMPT */
1515 +
1516  asmlinkage long sys_sched_getscheduler(pid_t pid)
1517  {
1518         struct task_struct *p;
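
The sched.c changes are what make explicit preempt_disable()/preempt_enable() bracketing (RULE #1 above) matter in practice: once need_resched is set, an interrupt return or a preempt_enable() can call preempt_schedule() almost anywhere, so smp_processor_id() is only stable while the count is held. A minimal sketch of the idiom (my_event_count and account_event are hypothetical):

	static int my_event_count[NR_CPUS];

	static void account_event(void)
	{
		preempt_disable();                      /* keep smp_processor_id() stable */
		my_event_count[smp_processor_id()]++;
		preempt_enable();                       /* may enter preempt_schedule() here */
	}
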
1519 diff -urN linux-2.4.16/lib/dec_and_lock.c linux/lib/dec_and_lock.c
1520 --- linux-2.4.16/lib/dec_and_lock.c     Thu Nov 22 12:49:30 2001
1521 +++ linux/lib/dec_and_lock.c    Thu Nov 22 12:52:08 2001
1522 @@ -1,5 +1,6 @@
1523  #include <linux/module.h>
1524  #include <linux/spinlock.h>
1525 +#include <linux/sched.h>
1526  #include <asm/atomic.h>
1527  
1528  /*
1529 diff -urN linux-2.4.16/mm/slab.c linux/mm/slab.c
1530 --- linux-2.4.16/mm/slab.c      Thu Nov 22 12:49:30 2001
1531 +++ linux/mm/slab.c     Thu Nov 22 12:52:08 2001
1532 @@ -49,7 +49,9 @@
1533   *  constructors and destructors are called without any locking.
1534   *  Several members in kmem_cache_t and slab_t never change, they
1535   *     are accessed without any locking.
1536 - *  The per-cpu arrays are never accessed from the wrong cpu, no locking.
1537 + *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
1538 + *     they are however called with local interrupts disabled so no
1539 + *     preempt_disable needed.
1540   *  The non-constant members are protected with a per-cache irq spinlock.
1541   *
1542   * Further notes from the original documentation:
1543 diff -urN linux-2.4.16/net/socket.c linux/net/socket.c
1544 --- linux-2.4.16/net/socket.c   Thu Nov 22 12:49:32 2001
1545 +++ linux/net/socket.c  Thu Nov 22 12:52:08 2001
1546 @@ -135,7 +135,7 @@
1547  
1548  static struct net_proto_family *net_families[NPROTO];
1549  
1550 -#ifdef CONFIG_SMP
1551 +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
1552  static atomic_t net_family_lockct = ATOMIC_INIT(0);
1553  static spinlock_t net_family_lock = SPIN_LOCK_UNLOCKED;
1554  