1 diff -urN linux-2.4.18-rc1-ingo-K3/CREDITS linux/CREDITS
2 --- linux-2.4.18-rc1-ingo-K3/CREDITS    Wed Feb 13 16:24:09 2002
3 +++ linux/CREDITS       Wed Feb 13 16:23:44 2002
4 @@ -981,8 +981,8 @@
5  
6  N: Nigel Gamble
7  E: nigel@nrg.org
8 -E: nigel@sgi.com
9  D: Interrupt-driven printer driver
10 +D: Preemptible kernel
11  S: 120 Alley Way
12  S: Mountain View, California 94040
13  S: USA
14 diff -urN linux-2.4.18-rc1-ingo-K3/Documentation/Configure.help linux/Documentation/Configure.help
15 --- linux-2.4.18-rc1-ingo-K3/Documentation/Configure.help       Wed Feb 13 16:25:06 2002
16 +++ linux/Documentation/Configure.help  Wed Feb 13 16:23:44 2002
17 @@ -266,6 +266,17 @@
18    If you have a system with several CPUs, you do not need to say Y
19    here: the local APIC will be used automatically.
20  
21 +Preemptible Kernel
22 +CONFIG_PREEMPT
23 +  This option reduces the latency of the kernel when reacting to
24 +  real-time or interactive events by allowing a low priority process to
25 +  be preempted even if it is in kernel mode executing a system call.
26 +  This allows applications to run more reliably even when the system is
27 +  under load.
28 +
29 +  Say Y here if you are building a kernel for a desktop, embedded, or
30 +  real-time system.  Say N if you are unsure.
31 +
32  Kernel math emulation
33  CONFIG_MATH_EMULATION
34    Linux can emulate a math coprocessor (used for floating point
35 diff -urN linux-2.4.18-rc1-ingo-K3/Documentation/preempt-locking.txt linux/Documentation/preempt-locking.txt
36 --- linux-2.4.18-rc1-ingo-K3/Documentation/preempt-locking.txt  Wed Dec 31 19:00:00 1969
37 +++ linux/Documentation/preempt-locking.txt     Wed Feb 13 16:23:44 2002
38 @@ -0,0 +1,104 @@
39 +                 Proper Locking Under a Preemptible Kernel:
40 +                      Keeping Kernel Code Preempt-Safe
41 +                         Robert Love <rml@tech9.net>
42 +                          Last Updated: 22 Jan 2002
43 +
44 +
45 +INTRODUCTION
46 +
47 +
48 +A preemptible kernel creates new locking issues.  The issues are the same as
49 +those under SMP: concurrency and reentrancy.  Thankfully, the Linux preemptible
50 +kernel model leverages existing SMP locking mechanisms.  Thus, the kernel
51 +requires explicit additional locking for very few additional situations.
52 +
53 +This document is for all kernel hackers.  Developing code in the kernel
54 +requires protecting these situations.
55 +
56 +
57 +RULE #1: Per-CPU data structures need explicit protection
58 +
59 +
60 +Two similar problems arise. An example code snippet:
61 +
62 +       struct this_needs_locking tux[NR_CPUS];
63 +       tux[smp_processor_id()] = some_value;
64 +       /* task is preempted here... */
65 +       something = tux[smp_processor_id()];
66 +
67 +First, since the data is per-CPU, it may not have explicit SMP locking, but
68 +would otherwise require it.  Second, when a preempted task is finally
69 +rescheduled, the previous value of smp_processor_id may not equal the current
70 +one.  You must protect these situations by disabling preemption around them.
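
(An illustrative sketch, not part of the patch itself: the snippet above
becomes preempt-safe once the whole access sequence is bracketed by the
calls this patch introduces.  All names come from the example.)

        struct this_needs_locking tux[NR_CPUS];

        preempt_disable();
        tux[smp_processor_id()] = some_value;
        /* cannot be preempted here, so the CPU index stays valid */
        something = tux[smp_processor_id()];
        preempt_enable();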
71 +
72 +
73 +RULE #2: CPU state must be protected.
74 +
75 +
76 +Under preemption, the state of the CPU must be protected.  This is arch-
77 +dependent, but includes CPU structures and state not preserved over a context
78 +switch.  For example, on x86, entering and exiting FPU mode is now a critical
79 +section that must occur while preemption is disabled.  Think what would happen
80 +if the kernel is executing a floating-point instruction and is then preempted.
81 +Remember, the kernel does not save FPU state except for user tasks.  Therefore,
82 +upon preemption, the FPU registers will be sold to the lowest bidder.  Thus,
83 +preemption must be disabled around such regions.
84 +
85 +Note, some FPU functions are already explicitly preempt safe.  For example,
86 +kernel_fpu_begin and kernel_fpu_end will disable and enable preemption.
87 +However, math_state_restore must be called with preemption disabled.
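
(A sketch of the safe pattern, illustrative rather than taken from the
patch: kernel code that wants the FPU brackets the work with the calls
named above.)

        kernel_fpu_begin();     /* disables preemption, readies the FPU */
        /* ... FPU/MMX/SSE instructions ... */
        kernel_fpu_end();       /* stts() + preempt_enable() per this patch */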
88 +
89 +
90 +RULE #3: Lock acquire and release must be performed by same task
91 +
92 +
93 +A lock acquired in one task must be released by the same task.  This
94 +means you can't do oddball things like acquire a lock and go off to
95 +play while another task releases it.  If you want to do something
96 +like this, acquire and release the lock in the same code path and
97 +have the caller wait on an event driven by the other task.
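
(A minimal sketch of the sanctioned pattern, illustrative only; it assumes
the completion API available in 2.4 kernels, and the_lock/done are made-up
names.)

        DECLARE_COMPLETION(done);

        /* task A: acquires and releases the lock itself ... */
        spin_lock(&the_lock);
        /* ... critical section ... */
        spin_unlock(&the_lock);
        /* ... then waits on an event driven by task B */
        wait_for_completion(&done);

        /* task B: signals the event instead of releasing A's lock */
        complete(&done);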
98 +
99 +
100 +SOLUTION
101 +
102 +
103 +Data protection under preemption is achieved by disabling preemption for the
104 +duration of the critical region.
105 +
106 +preempt_enable()               decrement the preempt counter
107 +preempt_disable()              increment the preempt counter
108 +preempt_enable_no_resched()    decrement, but do not immediately preempt
109 +preempt_get_count()            return the preempt counter
110 +
111 +The functions are nestable.  In other words, you can call preempt_disable
112 +n times in a code path, and preemption will not be reenabled until the nth
113 +call to preempt_enable.  The preempt statements are defined to nothing if
114 +preemption is not enabled.
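
(To make the nesting rule concrete; illustrative only.)

        preempt_disable();      /* count: 0 -> 1 */
        preempt_disable();      /* count: 1 -> 2, nested */
        /* ... */
        preempt_enable();       /* count: 2 -> 1, still not preemptible */
        preempt_enable();       /* count: 1 -> 0, preemption possible again */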
115 +
116 +Note that you do not need to explicitly prevent preemption if you are holding
117 +any locks or if interrupts are disabled, since preemption is implicitly
118 +disabled in those cases.
119 +
120 +Example:
121 +
122 +       cpucache_t *cc; /* this is per-CPU */
123 +       preempt_disable();
124 +       cc = cc_data(searchp);
125 +       if (cc && cc->avail) {
126 +               __free_block(searchp, cc_entry(cc), cc->avail);
127 +               cc->avail = 0;
128 +       }
129 +       preempt_enable();
130 +       return 0;
131 +
132 +Notice how the preemption statements must encompass every reference to the
133 +critical variables.  Another example:
134 +
135 +       int buf[NR_CPUS];
136 +       set_cpu_val(buf);
137 +       if (buf[smp_processor_id()] == -1) printk(KERN_INFO "wee!\n");
138 +       spin_lock(&buf_lock);
139 +       /* ... */
140 +
141 +This code is not preempt-safe, but see how easily we can fix it by simply
142 +moving the spin_lock up two lines.
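
(Spelled out, the fixed version reads as follows; illustrative, not part of
the patch, and all names come from the example above.)

        int buf[NR_CPUS];

        spin_lock(&buf_lock);   /* taking the lock disables preemption */
        set_cpu_val(buf);
        if (buf[smp_processor_id()] == -1) printk(KERN_INFO "wee!\n");
        /* ... */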
143 diff -urN linux-2.4.18-rc1-ingo-K3/MAINTAINERS linux/MAINTAINERS
144 --- linux-2.4.18-rc1-ingo-K3/MAINTAINERS        Wed Feb 13 16:24:20 2002
145 +++ linux/MAINTAINERS   Wed Feb 13 16:23:45 2002
146 @@ -1248,6 +1248,14 @@
147  M:     mostrows@styx.uwaterloo.ca
148  S:     Maintained
149  
150 +PREEMPTIBLE KERNEL
151 +P:     Robert M. Love
152 +M:     rml@tech9.net
153 +L:     linux-kernel@vger.kernel.org
154 +L:     kpreempt-tech@lists.sourceforge.net
155 +W:     http://tech9.net/rml/linux
156 +S:     Supported
157 +
158  PROMISE DC4030 CACHING DISK CONTROLLER DRIVER
159  P:     Peter Denison
160  M:     promise@pnd-pc.demon.co.uk
161 diff -urN linux-2.4.18-rc1-ingo-K3/arch/arm/config.in linux/arch/arm/config.in
162 --- linux-2.4.18-rc1-ingo-K3/arch/arm/config.in Wed Feb 13 16:24:59 2002
163 +++ linux/arch/arm/config.in    Wed Feb 13 16:23:45 2002
164 @@ -508,6 +508,7 @@
165  if [ "$CONFIG_ISDN" != "n" ]; then
166     source drivers/isdn/Config.in
167  fi
168 +dep_bool 'Preemptible Kernel' CONFIG_PREEMPT $CONFIG_CPU_32
169  endmenu
170  
171  #
172 diff -urN linux-2.4.18-rc1-ingo-K3/arch/arm/kernel/entry-armv.S linux/arch/arm/kernel/entry-armv.S
173 --- linux-2.4.18-rc1-ingo-K3/arch/arm/kernel/entry-armv.S       Wed Feb 13 16:24:59 2002
174 +++ linux/arch/arm/kernel/entry-armv.S  Wed Feb 13 16:23:45 2002
175 @@ -672,6 +672,12 @@
176                 add     r4, sp, #S_SP
177                 mov     r6, lr
178                 stmia   r4, {r5, r6, r7, r8, r9}        @ save sp_SVC, lr_SVC, pc, cpsr, old_ro
179 +#ifdef CONFIG_PREEMPT
180 +               get_current_task r9
181 +               ldr     r8, [r9, #TSK_PREEMPT]
182 +               add     r8, r8, #1
183 +               str     r8, [r9, #TSK_PREEMPT]
184 +#endif
185  1:             get_irqnr_and_base r0, r6, r5, lr
186                 movne   r1, sp
187                 @
188 @@ -679,6 +685,25 @@
189                 @
190                 adrsvc  ne, lr, 1b
191                 bne     do_IRQ
192 +#ifdef CONFIG_PREEMPT
193 +2:             ldr     r8, [r9, #TSK_PREEMPT]
194 +               subs    r8, r8, #1
195 +               bne     3f
196 +               ldr     r7, [r9, #TSK_NEED_RESCHED]
197 +               teq     r7, #0
198 +               beq     3f
199 +               ldr     r6, .LCirqstat
200 +               ldr     r0, [r6, #IRQSTAT_BH_COUNT]
201 +               teq     r0, #0
202 +               bne     3f
203 +               mov     r0, #MODE_SVC
204 +               msr     cpsr_c, r0              @ enable interrupts
205 +               bl      SYMBOL_NAME(preempt_schedule)
206 +               mov     r0, #I_BIT | MODE_SVC
207 +               msr     cpsr_c, r0              @ disable interrupts
208 +               b       2b
209 +3:             str     r8, [r9, #TSK_PREEMPT]
210 +#endif
211                 ldr     r0, [sp, #S_PSR]                @ irqs are already disabled
212                 msr     spsr, r0
213                 ldmia   sp, {r0 - pc}^                  @ load r0 - pc, cpsr
214 @@ -736,6 +761,9 @@
215  .LCprocfns:    .word   SYMBOL_NAME(processor)
216  #endif
217  .LCfp:         .word   SYMBOL_NAME(fp_enter)
218 +#ifdef CONFIG_PREEMPT
219 +.LCirqstat:    .word   SYMBOL_NAME(irq_stat)
220 +#endif
221  
222                 irq_prio_table
223  
224 @@ -775,6 +803,12 @@
225                 stmdb   r8, {sp, lr}^
226                 alignment_trap r4, r7, __temp_irq
227                 zero_fp
228 +               get_current_task tsk
229 +#ifdef CONFIG_PREEMPT
230 +               ldr     r0, [tsk, #TSK_PREEMPT]
231 +               add     r0, r0, #1
232 +               str     r0, [tsk, #TSK_PREEMPT]
233 +#endif
234  1:             get_irqnr_and_base r0, r6, r5, lr
235                 movne   r1, sp
236                 adrsvc  ne, lr, 1b
237 @@ -782,8 +816,12 @@
238                 @ routine called with r0 = irq number, r1 = struct pt_regs *
239                 @
240                 bne     do_IRQ
241 +#ifdef CONFIG_PREEMPT
242 +               ldr     r0, [tsk, #TSK_PREEMPT]
243 +               sub     r0, r0, #1
244 +               str     r0, [tsk, #TSK_PREEMPT]
245 +#endif
246                 mov     why, #0
247 -               get_current_task tsk
248                 b       ret_to_user
249  
250                 .align  5
251 diff -urN linux-2.4.18-rc1-ingo-K3/arch/arm/tools/getconstants.c linux/arch/arm/tools/getconstants.c
252 --- linux-2.4.18-rc1-ingo-K3/arch/arm/tools/getconstants.c      Wed Feb 13 16:25:00 2002
253 +++ linux/arch/arm/tools/getconstants.c Wed Feb 13 16:23:45 2002
254 @@ -13,6 +13,7 @@
255  
256  #include <asm/pgtable.h>
257  #include <asm/uaccess.h>
258 +#include <asm/hardirq.h>
259  
260  /*
261   * Make sure that the compiler and target are compatible.
262 @@ -39,6 +40,11 @@
263  DEFN("TSS_SAVE",               OFF_TSK(thread.save));
264  DEFN("TSS_FPESAVE",            OFF_TSK(thread.fpstate.soft.save));
265  
266 +#ifdef CONFIG_PREEMPT
267 +DEFN("TSK_PREEMPT",            OFF_TSK(preempt_count));
268 +DEFN("IRQSTAT_BH_COUNT",       (unsigned long)&(((irq_cpustat_t *)0)->__local_bh_count));
269 +#endif
270 +
271  #ifdef CONFIG_CPU_32
272  DEFN("TSS_DOMAIN",             OFF_TSK(thread.domain));
273  
274 diff -urN linux-2.4.18-rc1-ingo-K3/arch/i386/config.in linux/arch/i386/config.in
275 --- linux-2.4.18-rc1-ingo-K3/arch/i386/config.in        Wed Feb 13 16:24:50 2002
276 +++ linux/arch/i386/config.in   Wed Feb 13 16:23:45 2002
277 @@ -185,6 +185,7 @@
278  bool 'Math emulation' CONFIG_MATH_EMULATION
279  bool 'MTRR (Memory Type Range Register) support' CONFIG_MTRR
280  bool 'Symmetric multi-processing support' CONFIG_SMP
281 +bool 'Preemptible Kernel' CONFIG_PREEMPT
282  if [ "$CONFIG_SMP" != "y" ]; then
283     bool 'Local APIC support on uniprocessors' CONFIG_X86_UP_APIC
284     dep_bool 'IO-APIC support on uniprocessors' CONFIG_X86_UP_IOAPIC $CONFIG_X86_UP_APIC
285 @@ -198,9 +199,12 @@
286     bool 'Multiquad NUMA system' CONFIG_MULTIQUAD
287  fi
288  
289 -if [ "$CONFIG_SMP" = "y" -a "$CONFIG_X86_CMPXCHG" = "y" ]; then
290 -   define_bool CONFIG_HAVE_DEC_LOCK y
291 +if [ "$CONFIG_SMP" = "y" -o "$CONFIG_PREEMPT" = "y" ]; then
292 +   if [ "$CONFIG_X86_CMPXCHG" = "y" ]; then
293 +      define_bool CONFIG_HAVE_DEC_LOCK y
294 +   fi
295  fi
296 +
297  endmenu
298  
299  mainmenu_option next_comment
300 diff -urN linux-2.4.18-rc1-ingo-K3/arch/i386/kernel/entry.S linux/arch/i386/kernel/entry.S
301 --- linux-2.4.18-rc1-ingo-K3/arch/i386/kernel/entry.S   Wed Feb 13 16:25:18 2002
302 +++ linux/arch/i386/kernel/entry.S      Wed Feb 13 16:23:45 2002
303 @@ -71,7 +71,7 @@
304   * these are offsets into the task-struct.
305   */
306  state          =  0
307 -flags          =  4
308 +preempt_count  =  4
309  sigpending     =  8
310  addr_limit     = 12
311  exec_domain    = 16
312 @@ -79,8 +79,28 @@
313  tsk_ptrace     = 24
314  cpu            = 32
315  
316 +/* These are offsets into the irq_stat structure
317 + * There is one per cpu and it is aligned to 32
318 + * byte boundary (we put that here as a shift count)
319 + */
320 +irq_array_shift                 = CONFIG_X86_L1_CACHE_SHIFT
321 +
322 +irq_stat_local_irq_count        = 4
323 +irq_stat_local_bh_count         = 8
324 +
325  ENOSYS = 38
326  
327 +#ifdef CONFIG_SMP
328 +#define GET_CPU_INDX   movl cpu(%ebx),%eax;  \
329 +                        shll $irq_array_shift,%eax
330 +#define GET_CURRENT_CPU_INDX GET_CURRENT(%ebx); \
331 +                             GET_CPU_INDX
332 +#define CPU_INDX (,%eax)
333 +#else
334 +#define GET_CPU_INDX
335 +#define GET_CURRENT_CPU_INDX GET_CURRENT(%ebx)
336 +#define CPU_INDX
337 +#endif
338  
339  #define SAVE_ALL \
340         cld; \
341 @@ -249,12 +269,30 @@
342         ALIGN
343  ENTRY(ret_from_intr)
344         GET_CURRENT(%ebx)
345 +#ifdef CONFIG_PREEMPT
346 +       cli
347 +       decl preempt_count(%ebx)
348 +#endif
349  ret_from_exception:
350         movl EFLAGS(%esp),%eax          # mix EFLAGS and CS
351         movb CS(%esp),%al
352         testl $(VM_MASK | 3),%eax       # return to VM86 mode or non-supervisor?
353         jne ret_from_sys_call
354 +#ifdef CONFIG_PREEMPT
355 +       cmpl $0,preempt_count(%ebx)
356 +       jnz restore_all
357 +       cmpl $0,need_resched(%ebx)
358 +       jz restore_all
359 +       movl SYMBOL_NAME(irq_stat)+irq_stat_local_bh_count CPU_INDX,%ecx
360 +       addl SYMBOL_NAME(irq_stat)+irq_stat_local_irq_count CPU_INDX,%ecx
361 +       jnz restore_all
362 +       incl preempt_count(%ebx)
363 +       sti
364 +       call SYMBOL_NAME(preempt_schedule)
365 +       jmp ret_from_intr
366 +#else
367         jmp restore_all
368 +#endif
369  
370         ALIGN
371  reschedule:
372 @@ -291,6 +329,9 @@
373         GET_CURRENT(%ebx)
374         call *%edi
375         addl $8,%esp
376 +#ifdef CONFIG_PREEMPT
377 +       cli
378 +#endif
379         jmp ret_from_exception
380  
381  ENTRY(coprocessor_error)
382 @@ -310,12 +351,18 @@
383         movl %cr0,%eax
384         testl $0x4,%eax                 # EM (math emulation bit)
385         jne device_not_available_emulate
386 +#ifdef CONFIG_PREEMPT
387 +       cli
388 +#endif
389         call SYMBOL_NAME(math_state_restore)
390         jmp ret_from_exception
391  device_not_available_emulate:
392         pushl $0                # temporary storage for ORIG_EIP
393         call  SYMBOL_NAME(math_emulate)
394         addl $4,%esp
395 +#ifdef CONFIG_PREEMPT
396 +       cli
397 +#endif
398         jmp ret_from_exception
399  
400  ENTRY(debug)
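
(In C terms, the test the modified i386 interrupt-return path performs is
roughly the following; a sketch of the assembly above, not code from the
patch.)

        /* on return from an interrupt to kernel mode: */
        if (current->preempt_count == 0 && current->need_resched &&
            local_irq_count(cpu) == 0 && local_bh_count(cpu) == 0) {
                current->preempt_count++;       /* block nested attempts */
                __sti();                        /* irqs on for the reschedule */
                preempt_schedule();
                /* jump back and re-test, as "jmp ret_from_intr" does */
        }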
401 diff -urN linux-2.4.18-rc1-ingo-K3/arch/i386/kernel/i387.c linux/arch/i386/kernel/i387.c
402 --- linux-2.4.18-rc1-ingo-K3/arch/i386/kernel/i387.c    Wed Feb 13 16:24:50 2002
403 +++ linux/arch/i386/kernel/i387.c       Wed Feb 13 16:23:45 2002
404 @@ -10,6 +10,7 @@
405  
406  #include <linux/config.h>
407  #include <linux/sched.h>
408 +#include <linux/spinlock.h>
409  #include <asm/processor.h>
410  #include <asm/i387.h>
411  #include <asm/math_emu.h>
412 @@ -65,6 +66,8 @@
413  {
414         struct task_struct *tsk = current;
415  
416 +       preempt_disable();
417 +       
418         if (tsk->flags & PF_USEDFPU) {
419                 __save_init_fpu(tsk);
420                 return;
421 diff -urN linux-2.4.18-rc1-ingo-K3/arch/i386/kernel/smp.c linux/arch/i386/kernel/smp.c
422 --- linux-2.4.18-rc1-ingo-K3/arch/i386/kernel/smp.c     Wed Feb 13 16:25:18 2002
423 +++ linux/arch/i386/kernel/smp.c        Wed Feb 13 16:23:45 2002
424 @@ -497,7 +497,7 @@
425         /*
426          * The target CPU will unlock the migration spinlock:
427          */
428 -       spin_lock(&migration_lock);
429 +       _raw_spin_lock(&migration_lock);
430         new_task = p;
431         send_IPI_mask(1 << cpu, TASK_MIGRATION_VECTOR);
432  }
433 @@ -511,7 +511,7 @@
434  
435         ack_APIC_irq();
436         p = new_task;
437 -       spin_unlock(&migration_lock);
438 +       _raw_spin_unlock(&migration_lock);
439         sched_task_migrated(p);
440  }
441  /*
442 diff -urN linux-2.4.18-rc1-ingo-K3/arch/i386/kernel/traps.c linux/arch/i386/kernel/traps.c
443 --- linux-2.4.18-rc1-ingo-K3/arch/i386/kernel/traps.c   Wed Feb 13 16:24:50 2002
444 +++ linux/arch/i386/kernel/traps.c      Wed Feb 13 16:23:45 2002
445 @@ -694,6 +694,8 @@
446   *
447   * Careful.. There are problems with IBM-designed IRQ13 behaviour.
448   * Don't touch unless you *really* know how it works.
449 + *
450 + * Must be called with kernel preemption disabled.
451   */
452  asmlinkage void math_state_restore(struct pt_regs regs)
453  {
454 diff -urN linux-2.4.18-rc1-ingo-K3/arch/i386/lib/dec_and_lock.c linux/arch/i386/lib/dec_and_lock.c
455 --- linux-2.4.18-rc1-ingo-K3/arch/i386/lib/dec_and_lock.c       Wed Feb 13 16:24:50 2002
456 +++ linux/arch/i386/lib/dec_and_lock.c  Wed Feb 13 16:23:45 2002
457 @@ -8,6 +8,7 @@
458   */
459  
460  #include <linux/spinlock.h>
461 +#include <linux/sched.h>
462  #include <asm/atomic.h>
463  
464  int atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
465 diff -urN linux-2.4.18-rc1-ingo-K3/arch/sh/config.in linux/arch/sh/config.in
466 --- linux-2.4.18-rc1-ingo-K3/arch/sh/config.in  Wed Feb 13 16:25:01 2002
467 +++ linux/arch/sh/config.in     Wed Feb 13 16:23:45 2002
468 @@ -124,6 +124,7 @@
469     hex 'Physical memory start address' CONFIG_MEMORY_START 08000000
470     hex 'Physical memory size' CONFIG_MEMORY_SIZE 00400000
471  fi
472 +bool 'Preemptible Kernel' CONFIG_PREEMPT
473  endmenu
474  
475  if [ "$CONFIG_SH_HP690" = "y" ]; then
476 diff -urN linux-2.4.18-rc1-ingo-K3/arch/sh/kernel/entry.S linux/arch/sh/kernel/entry.S
477 --- linux-2.4.18-rc1-ingo-K3/arch/sh/kernel/entry.S     Wed Feb 13 16:25:01 2002
478 +++ linux/arch/sh/kernel/entry.S        Wed Feb 13 16:23:45 2002
479 @@ -60,10 +60,18 @@
480  /*
481   * These are offsets into the task-struct.
482   */
483 -flags          =  4
484 +preempt_count  =  4
485  sigpending     =  8
486  need_resched   = 20
487  tsk_ptrace     = 24
488 +flags          = 84
489 +
490 +/*
491 + * These offsets are into irq_stat.
492 + * (Find irq_cpustat_t in asm-sh/hardirq.h)
493 + */
494 +local_irq_count =  8
495 +local_bh_count  = 12
496  
497  PT_TRACESYS  = 0x00000002
498  PF_USEDFPU   = 0x00100000
499 @@ -143,7 +151,7 @@
500         mov.l   __INV_IMASK, r11;       \
501         stc     sr, r10;                \
502         and     r11, r10;               \
503 -       stc     k_g_imask, r11; \
504 +       stc     k_g_imask, r11;         \
505         or      r11, r10;               \
506         ldc     r10, sr
507  
508 @@ -304,8 +312,8 @@
509         mov.l   @(tsk_ptrace,r0), r0    ! Is current PTRACE_SYSCALL'd?
510         mov     #PT_TRACESYS, r1
511         tst     r1, r0
512 -       bt      ret_from_syscall
513 -       bra     syscall_ret_trace
514 +       bf      syscall_ret_trace
515 +       bra     ret_from_syscall
516          nop     
517  
518         .align  2
519 @@ -505,8 +513,6 @@
520         .long   syscall_ret_trace
521  __syscall_ret:
522         .long   syscall_ret
523 -__INV_IMASK:
524 -       .long   0xffffff0f      ! ~(IMASK)
525  
526  
527         .align  2
528 @@ -518,7 +524,84 @@
529         .align  2
530  1:     .long   SYMBOL_NAME(schedule)
531  
532 +#ifdef CONFIG_PREEMPT  
533 +       !
534 +       ! Returning from interrupt during kernel mode: check if
535 +       ! preempt_schedule should be called. If need_resched flag
536 +       ! is set, preempt_count is zero, and we're not currently
537 +       ! in an interrupt handler (local irq or bottom half) then
538 +       ! call preempt_schedule. 
539 +       !
540 +       ! Increment preempt_count to prevent a nested interrupt
541 +       ! from reentering preempt_schedule, then decrement after
542 +       ! and drop through to regular interrupt return which will
543 +       ! jump back and check again in case such an interrupt did
544 +       ! come in (and didn't preempt due to preempt_count).
545 +       !
546 +       ! NOTE: because we just checked that preempt_count was
547 +       ! zero before getting to the call, can't we use immediate
548 +       ! values (1 and 0) rather than inc/dec? Also, rather than
549 +       ! drop through to ret_from_irq, we already know this thread
550 +       ! is kernel mode, can't we go direct to ret_from_kirq? In
551 +       ! fact, with proper interrupt nesting and so forth could
552 +       ! the loop simply be on the need_resched w/o checking the
553 +       ! other stuff again? Optimize later...
554 +       !
555 +       .align  2
556 +ret_from_kirq:
557 +       ! Nonzero preempt_count prevents scheduling
558 +       stc     k_current, r1
559 +       mov.l   @(preempt_count,r1), r0
560 +       cmp/eq  #0, r0
561 +       bf      restore_all
562 +       ! Zero need_resched prevents scheduling
563 +       mov.l   @(need_resched,r1), r0
564 +       cmp/eq  #0, r0
565 +       bt      restore_all
566 +       ! If in_interrupt(), don't schedule
567 +       mov.l   __irq_stat, r1
568 +       mov.l   @(local_irq_count,r1), r0
569 +       mov.l   @(local_bh_count,r1), r1
570 +       or      r1, r0
571 +       cmp/eq  #0, r0
572 +       bf      restore_all
573 +       ! Allow scheduling using preempt_schedule
574 +       ! Adjust preempt_count and SR as needed.
575 +       stc     k_current, r1
576 +       mov.l   @(preempt_count,r1), r0 ! Could replace this ...
577 +       add     #1, r0                  ! ... and this w/mov #1?
578 +       mov.l   r0, @(preempt_count,r1)
579 +       STI()
580 +       mov.l   __preempt_schedule, r0
581 +       jsr     @r0
582 +        nop    
583 +       /* CLI */
584 +       stc     sr, r0
585 +       or      #0xf0, r0
586 +       ldc     r0, sr
587 +       !
588 +       stc     k_current, r1
589 +       mov.l   @(preempt_count,r1), r0 ! Could replace this ...
590 +       add     #-1, r0                 ! ... and this w/mov #0?
591 +       mov.l   r0, @(preempt_count,r1)
592 +       ! Maybe should bra ret_from_kirq, or loop over need_resched?
593 +       ! For now, fall through to ret_from_irq again...
594 +#endif /* CONFIG_PREEMPT */
595 +       
596  ret_from_irq:
597 +       mov     #OFF_SR, r0
598 +       mov.l   @(r0,r15), r0   ! get status register
599 +       shll    r0
600 +       shll    r0              ! kernel space?
601 +#ifndef CONFIG_PREEMPT
602 +       bt      restore_all     ! Yes, it's from kernel, go back soon
603 +#else /* CONFIG_PREEMPT */
604 +       bt      ret_from_kirq   ! From kernel: maybe preempt_schedule
605 +#endif /* CONFIG_PREEMPT */
606 +       !
607 +       bra     ret_from_syscall
608 +        nop
609 +
610  ret_from_exception:
611         mov     #OFF_SR, r0
612         mov.l   @(r0,r15), r0   ! get status register
613 @@ -564,6 +647,13 @@
614         .long   SYMBOL_NAME(do_signal)
615  __irq_stat:
616         .long   SYMBOL_NAME(irq_stat)
617 +#ifdef CONFIG_PREEMPT
618 +__preempt_schedule:
619 +       .long   SYMBOL_NAME(preempt_schedule)
620 +#endif /* CONFIG_PREEMPT */    
621 +__INV_IMASK:
622 +       .long   0xffffff0f      ! ~(IMASK)
623 +
624  
625         .align 2
626  restore_all:
627 @@ -679,7 +769,7 @@
628  __fpu_prepare_fd:
629         .long   SYMBOL_NAME(fpu_prepare_fd)
630  __init_task_flags:
631 -       .long   SYMBOL_NAME(init_task_union)+4
632 +       .long   SYMBOL_NAME(init_task_union)+flags
633  __PF_USEDFPU:
634         .long   PF_USEDFPU
635  #endif
636 diff -urN linux-2.4.18-rc1-ingo-K3/arch/sh/kernel/irq.c linux/arch/sh/kernel/irq.c
637 --- linux-2.4.18-rc1-ingo-K3/arch/sh/kernel/irq.c       Wed Feb 13 16:25:01 2002
638 +++ linux/arch/sh/kernel/irq.c  Wed Feb 13 16:23:45 2002
639 @@ -229,6 +229,14 @@
640         struct irqaction * action;
641         unsigned int status;
642  
643 +       /*
644 +        * At this point we're about to actually call the handlers,
645 +        * and interrupts might get reenabled during them... bump
646 +        * preempt_count to prevent any preemption while a handler
647 +        * called here is running...
648 +        */
649 +       preempt_disable();
650 +
651         /* Get IRQ number */
652         asm volatile("stc       r2_bank, %0\n\t"
653                      "shlr2     %0\n\t"
654 @@ -298,8 +306,17 @@
655         desc->handler->end(irq);
656         spin_unlock(&desc->lock);
657  
658 +
659         if (softirq_pending(cpu))
660                 do_softirq();
661 +
662 +       /*
663 +        * We're done with the handlers; interrupts should be
664 +        * disabled again by now.  Decrement preempt_count so
665 +        * that preemption may be allowed as we return...
666 +        */
667 +       preempt_enable_no_resched();
668 +
669         return 1;
670  }
671  
672 diff -urN linux-2.4.18-rc1-ingo-K3/drivers/ieee1394/csr.c linux/drivers/ieee1394/csr.c
673 --- linux-2.4.18-rc1-ingo-K3/drivers/ieee1394/csr.c     Wed Feb 13 16:24:44 2002
674 +++ linux/drivers/ieee1394/csr.c        Wed Feb 13 16:23:45 2002
675 @@ -10,6 +10,7 @@
676   */
677  
678  #include <linux/string.h>
679 +#include <linux/sched.h>
680  
681  #include "ieee1394_types.h"
682  #include "hosts.h"
683 diff -urN linux-2.4.18-rc1-ingo-K3/drivers/sound/sound_core.c linux/drivers/sound/sound_core.c
684 --- linux-2.4.18-rc1-ingo-K3/drivers/sound/sound_core.c Wed Feb 13 16:24:31 2002
685 +++ linux/drivers/sound/sound_core.c    Wed Feb 13 16:23:45 2002
686 @@ -37,6 +37,7 @@
687  #include <linux/config.h>
688  #include <linux/module.h>
689  #include <linux/init.h>
690 +#include <linux/sched.h>
691  #include <linux/slab.h>
692  #include <linux/types.h>
693  #include <linux/kernel.h>
694 diff -urN linux-2.4.18-rc1-ingo-K3/fs/adfs/map.c linux/fs/adfs/map.c
695 --- linux-2.4.18-rc1-ingo-K3/fs/adfs/map.c      Wed Feb 13 16:24:06 2002
696 +++ linux/fs/adfs/map.c Wed Feb 13 16:23:45 2002
697 @@ -12,6 +12,7 @@
698  #include <linux/fs.h>
699  #include <linux/adfs_fs.h>
700  #include <linux/spinlock.h>
701 +#include <linux/sched.h>
702  
703  #include "adfs.h"
704  
705 diff -urN linux-2.4.18-rc1-ingo-K3/fs/exec.c linux/fs/exec.c
706 --- linux-2.4.18-rc1-ingo-K3/fs/exec.c  Wed Feb 13 16:24:05 2002
707 +++ linux/fs/exec.c     Wed Feb 13 16:23:45 2002
708 @@ -420,8 +420,8 @@
709                 active_mm = current->active_mm;
710                 current->mm = mm;
711                 current->active_mm = mm;
712 -               task_unlock(current);
713                 activate_mm(active_mm, mm);
714 +               task_unlock(current);
715                 mm_release();
716                 if (old_mm) {
717                         if (active_mm != old_mm) BUG();
718 diff -urN linux-2.4.18-rc1-ingo-K3/fs/fat/cache.c linux/fs/fat/cache.c
719 --- linux-2.4.18-rc1-ingo-K3/fs/fat/cache.c     Wed Feb 13 16:24:05 2002
720 +++ linux/fs/fat/cache.c        Wed Feb 13 16:23:45 2002
721 @@ -14,6 +14,7 @@
722  #include <linux/string.h>
723  #include <linux/stat.h>
724  #include <linux/fat_cvf.h>
725 +#include <linux/sched.h>
726  
727  #if 0
728  #  define PRINTK(x) printk x
729 diff -urN linux-2.4.18-rc1-ingo-K3/fs/nls/nls_base.c linux/fs/nls/nls_base.c
730 --- linux-2.4.18-rc1-ingo-K3/fs/nls/nls_base.c  Wed Feb 13 16:24:06 2002
731 +++ linux/fs/nls/nls_base.c     Wed Feb 13 16:23:45 2002
732 @@ -18,6 +18,7 @@
733  #ifdef CONFIG_KMOD
734  #include <linux/kmod.h>
735  #endif
736 +#include <linux/sched.h>
737  #include <linux/spinlock.h>
738  
739  static struct nls_table *tables;
740 diff -urN linux-2.4.18-rc1-ingo-K3/include/asm-arm/dma.h linux/include/asm-arm/dma.h
741 --- linux-2.4.18-rc1-ingo-K3/include/asm-arm/dma.h      Wed Feb 13 16:24:14 2002
742 +++ linux/include/asm-arm/dma.h Wed Feb 13 16:23:45 2002
743 @@ -5,6 +5,7 @@
744  
745  #include <linux/config.h>
746  #include <linux/spinlock.h>
747 +#include <linux/sched.h>
748  #include <asm/system.h>
749  #include <asm/memory.h>
750  #include <asm/scatterlist.h>
751 diff -urN linux-2.4.18-rc1-ingo-K3/include/asm-arm/hardirq.h linux/include/asm-arm/hardirq.h
752 --- linux-2.4.18-rc1-ingo-K3/include/asm-arm/hardirq.h  Wed Feb 13 16:24:14 2002
753 +++ linux/include/asm-arm/hardirq.h     Wed Feb 13 16:23:45 2002
754 @@ -34,6 +34,7 @@
755  #define irq_exit(cpu,irq)      (local_irq_count(cpu)--)
756  
757  #define synchronize_irq()      do { } while (0)
758 +#define release_irqlock(cpu)   do { } while (0)
759  
760  #else
761  #error SMP not supported
762 diff -urN linux-2.4.18-rc1-ingo-K3/include/asm-arm/pgalloc.h linux/include/asm-arm/pgalloc.h
763 --- linux-2.4.18-rc1-ingo-K3/include/asm-arm/pgalloc.h  Wed Feb 13 16:24:14 2002
764 +++ linux/include/asm-arm/pgalloc.h     Wed Feb 13 16:23:45 2002
765 @@ -57,40 +57,48 @@
766  {
767         unsigned long *ret;
768  
769 +       preempt_disable();
770         if ((ret = pgd_quicklist) != NULL) {
771                 pgd_quicklist = (unsigned long *)__pgd_next(ret);
772                 ret[1] = ret[2];
773                 clean_dcache_entry(ret + 1);
774                 pgtable_cache_size--;
775         }
776 +       preempt_enable();
777         return (pgd_t *)ret;
778  }
779  
780  static inline void free_pgd_fast(pgd_t *pgd)
781  {
782 +       preempt_disable();
783         __pgd_next(pgd) = (unsigned long) pgd_quicklist;
784         pgd_quicklist = (unsigned long *) pgd;
785         pgtable_cache_size++;
786 +       preempt_enable();
787  }
788  
789  static inline pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
790  {
791         unsigned long *ret;
792  
793 +       preempt_disable();
794         if((ret = pte_quicklist) != NULL) {
795                 pte_quicklist = (unsigned long *)__pte_next(ret);
796                 ret[0] = 0;
797                 clean_dcache_entry(ret);
798                 pgtable_cache_size--;
799         }
800 +       preempt_enable();
801         return (pte_t *)ret;
802  }
803  
804  static inline void free_pte_fast(pte_t *pte)
805  {
806 +       preempt_disable();
807         __pte_next(pte) = (unsigned long) pte_quicklist;
808         pte_quicklist = (unsigned long *) pte;
809         pgtable_cache_size++;
810 +       preempt_enable();
811  }
812  
813  #else  /* CONFIG_NO_PGT_CACHE */
814 diff -urN linux-2.4.18-rc1-ingo-K3/include/asm-arm/smplock.h linux/include/asm-arm/smplock.h
815 --- linux-2.4.18-rc1-ingo-K3/include/asm-arm/smplock.h  Wed Feb 13 16:24:14 2002
816 +++ linux/include/asm-arm/smplock.h     Wed Feb 13 16:23:45 2002
817 @@ -3,12 +3,17 @@
818   *
819   * Default SMP lock implementation
820   */
821 +#include <linux/config.h>
822  #include <linux/interrupt.h>
823  #include <linux/spinlock.h>
824  
825  extern spinlock_t kernel_flag;
826  
827 +#ifdef CONFIG_PREEMPT
828 +#define kernel_locked()                preempt_get_count()
829 +#else
830  #define kernel_locked()                spin_is_locked(&kernel_flag)
831 +#endif
832  
833  /*
834   * Release global kernel lock and global interrupt lock
835 @@ -40,8 +45,14 @@
836   */
837  static inline void lock_kernel(void)
838  {
839 +#ifdef CONFIG_PREEMPT
840 +       if (current->lock_depth == -1)
841 +               spin_lock(&kernel_flag);
842 +       ++current->lock_depth;
843 +#else
844         if (!++current->lock_depth)
845                 spin_lock(&kernel_flag);
846 +#endif
847  }
848  
849  static inline void unlock_kernel(void)
850 diff -urN linux-2.4.18-rc1-ingo-K3/include/asm-arm/softirq.h linux/include/asm-arm/softirq.h
851 --- linux-2.4.18-rc1-ingo-K3/include/asm-arm/softirq.h  Wed Feb 13 16:24:14 2002
852 +++ linux/include/asm-arm/softirq.h     Wed Feb 13 16:23:45 2002
853 @@ -5,20 +5,22 @@
854  #include <asm/hardirq.h>
855  
856  #define __cpu_bh_enable(cpu) \
857 -               do { barrier(); local_bh_count(cpu)--; } while (0)
858 +               do { barrier(); local_bh_count(cpu)--; preempt_enable(); } while (0)
859  #define cpu_bh_disable(cpu) \
860 -               do { local_bh_count(cpu)++; barrier(); } while (0)
861 +               do { preempt_disable(); local_bh_count(cpu)++; barrier(); } while (0)
862  
863  #define local_bh_disable()     cpu_bh_disable(smp_processor_id())
864  #define __local_bh_enable()    __cpu_bh_enable(smp_processor_id())
865  
866  #define in_softirq()           (local_bh_count(smp_processor_id()) != 0)
867  
868 -#define local_bh_enable()                                              \
869 +#define _local_bh_enable()                                             \
870  do {                                                                   \
871         unsigned int *ptr = &local_bh_count(smp_processor_id());        \
872         if (!--*ptr && ptr[-2])                                         \
873                 __asm__("bl%? __do_softirq": : : "lr");/* out of line */\
874  } while (0)
875  
876 +#define local_bh_enable() do { _local_bh_enable(); preempt_enable(); } while (0)
877 +
878  #endif /* __ASM_SOFTIRQ_H */
879 diff -urN linux-2.4.18-rc1-ingo-K3/include/asm-i386/hardirq.h linux/include/asm-i386/hardirq.h
880 --- linux-2.4.18-rc1-ingo-K3/include/asm-i386/hardirq.h Wed Feb 13 16:24:09 2002
881 +++ linux/include/asm-i386/hardirq.h    Wed Feb 13 16:23:45 2002
882 @@ -36,6 +36,8 @@
883  
884  #define synchronize_irq()      barrier()
885  
886 +#define release_irqlock(cpu)   do { } while (0)
887 +
888  #else
889  
890  #include <asm/atomic.h>
891 diff -urN linux-2.4.18-rc1-ingo-K3/include/asm-i386/highmem.h linux/include/asm-i386/highmem.h
892 --- linux-2.4.18-rc1-ingo-K3/include/asm-i386/highmem.h Wed Feb 13 16:24:09 2002
893 +++ linux/include/asm-i386/highmem.h    Wed Feb 13 16:23:45 2002
894 @@ -88,6 +88,7 @@
895         enum fixed_addresses idx;
896         unsigned long vaddr;
897  
898 +       preempt_disable();
899         if (page < highmem_start_page)
900                 return page_address(page);
901  
902 @@ -109,8 +110,10 @@
903         unsigned long vaddr = (unsigned long) kvaddr;
904         enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
905  
906 -       if (vaddr < FIXADDR_START) // FIXME
907 +       if (vaddr < FIXADDR_START) { // FIXME
908 +               preempt_enable();
909                 return;
910 +       }
911  
912         if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
913                 BUG();
914 @@ -122,6 +125,8 @@
915         pte_clear(kmap_pte-idx);
916         __flush_tlb_one(vaddr);
917  #endif
918 +
919 +       preempt_enable();
920  }
921  
922  #endif /* __KERNEL__ */
923 diff -urN linux-2.4.18-rc1-ingo-K3/include/asm-i386/hw_irq.h linux/include/asm-i386/hw_irq.h
924 --- linux-2.4.18-rc1-ingo-K3/include/asm-i386/hw_irq.h  Wed Feb 13 16:25:18 2002
925 +++ linux/include/asm-i386/hw_irq.h     Wed Feb 13 16:23:45 2002
926 @@ -96,6 +96,18 @@
927  #define __STR(x) #x
928  #define STR(x) __STR(x)
929  
930 +#define GET_CURRENT \
931 +       "movl %esp, %ebx\n\t" \
932 +       "andl $-8192, %ebx\n\t"
933 +
934 +#ifdef CONFIG_PREEMPT
935 +#define BUMP_LOCK_COUNT \
936 +       GET_CURRENT \
937 +       "incl 4(%ebx)\n\t"
938 +#else
939 +#define BUMP_LOCK_COUNT
940 +#endif
941 +
942  #define SAVE_ALL \
943         "cld\n\t" \
944         "pushl %es\n\t" \
945 @@ -109,15 +121,12 @@
946         "pushl %ebx\n\t" \
947         "movl $" STR(__KERNEL_DS) ",%edx\n\t" \
948         "movl %edx,%ds\n\t" \
949 -       "movl %edx,%es\n\t"
950 +       "movl %edx,%es\n\t" \
951 +       BUMP_LOCK_COUNT
952  
953  #define IRQ_NAME2(nr) nr##_interrupt(void)
954  #define IRQ_NAME(nr) IRQ_NAME2(IRQ##nr)
955  
956 -#define GET_CURRENT \
957 -       "movl %esp, %ebx\n\t" \
958 -       "andl $-8192, %ebx\n\t"
959 -
960  /*
961   *     SMP has a few special interrupts for IPI messages
962   */
963 diff -urN linux-2.4.18-rc1-ingo-K3/include/asm-i386/i387.h linux/include/asm-i386/i387.h
964 --- linux-2.4.18-rc1-ingo-K3/include/asm-i386/i387.h    Wed Feb 13 16:24:09 2002
965 +++ linux/include/asm-i386/i387.h       Wed Feb 13 16:23:45 2002
966 @@ -12,6 +12,7 @@
967  #define __ASM_I386_I387_H
968  
969  #include <linux/sched.h>
970 +#include <linux/spinlock.h>
971  #include <asm/processor.h>
972  #include <asm/sigcontext.h>
973  #include <asm/user.h>
974 @@ -24,7 +25,7 @@
975  extern void restore_fpu( struct task_struct *tsk );
976  
977  extern void kernel_fpu_begin(void);
978 -#define kernel_fpu_end() stts()
979 +#define kernel_fpu_end() do { stts(); preempt_enable(); } while(0)
980  
981  
982  #define unlazy_fpu( tsk ) do { \
983 diff -urN linux-2.4.18-rc1-ingo-K3/include/asm-i386/pgalloc.h linux/include/asm-i386/pgalloc.h
984 --- linux-2.4.18-rc1-ingo-K3/include/asm-i386/pgalloc.h Wed Feb 13 16:25:18 2002
985 +++ linux/include/asm-i386/pgalloc.h    Wed Feb 13 16:23:45 2002
986 @@ -75,20 +75,26 @@
987  {
988         unsigned long *ret;
989  
990 +       preempt_disable();
991         if ((ret = pgd_quicklist) != NULL) {
992                 pgd_quicklist = (unsigned long *)(*ret);
993                 ret[0] = 0;
994                 pgtable_cache_size--;
995 -       } else
996 +               preempt_enable();
997 +       } else {
998 +               preempt_enable();
999                 ret = (unsigned long *)get_pgd_slow();
1000 +       }
1001         return (pgd_t *)ret;
1002  }
1003  
1004  static inline void free_pgd_fast(pgd_t *pgd)
1005  {
1006 +       preempt_disable();
1007         *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
1008         pgd_quicklist = (unsigned long *) pgd;
1009         pgtable_cache_size++;
1010 +       preempt_enable();
1011  }
1012  
1013  static inline void free_pgd_slow(pgd_t *pgd)
1014 @@ -119,19 +125,23 @@
1015  {
1016         unsigned long *ret;
1017  
1018 +       preempt_disable();
1019         if ((ret = (unsigned long *)pte_quicklist) != NULL) {
1020                 pte_quicklist = (unsigned long *)(*ret);
1021                 ret[0] = ret[1];
1022                 pgtable_cache_size--;
1023         }
1024 +       preempt_enable();
1025         return (pte_t *)ret;
1026  }
1027  
1028  static inline void pte_free_fast(pte_t *pte)
1029  {
1030 +       preempt_disable();
1031         *(unsigned long *)pte = (unsigned long) pte_quicklist;
1032         pte_quicklist = (unsigned long *) pte;
1033         pgtable_cache_size++;
1034 +       preempt_enable();
1035  }
1036  
1037  static __inline__ void pte_free_slow(pte_t *pte)
1038 diff -urN linux-2.4.18-rc1-ingo-K3/include/asm-i386/smplock.h linux/include/asm-i386/smplock.h
1039 --- linux-2.4.18-rc1-ingo-K3/include/asm-i386/smplock.h Wed Feb 13 16:24:09 2002
1040 +++ linux/include/asm-i386/smplock.h    Wed Feb 13 16:23:45 2002
1041 @@ -10,7 +10,15 @@
1042  
1043  extern spinlock_t kernel_flag;
1044  
1045 +#ifdef CONFIG_SMP
1046  #define kernel_locked()                spin_is_locked(&kernel_flag)
1047 +#else
1048 +#ifdef CONFIG_PREEMPT
1049 +#define kernel_locked()                preempt_get_count()
1050 +#else
1051 +#define kernel_locked()                1
1052 +#endif
1053 +#endif
1054  
1055  /*
1056   * Release global kernel lock and global interrupt lock
1057 @@ -42,6 +50,11 @@
1058   */
1059  static __inline__ void lock_kernel(void)
1060  {
1061 +#ifdef CONFIG_PREEMPT
1062 +       if (current->lock_depth == -1)
1063 +               spin_lock(&kernel_flag);
1064 +       ++current->lock_depth;
1065 +#else
1066  #if 1
1067         if (!++current->lock_depth)
1068                 spin_lock(&kernel_flag);
1069 @@ -54,6 +67,7 @@
1070                 :"=m" (__dummy_lock(&kernel_flag)),
1071                  "=m" (current->lock_depth));
1072  #endif
1073 +#endif
1074  }
1075  
1076  static __inline__ void unlock_kernel(void)
1077 diff -urN linux-2.4.18-rc1-ingo-K3/include/asm-i386/softirq.h linux/include/asm-i386/softirq.h
1078 --- linux-2.4.18-rc1-ingo-K3/include/asm-i386/softirq.h Wed Feb 13 16:24:09 2002
1079 +++ linux/include/asm-i386/softirq.h    Wed Feb 13 16:23:45 2002
1080 @@ -6,9 +6,9 @@
1081  #include <linux/stringify.h>
1082  
1083  #define __cpu_bh_enable(cpu) \
1084 -               do { barrier(); local_bh_count(cpu)--; } while (0)
1085 +               do { barrier(); local_bh_count(cpu)--; preempt_enable(); } while (0)
1086  #define cpu_bh_disable(cpu) \
1087 -               do { local_bh_count(cpu)++; barrier(); } while (0)
1088 +               do { preempt_disable(); local_bh_count(cpu)++; barrier(); } while (0)
1089  
1090  #define local_bh_disable()     cpu_bh_disable(smp_processor_id())
1091  #define __local_bh_enable()    __cpu_bh_enable(smp_processor_id())
1092 @@ -23,7 +23,7 @@
1093   * If you change the offsets in irq_stat then you have to
1094   * update this code as well.
1095   */
1096 -#define local_bh_enable()                                              \
1097 +#define _local_bh_enable()                                             \
1098  do {                                                                   \
1099         unsigned int *ptr = &local_bh_count(smp_processor_id());        \
1100                                                                         \
1101 @@ -49,4 +49,6 @@
1102                 /* no registers clobbered */ );                         \
1103  } while (0)
1104  
1105 +#define local_bh_enable() do { _local_bh_enable(); preempt_enable(); } while (0)
1106 +
1107  #endif /* __ASM_SOFTIRQ_H */
1108 diff -urN linux-2.4.18-rc1-ingo-K3/include/asm-i386/spinlock.h linux/include/asm-i386/spinlock.h
1109 --- linux-2.4.18-rc1-ingo-K3/include/asm-i386/spinlock.h        Wed Feb 13 16:24:09 2002
1110 +++ linux/include/asm-i386/spinlock.h   Wed Feb 13 16:23:45 2002
1111 @@ -81,7 +81,7 @@
1112                 :"=m" (lock->lock) : : "memory"
1113  
1114  
1115 -static inline void spin_unlock(spinlock_t *lock)
1116 +static inline void _raw_spin_unlock(spinlock_t *lock)
1117  {
1118  #if SPINLOCK_DEBUG
1119         if (lock->magic != SPINLOCK_MAGIC)
1120 @@ -101,7 +101,7 @@
1121                 :"=q" (oldval), "=m" (lock->lock) \
1122                 :"0" (oldval) : "memory"
1123  
1124 -static inline void spin_unlock(spinlock_t *lock)
1125 +static inline void _raw_spin_unlock(spinlock_t *lock)
1126  {
1127         char oldval = 1;
1128  #if SPINLOCK_DEBUG
1129 @@ -117,7 +117,7 @@
1130  
1131  #endif
1132  
1133 -static inline int spin_trylock(spinlock_t *lock)
1134 +static inline int _raw_spin_trylock(spinlock_t *lock)
1135  {
1136         char oldval;
1137         __asm__ __volatile__(
1138 @@ -127,7 +127,7 @@
1139         return oldval > 0;
1140  }
1141  
1142 -static inline void spin_lock(spinlock_t *lock)
1143 +static inline void _raw_spin_lock(spinlock_t *lock)
1144  {
1145  #if SPINLOCK_DEBUG
1146         __label__ here;
1147 @@ -183,7 +183,7 @@
1148   */
1149  /* the spinlock helpers are in arch/i386/kernel/semaphore.c */
1150  
1151 -static inline void read_lock(rwlock_t *rw)
1152 +static inline void _raw_read_lock(rwlock_t *rw)
1153  {
1154  #if SPINLOCK_DEBUG
1155         if (rw->magic != RWLOCK_MAGIC)
1156 @@ -192,7 +192,7 @@
1157         __build_read_lock(rw, "__read_lock_failed");
1158  }
1159  
1160 -static inline void write_lock(rwlock_t *rw)
1161 +static inline void _raw_write_lock(rwlock_t *rw)
1162  {
1163  #if SPINLOCK_DEBUG
1164         if (rw->magic != RWLOCK_MAGIC)
1165 @@ -201,10 +201,10 @@
1166         __build_write_lock(rw, "__write_lock_failed");
1167  }
1168  
1169 -#define read_unlock(rw)                asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
1170 -#define write_unlock(rw)       asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
1171 +#define _raw_read_unlock(rw)           asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
1172 +#define _raw_write_unlock(rw)  asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
1173  
1174 -static inline int write_trylock(rwlock_t *lock)
1175 +static inline int _raw_write_trylock(rwlock_t *lock)
1176  {
1177         atomic_t *count = (atomic_t *)lock;
1178         if (atomic_sub_and_test(RW_LOCK_BIAS, count))
1179 diff -urN linux-2.4.18-rc1-ingo-K3/include/asm-sh/hardirq.h linux/include/asm-sh/hardirq.h
1180 --- linux-2.4.18-rc1-ingo-K3/include/asm-sh/hardirq.h   Wed Feb 13 16:24:15 2002
1181 +++ linux/include/asm-sh/hardirq.h      Wed Feb 13 16:23:45 2002
1182 @@ -34,6 +34,8 @@
1183  
1184  #define synchronize_irq()      barrier()
1185  
1186 +#define release_irqlock(cpu)   do { } while (0)
1187 +
1188  #else
1189  
1190  #error Super-H SMP is not available
1191 diff -urN linux-2.4.18-rc1-ingo-K3/include/asm-sh/smplock.h linux/include/asm-sh/smplock.h
1192 --- linux-2.4.18-rc1-ingo-K3/include/asm-sh/smplock.h   Wed Feb 13 16:24:15 2002
1193 +++ linux/include/asm-sh/smplock.h      Wed Feb 13 16:23:45 2002
1194 @@ -9,15 +9,88 @@
1195  
1196  #include <linux/config.h>
1197  
1198 -#ifndef CONFIG_SMP
1199 -
1200 +#if !defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT)
1201 +/*
1202 + * Should never happen, since linux/smp_lock.h catches this case;
1203 + * but in case this file is included directly with neither SMP nor
1204 + * PREEMPT configuration, provide same dummys as linux/smp_lock.h
1205 + */
1206  #define lock_kernel()                          do { } while(0)
1207  #define unlock_kernel()                                do { } while(0)
1208 -#define release_kernel_lock(task, cpu, depth)  ((depth) = 1)
1209 -#define reacquire_kernel_lock(task, cpu, depth)        do { } while(0)
1210 +#define release_kernel_lock(task, cpu)         do { } while(0)
1211 +#define reacquire_kernel_lock(task)            do { } while(0)
1212 +#define kernel_locked()                1
1213 +
1214 +#else /* CONFIG_SMP || CONFIG_PREEMPT */
1215 +
1216 +#if CONFIG_SMP
1217 +#error "We do not support SMP on SH yet"
1218 +#endif
1219 +/*
1220 + * Default SMP lock implementation (i.e. the i386 version)
1221 + */
1222 +
1223 +#include <linux/interrupt.h>
1224 +#include <linux/spinlock.h>
1225 +
1226 +extern spinlock_t kernel_flag;
1227 +#define lock_bkl() spin_lock(&kernel_flag)
1228 +#define unlock_bkl() spin_unlock(&kernel_flag)
1229  
1230 +#ifdef CONFIG_SMP
1231 +#define kernel_locked()                spin_is_locked(&kernel_flag)
1232 +#elif  CONFIG_PREEMPT
1233 +#define kernel_locked()                preempt_get_count()
1234 +#else  /* neither */
1235 +#define kernel_locked()                1
1236 +#endif
1237 +
1238 +/*
1239 + * Release global kernel lock and global interrupt lock
1240 + */
1241 +#define release_kernel_lock(task, cpu) \
1242 +do { \
1243 +       if (task->lock_depth >= 0) \
1244 +               spin_unlock(&kernel_flag); \
1245 +       release_irqlock(cpu); \
1246 +       __sti(); \
1247 +} while (0)
1248 +
1249 +/*
1250 + * Re-acquire the kernel lock
1251 + */
1252 +#define reacquire_kernel_lock(task) \
1253 +do { \
1254 +       if (task->lock_depth >= 0) \
1255 +               spin_lock(&kernel_flag); \
1256 +} while (0)
1257 +
1258 +/*
1259 + * Getting the big kernel lock.
1260 + *
1261 + * This cannot happen asynchronously,
1262 + * so we only need to worry about other
1263 + * CPUs.
1264 + */
1265 +static __inline__ void lock_kernel(void)
1266 +{
1267 +#ifdef CONFIG_PREEMPT
1268 +       if (current->lock_depth == -1)
1269 +               spin_lock(&kernel_flag);
1270 +       ++current->lock_depth;
1271  #else
1272 -#error "We do not support SMP on SH"
1273 -#endif /* CONFIG_SMP */
1274 +       if (!++current->lock_depth)
1275 +               spin_lock(&kernel_flag);
1276 +#endif
1277 +}
1278 +
1279 +static __inline__ void unlock_kernel(void)
1280 +{
1281 +       if (current->lock_depth < 0)
1282 +               BUG();
1283 +       if (--current->lock_depth < 0)
1284 +               spin_unlock(&kernel_flag);
1285 +}
1286 +#endif /* CONFIG_SMP || CONFIG_PREEMPT */
1287  
1288  #endif /* __ASM_SH_SMPLOCK_H */
1289 diff -urN linux-2.4.18-rc1-ingo-K3/include/asm-sh/softirq.h linux/include/asm-sh/softirq.h
1290 --- linux-2.4.18-rc1-ingo-K3/include/asm-sh/softirq.h   Wed Feb 13 16:24:15 2002
1291 +++ linux/include/asm-sh/softirq.h      Wed Feb 13 16:23:45 2002
1292 @@ -6,6 +6,7 @@
1293  
1294  #define local_bh_disable()                     \
1295  do {                                           \
1296 +       preempt_disable();                      \
1297         local_bh_count(smp_processor_id())++;   \
1298         barrier();                              \
1299  } while (0)
1300 @@ -14,6 +15,7 @@
1301  do {                                           \
1302         barrier();                              \
1303         local_bh_count(smp_processor_id())--;   \
1304 +       preempt_enable();                       \
1305  } while (0)
1306  
1307  #define local_bh_enable()                              \
1308 @@ -23,6 +25,7 @@
1309             && softirq_pending(smp_processor_id())) {   \
1310                 do_softirq();                           \
1311         }                                               \
1312 +       preempt_enable();                               \
1313  } while (0)
1314  
1315  #define in_softirq() (local_bh_count(smp_processor_id()) != 0)
1316 diff -urN linux-2.4.18-rc1-ingo-K3/include/linux/brlock.h linux/include/linux/brlock.h
1317 --- linux-2.4.18-rc1-ingo-K3/include/linux/brlock.h     Wed Feb 13 16:24:09 2002
1318 +++ linux/include/linux/brlock.h        Wed Feb 13 16:23:45 2002
1319 @@ -171,11 +171,11 @@
1320  }
1321  
1322  #else
1323 -# define br_read_lock(idx)     ((void)(idx))
1324 -# define br_read_unlock(idx)   ((void)(idx))
1325 -# define br_write_lock(idx)    ((void)(idx))
1326 -# define br_write_unlock(idx)  ((void)(idx))
1327 -#endif
1328 +# define br_read_lock(idx)     ({ (void)(idx); preempt_disable(); })
1329 +# define br_read_unlock(idx)   ({ (void)(idx); preempt_enable(); })
1330 +# define br_write_lock(idx)    ({ (void)(idx); preempt_disable(); })
1331 +# define br_write_unlock(idx)  ({ (void)(idx); preempt_enable(); })
1332 +#endif /* CONFIG_SMP */
1333  
1334  /*
1335   * Now enumerate all of the possible sw/hw IRQ protected
1336 diff -urN linux-2.4.18-rc1-ingo-K3/include/linux/dcache.h linux/include/linux/dcache.h
1337 --- linux-2.4.18-rc1-ingo-K3/include/linux/dcache.h     Wed Feb 13 16:24:09 2002
1338 +++ linux/include/linux/dcache.h        Wed Feb 13 16:23:45 2002
1339 @@ -126,31 +126,6 @@
1340  
1341  extern spinlock_t dcache_lock;
1342  
1343 -/**
1344 - * d_drop - drop a dentry
1345 - * @dentry: dentry to drop
1346 - *
1347 - * d_drop() unhashes the entry from the parent
1348 - * dentry hashes, so that it won't be found through
1349 - * a VFS lookup any more. Note that this is different
1350 - * from deleting the dentry - d_delete will try to
1351 - * mark the dentry negative if possible, giving a
1352 - * successful _negative_ lookup, while d_drop will
1353 - * just make the cache lookup fail.
1354 - *
1355 - * d_drop() is used mainly for stuff that wants
1356 - * to invalidate a dentry for some reason (NFS
1357 - * timeouts or autofs deletes).
1358 - */
1359 -
1360 -static __inline__ void d_drop(struct dentry * dentry)
1361 -{
1362 -       spin_lock(&dcache_lock);
1363 -       list_del(&dentry->d_hash);
1364 -       INIT_LIST_HEAD(&dentry->d_hash);
1365 -       spin_unlock(&dcache_lock);
1366 -}
1367 -
1368  static __inline__ int dname_external(struct dentry *d)
1369  {
1370         return d->d_name.name != d->d_iname; 
1371 @@ -275,3 +250,34 @@
1372  #endif /* __KERNEL__ */
1373  
1374  #endif /* __LINUX_DCACHE_H */
1375 +
1376 +#if !defined(__LINUX_DCACHE_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
1377 +#define __LINUX_DCACHE_H_INLINES
1378 +
1379 +#ifdef __KERNEL__
1380 +/**
1381 + * d_drop - drop a dentry
1382 + * @dentry: dentry to drop
1383 + *
1384 + * d_drop() unhashes the entry from the parent
1385 + * dentry hashes, so that it won't be found through
1386 + * a VFS lookup any more. Note that this is different
1387 + * from deleting the dentry - d_delete will try to
1388 + * mark the dentry negative if possible, giving a
1389 + * successful _negative_ lookup, while d_drop will
1390 + * just make the cache lookup fail.
1391 + *
1392 + * d_drop() is used mainly for stuff that wants
1393 + * to invalidate a dentry for some reason (NFS
1394 + * timeouts or autofs deletes).
1395 + */
1396 +
1397 +static __inline__ void d_drop(struct dentry * dentry)
1398 +{
1399 +       spin_lock(&dcache_lock);
1400 +       list_del(&dentry->d_hash);
1401 +       INIT_LIST_HEAD(&dentry->d_hash);
1402 +       spin_unlock(&dcache_lock);
1403 +}
1404 +#endif
1405 +#endif
1406 diff -urN linux-2.4.18-rc1-ingo-K3/include/linux/fs_struct.h linux/include/linux/fs_struct.h
1407 --- linux-2.4.18-rc1-ingo-K3/include/linux/fs_struct.h  Wed Feb 13 16:24:09 2002
1408 +++ linux/include/linux/fs_struct.h     Wed Feb 13 16:23:45 2002
1409 @@ -20,6 +20,15 @@
1410  extern void exit_fs(struct task_struct *);
1411  extern void set_fs_altroot(void);
1412  
1413 +struct fs_struct *copy_fs_struct(struct fs_struct *old);
1414 +void put_fs_struct(struct fs_struct *fs);
1415 +
1416 +#endif
1417 +#endif
1418 +
1419 +#if !defined(_LINUX_FS_STRUCT_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
1420 +#define _LINUX_FS_STRUCT_H_INLINES
1421 +#ifdef __KERNEL__
1422  /*
1423   * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
1424   * It can block. Requires the big lock held.
1425 @@ -65,9 +74,5 @@
1426                 mntput(old_pwdmnt);
1427         }
1428  }
1429 -
1430 -struct fs_struct *copy_fs_struct(struct fs_struct *old);
1431 -void put_fs_struct(struct fs_struct *fs);
1432 -
1433  #endif
1434  #endif
1435 diff -urN linux-2.4.18-rc1-ingo-K3/include/linux/sched.h linux/include/linux/sched.h
1436 --- linux-2.4.18-rc1-ingo-K3/include/linux/sched.h      Wed Feb 13 16:25:18 2002
1437 +++ linux/include/linux/sched.h Wed Feb 13 16:23:45 2002
1438 @@ -91,6 +91,7 @@
1439  #define TASK_UNINTERRUPTIBLE   2
1440  #define TASK_ZOMBIE            4
1441  #define TASK_STOPPED           8
1442 +#define PREEMPT_ACTIVE         0x4000000
1443  
1444  #define __set_task_state(tsk, state_value)             \
1445         do { (tsk)->state = (state_value); } while (0)
1446 @@ -156,6 +157,9 @@
1447  #define        MAX_SCHEDULE_TIMEOUT    LONG_MAX
1448  extern signed long FASTCALL(schedule_timeout(signed long timeout));
1449  asmlinkage void schedule(void);
1450 +#ifdef CONFIG_PREEMPT
1451 +asmlinkage void preempt_schedule(void);
1452 +#endif
1453  
1454  extern int schedule_task(struct tq_struct *task);
1455  extern void flush_scheduled_tasks(void);
1456 @@ -288,7 +292,7 @@
1457          * offsets of these are hardcoded elsewhere - touch with care
1458          */
1459         volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
1460 -       unsigned long flags;    /* per process flags, defined below */
1461 +       int preempt_count;      /* 0 => preemptable, <0 => BUG */
1462         int sigpending;
1463         mm_segment_t addr_limit;        /* thread address space:
1464                                                 0-0xBFFFFFFF for user-thead
1465 @@ -321,6 +325,7 @@
1466         struct list_head local_pages;
1467  
1468         unsigned int allocation_order, nr_local_pages;
1469 +       unsigned long flags;
1470  
1471  /* task state */
1472         struct linux_binfmt *binfmt;
1473 @@ -907,6 +912,11 @@
1474         return res;
1475  }
1476  
1477 +#define _TASK_STRUCT_DEFINED
1478 +#include <linux/dcache.h>
1479 +#include <linux/tqueue.h>
1480 +#include <linux/fs_struct.h>
1481 +
1482  #endif /* __KERNEL__ */
1483  
1484  #endif
1485 diff -urN linux-2.4.18-rc1-ingo-K3/include/linux/smp.h linux/include/linux/smp.h
1486 --- linux-2.4.18-rc1-ingo-K3/include/linux/smp.h        Wed Feb 13 16:25:18 2002
1487 +++ linux/include/linux/smp.h   Wed Feb 13 16:23:45 2002
1488 @@ -81,7 +81,9 @@
1489  #define smp_processor_id()                     0
1490  #define hard_smp_processor_id()                        0
1491  #define smp_threads_ready                      1
1492 +#ifndef CONFIG_PREEMPT
1493  #define kernel_lock()
1494 +#endif
1495  #define cpu_logical_map(cpu)                   0
1496  #define cpu_number_map(cpu)                    0
1497  #define smp_call_function(func,info,retry,wait)        ({ 0; })
1498 diff -urN linux-2.4.18-rc1-ingo-K3/include/linux/smp_lock.h linux/include/linux/smp_lock.h
1499 --- linux-2.4.18-rc1-ingo-K3/include/linux/smp_lock.h   Wed Feb 13 16:24:09 2002
1500 +++ linux/include/linux/smp_lock.h      Wed Feb 13 16:23:45 2002
1501 @@ -3,7 +3,7 @@
1502  
1503  #include <linux/config.h>
1504  
1505 -#ifndef CONFIG_SMP
1506 +#if !defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT)
1507  
1508  #define lock_kernel()                          do { } while(0)
1509  #define unlock_kernel()                                do { } while(0)
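
Together the smp.h and smp_lock.h hunks make the big kernel lock real on
uniprocessor preemptible kernels: the no-op stubs now survive only when
both CONFIG_SMP and CONFIG_PREEMPT are off, because a preempted BKL
holder could otherwise be raced by another task entering a lock_kernel()
region.  For orientation, the BKL is recursive per task via
current->lock_depth, which starts at -1 when the lock is not held; a
sketch of the usual shape, not the literal code from this patch:

        /* illustrative; kernel_flag is the global BKL spinlock */
        extern spinlock_t kernel_flag;

        #define lock_kernel()                            \
        do {                                             \
                if (++current->lock_depth == 0)          \
                        spin_lock(&kernel_flag);         \
        } while (0)

        #define unlock_kernel()                          \
        do {                                             \
                if (--current->lock_depth < 0)           \
                        spin_unlock(&kernel_flag);       \
        } while (0)
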
1510 diff -urN linux-2.4.18-rc1-ingo-K3/include/linux/spinlock.h linux/include/linux/spinlock.h
1511 --- linux-2.4.18-rc1-ingo-K3/include/linux/spinlock.h   Wed Feb 13 16:24:09 2002
1512 +++ linux/include/linux/spinlock.h      Wed Feb 13 16:23:45 2002
1513 @@ -2,6 +2,7 @@
1514  #define __LINUX_SPINLOCK_H
1515  
1516  #include <linux/config.h>
1517 +#include <linux/compiler.h>
1518  
1519  /*
1520   * These are the generic versions of the spinlocks and read-write
1521 @@ -45,8 +46,10 @@
1522  
1523  #if (DEBUG_SPINLOCKS < 1)
1524  
1525 +#ifndef CONFIG_PREEMPT
1526  #define atomic_dec_and_lock(atomic,lock) atomic_dec_and_test(atomic)
1527  #define ATOMIC_DEC_AND_LOCK
1528 +#endif
1529  
1530  /*
1531   * Your basic spinlocks, allowing only a single CPU anywhere
1532 @@ -62,11 +65,11 @@
1533  #endif
1534  
1535  #define spin_lock_init(lock)   do { } while(0)
1536 -#define spin_lock(lock)                (void)(lock) /* Not "unused variable". */
1537 +#define _raw_spin_lock(lock)   (void)(lock) /* Not "unused variable". */
1538  #define spin_is_locked(lock)   (0)
1539 -#define spin_trylock(lock)     ({1; })
1540 +#define _raw_spin_trylock(lock)        ({1; })
1541  #define spin_unlock_wait(lock) do { } while(0)
1542 -#define spin_unlock(lock)      do { } while(0)
1543 +#define _raw_spin_unlock(lock) do { } while(0)
1544  
1545  #elif (DEBUG_SPINLOCKS < 2)
1546  
1547 @@ -125,13 +128,76 @@
1548  #endif
1549  
1550  #define rwlock_init(lock)      do { } while(0)
1551 -#define read_lock(lock)                (void)(lock) /* Not "unused variable". */
1552 -#define read_unlock(lock)      do { } while(0)
1553 -#define write_lock(lock)       (void)(lock) /* Not "unused variable". */
1554 -#define write_unlock(lock)     do { } while(0)
1555 +#define _raw_read_lock(lock)   (void)(lock) /* Not "unused variable". */
1556 +#define _raw_read_unlock(lock) do { } while(0)
1557 +#define _raw_write_lock(lock)  (void)(lock) /* Not "unused variable". */
1558 +#define _raw_write_unlock(lock)        do { } while(0)
1559  
1560  #endif /* !SMP */
1561  
1562 +#ifdef CONFIG_PREEMPT
1563 +
1564 +#define preempt_get_count() (current->preempt_count)
1565 +
1566 +#define preempt_disable() \
1567 +do { \
1568 +       ++current->preempt_count; \
1569 +       barrier(); \
1570 +} while (0)
1571 +
1572 +#define preempt_enable_no_resched() \
1573 +do { \
1574 +       --current->preempt_count; \
1575 +       barrier(); \
1576 +} while (0)
1577 +
1578 +#define preempt_enable() \
1579 +do { \
1580 +       --current->preempt_count; \
1581 +       barrier(); \
1582 +       if (unlikely(current->preempt_count < current->need_resched)) \
1583 +               preempt_schedule(); \
1584 +} while (0)
1585 +
1586 +#define spin_lock(lock)        \
1587 +do { \
1588 +       preempt_disable(); \
1589 +       _raw_spin_lock(lock); \
1590 +} while(0)
1591 +
1592 +#define spin_trylock(lock)     ({preempt_disable(); _raw_spin_trylock(lock) ? \
1593 +                               1 : ({preempt_enable(); 0;});})
1594 +#define spin_unlock(lock) \
1595 +do { \
1596 +       _raw_spin_unlock(lock); \
1597 +       preempt_enable(); \
1598 +} while (0)
1599 +
1600 +#define read_lock(lock)                ({preempt_disable(); _raw_read_lock(lock);})
1601 +#define read_unlock(lock)      ({_raw_read_unlock(lock); preempt_enable();})
1602 +#define write_lock(lock)       ({preempt_disable(); _raw_write_lock(lock);})
1603 +#define write_unlock(lock)     ({_raw_write_unlock(lock); preempt_enable();})
1604 +#define write_trylock(lock)    ({preempt_disable();_raw_write_trylock(lock) ? \
1605 +                               1 : ({preempt_enable(); 0;});})
1606 +
1607 +#else
1608 +
1609 +#define preempt_get_count()    (0)
1610 +#define preempt_disable()      do { } while (0)
1611 +#define preempt_enable_no_resched()    do { } while (0)
1612 +#define preempt_enable()       do { } while (0)
1613 +
1614 +#define spin_lock(lock)                _raw_spin_lock(lock)
1615 +#define spin_trylock(lock)     _raw_spin_trylock(lock)
1616 +#define spin_unlock(lock)      _raw_spin_unlock(lock)
1617 +
1618 +#define read_lock(lock)                _raw_read_lock(lock)
1619 +#define read_unlock(lock)      _raw_read_unlock(lock)
1620 +#define write_lock(lock)       _raw_write_lock(lock)
1621 +#define write_unlock(lock)     _raw_write_unlock(lock)
1622 +#define write_trylock(lock)    _raw_write_trylock(lock)
1623 +#endif
1624 +
1625  /* "lock on reference count zero" */
1626  #ifndef ATOMIC_DEC_AND_LOCK
1627  #include <asm/atomic.h>
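
These wrappers are the heart of the patch: every spinlock acquisition
now implies preempt_disable(), every release implies preempt_enable(),
and only the _raw_ variants touch the actual lock.  Two subtleties: the
unlock path drops the raw lock *before* preempt_enable(), so the lock is
already free if preempt_enable() ends up rescheduling; and the test
"preempt_count < need_resched" folds two checks into one branch, since
both values are non-negative and need_resched is 0 or 1 at this point,
the comparison is true exactly when the counter has returned to 0 with a
reschedule pending.  A user-space model of the counting logic (the
_model names are made up for the demonstration):

        #include <stdio.h>

        static int preempt_count;       /* 0 => preemptible             */
        static int need_resched;        /* set asynchronously, 0 or 1   */

        static void preempt_schedule_model(void)
        {
                printf("schedule() would run here\n");
                need_resched = 0;
        }

        static void preempt_disable_model(void) { ++preempt_count; }

        static void preempt_enable_model(void)
        {
                --preempt_count;
                /* true only when count hit 0 with a resched pending */
                if (preempt_count < need_resched)
                        preempt_schedule_model();
        }

        int main(void)
        {
                preempt_disable_model();
                preempt_disable_model(); /* nested, e.g. nested locks  */
                need_resched = 1;        /* "interrupt" marks the task */
                preempt_enable_model();  /* count 1: no schedule yet   */
                preempt_enable_model();  /* count 0: schedule runs now */
                return 0;
        }
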
1628 diff -urN linux-2.4.18-rc1-ingo-K3/include/linux/tqueue.h linux/include/linux/tqueue.h
1629 --- linux-2.4.18-rc1-ingo-K3/include/linux/tqueue.h     Wed Feb 13 16:24:09 2002
1630 +++ linux/include/linux/tqueue.h        Wed Feb 13 16:23:45 2002
1631 @@ -94,6 +94,22 @@
1632  extern spinlock_t tqueue_lock;
1633  
1634  /*
1635 + * Call all "bottom halves" on a given list.
1636 + */
1637 +
1638 +extern void __run_task_queue(task_queue *list);
1639 +
1640 +static inline void run_task_queue(task_queue *list)
1641 +{
1642 +       if (TQ_ACTIVE(*list))
1643 +               __run_task_queue(list);
1644 +}
1645 +
1646 +#endif /* _LINUX_TQUEUE_H */
1647 +
1648 +#if !defined(_LINUX_TQUEUE_H_INLINES) && defined(_TASK_STRUCT_DEFINED)
1649 +#define _LINUX_TQUEUE_H_INLINES
1650 +/*
1651   * Queue a task on a tq.  Return non-zero if it was successfully
1652   * added.
1653   */
1654 @@ -109,17 +125,4 @@
1655         }
1656         return ret;
1657  }
1658 -
1659 -/*
1660 - * Call all "bottom halfs" on a given list.
1661 - */
1662 -
1663 -extern void __run_task_queue(task_queue *list);
1664 -
1665 -static inline void run_task_queue(task_queue *list)
1666 -{
1667 -       if (TQ_ACTIVE(*list))
1668 -               __run_task_queue(list);
1669 -}
1670 -
1671 -#endif /* _LINUX_TQUEUE_H */
1672 +#endif
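
tqueue.h gets the same split as fs_struct.h: queue_task() stays behind
the _TASK_STRUCT_DEFINED guard because its body takes tqueue_lock (and
so, under CONFIG_PREEMPT, touches current), while run_task_queue(),
which only tests TQ_ACTIVE before calling out of line, can live in the
always-compiled part.  For context, typical 2.4 usage of this API looks
roughly like the following; my_bh, my_task and kick are invented, while
tq_immediate and mark_bh() are the stock 2.4 interfaces:

        /* deferred-work handler */
        static void my_bh(void *data)
        {
                /* runs later, from the immediate queue */
        }

        static struct tq_struct my_task = {
                routine:        my_bh,  /* old GNU designated-init style */
                data:           NULL,
        };

        /* producer side: queue the task and mark the bottom half */
        static void kick(void)
        {
                queue_task(&my_task, &tq_immediate);
                mark_bh(IMMEDIATE_BH);
        }
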
1673 diff -urN linux-2.4.18-rc1-ingo-K3/kernel/exit.c linux/kernel/exit.c
1674 --- linux-2.4.18-rc1-ingo-K3/kernel/exit.c      Wed Feb 13 16:25:18 2002
1675 +++ linux/kernel/exit.c Wed Feb 13 16:23:45 2002
1676 @@ -366,8 +366,8 @@
1677                 /* more a memory barrier than a real lock */
1678                 task_lock(tsk);
1679                 tsk->mm = NULL;
1680 -               task_unlock(tsk);
1681                 enter_lazy_tlb(mm, current, smp_processor_id());
1682 +               task_unlock(tsk);
1683                 mmput(mm);
1684         }
1685  }
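
The exit.c reordering is a preemption fix in miniature: task_lock() is a
spin_lock, so it now implies preempt_disable(), and enter_lazy_tlb()
must run under it.  Otherwise the exiting task could be preempted (and,
on SMP, resume on another CPU) between clearing tsk->mm and entering
lazy TLB mode, applying the lazy-TLB transition to the wrong CPU's
state.  The resulting shape, with the implicit preemption effects
spelled out as comments:

        task_lock(tsk);         /* spin_lock: implies preempt_disable()  */
        tsk->mm = NULL;
        enter_lazy_tlb(mm, current, smp_processor_id());
                                /* per-CPU: the CPU cannot change here   */
        task_unlock(tsk);       /* spin_unlock: implies preempt_enable() */
        mmput(mm);              /* heavier work, done after the lock     */
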
1686 diff -urN linux-2.4.18-rc1-ingo-K3/kernel/fork.c linux/kernel/fork.c
1687 --- linux-2.4.18-rc1-ingo-K3/kernel/fork.c      Wed Feb 13 16:25:18 2002
1688 +++ linux/kernel/fork.c Wed Feb 13 16:23:45 2002
1689 @@ -614,6 +614,13 @@
1690         if (p->binfmt && p->binfmt->module)
1691                 __MOD_INC_USE_COUNT(p->binfmt->module);
1692  
1693 +#ifdef CONFIG_PREEMPT
1694 +       /*
1695 +        * schedule_tail drops this_rq()->lock so compensate with a count
1696 +        * of 1.  Also, we want to start with kernel preemption disabled.
1697 +        */
1698 +       p->preempt_count = 1;
1699 +#endif
1700         p->did_exec = 0;
1701         p->swappable = 0;
1702         p->state = TASK_UNINTERRUPTIBLE;
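
The fork.c comment is terse, so to expand on it: a new child never
executed the preempt_disable() half of the runqueue-lock acquisition
that scheduled it in, yet schedule_tail() will perform the matching
unlock (and thus a preempt_enable()) on its behalf.  Starting the child
at preempt_count = 1 keeps the counter balanced and keeps preemption off
until the child is fully set up.  The lifecycle, sketched:

        /* at fork, under CONFIG_PREEMPT */
        p->preempt_count = 1;   /* pre-charge for the unlock below       */

        /* ...later, the first time the child is switched to... */
        schedule_tail(p);       /* drops this_rq()->lock, i.e. does the  */
                                /* preempt_enable() we pre-charged for:  */
                                /* preempt_count 1 -> 0, now preemptible */
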
1703 diff -urN linux-2.4.18-rc1-ingo-K3/kernel/ksyms.c linux/kernel/ksyms.c
1704 --- linux-2.4.18-rc1-ingo-K3/kernel/ksyms.c     Wed Feb 13 16:25:18 2002
1705 +++ linux/kernel/ksyms.c        Wed Feb 13 16:23:45 2002
1706 @@ -437,6 +437,9 @@
1707  EXPORT_SYMBOL(interruptible_sleep_on);
1708  EXPORT_SYMBOL(interruptible_sleep_on_timeout);
1709  EXPORT_SYMBOL(schedule);
1710 +#ifdef CONFIG_PREEMPT
1711 +EXPORT_SYMBOL(preempt_schedule);
1712 +#endif
1713  EXPORT_SYMBOL(schedule_timeout);
1714  EXPORT_SYMBOL(sys_sched_yield);
1715  EXPORT_SYMBOL(set_user_nice);
1716 diff -urN linux-2.4.18-rc1-ingo-K3/kernel/sched.c linux/kernel/sched.c
1717 --- linux-2.4.18-rc1-ingo-K3/kernel/sched.c     Wed Feb 13 16:25:18 2002
1718 +++ linux/kernel/sched.c        Wed Feb 13 16:23:45 2002
1719 @@ -159,10 +159,12 @@
1720         struct runqueue *__rq;
1721  
1722  repeat_lock_task:
1723 +       preempt_disable();
1724         __rq = task_rq(p);
1725         spin_lock_irqsave(&__rq->lock, *flags);
1726         if (unlikely(__rq != task_rq(p))) {
1727                 spin_unlock_irqrestore(&__rq->lock, *flags);
1728 +               preempt_enable();
1729                 goto repeat_lock_task;
1730         }
1731         return __rq;
1732 @@ -171,6 +173,7 @@
1733  static inline void unlock_task_rq(runqueue_t *rq, unsigned long *flags)
1734  {
1735         spin_unlock_irqrestore(&rq->lock, *flags);
1736 +       preempt_enable();
1737  }
1738  
1739  /*
1740 @@ -251,11 +254,13 @@
1741  {
1742         int need_resched;
1743  
1744 +       preempt_disable();
1745         need_resched = p->need_resched;
1746         wmb();
1747         p->need_resched = 1;
1748         if (!need_resched && (p->cpu != smp_processor_id()))
1749                 smp_send_reschedule(p->cpu);
1750 +       preempt_enable();
1751  }
1752  
1753  #ifdef CONFIG_SMP
1754 @@ -270,6 +275,7 @@
1755         runqueue_t *rq;
1756  
1757  repeat:
1758 +       preempt_disable();
1759         rq = task_rq(p);
1760         while (unlikely(rq->curr == p)) {
1761                 cpu_relax();
1762 @@ -278,9 +284,11 @@
1763         rq = lock_task_rq(p, &flags);
1764         if (unlikely(rq->curr == p)) {
1765                 unlock_task_rq(rq, &flags);
1766 +               preempt_enable();
1767                 goto repeat;
1768         }
1769         unlock_task_rq(rq, &flags);
1770 +       preempt_enable();
1771  }
1772  
1773  /*
1774 @@ -346,7 +354,10 @@
1775  
1776  void wake_up_forked_process(task_t * p)
1777  {
1778 -       runqueue_t *rq = this_rq();
1779 +       runqueue_t *rq;
1780 +       
1781 +       preempt_disable();
1782 +       rq = this_rq();
1783  
1784         p->state = TASK_RUNNING;
1785         if (!rt_task(p)) {
1786 @@ -363,6 +374,7 @@
1787         p->cpu = smp_processor_id();
1788         activate_task(p, rq);
1789         spin_unlock_irq(&rq->lock);
1790 +       preempt_enable();
1791  }
1792  
1793  /*
1794 @@ -743,18 +755,32 @@
1795   */
1796  asmlinkage void schedule(void)
1797  {
1798 -       task_t *prev = current, *next;
1799 -       runqueue_t *rq = this_rq();
1800 +       task_t *prev, *next;
1801 +       runqueue_t *rq;
1802         prio_array_t *array;
1803         list_t *queue;
1804         int idx;
1805  
1806         if (unlikely(in_interrupt()))
1807                 BUG();
1808 +
1809 +       preempt_disable();
1810 +       prev = current;
1811 +       rq = this_rq();
1812 +
1813         release_kernel_lock(prev, smp_processor_id());
1814         prev->sleep_timestamp = jiffies;
1815         spin_lock_irq(&rq->lock);
1816  
1817 +#ifdef CONFIG_PREEMPT
1818 +       /*
1819 +        * if entering from preempt_schedule, i.e. coming off a kernel
1820 +        * preemption, go straight to picking the next task.
1821 +        */
1822 +       if (unlikely(preempt_get_count() & PREEMPT_ACTIVE))
1823 +               goto pick_next_task;
1824 +#endif
1825 +
1826         switch (prev->state) {
1827         case TASK_INTERRUPTIBLE:
1828                 if (unlikely(signal_pending(prev))) {
1829 @@ -766,7 +792,7 @@
1830         case TASK_RUNNING:
1831                 ;
1832         }
1833 -#if CONFIG_SMP
1834 +#if CONFIG_SMP || CONFIG_PREEMPT
1835  pick_next_task:
1836  #endif
1837         if (unlikely(!rq->nr_running)) {
1838 @@ -814,9 +840,25 @@
1839         spin_unlock_irq(&rq->lock);
1840  
1841         reacquire_kernel_lock(current);
1842 +       preempt_enable_no_resched();
1843         return;
1844  }
1845  
1846 +#ifdef CONFIG_PREEMPT
1847 +/*
1848 + * this is the entry point to schedule() from in-kernel preemption.
1849 + */
1850 +asmlinkage void preempt_schedule(void)
1851 +{
1852 +       do {
1853 +               current->preempt_count += PREEMPT_ACTIVE;
1854 +               schedule();
1855 +               current->preempt_count -= PREEMPT_ACTIVE;
1856 +               barrier();
1857 +       } while (current->need_resched);
1858 +}
1859 +#endif /* CONFIG_PREEMPT */
1860 +
1861  /*
1862   * The core wakeup function.  Non-exclusive wakeups (nr_exclusive == 0) just
1863   * wake everything up.  If it's an exclusive wakeup (nr_exclusive == small +ve
1864 @@ -1229,12 +1271,17 @@
1865  
1866  asmlinkage long sys_sched_yield(void)
1867  {
1868 -       task_t *prev = current, *next;
1869 -       runqueue_t *rq = this_rq();
1870 +       task_t *prev, *next;
1871 +       runqueue_t *rq;
1872         prio_array_t *array;
1873         list_t *queue;
1874  
1875 +       preempt_disable();
1876 +       prev = current;
1877 +       rq = this_rq();
1878 +
1879         if (unlikely(prev->state != TASK_RUNNING)) {
1880 +               preempt_enable_no_resched();
1881                 schedule();
1882                 return 0;
1883         }
1884 @@ -1286,6 +1333,7 @@
1885         spin_unlock_irq(&rq->lock);
1886  
1887         reacquire_kernel_lock(current);
1888 +       preempt_enable_no_resched();
1889  
1890         return 0;
1891  }
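
The PREEMPT_ACTIVE dance in schedule()/preempt_schedule() guards the
classic sleep sequence.  A task normally sets its state to
TASK_INTERRUPTIBLE, finishes arranging its wakeup, and only then calls
schedule(); schedule() treats a non-running state as "this task wants to
sleep" and deactivates it.  If an involuntary preemption fired inside
that window, the half-prepared task would be deactivated as if it had
slept on purpose, and might never be woken.  With PREEMPT_ACTIVE set,
schedule() jumps straight to pick_next_task and leaves the task on the
runqueue.  The window, in a standard 2.4-style wait loop (wq and
condition are placeholders):

        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(&wq, &wait);
        set_current_state(TASK_INTERRUPTIBLE);
        /* <-- a preemption here, without PREEMPT_ACTIVE, would make */
        /*     schedule() deactivate us before we meant to sleep;    */
        /*     with it, we merely lose the CPU and stay runnable     */
        if (!condition)
                schedule();
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&wq, &wait);
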
1892 diff -urN linux-2.4.18-rc1-ingo-K3/lib/dec_and_lock.c linux/lib/dec_and_lock.c
1893 --- linux-2.4.18-rc1-ingo-K3/lib/dec_and_lock.c Wed Feb 13 16:24:09 2002
1894 +++ linux/lib/dec_and_lock.c    Wed Feb 13 16:23:45 2002
1895 @@ -1,5 +1,6 @@
1896  #include <linux/module.h>
1897  #include <linux/spinlock.h>
1898 +#include <linux/sched.h>
1899  #include <asm/atomic.h>
1900  
1901  /*
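
This include (and the identical one in net/sunrpc/pmap_clnt.c below) is
a knock-on effect of the new lock wrappers rather than a functional
change: spin_lock() now expands to code that dereferences current, so
any file using spinlocks must have task_struct in scope.  Note also that
this file is compiled on UP at all only because the ATOMIC_DEC_AND_LOCK
shortcut is disabled under CONFIG_PREEMPT in the spinlock.h hunk above.
Roughly what the preprocessor now produces (expansion paraphrased from
that hunk):

        spin_lock(&lock);
        /* becomes, under CONFIG_PREEMPT:
         *      ++current->preempt_count;       <- needs <linux/sched.h>
         *      barrier();
         *      _raw_spin_lock(&lock);
         */
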
1902 diff -urN linux-2.4.18-rc1-ingo-K3/mm/slab.c linux/mm/slab.c
1903 --- linux-2.4.18-rc1-ingo-K3/mm/slab.c  Wed Feb 13 16:24:09 2002
1904 +++ linux/mm/slab.c     Wed Feb 13 16:23:45 2002
1905 @@ -49,7 +49,8 @@
1906   *  constructors and destructors are called without any locking.
1907   *  Several members in kmem_cache_t and slab_t never change, they
1908   *     are accessed without any locking.
1909 - *  The per-cpu arrays are never accessed from the wrong cpu, no locking.
1910 + *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
1911 + *     and local interrupts are disabled so slab code is preempt-safe.
1912   *  The non-constant members are protected with a per-cache irq spinlock.
1913   *
1914   * Further notes from the original documentation:
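
The slab comment records the remaining way to be preempt-safe besides
taking a lock or calling preempt_disable(): with local interrupts off,
the interrupt-return path that triggers involuntary preemption can never
run, so a region that disables interrupts and calls nothing that
reschedules is atomic with respect to preemption too.  That is why the
per-CPU slab arrays need no additional protection.  The pattern, with
this_cpu_data as an invented example array:

        unsigned long flags;

        local_irq_save(flags);
        /* no interrupts -> no preemption; smp_processor_id() is stable */
        this_cpu_data[smp_processor_id()]++;
        local_irq_restore(flags);
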
1915 diff -urN linux-2.4.18-rc1-ingo-K3/net/socket.c linux/net/socket.c
1916 --- linux-2.4.18-rc1-ingo-K3/net/socket.c       Wed Feb 13 16:25:18 2002
1917 +++ linux/net/socket.c  Wed Feb 13 16:23:45 2002
1918 @@ -133,7 +133,7 @@
1919  
1920  static struct net_proto_family *net_families[NPROTO];
1921  
1922 -#ifdef CONFIG_SMP
1923 +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
1924  static atomic_t net_family_lockct = ATOMIC_INIT(0);
1925  static spinlock_t net_family_lock = SPIN_LOCK_UNLOCKED;
1926  
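
The net/socket.c hunk generalizes into the patch's rule of thumb: any
lockless scheme that was safe on UP only because "nothing else runs
until I yield" is broken by preemption, so protection that used to be
compiled under CONFIG_SMP alone must now also build under
CONFIG_PREEMPT.  Schematically:

        /* before: stubs on all UP kernels */
        #ifdef CONFIG_SMP
        /* real locking */
        #endif

        /* after: stubs only when there is truly no concurrency */
        #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
        /* real locking */
        #endif
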
1927 diff -urN linux-2.4.18-rc1-ingo-K3/net/sunrpc/pmap_clnt.c linux/net/sunrpc/pmap_clnt.c
1928 --- linux-2.4.18-rc1-ingo-K3/net/sunrpc/pmap_clnt.c     Wed Feb 13 16:24:20 2002
1929 +++ linux/net/sunrpc/pmap_clnt.c        Wed Feb 13 16:23:45 2002
1930 @@ -12,6 +12,7 @@
1931  #include <linux/config.h>
1932  #include <linux/types.h>
1933  #include <linux/socket.h>
1934 +#include <linux/sched.h>
1935  #include <linux/kernel.h>
1936  #include <linux/errno.h>
1937  #include <linux/uio.h>