        Scalable timer implementation: timers are kept and locked
        per-CPU instead of under a single global spinlock.
        Author: Ingo Molnar <mingo@elte.hu>
        URL: http://redhat.com/~mingo/scalable-timers-patches/
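
Illustration (a standalone user-space sketch, not part of the patch): the
timer wheel spreads pending timers over five levels by distance to expiry.
tv1 is indexed directly by the low TVR_BITS of the expiry time, each
further level by the next TVN_BITS; this is the same bucket selection
that internal_add_timer() performs below.

#include <stdio.h>

#define TVN_BITS 6
#define TVR_BITS 8
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)

/* Which level (1..5) and bucket does an expiry land in? */
static int wheel_level(unsigned long expires, unsigned long base_jiffies,
		       int *bucket)
{
	unsigned long idx = expires - base_jiffies;

	if (idx < TVR_SIZE) {
		*bucket = expires & TVR_MASK;
		return 1;
	}
	if (idx < 1UL << (TVR_BITS + TVN_BITS)) {
		*bucket = (expires >> TVR_BITS) & TVN_MASK;
		return 2;
	}
	if (idx < 1UL << (TVR_BITS + 2 * TVN_BITS)) {
		*bucket = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		return 3;
	}
	if (idx < 1UL << (TVR_BITS + 3 * TVN_BITS)) {
		*bucket = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		return 4;
	}
	*bucket = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
	return 5;
}

int main(void)
{
	unsigned long now = 1000;
	unsigned long deltas[] = { 1, 300, 20000, 2000000, 100000000 };
	int i, bucket;

	for (i = 0; i < 5; i++) {
		int lvl = wheel_level(now + deltas[i], now, &bucket);
		printf("+%lu jiffies -> tv%d[%d]\n", deltas[i], lvl, bucket);
	}
	return 0;
}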

--- linux/kernel/timer.c.orig   Tue Nov 27 13:29:34 2001
+++ linux/kernel/timer.c        Tue Nov 27 13:30:37 2001
@@ -13,10 +13,15 @@
  *              serialize accesses to xtime/lost_ticks).
  *                              Copyright (C) 1998  Andrea Arcangeli
  *  1999-03-10  Improved NTP compatibility by Ulrich Windl
+ *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
+ *                              Copyright (C) 2000  Ingo Molnar
+ *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
  */
 
 #include <linux/config.h>
+
 #include <linux/mm.h>
+#include <linux/init.h>
 #include <linux/timex.h>
 #include <linux/delay.h>
 #include <linux/smp_lock.h>
@@ -73,87 +78,49 @@
 unsigned long prof_len;
 unsigned long prof_shift;
 
-/*
- * Event timer code
- */
-#define TVN_BITS 6
-#define TVR_BITS 8
-#define TVN_SIZE (1 << TVN_BITS)
-#define TVR_SIZE (1 << TVR_BITS)
-#define TVN_MASK (TVN_SIZE - 1)
-#define TVR_MASK (TVR_SIZE - 1)
-
-struct timer_vec {
-       int index;
-       struct list_head vec[TVN_SIZE];
-};
-
-struct timer_vec_root {
-       int index;
-       struct list_head vec[TVR_SIZE];
-};
-
-static struct timer_vec tv5;
-static struct timer_vec tv4;
-static struct timer_vec tv3;
-static struct timer_vec tv2;
-static struct timer_vec_root tv1;
-
-static struct timer_vec * const tvecs[] = {
-       (struct timer_vec *)&tv1, &tv2, &tv3, &tv4, &tv5
-};
-
-static struct list_head * run_timer_list_running;
-
-#define NOOF_TVECS (sizeof(tvecs) / sizeof(tvecs[0]))
-
-void init_timervecs (void)
-{
-       int i;
+tvec_base_t tvec_bases[NR_CPUS];
 
-       for (i = 0; i < TVN_SIZE; i++) {
-               INIT_LIST_HEAD(tv5.vec + i);
-               INIT_LIST_HEAD(tv4.vec + i);
-               INIT_LIST_HEAD(tv3.vec + i);
-               INIT_LIST_HEAD(tv2.vec + i);
-       }
-       for (i = 0; i < TVR_SIZE; i++)
-               INIT_LIST_HEAD(tv1.vec + i);
-}
+/* jiffies at the most recent update of wall time */
+unsigned long wall_jiffies;
 
-static unsigned long timer_jiffies;
+/*
+ * This spinlock protects us from races in SMP while playing with xtime. -arca
+ */
+rwlock_t xtime_lock = RW_LOCK_UNLOCKED;
 
-static inline void internal_add_timer(struct timer_list *timer)
+/*
+ * This is the 'global' timer BH. This gets called only if one of
+ * the local timer interrupts couldn't run timers.
+ */
+static inline void internal_add_timer(tvec_base_t *base, timer_t *timer)
 {
        /*
         * must be cli-ed when calling this
         */
        unsigned long expires = timer->expires;
-       unsigned long idx = expires - timer_jiffies;
+       unsigned long idx = expires - base->timer_jiffies;
        struct list_head * vec;
 
-       if (run_timer_list_running)
-               vec = run_timer_list_running;
-       else if (idx < TVR_SIZE) {
+       if (idx < TVR_SIZE) {
                int i = expires & TVR_MASK;
-               vec = tv1.vec + i;
+               vec = base->tv1.vec + i;
        } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
                int i = (expires >> TVR_BITS) & TVN_MASK;
-               vec = tv2.vec + i;
+               vec = base->tv2.vec + i;
        } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
-               vec =  tv3.vec + i;
+               vec = base->tv3.vec + i;
        } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
-               vec = tv4.vec + i;
+               vec = base->tv4.vec + i;
        } else if ((signed long) idx < 0) {
                /* can happen if you add a timer with expires == jiffies,
                 * or you set a timer to go off in the past
                 */
-               vec = tv1.vec + tv1.index;
+               vec = base->tv1.vec + base->tv1.index;
        } else if (idx <= 0xffffffffUL) {
                int i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
-               vec = tv5.vec + i;
+               vec = base->tv5.vec + i;
        } else {
                /* Can only get here on architectures with 64-bit jiffies */
                INIT_LIST_HEAD(&timer->list);
@@ -165,37 +136,27 @@
        list_add(&timer->list, vec->prev);
 }
 
-/* Initialize both explicitly - let's try to have them in the same cache line */
-spinlock_t timerlist_lock = SPIN_LOCK_UNLOCKED;
-
-#ifdef CONFIG_SMP
-volatile struct timer_list * volatile running_timer;
-#define timer_enter(t) do { running_timer = t; mb(); } while (0)
-#define timer_exit() do { running_timer = NULL; } while (0)
-#define timer_is_running(t) (running_timer == t)
-#define timer_synchronize(t) while (timer_is_running(t)) barrier()
-#else
-#define timer_enter(t)         do { } while (0)
-#define timer_exit()           do { } while (0)
-#endif
-
-void add_timer(struct timer_list *timer)
+void add_timer(timer_t *timer)
 {
+       tvec_base_t * base = tvec_bases + smp_processor_id();
        unsigned long flags;
 
-       spin_lock_irqsave(&timerlist_lock, flags);
+       CHECK_BASE(base);
+       CHECK_BASE(timer->base);
+       spin_lock_irqsave(&base->lock, flags);
        if (timer_pending(timer))
                goto bug;
-       internal_add_timer(timer);
-       spin_unlock_irqrestore(&timerlist_lock, flags);
+       internal_add_timer(base, timer);
+       timer->base = base;
+       spin_unlock_irqrestore(&base->lock, flags);
        return;
 bug:
-       spin_unlock_irqrestore(&timerlist_lock, flags);
+       spin_unlock_irqrestore(&base->lock, flags);
        printk("bug: kernel timer added twice at %p.\n",
                        __builtin_return_address(0));
 }
 
-static inline int detach_timer (struct timer_list *timer)
+static inline int detach_timer(timer_t *timer)
 {
        if (!timer_pending(timer))
                return 0;
@@ -203,28 +164,81 @@
        return 1;
 }
 
-int mod_timer(struct timer_list *timer, unsigned long expires)
+/*
+ * mod_timer() has subtle locking semantics because parallel
+ * calls to it must be serialized.
+ */
+int mod_timer(timer_t *timer, unsigned long expires)
 {
-       int ret;
+       tvec_base_t *old_base, *new_base;
        unsigned long flags;
+       int ret;
+
+       new_base = tvec_bases + smp_processor_id();
+       CHECK_BASE(new_base);
+
+       __save_flags(flags);
+       __cli();
+repeat:
+       old_base = timer->base;
+       CHECK_BASE(old_base);
+
+       /*
+        * Prevent deadlocks via ordering by old_base < new_base.
+        */
+       if (old_base && (new_base != old_base)) {
+               if (old_base < new_base) {
+                       spin_lock(&new_base->lock);
+                       spin_lock(&old_base->lock);
+               } else {
+                       spin_lock(&old_base->lock);
+                       spin_lock(&new_base->lock);
+               }
+               /*
+                * Subtle, we rely on timer->base being always
+                * valid and being updated atomically.
+                */
+               if (timer->base != old_base) {
+                       spin_unlock(&new_base->lock);
+                       spin_unlock(&old_base->lock);
+                       goto repeat;
+               }
+       } else
+               spin_lock(&new_base->lock);
 
-       spin_lock_irqsave(&timerlist_lock, flags);
        timer->expires = expires;
        ret = detach_timer(timer);
-       internal_add_timer(timer);
-       spin_unlock_irqrestore(&timerlist_lock, flags);
+       internal_add_timer(new_base, timer);
+       timer->base = new_base;
+
+
+       if (old_base && (new_base != old_base))
+               spin_unlock(&old_base->lock);
+       spin_unlock_irqrestore(&new_base->lock, flags);
+
        return ret;
 }
 
-int del_timer(struct timer_list * timer)
+int del_timer(timer_t * timer)
 {
-       int ret;
        unsigned long flags;
+       tvec_base_t * base;
+       int ret;
 
-       spin_lock_irqsave(&timerlist_lock, flags);
+       CHECK_BASE(timer->base);
+       if (!timer->base)
+               return 0;
+repeat:
+       base = timer->base;
+       spin_lock_irqsave(&base->lock, flags);
+       if (base != timer->base) {
+               spin_unlock_irqrestore(&base->lock, flags);
+               goto repeat;
+       }
        ret = detach_timer(timer);
        timer->list.next = timer->list.prev = NULL;
-       spin_unlock_irqrestore(&timerlist_lock, flags);
+       spin_unlock_irqrestore(&base->lock, flags);
+
        return ret;
 }
 
@@ -242,24 +256,34 @@
  * (for reference counting).
  */
 
-int del_timer_sync(struct timer_list * timer)
+int del_timer_sync(timer_t * timer)
 {
+       tvec_base_t * base;
        int ret = 0;
 
+       CHECK_BASE(timer->base);
+       if (!timer->base)
+               return 0;
        for (;;) {
                unsigned long flags;
                int running;
 
-               spin_lock_irqsave(&timerlist_lock, flags);
+repeat:
+               base = timer->base;
+               spin_lock_irqsave(&base->lock, flags);
+               if (base != timer->base) {
+                       spin_unlock_irqrestore(&base->lock, flags);
+                       goto repeat;
+               }
                ret += detach_timer(timer);
                timer->list.next = timer->list.prev = 0;
-               running = timer_is_running(timer);
-               spin_unlock_irqrestore(&timerlist_lock, flags);
+               running = timer_is_running(base, timer);
+               spin_unlock_irqrestore(&base->lock, flags);
 
                if (!running)
                        break;
 
-               timer_synchronize(timer);
+               timer_synchronize(base, timer);
        }
 
        return ret;
@@ -267,7 +291,7 @@
 #endif
 
 
-static inline void cascade_timers(struct timer_vec *tv)
+static void cascade(tvec_base_t *base, tvec_t *tv)
 {
        /* cascade all the timers from tv up one level */
        struct list_head *head, *curr, *next;
@@ -279,66 +303,68 @@
         * detach them individually, just clear the list afterwards.
         */
        while (curr != head) {
-               struct timer_list *tmp;
+               timer_t *tmp;
 
-               tmp = list_entry(curr, struct timer_list, list);
+               tmp = list_entry(curr, timer_t, list);
+               CHECK_BASE(tmp->base);
+               if (tmp->base != base)
+                       BUG();
                next = curr->next;
                list_del(curr); // not needed
-               internal_add_timer(tmp);
+               internal_add_timer(base, tmp);
                curr = next;
        }
        INIT_LIST_HEAD(head);
        tv->index = (tv->index + 1) & TVN_MASK;
 }
 
-static inline void run_timer_list(void)
+static void __run_timers(tvec_base_t *base)
 {
-       spin_lock_irq(&timerlist_lock);
-       while ((long)(jiffies - timer_jiffies) >= 0) {
-               LIST_HEAD(queued);
+       unsigned long flags;
+
+       spin_lock_irqsave(&base->lock, flags);
+       while ((long)(jiffies - base->timer_jiffies) >= 0) {
                struct list_head *head, *curr;
-               if (!tv1.index) {
-                       int n = 1;
-                       do {
-                               cascade_timers(tvecs[n]);
-                       } while (tvecs[n]->index == 1 && ++n < NOOF_TVECS);
+
+               /*
+                * Cascade timers:
+                */
+               if (!base->tv1.index) {
+                       cascade(base, &base->tv2);
+                       if (base->tv2.index == 1) {
+                               cascade(base, &base->tv3);
+                               if (base->tv3.index == 1) {
+                                       cascade(base, &base->tv4);
+                                       if (base->tv4.index == 1)
+                                               cascade(base, &base->tv5);
+                               }
+                       }
                }
-               run_timer_list_running = &queued;
 repeat:
-               head = tv1.vec + tv1.index;
+               head = base->tv1.vec + base->tv1.index;
                curr = head->next;
                if (curr != head) {
-                       struct timer_list *timer;
                        void (*fn)(unsigned long);
                        unsigned long data;
+                       timer_t *timer;
 
-                       timer = list_entry(curr, struct timer_list, list);
+                       timer = list_entry(curr, timer_t, list);
                        fn = timer->function;
-                       data= timer->data;
+                       data = timer->data;
 
                        detach_timer(timer);
                        timer->list.next = timer->list.prev = NULL;
-                       timer_enter(timer);
-                       spin_unlock_irq(&timerlist_lock);
+                       timer_enter(base, timer);
+                       spin_unlock_irq(&base->lock);
                        fn(data);
-                       spin_lock_irq(&timerlist_lock);
-                       timer_exit();
+                       spin_lock_irq(&base->lock);
+                       timer_exit(base);
                        goto repeat;
                }
-               run_timer_list_running = NULL;
-               ++timer_jiffies; 
-               tv1.index = (tv1.index + 1) & TVR_MASK;
-
-               curr = queued.next;
-               while (curr != &queued) {
-                       struct timer_list *timer;
-
-                       timer = list_entry(curr, struct timer_list, list);
-                       curr = curr->next;
-                       internal_add_timer(timer);
-               }                       
+               ++base->timer_jiffies;
+               base->tv1.index = (base->tv1.index + 1) & TVR_MASK;
        }
-       spin_unlock_irq(&timerlist_lock);
+       spin_unlock_irqrestore(&base->lock, flags);
 }
 
 spinlock_t tqueue_lock = SPIN_LOCK_UNLOCKED;
@@ -632,42 +671,76 @@
        }
 }
 
-/* jiffies at the most recent update of wall time */
-unsigned long wall_jiffies;
+static void run_all_timers(void)
+{
+       int i;
+
+       for (i = 0; i < smp_num_cpus; i++) {
+               tvec_base_t *base = tvec_bases + i;
+               if ((long)(jiffies - base->timer_jiffies) >= 0)
+                       __run_timers(base);
+       }
+}
 
 /*
- * This spinlock protect us from races in SMP while playing with xtime. -arca
+ * Called by the local, per-CPU timer interrupt on SMP.
+ *
+ * This function has to do all sorts of locking to make legacy
+ * cli()-users and BH-disablers work. If locking doesn't succeed
+ * now then we fall back to TIMER_BH.
  */
-rwlock_t xtime_lock = RW_LOCK_UNLOCKED;
+void run_local_timers(void)
+{
+       int cpu = smp_processor_id();
+       tvec_base_t *base = tvec_bases + cpu;
+
+       if (in_interrupt())
+               goto out_mark;
+
+       local_bh_disable();
+       local_irq_disable();
+       if (!spin_trylock(&global_bh_lock))
+               goto out_enable_mark;
+
+       if (!hardirq_trylock(cpu))
+               goto out_unlock_enable_mark;
 
-static inline void update_times(void)
+       if ((long)(jiffies - base->timer_jiffies) >= 0)
+               __run_timers(base);
+
+       hardirq_endlock(cpu);
+       spin_unlock(&global_bh_lock);
+       local_irq_enable();
+       local_bh_enable();
+       return;
+
+out_unlock_enable_mark:
+       spin_unlock(&global_bh_lock);
+
+out_enable_mark:
+       local_irq_enable();
+       local_bh_enable();
+
+out_mark:
+       mark_bh(TIMER_BH);
+}
+
+/*
+ * Called by the timer interrupt. xtime_lock must already be taken
+ * by the timer IRQ!
+ */
+static void update_times(void)
 {
        unsigned long ticks;
 
-       /*
-        * update_times() is run from the raw timer_bh handler so we
-        * just know that the irqs are locally enabled and so we don't
-        * need to save/restore the flags of the local CPU here. -arca
-        */
-       write_lock_irq(&xtime_lock);
-       vxtime_lock();
-
        ticks = jiffies - wall_jiffies;
        if (ticks) {
                wall_jiffies += ticks;
                update_wall_time(ticks);
        }
-       vxtime_unlock();
-       write_unlock_irq(&xtime_lock);
        calc_load(ticks);
 }
 
-void timer_bh(void)
-{
-       update_times();
-       run_timer_list();
-}
-
 void do_timer(struct pt_regs *regs)
 {
        (*(unsigned long *)&jiffies)++;
@@ -675,8 +750,18 @@
        /* SMP process accounting uses the local APIC timer */
 
        update_process_times(user_mode(regs));
+#ifdef CONFIG_X86
+       mark_bh(TIMER_BH);
 #endif
+#endif
+       /*
+        * Right now only x86-SMP calls run_local_timers() from a
+        * per-CPU interrupt.
+        */
+#ifndef CONFIG_X86
        mark_bh(TIMER_BH);
+#endif
+       update_times();
        if (TQ_ACTIVE(tq_timer))
                mark_bh(TQUEUE_BH);
 }
@@ -938,3 +1022,23 @@
        }
        return 0;
 }
+
+void __init init_timers(void)
+{
+       int i, j;
+
+       for (i = 0; i < NR_CPUS; i++) {
+               tvec_base_t *base = tvec_bases + i;
+
+               spin_lock_init(&base->lock);
+               for (j = 0; j < TVN_SIZE; j++) {
+                       INIT_LIST_HEAD(base->tv5.vec + j);
+                       INIT_LIST_HEAD(base->tv4.vec + j);
+                       INIT_LIST_HEAD(base->tv3.vec + j);
+                       INIT_LIST_HEAD(base->tv2.vec + j);
+               }
+               for (j = 0; j < TVR_SIZE; j++)
+                       INIT_LIST_HEAD(base->tv1.vec + j);
+       }
+       init_bh(TIMER_BH, run_all_timers);
+}
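
Aside (illustration only, not part of the patch): mod_timer() above avoids
ABBA deadlock when a timer migrates between CPUs by taking the two base
locks in a fixed address order, and by re-checking timer->base once the
locks are held (the goto repeat), since the timer may have moved while we
spun. The same rule in standalone pthreads form; the lower-address-first
direction chosen here is arbitrary, as long as every path agrees:

#include <pthread.h>
#include <stdio.h>

static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a == b) {
		pthread_mutex_lock(a);
	} else if (a < b) {		/* fixed order: lower address first */
		pthread_mutex_lock(a);
		pthread_mutex_lock(b);
	} else {
		pthread_mutex_lock(b);
		pthread_mutex_lock(a);
	}
}

static void unlock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	pthread_mutex_unlock(a);
	if (a != b)
		pthread_mutex_unlock(b);
}

int main(void)
{
	pthread_mutex_t m1 = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t m2 = PTHREAD_MUTEX_INITIALIZER;

	/* Both argument orders acquire m1/m2 identically, so threads
	 * calling lock_pair(&m1, &m2) and lock_pair(&m2, &m1) in
	 * parallel cannot deadlock. */
	lock_pair(&m1, &m2);
	printf("both locks held\n");
	unlock_pair(&m1, &m2);
	return 0;
}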
--- linux/kernel/sched.c.orig   Tue Nov 27 13:29:39 2001
+++ linux/kernel/sched.c        Tue Nov 27 13:30:37 2001
@@ -1749,7 +1749,6 @@
        idle->preempt_count = (idle->lock_depth >= 0);
 }
 
-extern void init_timervecs(void);
 extern void timer_bh(void);
 extern void tqueue_bh(void);
 extern void immediate_bh(void);
@@ -1790,8 +1789,7 @@
        current->cpu = smp_processor_id();
        wake_up_process(current);
 
-       init_timervecs();
-       init_bh(TIMER_BH, timer_bh);
+       init_timers();
        init_bh(TQUEUE_BH, tqueue_bh);
        init_bh(IMMEDIATE_BH, immediate_bh);
 
--- linux/kernel/ksyms.c.orig   Tue Nov 27 13:29:39 2001
+++ linux/kernel/ksyms.c        Tue Nov 27 13:30:37 2001
@@ -419,6 +419,7 @@
 EXPORT_SYMBOL(del_timer_sync);
 #endif
 EXPORT_SYMBOL(mod_timer);
+EXPORT_SYMBOL(tvec_bases);
 EXPORT_SYMBOL(tq_timer);
 EXPORT_SYMBOL(tq_immediate);
 
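
Why the new export above: init_timer() is a static inline in
<linux/timer.h> (see the next hunk) and now stores a default base pointer
into every timer, so any module that merely initializes a timer references
the tvec_bases array:

	/* from the new <linux/timer.h>, compiled into module code: */
	static inline void init_timer(timer_t * timer)
	{
		timer->list.next = timer->list.prev = NULL;
		timer->base = tvec_bases + 0;	/* needs the export */
	}
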
--- linux/include/linux/timer.h.orig    Tue Nov 27 13:29:37 2001
+++ linux/include/linux/timer.h Tue Nov 27 13:30:37 2001
@@ -1,9 +1,6 @@
 #ifndef _LINUX_TIMER_H
 #define _LINUX_TIMER_H
 
-#include <linux/config.h>
-#include <linux/list.h>
-
 /*
  * In Linux 2.4, static timers have been removed from the kernel.
  * Timers may be dynamically created and destroyed, and should be initialized
@@ -13,22 +10,78 @@
  * timeouts. You can use this field to distinguish between the different
  * invocations.
  */
+
+#include <linux/config.h>
+#include <linux/smp.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+
+/*
+ * Event timer code
+ */
+#define TVN_BITS 6
+#define TVR_BITS 8
+#define TVN_SIZE (1 << TVN_BITS)
+#define TVR_SIZE (1 << TVR_BITS)
+#define TVN_MASK (TVN_SIZE - 1)
+#define TVR_MASK (TVR_SIZE - 1)
+
+typedef struct tvec_s {
+       int index;
+       struct list_head vec[TVN_SIZE];
+} tvec_t;
+
+typedef struct tvec_root_s {
+       int index;
+       struct list_head vec[TVR_SIZE];
+} tvec_root_t;
+
+#define NOOF_TVECS 5
+
+typedef struct timer_list timer_t;
+
+typedef struct tvec_t_base_s {
+       spinlock_t lock;
+       unsigned long timer_jiffies;
+       volatile timer_t * volatile running_timer;
+       tvec_root_t tv1;
+       tvec_t tv2;
+       tvec_t tv3;
+       tvec_t tv4;
+       tvec_t tv5;
+} tvec_base_t;
+
+/*
+ * This is the new and improved way of handling timers.
+ *
+ * The "data" field is in case you want to use the same
+ * timeout function for several timeouts. You can use this
+ * to distinguish between the different invocations.
+ */
 struct timer_list {
        struct list_head list;
        unsigned long expires;
        unsigned long data;
        void (*function)(unsigned long);
+       tvec_base_t *base;
 };
 
-extern void add_timer(struct timer_list * timer);
-extern int del_timer(struct timer_list * timer);
+extern void add_timer(timer_t * timer);
+extern int del_timer(timer_t * timer);
 
 #ifdef CONFIG_SMP
-extern int del_timer_sync(struct timer_list * timer);
+extern int del_timer_sync(timer_t * timer);
 extern void sync_timers(void);
+#define timer_enter(base, t) do { base->running_timer = t; mb(); } while (0)
+#define timer_exit(base) do { base->running_timer = NULL; } while (0)
+#define timer_is_running(base,t) (base->running_timer == t)
+#define timer_synchronize(base,t) while (timer_is_running(base,t)) barrier()
 #else
 #define del_timer_sync(t)      del_timer(t)
 #define sync_timers()          do { } while (0)
+#define timer_enter(base,t)          do { } while (0)
+#define timer_exit(base)            do { } while (0)
 #endif
 
 /*
@@ -40,17 +92,33 @@
  * If the timer is known to be not pending (ie, in the handler), mod_timer
  * is less efficient than a->expires = b; add_timer(a).
  */
-int mod_timer(struct timer_list *timer, unsigned long expires);
+int mod_timer(timer_t *timer, unsigned long expires);
 
 extern void it_real_fn(unsigned long);
 
-static inline void init_timer(struct timer_list * timer)
+extern void init_timers(void);
+extern void run_local_timers(void);
+
+extern tvec_base_t tvec_bases[NR_CPUS];
+
+static inline void init_timer(timer_t * timer)
 {
        timer->list.next = timer->list.prev = NULL;
+       timer->base = tvec_bases + 0;
 }
 
-static inline int timer_pending (const struct timer_list * timer)
+#define TIMER_DEBUG 0
+#if TIMER_DEBUG
+# define CHECK_BASE(base) \
+       if (base && ((base < tvec_bases) || (base >= tvec_bases + NR_CPUS))) \
+               BUG()
+#else
+# define CHECK_BASE(base)
+#endif
+
+static inline int timer_pending(const timer_t * timer)
 {
+       CHECK_BASE(timer->base);
        return timer->list.next != NULL;
 }
 
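
The driver-visible API is unchanged by all of this. A hypothetical user,
for illustration only (my_timer, my_timeout and the one-second period are
made up):

#include <linux/timer.h>
#include <linux/sched.h>	/* jiffies, HZ */

static timer_t my_timer;	/* timer_t is the typedef added above */

static void my_timeout(unsigned long data)
{
	/* runs with the owning base's lock dropped, exactly as before */
}

static void my_start(void)
{
	init_timer(&my_timer);		/* now also sets timer->base */
	my_timer.function = my_timeout;
	my_timer.data = 0;
	my_timer.expires = jiffies + HZ;
	add_timer(&my_timer);		/* queued on this CPU's base */
}

static void my_stop(void)
{
	del_timer_sync(&my_timer);	/* waits out a running handler */
}
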
--- linux/include/linux/smp.h.orig      Sun Dec 31 20:10:17 2000
+++ linux/include/linux/smp.h   Tue Nov 27 13:30:37 2001
@@ -76,7 +76,8 @@
 /*
  *     These macros fold the SMP functionality into a single CPU system
  */
-
+
+#define NR_CPUS                                        1
 #define smp_num_cpus                           1
 #define smp_processor_id()                     0
 #define hard_smp_processor_id()                        0
--- linux/drivers/net/eepro100.c.orig   Tue Nov 27 13:29:38 2001
+++ linux/drivers/net/eepro100.c        Tue Nov 27 13:30:37 2001
@@ -1219,9 +1219,6 @@
        /* We must continue to monitor the media. */
        sp->timer.expires = RUN_AT(2*HZ);                       /* 2.0 sec. */
        add_timer(&sp->timer);
-#if defined(timer_exit)
-       timer_exit(&sp->timer);
-#endif
 }
 
 static void speedo_show_state(struct net_device *dev)
--- linux/arch/i386/mm/fault.c.orig     Tue Nov 27 13:29:31 2001
+++ linux/arch/i386/mm/fault.c  Tue Nov 27 13:30:37 2001
@@ -104,16 +104,12 @@
        goto bad_area;
 }
 
-extern spinlock_t timerlist_lock;
-
 /*
  * Unlock any spinlocks which will prevent us from getting the
- * message out (timerlist_lock is acquired through the
- * console unblank code)
+ * message out
  */
 void bust_spinlocks(int yes)
 {
-       spin_lock_init(&timerlist_lock);
        if (yes) {
                oops_in_progress = 1;
 #ifdef CONFIG_SMP
--- linux/arch/i386/kernel/apic.c.orig  Tue Nov 27 13:29:38 2001
+++ linux/arch/i386/kernel/apic.c       Tue Nov 27 13:30:37 2001
@@ -1078,7 +1078,9 @@
        irq_enter(cpu, 0);
        smp_local_timer_interrupt(regs);
        irq_exit(cpu, 0);
-
+#if CONFIG_SMP
+       run_local_timers();
+#endif
        if (softirq_pending(cpu))
                do_softirq();
 }
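
Aside (illustration only): run_local_timers() uses try-locks with a
fallback rather than blocking, because it runs from the local APIC
interrupt where legacy cli() users and BH-disablers may already hold the
locks it needs. The pattern in miniature, in plain pthreads:

#include <pthread.h>

static pthread_mutex_t resource = PTHREAD_MUTEX_INITIALIZER;
static volatile int deferred_work;	/* stand-in for mark_bh(TIMER_BH) */

/* Called from a context that must not block. */
static void try_work_or_defer(void)
{
	if (pthread_mutex_trylock(&resource) == 0) {
		/* fast path: do the work right here */
		pthread_mutex_unlock(&resource);
	} else {
		/* slow path: flag it for the generic handler to pick up */
		deferred_work = 1;
	}
}
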
--- linux-2.4.20/lib/bust_spinlocks.c.orig      Mon Sep 17 06:22:40 2001
+++ linux-2.4.20/lib/bust_spinlocks.c   Sun Mar  9 13:26:33 2003
@@ -14,11 +14,8 @@
 #include <linux/wait.h>
 #include <linux/vt_kern.h>
 
-extern spinlock_t timerlist_lock;
-
 void bust_spinlocks(int yes)
 {
-       spin_lock_init(&timerlist_lock);
        if (yes) {
                oops_in_progress = 1;
        } else {