1 diff -urN linux-2.4.24.org/arch/alpha/kernel/entry.S linux-2.4.24/arch/alpha/kernel/entry.S
2 --- linux-2.4.24.org/arch/alpha/kernel/entry.S 2004-02-04 20:50:50.273627588 +0100
3 +++ linux-2.4.24/arch/alpha/kernel/entry.S 2004-02-04 20:52:52.801142450 +0100
6 lda $26,ret_from_sys_call
14 diff -urN linux-2.4.24.org/arch/alpha/kernel/process.c linux-2.4.24/arch/alpha/kernel/process.c
15 --- linux-2.4.24.org/arch/alpha/kernel/process.c 2004-02-04 20:50:48.800933904 +0100
16 +++ linux-2.4.24/arch/alpha/kernel/process.c 2004-02-04 20:52:52.805141619 +0100
20 /* An endless idle loop with no priority at all. */
22 - current->counter = -100;
25 /* FIXME -- EV6 and LCA45 know how to power down
27 diff -urN linux-2.4.24.org/arch/alpha/kernel/smp.c linux-2.4.24/arch/alpha/kernel/smp.c
28 --- linux-2.4.24.org/arch/alpha/kernel/smp.c 2004-02-04 20:50:49.083875053 +0100
29 +++ linux-2.4.24/arch/alpha/kernel/smp.c 2004-02-04 20:52:52.820138499 +0100
31 int smp_num_probed; /* Internal processor count */
32 int smp_num_cpus = 1; /* Number that came online. */
33 int smp_threads_ready; /* True once the per process idle is forked. */
34 +cycles_t cacheflush_time;
35 +unsigned long cache_decay_ticks;
37 int __cpu_number_map[NR_CPUS];
38 int __cpu_logical_map[NR_CPUS];
41 int cpuid = hard_smp_processor_id();
43 - if (current != init_tasks[cpu_number_map(cpuid)]) {
44 - printk("BUG: smp_calling: cpu %d current %p init_tasks[cpu_number_map(cpuid)] %p\n",
45 - cpuid, current, init_tasks[cpu_number_map(cpuid)]);
48 DBGS(("CALLIN %d state 0x%lx\n", cpuid, current->state));
50 /* Turn on machine checks. */
52 DBGS(("smp_callin: commencing CPU %d current %p\n",
55 - /* Setup the scheduler for this processor. */
58 /* ??? This should be in init_idle. */
59 atomic_inc(&init_mm.mm_count);
60 current->active_mm = &init_mm;
67 + * Rough estimation for SMP scheduling: this is the number of cycles it
68 + * takes for a fully memory-limited process to flush the SMP-local cache.
70 + * We are not told how much cache there is, so we have to guess.
73 +smp_tune_scheduling (int cpuid)
75 + struct percpu_struct *cpu;
76 + unsigned long on_chip_cache; /* kB */
77 + unsigned long freq; /* Hz */
78 + unsigned long bandwidth = 350; /* MB/s */
80 + cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset
81 + + cpuid * hwrpb->processor_size);
85 + on_chip_cache = 16 + 16;
90 + on_chip_cache = 8 + 8 + 96;
94 + on_chip_cache = 16 + 8;
100 + on_chip_cache = 64 + 64;
104 + freq = hwrpb->cycle_freq ? : est_cycle_freq;
106 + cacheflush_time = (freq / 1000000) * (on_chip_cache << 10) / bandwidth;
107 + cache_decay_ticks = cacheflush_time / (freq / 1000) * HZ / 1000;
109 + printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
110 + cacheflush_time/(freq/1000000),
111 + (cacheflush_time*100/(freq/1000000)) % 100);
112 + printk("task migration cache decay timeout: %ld msecs.\n",
113 + (cache_decay_ticks + 1) * 1000 / HZ);
117 * Send a message to a secondary's console. "START" is one such
118 * interesting message. ;-)
119 @@ -449,14 +494,11 @@
120 if (idle == &init_task)
121 panic("idle process is init_task for CPU %d", cpuid);
123 - idle->processor = cpuid;
124 - idle->cpus_runnable = 1 << cpuid; /* we schedule the first task manually */
125 + init_idle(idle, cpuid);
126 + unhash_process(idle);
128 __cpu_logical_map[cpunum] = cpuid;
129 __cpu_number_map[cpuid] = cpunum;
131 - del_from_runqueue(idle);
132 - unhash_process(idle);
133 - init_tasks[cpunum] = idle;
135 DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
136 cpuid, idle->state, idle->flags));
137 @@ -563,13 +605,11 @@
139 __cpu_number_map[boot_cpuid] = 0;
140 __cpu_logical_map[0] = boot_cpuid;
141 - current->processor = boot_cpuid;
143 smp_store_cpu_info(boot_cpuid);
144 + smp_tune_scheduling(boot_cpuid);
145 smp_setup_percpu_timer(boot_cpuid);
149 /* ??? This should be in init_idle. */
150 atomic_inc(&init_mm.mm_count);
151 current->active_mm = &init_mm;
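
The arithmetic in smp_tune_scheduling() above is pure unit conversion: cache size
divided by an assumed memory bandwidth gives microseconds, multiplied by cycles per
microsecond gives cycles, and cycles divided by cycles per millisecond recovers
scheduler ticks. A minimal user-space sketch of the same formulas, assuming a
hypothetical 500 MHz EV6 (the 64+64 kB and 350 MB/s figures are the patch's own
guesses):

#include <stdio.h>

#define HZ 1024                          /* alpha runs with HZ == 1024 */

int main(void)
{
        unsigned long on_chip_cache = 64 + 64;  /* kB; EV6 guess from the patch */
        unsigned long freq = 500000000;         /* Hz; assumed clock speed */
        unsigned long bandwidth = 350;          /* MB/s; same guess as the patch */
        unsigned long cacheflush_time, cache_decay_ticks;

        /* (cycles/usec) * bytes / (bytes/usec) = cycles */
        cacheflush_time = (freq / 1000000) * (on_chip_cache << 10) / bandwidth;
        /* cycles / (cycles/msec) = msec; then msec * HZ / 1000 = ticks */
        cache_decay_ticks = cacheflush_time / (freq / 1000) * HZ / 1000;

        printf("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
               cacheflush_time / (freq / 1000000),
               (cacheflush_time * 100 / (freq / 1000000)) % 100);
        printf("task migration cache decay timeout: %ld msecs.\n",
               (cache_decay_ticks + 1) * 1000 / HZ);
        return 0;
}

Note that cache_decay_ticks collapses to 0 for sub-millisecond flush times, which
is why the printk adds 1 before reporting.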
152 diff -urN linux-2.4.24.org/arch/arm/kernel/process.c linux-2.4.24/arch/arm/kernel/process.c
153 --- linux-2.4.24.org/arch/arm/kernel/process.c 2004-02-04 20:51:34.213488266 +0100
154 +++ linux-2.4.24/arch/arm/kernel/process.c 2004-02-04 20:52:52.824137668 +0100
157 /* endless idle loop with no priority at all */
159 - current->nice = 20;
160 - current->counter = -100;
163 void (*idle)(void) = pm_idle;
164 diff -urN linux-2.4.24.org/arch/i386/kernel/entry.S linux-2.4.24/arch/i386/kernel/entry.S
165 --- linux-2.4.24.org/arch/i386/kernel/entry.S 2004-02-04 20:50:47.376230238 +0100
166 +++ linux-2.4.24/arch/i386/kernel/entry.S 2004-02-04 20:52:52.828136836 +0100
182 call SYMBOL_NAME(schedule_tail)
186 testb $0x02,tsk_ptrace(%ebx) # PT_TRACESYS
188 diff -urN linux-2.4.24.org/arch/i386/kernel/process.c linux-2.4.24/arch/i386/kernel/process.c
189 --- linux-2.4.24.org/arch/i386/kernel/process.c 2004-02-04 20:50:46.799350227 +0100
190 +++ linux-2.4.24/arch/i386/kernel/process.c 2004-02-04 20:52:52.833135796 +0100
193 if (current_cpu_data.hlt_works_ok && !hlt_counter) {
195 - if (!current->need_resched)
196 + if (!need_resched())
203 /* endless idle loop with no priority at all */
205 - current->nice = 20;
206 - current->counter = -100;
209 void (*idle)(void) = pm_idle;
210 @@ -665,15 +662,17 @@
211 asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));
214 - * Restore %fs and %gs.
215 + * Restore %fs and %gs if needed.
217 - loadsegment(fs, next->fs);
218 - loadsegment(gs, next->gs);
219 + if (unlikely(prev->fs | prev->gs | next->fs | next->gs)) {
220 + loadsegment(fs, next->fs);
221 + loadsegment(gs, next->gs);
225 * Now maybe reload the debug registers
227 - if (next->debugreg[7]){
228 + if (unlikely(next->debugreg[7])) {
236 - if (prev->ioperm || next->ioperm) {
237 + if (unlikely(prev->ioperm || next->ioperm)) {
240 * 4 cachelines copy ... not good, but not that
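
The unlikely() annotations introduced above are hints to gcc's __builtin_expect(),
which moves the rarely-taken branch off the fast path; reloading %fs/%gs and the
debug registers is skipped entirely for the common task that never touches them.
A self-contained sketch of the macro, essentially as the kernel defines it in
include/linux/compiler.h:

#include <stdio.h>

/* Roughly the kernel's definition. */
#define likely(x)       __builtin_expect(!!(x), 1)
#define unlikely(x)     __builtin_expect(!!(x), 0)

static int reload_count;

static void reload_segments(void) { reload_count++; }

int main(void)
{
        int fs = 0, gs = 0;     /* most tasks never set %fs/%gs */

        /* The fast path falls straight through; the reload is laid out
         * out of line by the compiler. */
        if (unlikely(fs | gs))
                reload_segments();

        printf("segment reloads: %d\n", reload_count);
        return 0;
}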
241 diff -urN linux-2.4.24.org/arch/i386/kernel/setup.c linux-2.4.24/arch/i386/kernel/setup.c
242 --- linux-2.4.24.org/arch/i386/kernel/setup.c 2004-02-04 20:50:46.790352099 +0100
243 +++ linux-2.4.24/arch/i386/kernel/setup.c 2004-02-04 20:52:52.840134340 +0100
244 @@ -3193,9 +3193,10 @@
246 load_LDT(&init_mm.context);
249 - * Clear all 6 debug registers:
251 + /* Clear %fs and %gs. */
252 + asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");
254 + /* Clear all 6 debug registers: */
256 #define CD(register) __asm__("movl %0,%%db" #register ::"r"(0) );
258 diff -urN linux-2.4.24.org/arch/i386/kernel/smpboot.c linux-2.4.24/arch/i386/kernel/smpboot.c
259 --- linux-2.4.24.org/arch/i386/kernel/smpboot.c 2004-02-04 20:50:46.762357921 +0100
260 +++ linux-2.4.24/arch/i386/kernel/smpboot.c 2004-02-04 20:52:52.864129350 +0100
261 @@ -308,14 +308,14 @@
262 if (tsc_values[i] < avg)
263 realdelta = -realdelta;
265 - printk("BIOS BUG: CPU#%d improperly initialized, has %ld usecs TSC skew! FIXED.\n",
267 + printk("BIOS BUG: CPU#%d improperly initialized, has %ld usecs TSC skew! FIXED.\n", i, realdelta);
277 static void __init synchronize_tsc_ap (void)
279 * (This works even if the APIC is not enabled.)
281 phys_id = GET_APIC_ID(apic_read(APIC_ID));
282 - cpuid = current->processor;
284 if (test_and_set_bit(cpuid, &cpu_online_map)) {
285 printk("huh, phys CPU#%d, CPU#%d already present??\n",
289 smp_store_cpu_info(cpuid);
291 + disable_APIC_timer();
293 * Allow the master to continue.
297 while (!atomic_read(&smp_commenced))
299 + enable_APIC_timer();
301 * low-memory mappings have been cleared, flush them from
302 * the local TLBs too.
303 @@ -803,16 +805,13 @@
305 panic("No idle process for CPU %d", cpu);
307 - idle->processor = cpu;
308 - idle->cpus_runnable = 1 << cpu; /* we schedule the first task manually */
309 + init_idle(idle, cpu);
311 map_cpu_to_boot_apicid(cpu, apicid);
313 idle->thread.eip = (unsigned long) start_secondary;
315 - del_from_runqueue(idle);
316 unhash_process(idle);
317 - init_tasks[cpu] = idle;
319 /* start_eip had better be page-aligned! */
320 start_eip = setup_trampoline();
324 cycles_t cacheflush_time;
325 +unsigned long cache_decay_ticks;
327 static void smp_tune_scheduling (void)
330 cacheflush_time = (cpu_khz>>10) * (cachesize<<10) / bandwidth;
333 + cache_decay_ticks = (long)cacheflush_time/cpu_khz * HZ / 1000;
335 printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
336 (long)cacheflush_time/(cpu_khz/1000),
337 ((long)cacheflush_time*100/(cpu_khz/1000)) % 100);
338 + printk("task migration cache decay timeout: %ld msecs.\n",
339 + (cache_decay_ticks + 1) * 1000 / HZ);
343 @@ -1026,8 +1030,7 @@
344 map_cpu_to_boot_apicid(0, boot_cpu_apicid);
346 global_irq_holder = 0;
347 - current->processor = 0;
350 smp_tune_scheduling();
353 diff -urN linux-2.4.24.org/arch/i386/kernel/smp.c linux-2.4.24/arch/i386/kernel/smp.c
354 --- linux-2.4.24.org/arch/i386/kernel/smp.c 2004-02-04 20:50:47.312243547 +0100
355 +++ linux-2.4.24/arch/i386/kernel/smp.c 2004-02-04 20:52:52.868128518 +0100
356 @@ -496,13 +496,23 @@
357 * it goes straight through and wastes no time serializing
358 * anything. Worst case is that we lose a reschedule ...
361 void smp_send_reschedule(int cpu)
363 send_IPI_mask(1 << cpu, RESCHEDULE_VECTOR);
367 + * this function sends a reschedule IPI to all (other) CPUs.
368 + * This should only be used if some 'global' task became runnable,
369 + * such as a RT task, that must be handled now. The first CPU
370 + * that manages to grab the task will run it.
372 +void smp_send_reschedule_all(void)
374 + send_IPI_allbutself(RESCHEDULE_VECTOR);
378 * Structure and data for smp_call_function(). This is designed to minimise
379 * static memory requirements. It also looks cleaner.
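
The comment above describes a deliberately racy protocol: every other CPU gets a
reschedule IPI, and whichever one acquires the newly runnable RT task first runs
it; the rest simply resume what they were doing. A loose user-space analogy of
this broadcast, first-grabber-wins pattern (pthreads standing in for CPUs and
IPIs; purely illustrative, not the kernel code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;
static int task_pending, stop;

static void *cpu(void *arg)
{
        pthread_mutex_lock(&lock);
        while (!task_pending && !stop)
                pthread_cond_wait(&kick, &lock);        /* idle until kicked */
        if (task_pending) {                             /* first grabber wins */
                task_pending = 0;
                stop = 1;
                pthread_cond_broadcast(&kick);  /* losers go back to idle */
                printf("cpu %ld runs the RT task\n", (long)arg);
        }
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t[4];
        long i;

        for (i = 0; i < 4; i++)
                pthread_create(&t[i], NULL, cpu, (void *)i);
        pthread_mutex_lock(&lock);
        task_pending = 1;                       /* an RT task became runnable */
        pthread_cond_broadcast(&kick);          /* smp_send_reschedule_all() */
        pthread_mutex_unlock(&lock);
        for (i = 0; i < 4; i++)
                pthread_join(t[i], NULL);
        return 0;
}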
381 diff -urN linux-2.4.24.org/arch/mips64/kernel/process.c linux-2.4.24/arch/mips64/kernel/process.c
382 --- linux-2.4.24.org/arch/mips64/kernel/process.c 2004-02-04 20:51:53.268524907 +0100
383 +++ linux-2.4.24/arch/mips64/kernel/process.c 2004-02-04 20:52:52.872127686 +0100
386 /* endless idle loop with no priority at all */
388 - current->nice = 20;
389 - current->counter = -100;
392 while (!current->need_resched)
394 diff -urN linux-2.4.24.org/arch/parisc/kernel/process.c linux-2.4.24/arch/parisc/kernel/process.c
395 --- linux-2.4.24.org/arch/parisc/kernel/process.c 2004-02-04 20:51:58.602415484 +0100
396 +++ linux-2.4.24/arch/parisc/kernel/process.c 2004-02-04 20:52:52.876126854 +0100
399 /* endless idle loop with no priority at all */
401 - current->nice = 20;
402 - current->counter = -100;
405 while (!current->need_resched) {
406 diff -urN linux-2.4.24.org/arch/ppc/kernel/entry.S linux-2.4.24/arch/ppc/kernel/entry.S
407 --- linux-2.4.24.org/arch/ppc/kernel/entry.S 2004-02-04 20:51:15.913294629 +0100
408 +++ linux-2.4.24/arch/ppc/kernel/entry.S 2004-02-04 20:52:52.903121239 +0100
416 lwz r0,TASK_PTRACE(r2)
417 andi. r0,r0,PT_TRACESYS
419 diff -urN linux-2.4.24.org/arch/ppc/kernel/idle.c linux-2.4.24/arch/ppc/kernel/idle.c
420 --- linux-2.4.24.org/arch/ppc/kernel/idle.c 2004-02-04 20:51:16.300214151 +0100
421 +++ linux-2.4.24/arch/ppc/kernel/idle.c 2004-02-04 20:52:52.908120200 +0100
425 /* endless loop with no priority at all */
426 - current->nice = 20;
427 - current->counter = -100;
432 if (!do_power_save) {
433 diff -urN linux-2.4.24.org/arch/ppc/kernel/mk_defs.c linux-2.4.24/arch/ppc/kernel/mk_defs.c
434 --- linux-2.4.24.org/arch/ppc/kernel/mk_defs.c 2004-02-04 20:51:14.150661249 +0100
435 +++ linux-2.4.24/arch/ppc/kernel/mk_defs.c 2004-02-04 20:52:52.913119160 +0100
437 /*DEFINE(KERNELBASE, KERNELBASE);*/
438 DEFINE(STATE, offsetof(struct task_struct, state));
439 DEFINE(NEXT_TASK, offsetof(struct task_struct, next_task));
440 - DEFINE(COUNTER, offsetof(struct task_struct, counter));
441 - DEFINE(PROCESSOR, offsetof(struct task_struct, processor));
442 + DEFINE(COUNTER, offsetof(struct task_struct, time_slice));
443 + DEFINE(PROCESSOR, offsetof(struct task_struct, cpu));
444 DEFINE(SIGPENDING, offsetof(struct task_struct, sigpending));
445 DEFINE(THREAD, offsetof(struct task_struct, thread));
446 DEFINE(MM, offsetof(struct task_struct, mm));
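
mk_defs.c is the usual offsets-generator trick: C code computes offsetof() for the
fields the assembly needs, and the printed output becomes a header that entry.S
includes, so renaming counter to time_slice and processor to cpu only has to
happen in one place. A toy version of the pattern, with a stand-in struct:

#include <stdio.h>
#include <stddef.h>

/* Stand-in for task_struct; field names follow the patch. */
struct task {
        long state;
        unsigned int time_slice;
        unsigned int cpu;
};

#define DEFINE(sym, val) \
        printf("#define %-12s %lu\n", #sym, (unsigned long)(val))

int main(void)
{
        /* Output is a generated header the assembly code can include. */
        DEFINE(STATE,     offsetof(struct task, state));
        DEFINE(COUNTER,   offsetof(struct task, time_slice));
        DEFINE(PROCESSOR, offsetof(struct task, cpu));
        return 0;
}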
447 diff -urN linux-2.4.24.org/arch/ppc/kernel/process.c linux-2.4.24/arch/ppc/kernel/process.c
448 --- linux-2.4.24.org/arch/ppc/kernel/process.c 2004-02-04 20:51:14.062679549 +0100
449 +++ linux-2.4.24/arch/ppc/kernel/process.c 2004-02-04 20:52:52.917118328 +0100
454 - printk(" CPU: %d", current->processor);
455 + printk(" CPU: %d", current->cpu);
456 #endif /* CONFIG_SMP */
459 diff -urN linux-2.4.24.org/arch/ppc/kernel/smp.c linux-2.4.24/arch/ppc/kernel/smp.c
460 --- linux-2.4.24.org/arch/ppc/kernel/smp.c 2004-02-04 20:51:15.993277992 +0100
461 +++ linux-2.4.24/arch/ppc/kernel/smp.c 2004-02-04 20:52:52.923117080 +0100
463 unsigned long cpu_online_map;
464 int smp_hw_index[NR_CPUS];
465 static struct smp_ops_t *smp_ops;
466 +unsigned long cache_decay_ticks = HZ/100;
468 /* all cpu mappings are 1-1 -- Cort */
469 volatile unsigned long cpu_callin_map[NR_CPUS];
471 * cpu 0, the master -- Cort
473 cpu_callin_map[0] = 1;
474 - current->processor = 0;
479 for (i = 0; i < NR_CPUS; i++) {
482 p = init_task.prev_task;
484 panic("No idle task for CPU %d", i);
485 - del_from_runqueue(p);
491 - p->cpus_runnable = 1 << i; /* we schedule the first task manually */
497 void __init smp_callin(void)
499 - int cpu = current->processor;
500 + int cpu = current->cpu;
502 smp_store_cpu_info(cpu);
503 smp_ops->setup_cpu(cpu);
504 diff -urN linux-2.4.24.org/arch/ppc/lib/dec_and_lock.c linux-2.4.24/arch/ppc/lib/dec_and_lock.c
505 --- linux-2.4.24.org/arch/ppc/lib/dec_and_lock.c 2004-02-04 20:51:18.406775995 +0100
506 +++ linux-2.4.24/arch/ppc/lib/dec_and_lock.c 2004-02-04 20:52:52.927116249 +0100
508 #include <linux/module.h>
509 +#include <linux/sched.h>
510 #include <linux/spinlock.h>
511 #include <asm/atomic.h>
512 #include <asm/system.h>
513 diff -urN linux-2.4.24.org/arch/ppc/mm/init.c linux-2.4.24/arch/ppc/mm/init.c
514 --- linux-2.4.24.org/arch/ppc/mm/init.c 2004-02-04 20:51:13.814731121 +0100
515 +++ linux-2.4.24/arch/ppc/mm/init.c 2004-02-04 20:52:52.931115417 +0100
520 - printk("%3d ", p->processor);
521 - if ( (p->processor != NO_PROC_ID) &&
522 - (p == current_set[p->processor]) )
523 + printk("%3d ", p->cpu);
524 + if ( (p->cpu != NO_PROC_ID) &&
525 + (p == current_set[p->cpu]) )
529 diff -urN linux-2.4.24.org/arch/ppc64/kernel/entry.S linux-2.4.24/arch/ppc64/kernel/entry.S
530 --- linux-2.4.24.org/arch/ppc64/kernel/entry.S 2004-02-04 20:50:43.056128805 +0100
531 +++ linux-2.4.24/arch/ppc64/kernel/entry.S 2004-02-04 20:53:40.136297052 +0100
535 _GLOBAL(ret_from_fork)
539 ld r4,PACACURRENT(r13)
540 ld r0,TASK_PTRACE(r4)
541 andi. r0,r0,PT_TRACESYS
542 diff -urN linux-2.4.24.org/arch/ppc64/kernel/idle.c linux-2.4.24/arch/ppc64/kernel/idle.c
543 --- linux-2.4.24.org/arch/ppc64/kernel/idle.c 2004-02-04 20:50:43.329072034 +0100
544 +++ linux-2.4.24/arch/ppc64/kernel/idle.c 2004-02-04 20:55:09.907625341 +0100
548 /* endless loop with no priority at all */
549 - current->nice = 20;
550 - current->counter = -100;
553 /* ensure iSeries run light will be out when idle */
554 current->thread.flags &= ~PPC_FLAG_RUN_LIGHT;
562 diff -urN linux-2.4.24.org/arch/ppc64/kernel/process.c linux-2.4.24/arch/ppc64/kernel/process.c
563 --- linux-2.4.24.org/arch/ppc64/kernel/process.c 2004-02-04 20:50:42.774187448 +0100
564 +++ linux-2.4.24/arch/ppc64/kernel/process.c 2004-02-04 20:52:52.986103980 +0100
566 #ifdef SHOW_TASK_SWITCHES
567 printk("%s/%d -> %s/%d NIP %08lx cpu %d root %x/%x\n",
568 prev->comm,prev->pid,
569 - new->comm,new->pid,new->thread.regs->nip,new->processor,
570 + new->comm,new->pid,new->thread.regs->nip,new->cpu,
571 new->fs->root,prev->fs->root);
574 diff -urN linux-2.4.24.org/arch/ppc64/kernel/smp.c linux-2.4.24/arch/ppc64/kernel/smp.c
575 --- linux-2.4.24.org/arch/ppc64/kernel/smp.c 2004-02-04 20:50:43.176103851 +0100
576 +++ linux-2.4.24/arch/ppc64/kernel/smp.c 2004-02-04 20:52:52.990103148 +0100
578 extern atomic_t ipi_sent;
579 spinlock_t kernel_flag __cacheline_aligned = SPIN_LOCK_UNLOCKED;
580 cycles_t cacheflush_time;
581 +unsigned long cache_decay_ticks = HZ/100;
582 static int max_cpus __initdata = NR_CPUS;
584 unsigned long cpu_online_map;
586 * cpu 0, the master -- Cort
588 cpu_callin_map[0] = 1;
589 - current->processor = 0;
594 for (i = 0; i < NR_CPUS; i++) {
595 paca[i].prof_counter = 1;
598 PPCDBG(PPCDBG_SMP,"\tProcessor %d, task = 0x%lx\n", i, p);
600 - del_from_runqueue(p);
606 - p->cpus_runnable = 1 << i; /* we schedule the first task manually */
607 current_set[i].task = p;
608 sp = ((unsigned long)p) + sizeof(union task_union)
609 - STACK_FRAME_OVERHEAD;
612 void __init smp_callin(void)
614 - int cpu = current->processor;
615 + int cpu = current->cpu;
617 smp_store_cpu_info(cpu);
618 set_dec(paca[cpu].default_decr);
621 ppc_md.smp_setup_cpu(cpu);
625 set_bit(smp_processor_id(), &cpu_online_map);
627 while(!smp_commenced) {
632 - cpu = current->processor;
633 + cpu = current->cpu;
634 atomic_inc(&init_mm.mm_count);
635 current->active_mm = &init_mm;
637 diff -urN linux-2.4.24.org/arch/s390/kernel/process.c linux-2.4.24/arch/s390/kernel/process.c
638 --- linux-2.4.24.org/arch/s390/kernel/process.c 2004-02-04 20:51:56.088938275 +0100
639 +++ linux-2.4.24/arch/s390/kernel/process.c 2004-02-04 20:52:52.994102316 +0100
642 /* endless idle loop with no priority at all */
644 - current->nice = 20;
645 - current->counter = -100;
649 if (current->need_resched) {
650 diff -urN linux-2.4.24.org/arch/s390x/kernel/process.c linux-2.4.24/arch/s390x/kernel/process.c
651 --- linux-2.4.24.org/arch/s390x/kernel/process.c 2004-02-04 20:52:03.781338295 +0100
652 +++ linux-2.4.24/arch/s390x/kernel/process.c 2004-02-04 20:52:52.997101692 +0100
655 /* endless idle loop with no priority at all */
657 - current->nice = 20;
658 - current->counter = -100;
662 if (current->need_resched) {
663 diff -urN linux-2.4.24.org/arch/sh/kernel/process.c linux-2.4.24/arch/sh/kernel/process.c
664 --- linux-2.4.24.org/arch/sh/kernel/process.c 2004-02-04 20:51:43.820490054 +0100
665 +++ linux-2.4.24/arch/sh/kernel/process.c 2004-02-04 20:52:53.000101068 +0100
668 /* endless idle loop with no priority at all */
670 - current->nice = 20;
671 - current->counter = -100;
675 diff -urN linux-2.4.24.org/arch/sparc/kernel/entry.S linux-2.4.24/arch/sparc/kernel/entry.S
676 --- linux-2.4.24.org/arch/sparc/kernel/entry.S 2004-02-04 20:50:51.877294031 +0100
677 +++ linux-2.4.24/arch/sparc/kernel/entry.S 2004-02-04 20:52:53.005100028 +0100
678 @@ -1471,7 +1471,9 @@
680 .globl C_LABEL(ret_from_fork)
681 C_LABEL(ret_from_fork):
686 b C_LABEL(ret_sys_call)
687 ld [%sp + STACKFRAME_SZ + PT_I0], %o0
688 diff -urN linux-2.4.24.org/arch/sparc/kernel/process.c linux-2.4.24/arch/sparc/kernel/process.c
689 --- linux-2.4.24.org/arch/sparc/kernel/process.c 2004-02-04 20:50:51.550362032 +0100
690 +++ linux-2.4.24/arch/sparc/kernel/process.c 2004-02-04 20:52:53.009099197 +0100
694 /* endless idle loop with no priority at all */
695 - current->nice = 20;
696 - current->counter = -100;
700 if (ARCH_SUN4C_SUN4) {
704 /* endless idle loop with no priority at all */
705 - current->nice = 20;
706 - current->counter = -100;
710 if(current->need_resched) {
711 diff -urN linux-2.4.24.org/arch/sparc/kernel/smp.c linux-2.4.24/arch/sparc/kernel/smp.c
712 --- linux-2.4.24.org/arch/sparc/kernel/smp.c 2004-02-04 20:50:51.522367854 +0100
713 +++ linux-2.4.24/arch/sparc/kernel/smp.c 2004-02-04 20:52:53.013098365 +0100
715 volatile int __cpu_number_map[NR_CPUS];
716 volatile int __cpu_logical_map[NR_CPUS];
717 cycles_t cacheflush_time = 0; /* XXX */
718 +unsigned long cache_decay_ticks = HZ/100; /* XXX */
720 /* The only guaranteed locking primitive available on all Sparc
721 * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
722 diff -urN linux-2.4.24.org/arch/sparc/kernel/sun4d_smp.c linux-2.4.24/arch/sparc/kernel/sun4d_smp.c
723 --- linux-2.4.24.org/arch/sparc/kernel/sun4d_smp.c 2004-02-04 20:50:51.254423586 +0100
724 +++ linux-2.4.24/arch/sparc/kernel/sun4d_smp.c 2004-02-04 20:52:53.027095454 +0100
726 * the SMP initialization the master will be just allowed
727 * to call the scheduler code.
731 /* Get our local ticker going. */
732 smp_setup_percpu_timer();
734 while((unsigned long)current_set[cpuid] < PAGE_OFFSET)
737 - while(current_set[cpuid]->processor != cpuid)
738 + while(current_set[cpuid]->cpu != cpuid)
741 /* Fix idle thread fields. */
744 __cpu_number_map[boot_cpu_id] = 0;
745 __cpu_logical_map[0] = boot_cpu_id;
746 - current->processor = boot_cpu_id;
747 smp_store_cpu_info(boot_cpu_id);
748 smp_setup_percpu_timer();
750 local_flush_cache_all();
751 if(linux_num_cpus == 1)
752 return; /* Not an MP box. */
753 @@ -222,14 +219,10 @@
756 p = init_task.prev_task;
760 - p->cpus_runnable = 1 << i; /* we schedule the first task manually */
764 - del_from_runqueue(p);
768 for (no = 0; no < linux_num_cpus; no++)
769 diff -urN linux-2.4.24.org/arch/sparc/kernel/sun4m_smp.c linux-2.4.24/arch/sparc/kernel/sun4m_smp.c
770 --- linux-2.4.24.org/arch/sparc/kernel/sun4m_smp.c 2004-02-04 20:50:52.194228110 +0100
771 +++ linux-2.4.24/arch/sparc/kernel/sun4m_smp.c 2004-02-04 20:52:53.030094830 +0100
773 * the SMP initialization the master will be just allowed
774 * to call the scheduler code.
778 /* Allow master to continue. */
779 swap((unsigned long *)&cpu_callin_map[cpuid], 1);
780 @@ -170,12 +169,10 @@
781 mid_xlate[boot_cpu_id] = (linux_cpus[boot_cpu_id].mid & ~8);
782 __cpu_number_map[boot_cpu_id] = 0;
783 __cpu_logical_map[0] = boot_cpu_id;
784 - current->processor = boot_cpu_id;
786 smp_store_cpu_info(boot_cpu_id);
787 set_irq_udt(mid_xlate[boot_cpu_id]);
788 smp_setup_percpu_timer();
790 local_flush_cache_all();
791 if(linux_num_cpus == 1)
792 return; /* Not an MP box. */
793 @@ -195,14 +192,10 @@
796 p = init_task.prev_task;
800 - p->cpus_runnable = 1 << i; /* we schedule the first task manually */
804 - del_from_runqueue(p);
808 /* See trampoline.S for details... */
809 diff -urN linux-2.4.24.org/arch/sparc64/kernel/entry.S linux-2.4.24/arch/sparc64/kernel/entry.S
810 --- linux-2.4.24.org/arch/sparc64/kernel/entry.S 2004-02-04 20:51:29.076556726 +0100
811 +++ linux-2.4.24/arch/sparc64/kernel/entry.S 2004-02-04 20:52:53.039092958 +0100
812 @@ -1627,7 +1627,9 @@
814 andn %o7, SPARC_FLAG_NEWCHILD, %l0
815 mov %g5, %o0 /* 'prev' */
819 stb %l0, [%g6 + AOFF_task_thread + AOFF_thread_flags]
820 andcc %l0, SPARC_FLAG_PERFCTR, %g0
822 diff -urN linux-2.4.24.org/arch/sparc64/kernel/irq.c linux-2.4.24/arch/sparc64/kernel/irq.c
823 --- linux-2.4.24.org/arch/sparc64/kernel/irq.c 2004-02-04 20:51:28.993573986 +0100
824 +++ linux-2.4.24/arch/sparc64/kernel/irq.c 2004-02-04 20:52:53.044091918 +0100
826 tid = ((tid & UPA_CONFIG_MID) << 9);
829 - tid = (starfire_translate(imap, current->processor) << 26);
830 + tid = (starfire_translate(imap, current->cpu) << 26);
834 diff -urN linux-2.4.24.org/arch/sparc64/kernel/process.c linux-2.4.24/arch/sparc64/kernel/process.c
835 --- linux-2.4.24.org/arch/sparc64/kernel/process.c 2004-02-04 20:51:29.998364993 +0100
836 +++ linux-2.4.24/arch/sparc64/kernel/process.c 2004-02-04 20:52:53.049090879 +0100
840 /* endless idle loop with no priority at all */
841 - current->nice = 20;
842 - current->counter = -100;
846 /* If current->need_resched is zero we should really
849 * the idle loop on a UltraMultiPenguin...
851 -#define idle_me_harder() (cpu_data[current->processor].idle_volume += 1)
852 -#define unidle_me() (cpu_data[current->processor].idle_volume = 0)
853 +#define idle_me_harder() (cpu_data[current->cpu].idle_volume += 1)
854 +#define unidle_me() (cpu_data[current->cpu].idle_volume = 0)
857 - current->nice = 20;
858 - current->counter = -100;
862 if (current->need_resched != 0) {
864 diff -urN linux-2.4.24.org/arch/sparc64/kernel/rtrap.S linux-2.4.24/arch/sparc64/kernel/rtrap.S
865 --- linux-2.4.24.org/arch/sparc64/kernel/rtrap.S 2004-02-04 20:51:29.910383293 +0100
866 +++ linux-2.4.24/arch/sparc64/kernel/rtrap.S 2004-02-04 20:52:53.053090047 +0100
869 .globl rtrap_clr_l6, rtrap, irqsz_patchme, rtrap_xcall
870 rtrap_clr_l6: clr %l6
871 -rtrap: lduw [%g6 + AOFF_task_processor], %l0
872 +rtrap: lduw [%g6 + AOFF_task_cpu], %l0
873 sethi %hi(irq_stat), %l2 ! &softirq_active
874 or %l2, %lo(irq_stat), %l2 ! &softirq_active
875 irqsz_patchme: sllx %l0, 0, %l0
876 diff -urN linux-2.4.24.org/arch/sparc64/kernel/smp.c linux-2.4.24/arch/sparc64/kernel/smp.c
877 --- linux-2.4.24.org/arch/sparc64/kernel/smp.c 2004-02-04 20:51:28.749624726 +0100
878 +++ linux-2.4.24/arch/sparc64/kernel/smp.c 2004-02-04 20:52:53.068086928 +0100
880 printk("Entering UltraSMPenguin Mode...\n");
882 smp_store_cpu_info(boot_cpu_id);
884 + smp_tune_scheduling();
886 if (linux_num_cpus == 1)
891 p = init_task.prev_task;
892 - init_tasks[cpucount] = p;
895 - p->cpus_runnable = 1UL << i; /* we schedule the first task manually */
897 - del_from_runqueue(p);
902 @@ -1214,10 +1210,96 @@
903 __cpu_number_map[boot_cpu_id] = 0;
904 prom_cpu_nodes[boot_cpu_id] = linux_cpus[0].prom_node;
905 __cpu_logical_map[0] = boot_cpu_id;
906 - current->processor = boot_cpu_id;
907 prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
910 +cycles_t cacheflush_time;
911 +unsigned long cache_decay_ticks;
913 +extern unsigned long cheetah_tune_scheduling(void);
915 +static void __init smp_tune_scheduling(void)
917 + unsigned long orig_flush_base, flush_base, flags, *p;
918 + unsigned int ecache_size, order;
919 + cycles_t tick1, tick2, raw;
921 + /* Approximate heuristic for SMP scheduling. It is an
922 + * estimation of the time it takes to flush the L2 cache
923 + * on the local processor.
925 + * The ia32 chooses to use the L1 cache flush time instead,
926 + * and I consider this complete nonsense. The Ultra can service
927 + * a miss to the L1 with a hit to the L2 in 7 or 8 cycles, and
928 + * L2 misses are what create extra bus traffic (ie. the "cost"
929 + * of moving a process from one cpu to another).
931 + printk("SMP: Calibrating ecache flush... ");
932 + if (tlb_type == cheetah || tlb_type == cheetah_plus) {
933 + cacheflush_time = cheetah_tune_scheduling();
937 + ecache_size = prom_getintdefault(linux_cpus[0].prom_node,
938 + "ecache-size", (512 * 1024));
939 + if (ecache_size > (4 * 1024 * 1024))
940 + ecache_size = (4 * 1024 * 1024);
941 + orig_flush_base = flush_base =
942 + __get_free_pages(GFP_KERNEL, order = get_order(ecache_size));
944 + if (flush_base != 0UL) {
945 + local_irq_save(flags);
947 + /* Scan twice the size once just to get the TLB entries
948 + * loaded and make sure the second scan measures pure misses.
950 + for (p = (unsigned long *)flush_base;
951 + ((unsigned long)p) < (flush_base + (ecache_size<<1));
952 + p += (64 / sizeof(unsigned long)))
953 + *((volatile unsigned long *)p);
955 + tick1 = tick_ops->get_tick();
957 + __asm__ __volatile__("1:\n\t"
958 + "ldx [%0 + 0x000], %%g1\n\t"
959 + "ldx [%0 + 0x040], %%g2\n\t"
960 + "ldx [%0 + 0x080], %%g3\n\t"
961 + "ldx [%0 + 0x0c0], %%g5\n\t"
962 + "add %0, 0x100, %0\n\t"
964 + "bne,pt %%xcc, 1b\n\t"
966 + : "=&r" (flush_base)
967 + : "0" (flush_base),
968 + "r" (flush_base + ecache_size)
969 + : "g1", "g2", "g3", "g5");
971 + tick2 = tick_ops->get_tick();
973 + local_irq_restore(flags);
975 + raw = (tick2 - tick1);
977 + /* Dampen it a little, considering two processes
978 + * sharing the cache and fitting.
980 + cacheflush_time = (raw - (raw >> 2));
982 + free_pages(orig_flush_base, order);
984 + cacheflush_time = ((ecache_size << 2) +
985 + (ecache_size << 1));
988 + /* Convert ticks/sticks to jiffies. */
989 + cache_decay_ticks = cacheflush_time / timer_tick_offset;
990 + if (cache_decay_ticks < 1)
991 + cache_decay_ticks = 1;
993 + printk("Using heuristic of %ld cycles, %ld ticks.\n",
994 + cacheflush_time, cache_decay_ticks);
997 static inline unsigned long find_flush_base(unsigned long size)
999 struct page *p = mem_map;
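
The calibration above walks a region twice the E-cache size once to warm the TLB,
then times a second scan that should consist of pure cache misses, and finally
damps the result to 75% (raw - raw/4) to model two processes sharing the cache.
The same measurement can be sketched in user space with clock_gettime() standing
in for the %tick register (the kernel version is more accurate because it runs
with interrupts disabled):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define ECACHE_SIZE     (512 * 1024)    /* assumed L2 size */
#define LINE            64

static long long now_ns(void)
{
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
        volatile char *buf = malloc(2 * ECACHE_SIZE);
        long long t1, t2, raw;
        size_t i;

        /* Pass 1: prime the mappings, and displace the timed region. */
        for (i = 0; i < 2 * ECACHE_SIZE; i += LINE)
                (void)buf[i];

        t1 = now_ns();
        for (i = 0; i < ECACHE_SIZE; i += LINE) /* timed: pure misses */
                (void)buf[i];
        t2 = now_ns();

        raw = t2 - t1;
        /* Dampen it, considering two processes sharing the cache. */
        printf("cacheflush_time estimate: %lld ns\n", raw - (raw >> 2));
        free((void *)buf);
        return 0;
}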
1000 diff -urN linux-2.4.24.org/arch/sparc64/kernel/trampoline.S linux-2.4.24/arch/sparc64/kernel/trampoline.S
1001 --- linux-2.4.24.org/arch/sparc64/kernel/trampoline.S 2004-02-04 20:51:29.425484150 +0100
1002 +++ linux-2.4.24/arch/sparc64/kernel/trampoline.S 2004-02-04 20:52:53.073085888 +0100
1004 wrpr %o1, PSTATE_IG, %pstate
1006 /* Get our UPA MID. */
1007 - lduw [%o2 + AOFF_task_processor], %g1
1008 + lduw [%o2 + AOFF_task_cpu], %g1
1009 sethi %hi(cpu_data), %g5
1010 or %g5, %lo(cpu_data), %g5
1012 diff -urN linux-2.4.24.org/arch/sparc64/kernel/traps.c linux-2.4.24/arch/sparc64/kernel/traps.c
1013 --- linux-2.4.24.org/arch/sparc64/kernel/traps.c 2004-02-04 20:51:28.672640738 +0100
1014 +++ linux-2.4.24/arch/sparc64/kernel/traps.c 2004-02-04 20:52:53.078084848 +0100
1016 #include <linux/smp.h>
1017 #include <linux/smp_lock.h>
1018 #include <linux/mm.h>
1019 +#include <linux/init.h>
1021 #include <asm/delay.h>
1022 #include <asm/system.h>
1023 @@ -755,6 +756,48 @@
1024 "i" (ASI_PHYS_USE_EC));
1028 +unsigned long __init cheetah_tune_scheduling(void)
1030 + unsigned long tick1, tick2, raw;
1031 + unsigned long flush_base = ecache_flush_physbase;
1032 + unsigned long flush_linesize = ecache_flush_linesize;
1033 + unsigned long flush_size = ecache_flush_size;
1035 + /* Run through the whole cache to guarantee the timed loop
1036 + * is really displacing cache lines.
1038 + __asm__ __volatile__("1: subcc %0, %4, %0\n\t"
1039 + " bne,pt %%xcc, 1b\n\t"
1040 + " ldxa [%2 + %0] %3, %%g0\n\t"
1041 + : "=&r" (flush_size)
1042 + : "0" (flush_size), "r" (flush_base),
1043 + "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
1045 + /* The flush area is 2 X Ecache-size, so cut this in half for
1048 + flush_base = ecache_flush_physbase;
1049 + flush_linesize = ecache_flush_linesize;
1050 + flush_size = ecache_flush_size >> 1;
1052 + __asm__ __volatile__("rd %%tick, %0" : "=r" (tick1));
1054 + __asm__ __volatile__("1: subcc %0, %4, %0\n\t"
1055 + " bne,pt %%xcc, 1b\n\t"
1056 + " ldxa [%2 + %0] %3, %%g0\n\t"
1057 + : "=&r" (flush_size)
1058 + : "0" (flush_size), "r" (flush_base),
1059 + "i" (ASI_PHYS_USE_EC), "r" (flush_linesize));
1061 + __asm__ __volatile__("rd %%tick, %0" : "=r" (tick2));
1063 + raw = (tick2 - tick1);
1065 + return (raw - (raw >> 2));
1069 /* Unfortunately, the diagnostic access to the I-cache tags we need to
1070 * use to clear the thing interferes with I-cache coherency transactions.
1072 diff -urN linux-2.4.24.org/Documentation/sched-coding.txt linux-2.4.24/Documentation/sched-coding.txt
1073 --- linux-2.4.24.org/Documentation/sched-coding.txt 1970-01-01 01:00:00.000000000 +0100
1074 +++ linux-2.4.24/Documentation/sched-coding.txt 2004-02-04 20:52:53.082084016 +0100
1076 + Reference for various scheduler-related methods in the O(1) scheduler
1077 + Robert Love <rml@tech9.net>, MontaVista Software
1080 +Note most of these methods are local to kernel/sched.c - this is by design.
1081 +The scheduler is meant to be self-contained and abstracted away. This document
1082 +is primarily for understanding the scheduler, not interfacing to it. Some of
1083 +the discussed interfaces, however, are general process/scheduling methods.
1084 +They are typically defined in include/linux/sched.h.
1087 +Main Scheduling Methods
1088 +-----------------------
1090 +void load_balance(runqueue_t *this_rq, int idle)
1091 + Attempts to pull tasks from one cpu to another to balance cpu usage,
1092 + if needed. This method is called explicitly if the runqueues are
1093 + imbalanced or periodically by the timer tick. Prior to calling,
1094 + the current runqueue must be locked and interrupts disabled.
1097 + The main scheduling function. Upon return, the highest priority
1098 + process will be active.
1104 +Each runqueue has its own lock, rq->lock. When multiple runqueues need
1105 +to be locked, lock acquires must be ordered by ascending &runqueue value.
1107 +A specific runqueue is locked via
1109 + task_rq_lock(task_t pid, unsigned long *flags)
1111 +which disables preemption, disables interrupts, and locks the runqueue pid is
1112 +running on. Likewise,
1114 + task_rq_unlock(task_t pid, unsigned long *flags)
1116 +unlocks the runqueue pid is running on, restores interrupts to their previous
1117 +state, and reenables preemption.
1121 + double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
1125 + double_rq_unlock(runqueue_t *rq1, runqueue_t rq2)
1127 +safely lock and unlock, respectively, the two specified runqueues. They do
1128 +not, however, disable and restore interrupts. Users are required to do so
1129 +manually before and after calls.
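
Taking nested runqueue locks in ascending address order is what keeps
double_rq_lock() deadlock-free: two CPUs locking the same pair always acquire
them in the same order, so neither can hold one lock while waiting for the other.
A minimal sketch of the idiom, with pthread mutexes standing in for runqueue
spinlocks (a hypothetical re-implementation, not the kernel's code):

#include <pthread.h>
#include <stdio.h>

typedef struct runqueue {
        pthread_mutex_t lock;
        int nr_running;
} runqueue_t;

/* Always lock the lower-addressed runqueue first. */
static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
{
        if (rq1 == rq2) {
                pthread_mutex_lock(&rq1->lock);
        } else if (rq1 < rq2) {
                pthread_mutex_lock(&rq1->lock);
                pthread_mutex_lock(&rq2->lock);
        } else {
                pthread_mutex_lock(&rq2->lock);
                pthread_mutex_lock(&rq1->lock);
        }
}

static void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2)
{
        pthread_mutex_unlock(&rq1->lock);
        if (rq1 != rq2)
                pthread_mutex_unlock(&rq2->lock);
}

int main(void)
{
        runqueue_t a = { PTHREAD_MUTEX_INITIALIZER, 0 };
        runqueue_t b = { PTHREAD_MUTEX_INITIALIZER, 0 };

        double_rq_lock(&a, &b);         /* same order whatever the arguments */
        a.nr_running++; b.nr_running--; /* e.g. migrate one task */
        double_rq_unlock(&a, &b);
        printf("ok\n");
        return 0;
}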
1136 + The maximum priority of the system, stored in the task as task->prio.
1137 + Lower priorities are higher. Normal (non-RT) priorities range from
1138 + MAX_RT_PRIO to (MAX_PRIO - 1).
1140 + The maximum real-time priority of the system. Valid RT priorities
1141 + range from 0 to (MAX_RT_PRIO - 1).
1143 + The maximum real-time priority that is exported to user-space. Should
1144 + always be equal to or less than MAX_RT_PRIO. Setting it less allows
1145 + kernel threads to have higher priorities than any user-space task.
1148 + Respectively, the minimum and maximum timeslices (quanta) of a process.
1154 + The main per-CPU runqueue data structure.
1156 + The main per-process data structure.
1163 + Returns the runqueue of the specified cpu.
1165 + Returns the runqueue of the current cpu.
1167 + Returns the runqueue which holds the specified pid.
1169 + Returns the task currently running on the given cpu.
1171 + Returns true if pid is real-time, false if not.
1174 +Process Control Methods
1175 +-----------------------
1177 +void set_user_nice(task_t *p, long nice)
1178 + Sets the "nice" value of task p to the given value.
1179 +int setscheduler(pid_t pid, int policy, struct sched_param *param)
1180 + Sets the scheduling policy and parameters for the given pid.
1181 +void set_cpus_allowed(task_t *p, unsigned long new_mask)
1182 + Sets a given task's CPU affinity and migrates it to a proper cpu.
1183 + Callers must have a valid reference to the task and must ensure the
1184 + task does not exit prematurely. No locks can be held during the call.
1185 +set_task_state(tsk, state_value)
1186 + Sets the given task's state to the given value.
1187 +set_current_state(state_value)
1188 + Sets the current task's state to the given value.
1189 +void set_tsk_need_resched(struct task_struct *tsk)
1190 + Sets need_resched in the given task.
1191 +void clear_tsk_need_resched(struct task_struct *tsk)
1192 + Clears need_resched in the given task.
1193 +void set_need_resched()
1194 + Sets need_resched in the current task.
1195 +void clear_need_resched()
1196 + Clears need_resched in the current task.
1198 + Returns true if need_resched is set in the current task, false
1201 + Place the current process at the end of the runqueue and call schedule.
1202 diff -urN linux-2.4.24.org/Documentation/sched-design.txt linux-2.4.24/Documentation/sched-design.txt
1203 --- linux-2.4.24.org/Documentation/sched-design.txt 1970-01-01 01:00:00.000000000 +0100
1204 +++ linux-2.4.24/Documentation/sched-design.txt 2004-02-04 20:52:53.088082769 +0100
1206 + Goals, Design and Implementation of the
1207 + new ultra-scalable O(1) scheduler
1210 + This is an edited version of an email Ingo Molnar sent to
1211 + lkml on 4 Jan 2002. It describes the goals, design, and
1212 + implementation of Ingo's new ultra-scalable O(1) scheduler.
1213 + Last Updated: 18 April 2002.
1219 +The main goal of the new scheduler is to keep all the good things we know
1220 +and love about the current Linux scheduler:
1222 + - good interactive performance even during high load: if the user
1223 + types or clicks then the system must react instantly and must execute
1224 + the user tasks smoothly, even during considerable background load.
1226 + - good scheduling/wakeup performance with 1-2 runnable processes.
1228 + - fairness: no process should stay without any timeslice for any
1229 + unreasonable amount of time. No process should get an unjustly high
1230 + amount of CPU time.
1232 + - priorities: less important tasks can be started with lower priority,
1233 + more important tasks with higher priority.
1235 + - SMP efficiency: no CPU should stay idle if there is work to do.
1237 + - SMP affinity: processes which run on one CPU should stay affine to
1238 + that CPU. Processes should not bounce between CPUs too frequently.
1240 + - plus additional scheduler features: RT scheduling, CPU binding.
1242 +and the goal is also to add a few new things:
1244 + - fully O(1) scheduling. Are you tired of the recalculation loop
1245 + blowing the L1 cache away every now and then? Do you think the goodness
1246 + loop is taking a bit too long to finish if there are lots of runnable
1247 + processes? This new scheduler takes no prisoners: wakeup(), schedule(),
1248 + the timer interrupt are all O(1) algorithms. There is no recalculation
1249 + loop. There is no goodness loop either.
1251 + - 'perfect' SMP scalability. With the new scheduler there is no 'big'
1252 + runqueue_lock anymore - it's all per-CPU runqueues and locks - two
1253 + tasks on two separate CPUs can wake up, schedule and context-switch
1254 + completely in parallel, without any interlocking. All
1255 + scheduling-relevant data is structured for maximum scalability.
1257 + - better SMP affinity. The old scheduler has a particular weakness that
1258 + causes the random bouncing of tasks between CPUs when higher
1259 + priority/interactive tasks are present; this was observed and reported by many
1260 + people. The reason is that the timeslice recalculation loop first needs
1261 + every currently running task to consume its timeslice. But when this
1262 + happens on eg. an 8-way system, then this property starves an
1263 + increasing number of CPUs from executing any process. Once the last
1264 + task that has a timeslice left has finished using up that timeslice,
1265 + the recalculation loop is triggered and other CPUs can start executing
1266 + tasks again - after having idled around for a number of timer ticks.
1267 + The more CPUs, the worse this effect.
1269 + Furthermore, this same effect causes the bouncing effect as well:
1270 + whenever there is such a 'timeslice squeeze' of the global runqueue,
1271 + idle processors start executing tasks which are not affine to that CPU.
1272 + (because the affine tasks have finished off their timeslices already.)
1274 + The new scheduler solves this problem by distributing timeslices on a
1275 + per-CPU basis, without having any global synchronization or
1278 + - batch scheduling. A significant proportion of computing-intensive tasks
1279 + benefit from batch-scheduling, where timeslices are long and processes
1280 + are roundrobin scheduled. The new scheduler does such batch-scheduling
1281 + of the lowest priority tasks - so nice +19 jobs will get
1282 + 'batch-scheduled' automatically. With this scheduler, nice +19 jobs are
1283 + in essence SCHED_IDLE, from an interactiveness point of view.
1285 + - handle extreme loads more smoothly, without breakdown and scheduling
1288 + - O(1) RT scheduling. For those RT folks who are paranoid about the
1289 + O(nr_running) property of the goodness loop and the recalculation loop.
1291 + - run fork()ed children before the parent. Andrea has pointed out the
1292 + advantages of this a few months ago, but patches for this feature
1293 + do not work with the old scheduler as well as they should,
1294 + because idle processes often steal the new child before the fork()ing
1295 + CPU gets to execute it.
1301 +at the core of the new scheduler are the following mechanisms:
1303 + - *two*, priority-ordered 'priority arrays' per CPU. There is an 'active'
1304 + array and an 'expired' array. The active array contains all tasks that
1305 + are affine to this CPU and have timeslices left. The expired array
1306 + contains all tasks which have used up their timeslices - but this array
1307 + is kept sorted as well. The active and expired arrays are not accessed
1308 + directly; they are accessed through two pointers in the per-CPU runqueue
1309 + structure. If all active tasks are used up then we 'switch' the two
1310 + pointers and from now on the ready-to-go (former-) expired array is the
1311 + active array - and the empty active array serves as the new collector
1312 + for expired tasks.
1314 + - there is a 64-bit bitmap cache for array indices. Finding the highest
1315 + priority task is thus a matter of two x86 BSFL bit-search instructions.
1317 +the split-array solution enables us to have an arbitrary number of active
1318 +and expired tasks, and the recalculation of timeslices can be done
1319 +immediately when the timeslice expires. Because the arrays are always
1320 +accessed through the pointers in the runqueue, switching the two arrays can
1321 +be done very quickly.
1323 +this is a hybrid priority-list approach coupled with roundrobin
1324 +scheduling and the array-switch method of distributing timeslices.
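
Putting the two preceding pieces together, here is a compact, runnable simulation
of the active/expired design: per-priority counters stand in for the task lists,
a bitmap caches which priorities are populated, and pick_next() degenerates to a
pointer swap when the active array drains. Sizes and names are illustrative, and
the bitmap code assumes a 64-bit long:

#include <stdio.h>
#include <string.h>

#define MAX_PRIO 140
#define BITMAP_LONGS ((MAX_PRIO + 63) / 64)

struct prio_array {
        int nr_active;
        unsigned long bitmap[BITMAP_LONGS];
        int count[MAX_PRIO];    /* stand-in for the per-priority task lists */
};

struct runqueue {
        struct prio_array *active, *expired;
        struct prio_array arrays[2];
};

static void enqueue(struct prio_array *a, int prio)
{
        a->count[prio]++;
        a->nr_active++;
        a->bitmap[prio / 64] |= 1UL << (prio % 64);
}

static int find_first_set(const unsigned long *bm)
{
        int i;
        for (i = 0; i < BITMAP_LONGS; i++)
                if (bm[i])
                        return i * 64 + __builtin_ctzl(bm[i]);
        return -1;
}

/* O(1): a bitmap search, plus a pointer swap when the active array drains. */
static int pick_next(struct runqueue *rq)
{
        struct prio_array *a = rq->active;
        int prio;

        if (!a->nr_active) {            /* the array switch */
                rq->active = rq->expired;
                rq->expired = a;
                a = rq->active;
        }
        prio = find_first_set(a->bitmap);
        if (prio < 0)
                return -1;              /* nothing runnable: idle */
        if (--a->count[prio] == 0)
                a->bitmap[prio / 64] &= ~(1UL << (prio % 64));
        a->nr_active--;
        return prio;
}

int main(void)
{
        struct runqueue rq;
        memset(&rq, 0, sizeof(rq));
        rq.active = &rq.arrays[0];
        rq.expired = &rq.arrays[1];

        enqueue(rq.active, 120);        /* an interactive task */
        enqueue(rq.expired, 139);       /* a nice +19 task, timeslice used up */

        printf("next prio: %d\n", pick_next(&rq));      /* 120 */
        printf("next prio: %d\n", pick_next(&rq));      /* switch, then 139 */
        return 0;
}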
1326 + - there is a per-task 'load estimator'.
1328 +one of the toughest things to get right is good interactive feel during
1329 +heavy system load. While playing with various scheduler variants i found
1330 +that the best interactive feel is achieved not by 'boosting' interactive
1331 +tasks, but by 'punishing' tasks that want to use more CPU time than there
1332 +is available. This method is also much easier to do in an O(1) fashion.
1334 +to establish the actual 'load' the task contributes to the system, a
1335 +complex-looking but pretty accurate method is used: there is a 4-entry
1336 +'history' ringbuffer of the task's activities during the last 4 seconds.
1337 +This ringbuffer is operated without much overhead. The entries tell the
1338 +scheduler a pretty accurate load-history of the task: has it used up more
1339 +CPU time or less during the past N seconds. [the size '4' and the interval
1340 +of 4x 1 seconds was found by lots of experimentation - this part is
1341 +flexible and can be changed in both directions.]
1343 +the penalty a task gets for generating more load than the CPU can handle
1344 +is a priority decrease - there is a maximum amount to this penalty
1345 +relative to their static priority, so even fully CPU-bound tasks will
1346 +observe each other's priorities, and will share the CPU accordingly.
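
The estimator itself is not spelled out in this email, so the following is only an
illustrative toy of the ringbuffer idea: record the CPU share consumed in each of
the last four one-second intervals, and derive a bounded priority adjustment from
the average, positive (a penalty) for hogs and negative (a bonus) for
mostly-sleeping tasks:

#include <stdio.h>

#define HISTORY         4       /* 4 x 1 second, as described above */
#define MAX_BONUS       5       /* bound on the penalty/bonus; illustrative */

struct estimator {
        int used[HISTORY];      /* CPU% consumed per past interval */
        int idx;
};

static void tick_second(struct estimator *e, int cpu_pct)
{
        e->used[e->idx] = cpu_pct;
        e->idx = (e->idx + 1) % HISTORY;        /* ringbuffer, O(1) update */
}

/* Positive = penalty for CPU hogs, negative = bonus for sleepers. */
static int prio_adjust(const struct estimator *e)
{
        int i, avg = 0;
        for (i = 0; i < HISTORY; i++)
                avg += e->used[i];
        avg /= HISTORY;
        return (avg - 50) * MAX_BONUS / 50;     /* bounded by construction */
}

int main(void)
{
        struct estimator hog = { {0}, 0 }, editor = { {0}, 0 };
        int t;

        for (t = 0; t < HISTORY; t++) {
                tick_second(&hog, 100);         /* spins continuously */
                tick_second(&editor, 5);        /* mostly waits for input */
        }
        printf("hog adjust: %+d, editor adjust: %+d\n",
               prio_adjust(&hog), prio_adjust(&editor));
        return 0;
}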
1348 +the SMP load-balancer can be extended/switched with additional parallel
1349 +computing and cache hierarchy concepts: NUMA scheduling, multi-core CPUs
1350 +can be supported easily by changing the load-balancer. Right now it's
1351 +tuned for my SMP systems.
1353 +i skipped the prev->mm == next->mm advantage - no workload i know of shows
1354 +any sensitivity to this. It can be added back by sacrificing O(1)
1355 +schedule() [the current and one-lower priority list can be searched for a
1356 +that->mm == current->mm condition], but costs a fair number of cycles
1357 +during a number of important workloads, so i wanted to avoid this as much
1360 +- the SMP idle-task startup code was still racy and the new scheduler
1361 +triggered this. So i streamlined the idle-setup code a bit. We do not call
1362 +into schedule() before all processors have started up fully and all idle
1363 +threads are in place.
1365 +- the patch also cleans up a number of aspects of sched.c - moves code
1366 +into other areas of the kernel where it's appropriate, and simplifies
1367 +certain code paths and data constructs. As a result, the new scheduler's
1368 +code is smaller than the old one.
1371 diff -urN linux-2.4.24.org/drivers/char/drm-4.0/tdfx_drv.c linux-2.4.24/drivers/char/drm-4.0/tdfx_drv.c
1372 --- linux-2.4.24.org/drivers/char/drm-4.0/tdfx_drv.c 2004-02-04 20:49:21.677055474 +0100
1373 +++ linux-2.4.24/drivers/char/drm-4.0/tdfx_drv.c 2004-02-04 20:52:53.236051992 +0100
1375 lock.context, current->pid, j,
1376 dev->lock.lock_time, jiffies);
1377 current->state = TASK_INTERRUPTIBLE;
1378 - current->policy |= SCHED_YIELD;
1379 schedule_timeout(DRM_LOCK_SLICE-j);
1380 DRM_DEBUG("jiffies=%d\n", jiffies);
1382 diff -urN linux-2.4.24.org/drivers/char/mwave/mwavedd.c linux-2.4.24/drivers/char/mwave/mwavedd.c
1383 --- linux-2.4.24.org/drivers/char/mwave/mwavedd.c 2004-02-04 20:49:18.334750669 +0100
1384 +++ linux-2.4.24/drivers/char/mwave/mwavedd.c 2004-02-04 20:52:53.321034316 +0100
1386 pDrvData->IPCs[ipcnum].bIsHere = FALSE;
1387 pDrvData->IPCs[ipcnum].bIsEnabled = TRUE;
1388 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,0)
1389 - current->nice = -20; /* boost to provide priority timing */
1391 current->priority = 0x28; /* boost to provide priority timing */
1393 diff -urN linux-2.4.24.org/drivers/char/serial_txx927.c linux-2.4.24/drivers/char/serial_txx927.c
1394 --- linux-2.4.24.org/drivers/char/serial_txx927.c 2004-02-04 20:49:11.902088655 +0100
1395 +++ linux-2.4.24/drivers/char/serial_txx927.c 2004-02-04 20:52:53.361025998 +0100
1396 @@ -1533,7 +1533,6 @@
1397 printk("cisr = %d (jiff=%lu)...", cisr, jiffies);
1399 current->state = TASK_INTERRUPTIBLE;
1400 - current->counter = 0; /* make us low-priority */
1401 schedule_timeout(char_time);
1402 if (signal_pending(current))
1404 diff -urN linux-2.4.24.org/drivers/md/md.c linux-2.4.24/drivers/md/md.c
1405 --- linux-2.4.24.org/drivers/md/md.c 2004-02-04 20:50:32.930234961 +0100
1406 +++ linux-2.4.24/drivers/md/md.c 2004-02-04 20:52:53.369024334 +0100
1407 @@ -2939,8 +2939,6 @@
1408 * bdflush, otherwise bdflush will deadlock if there are too
1409 * many dirty RAID5 blocks.
1411 - current->policy = SCHED_OTHER;
1412 - current->nice = -20;
1415 complete(thread->event);
1416 @@ -3464,11 +3462,6 @@
1417 "(but not more than %d KB/sec) for reconstruction.\n",
1418 sysctl_speed_limit_max);
1421 - * Resync has low priority.
1423 - current->nice = 19;
1425 is_mddev_idle(mddev); /* this also initializes IO event counters */
1426 for (m = 0; m < SYNC_MARKS; m++) {
1428 @@ -3546,16 +3539,13 @@
1429 currspeed = (j-mddev->resync_mark_cnt)/2/((jiffies-mddev->resync_mark)/HZ +1) +1;
1431 if (currspeed > sysctl_speed_limit_min) {
1432 - current->nice = 19;
1434 if ((currspeed > sysctl_speed_limit_max) ||
1435 !is_mddev_idle(mddev)) {
1436 current->state = TASK_INTERRUPTIBLE;
1437 md_schedule_timeout(HZ/4);
1441 - current->nice = -20;
1444 printk(KERN_INFO "md: md%d: sync done.\n",mdidx(mddev));
1446 diff -urN linux-2.4.24.org/fs/binfmt_elf.c linux-2.4.24/fs/binfmt_elf.c
1447 --- linux-2.4.24.org/fs/binfmt_elf.c 2004-02-04 20:47:14.464515701 +0100
1448 +++ linux-2.4.24/fs/binfmt_elf.c 2004-02-04 20:52:53.390019967 +0100
1449 @@ -1173,7 +1173,7 @@
1450 psinfo.pr_state = i;
1451 psinfo.pr_sname = (i < 0 || i > 5) ? '.' : "RSDZTD"[i];
1452 psinfo.pr_zomb = psinfo.pr_sname == 'Z';
1453 - psinfo.pr_nice = current->nice;
1454 + psinfo.pr_nice = task_nice(current);
1455 psinfo.pr_flag = current->flags;
1456 psinfo.pr_uid = NEW_TO_OLD_UID(current->uid);
1457 psinfo.pr_gid = NEW_TO_OLD_GID(current->gid);
1458 diff -urN linux-2.4.24.org/fs/jffs2/background.c linux-2.4.24/fs/jffs2/background.c
1459 --- linux-2.4.24.org/fs/jffs2/background.c 2004-02-04 20:47:24.029526165 +0100
1460 +++ linux-2.4.24/fs/jffs2/background.c 2004-02-04 20:52:53.418014145 +0100
1463 sprintf(current->comm, "jffs2_gcd_mtd%d", c->mtd->index);
1465 - /* FIXME in the 2.2 backport */
1466 - current->nice = 10;
1469 spin_lock_irq(&current->sigmask_lock);
1470 siginitsetinv (&current->blocked, sigmask(SIGHUP) | sigmask(SIGKILL) | sigmask(SIGSTOP) | sigmask(SIGCONT));
1471 diff -urN linux-2.4.24.org/fs/proc/array.c linux-2.4.24/fs/proc/array.c
1472 --- linux-2.4.24.org/fs/proc/array.c 2004-02-04 20:47:14.980408395 +0100
1473 +++ linux-2.4.24/fs/proc/array.c 2004-02-04 20:52:53.447008114 +0100
1476 /* scale priority and nice values from timeslices to -20..20 */
1477 /* to make it look like a "normal" Unix priority/nice value */
1478 - priority = task->counter;
1479 - priority = 20 - (priority * 10 + DEF_COUNTER / 2) / DEF_COUNTER;
1480 - nice = task->nice;
1481 + priority = task_prio(task);
1482 + nice = task_nice(task);
1484 read_lock(&tasklist_lock);
1485 ppid = task->pid ? task->p_opptr->pid : 0;
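
task_prio() and task_nice() replace arithmetic on the old counter field: under the
O(1) scheduler the nice value is recovered from the task's static priority instead
of from remaining ticks. A sketch of the mapping, assuming the scheduler's usual
layout of static priorities 100..139 for nice -20..19 above 100 real-time levels:

#include <stdio.h>

#define MAX_RT_PRIO 100         /* priorities 0..99 are real-time */

/* nice -20..19 maps linearly onto static_prio 100..139 */
static int nice_to_prio(int nice)      { return MAX_RT_PRIO + nice + 20; }
static int task_nice(int static_prio)  { return static_prio - MAX_RT_PRIO - 20; }

int main(void)
{
        int n;
        for (n = -20; n <= 19; n += 13)
                printf("nice %3d -> static_prio %3d -> nice %3d\n",
                       n, nice_to_prio(n), task_nice(nice_to_prio(n)));
        return 0;
}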
1495 diff -urN linux-2.4.24.org/fs/proc/proc_misc.c linux-2.4.24/fs/proc/proc_misc.c
1496 --- linux-2.4.24.org/fs/proc/proc_misc.c 2004-02-04 20:47:14.897425655 +0100
1497 +++ linux-2.4.24/fs/proc/proc_misc.c 2004-02-04 20:52:53.485000212 +0100
1498 @@ -109,11 +109,11 @@
1499 a = avenrun[0] + (FIXED_1/200);
1500 b = avenrun[1] + (FIXED_1/200);
1501 c = avenrun[2] + (FIXED_1/200);
1502 - len = sprintf(page,"%d.%02d %d.%02d %d.%02d %d/%d %d\n",
1503 + len = sprintf(page,"%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
1504 LOAD_INT(a), LOAD_FRAC(a),
1505 LOAD_INT(b), LOAD_FRAC(b),
1506 LOAD_INT(c), LOAD_FRAC(c),
1507 - nr_running, nr_threads, last_pid);
1508 + nr_running(), nr_threads, last_pid);
1509 return proc_calc_metrics(page, start, off, count, eof, len);
1516 - idle = init_tasks[0]->times.tms_utime + init_tasks[0]->times.tms_stime;
1517 + idle = init_task.times.tms_utime + init_task.times.tms_stime;
1519 /* The formula for the fraction parts really is ((t * 100) / HZ) % 100, but
1520 that would overflow about every five days at HZ == 100.
1521 @@ -374,10 +374,10 @@
1524 proc_sprintf(page, &off, &len,
1529 - kstat.context_swtch,
1530 + nr_context_switches(),
1531 xtime.tv_sec - jif / HZ,
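
nr_running and the context-switch count used to be globals; with per-CPU runqueues
they become accessor functions that sum a counter across runqueues, which is why
the format strings above change. A sketch of the shape of such an accessor (field
and array names are illustrative):

#include <stdio.h>

#define NR_CPUS 4

static struct runqueue {
        unsigned long nr_running;
        unsigned long nr_switches;
} runqueues[NR_CPUS];

static unsigned long nr_running(void)
{
        unsigned long sum = 0;
        int i;
        for (i = 0; i < NR_CPUS; i++)   /* the kernel iterates online CPUs */
                sum += runqueues[i].nr_running;
        return sum;
}

int main(void)
{
        runqueues[0].nr_running = 2;
        runqueues[3].nr_running = 1;
        printf("%lu\n", nr_running());  /* 3 */
        return 0;
}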
1534 diff -urN linux-2.4.24.org/fs/reiserfs/buffer2.c linux-2.4.24/fs/reiserfs/buffer2.c
1535 --- linux-2.4.24.org/fs/reiserfs/buffer2.c 2004-02-04 20:47:23.322673191 +0100
1536 +++ linux-2.4.24/fs/reiserfs/buffer2.c 2004-02-04 20:52:53.511994597 +0100
1538 struct buffer_head * reiserfs_bread (struct super_block *super, int n_block, int n_size)
1540 struct buffer_head *result;
1541 - PROC_EXP( unsigned int ctx_switches = kstat.context_swtch );
1542 + PROC_EXP( unsigned int ctx_switches = nr_context_switches() );
1544 result = bread (super -> s_dev, n_block, n_size);
1545 PROC_INFO_INC( super, breads );
1546 - PROC_EXP( if( kstat.context_swtch != ctx_switches )
1547 + PROC_EXP( if( nr_context_switches() != ctx_switches )
1548 PROC_INFO_INC( super, bread_miss ) );
1551 diff -urN linux-2.4.24.org/include/asm-alpha/bitops.h linux-2.4.24/include/asm-alpha/bitops.h
1552 --- linux-2.4.24.org/include/asm-alpha/bitops.h 2004-02-04 20:47:46.527846489 +0100
1553 +++ linux-2.4.24/include/asm-alpha/bitops.h 2004-02-04 20:52:53.537989191 +0100
1556 #include <linux/config.h>
1557 #include <linux/kernel.h>
1558 +#include <asm/compiler.h>
1561 * Copyright 1994, Linus Torvalds.
1564 __asm__ __volatile__(
1573 :"=&r" (temp), "=m" (*m)
1574 - :"Ir" (~(1UL << (nr & 31))), "m" (*m));
1575 + :"Ir" (1UL << (nr & 31)), "m" (*m));
1579 * WARNING: non atomic version.
1581 static __inline__ void
1582 -__change_bit(unsigned long nr, volatile void * addr)
1583 +__clear_bit(unsigned long nr, volatile void * addr)
1585 int *m = ((int *) addr) + (nr >> 5);
1587 - *m ^= 1 << (nr & 31);
1588 + *m &= ~(1 << (nr & 31));
1593 :"Ir" (1UL << (nr & 31)), "m" (*m));
1597 + * WARNING: non atomic version.
1599 +static __inline__ void
1600 +__change_bit(unsigned long nr, volatile void * addr)
1602 + int *m = ((int *) addr) + (nr >> 5);
1604 + *m ^= 1 << (nr & 31);
1608 test_and_set_bit(unsigned long nr, volatile void *addr)
1610 @@ -181,20 +193,6 @@
1611 return (old & mask) != 0;
1615 - * WARNING: non atomic version.
1617 -static __inline__ int
1618 -__test_and_change_bit(unsigned long nr, volatile void * addr)
1620 - unsigned long mask = 1 << (nr & 0x1f);
1621 - int *m = ((int *) addr) + (nr >> 5);
1625 - return (old & mask) != 0;
1629 test_and_change_bit(unsigned long nr, volatile void * addr)
1631 @@ -220,6 +218,20 @@
1636 + * WARNING: non atomic version.
1638 +static __inline__ int
1639 +__test_and_change_bit(unsigned long nr, volatile void * addr)
1641 + unsigned long mask = 1 << (nr & 0x1f);
1642 + int *m = ((int *) addr) + (nr >> 5);
1646 + return (old & mask) != 0;
1650 test_bit(int nr, volatile void * addr)
1652 @@ -235,12 +247,15 @@
1654 static inline unsigned long ffz_b(unsigned long x)
1656 - unsigned long sum = 0;
1657 + unsigned long sum, x1, x2, x4;
1659 x = ~x & -~x; /* set first 0 bit, clear others */
1660 - if (x & 0xF0) sum += 4;
1661 - if (x & 0xCC) sum += 2;
1662 - if (x & 0xAA) sum += 1;
1667 + sum += (x4 != 0) * 4;
1672 @@ -257,24 +272,46 @@
1674 __asm__("cmpbge %1,%2,%0" : "=r"(bits) : "r"(word), "r"(~0UL));
1676 - __asm__("extbl %1,%2,%0" : "=r"(bits) : "r"(word), "r"(qofs));
1677 + bits = __kernel_extbl(word, qofs);
1680 return qofs*8 + bofs;
1685 + * __ffs = Find First set bit in word. Undefined if no set bit exists.
1687 +static inline unsigned long __ffs(unsigned long word)
1689 +#if defined(__alpha_cix__) && defined(__alpha_fix__)
1690 + /* Whee. EV67 can calculate it directly. */
1691 + unsigned long result;
1692 + __asm__("cttz %1,%0" : "=r"(result) : "r"(word));
1695 + unsigned long bits, qofs, bofs;
1697 + __asm__("cmpbge $31,%1,%0" : "=r"(bits) : "r"(word));
1698 + qofs = ffz_b(bits);
1699 + bits = __kernel_extbl(word, qofs);
1700 + bofs = ffz_b(~bits);
1702 + return qofs*8 + bofs;
1709 * ffs: find first bit set. This is defined the same way as
1710 * the libc and compiler builtin ffs routines, therefore
1711 - * differs in spirit from the above ffz (man ffs).
1712 + * differs in spirit from the above __ffs.
1715 static inline int ffs(int word)
1717 - int result = ffz(~word);
1718 + int result = __ffs(word);
1719 return word ? result+1 : 0;
1722 @@ -316,6 +353,14 @@
1723 #define hweight16(x) hweight64((x) & 0xfffful)
1724 #define hweight8(x) hweight64((x) & 0xfful)
1726 +static inline unsigned long hweight64(unsigned long w)
1728 + unsigned long result;
1729 + for (result = 0; w ; w >>= 1)
1730 + result += (w & 1);
1734 #define hweight32(x) generic_hweight32(x)
1735 #define hweight16(x) generic_hweight16(x)
1736 #define hweight8(x) generic_hweight8(x)
1737 @@ -365,13 +410,77 @@
1741 - * The optimizer actually does good code for this case..
1742 + * Find next one bit in a bitmap reasonably efficiently.
1744 +static inline unsigned long
1745 +find_next_bit(void * addr, unsigned long size, unsigned long offset)
1747 + unsigned long * p = ((unsigned long *) addr) + (offset >> 6);
1748 + unsigned long result = offset & ~63UL;
1749 + unsigned long tmp;
1751 + if (offset >= size)
1757 + tmp &= ~0UL << offset;
1761 + goto found_middle;
1765 + while (size & ~63UL) {
1766 + if ((tmp = *(p++)))
1767 + goto found_middle;
1775 + tmp &= ~0UL >> (64 - size);
1777 + return result + size;
1779 + return result + __ffs(tmp);
1783 + * The optimizer actually does good code for this case.
1785 #define find_first_zero_bit(addr, size) \
1786 find_next_zero_bit((addr), (size), 0)
1787 +#define find_first_bit(addr, size) \
1788 + find_next_bit((addr), (size), 0)
1793 + * Every architecture must define this function. It's the fastest
1794 + * way of searching a 140-bit bitmap where the first 100 bits are
1795 + * unlikely to be set. It's guaranteed that at least one of the 140
1798 +static inline unsigned long
1799 +sched_find_first_bit(unsigned long b[3])
1801 + unsigned long b0 = b[0], b1 = b[1], b2 = b[2];
1802 + unsigned long ofs;
1804 + ofs = (b1 ? 64 : 128);
1805 + b1 = (b1 ? b1 : b2);
1806 + ofs = (b0 ? 0 : ofs);
1807 + b0 = (b0 ? b0 : b1);
1809 + return __ffs(b0) + ofs;
1813 #define ext2_set_bit __test_and_set_bit
1814 #define ext2_clear_bit __test_and_clear_bit
1815 #define ext2_test_bit test_bit
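
sched_find_first_bit() above is the O(1) scheduler's hot-path lookup: three
conditional moves select the first non-zero word of the 140-bit priority bitmap,
then __ffs() locates the bit within it. The same logic can be demonstrated
portably, with gcc's __builtin_ctzl standing in for the alpha cttz (assumes a
64-bit long):

#include <stdio.h>

/* Portable stand-in for the alpha __ffs (cttz). */
static unsigned long __ffs(unsigned long word)
{
        return __builtin_ctzl(word);
}

static unsigned long sched_find_first_bit(unsigned long b[3])
{
        unsigned long b0 = b[0], b1 = b[1], b2 = b[2];
        unsigned long ofs;

        ofs = (b1 ? 64 : 128);
        b1 = (b1 ? b1 : b2);
        ofs = (b0 ? 0 : ofs);
        b0 = (b0 ? b0 : b1);

        return __ffs(b0) + ofs;
}

int main(void)
{
        /* 140-bit bitmap: only priority 121 is set (word 1, bit 57). */
        unsigned long b[3] = { 0, 1UL << 57, 0 };
        printf("first set bit: %lu\n", sched_find_first_bit(b));        /* 121 */
        return 0;
}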
1816 diff -urN linux-2.4.24.org/include/asm-alpha/smp.h linux-2.4.24/include/asm-alpha/smp.h
1817 --- linux-2.4.24.org/include/asm-alpha/smp.h 2004-02-04 20:47:46.648821326 +0100
1818 +++ linux-2.4.24/include/asm-alpha/smp.h 2004-02-04 20:52:53.540988567 +0100
1820 #define cpu_logical_map(cpu) __cpu_logical_map[cpu]
1822 #define hard_smp_processor_id() __hard_smp_processor_id()
1823 -#define smp_processor_id() (current->processor)
1824 +#define smp_processor_id() (current->cpu)
1826 extern unsigned long cpu_present_mask;
1827 #define cpu_online_map cpu_present_mask
1828 diff -urN linux-2.4.24.org/include/asm-alpha/system.h linux-2.4.24/include/asm-alpha/system.h
1829 --- linux-2.4.24.org/include/asm-alpha/system.h 2004-02-04 20:47:45.924971887 +0100
1830 +++ linux-2.4.24/include/asm-alpha/system.h 2004-02-04 20:52:53.545987527 +0100
1832 extern void halt(void) __attribute__((noreturn));
1833 #define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt))
1835 -#define prepare_to_switch() do { } while(0)
1836 #define switch_to(prev,next,last) \
1838 unsigned long pcbb; \
1839 diff -urN linux-2.4.24.org/include/asm-arm/bitops.h linux-2.4.24/include/asm-arm/bitops.h
1840 --- linux-2.4.24.org/include/asm-arm/bitops.h 2004-02-04 20:48:05.614876374 +0100
1841 +++ linux-2.4.24/include/asm-arm/bitops.h 2004-02-04 20:52:53.589978377 +0100
1843 * Copyright 1995, Russell King.
1844 * Various bits and pieces copyrights include:
1845 * Linus Torvalds (test_bit).
1846 + * Big endian support: Copyright 2001, Nicolas Pitre
1847 + * reworked by rmk.
1849 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
1851 @@ -17,81 +19,271 @@
1855 +#include <asm/system.h>
1857 #define smp_mb__before_clear_bit() do { } while (0)
1858 #define smp_mb__after_clear_bit() do { } while (0)
1861 - * Function prototypes to keep gcc -Wall happy.
1862 + * These functions are the basis of our bit ops.
1863 + * First, the atomic bitops.
1865 + * The endian issue for these functions is handled by the macros below.
1867 -extern void set_bit(int nr, volatile void * addr);
1868 +static inline void
1869 +____atomic_set_bit_mask(unsigned int mask, volatile unsigned char *p)
1870 +{
1871 + unsigned long flags;
1872 +
1873 + local_irq_save(flags);
1874 + *p |= mask;
1875 + local_irq_restore(flags);
1876 +}
1878 +static inline void
1879 +____atomic_clear_bit_mask(unsigned int mask, volatile unsigned char *p)
1880 +{
1881 + unsigned long flags;
1882 +
1883 + local_irq_save(flags);
1884 + *p &= ~mask;
1885 + local_irq_restore(flags);
1886 +}
1888 +static inline void
1889 +____atomic_change_bit_mask(unsigned int mask, volatile unsigned char *p)
1890 +{
1891 + unsigned long flags;
1892 +
1893 + local_irq_save(flags);
1894 + *p ^= mask;
1895 + local_irq_restore(flags);
1896 +}
1898 -static inline void __set_bit(int nr, volatile void *addr)
1899 +static inline int
1900 +____atomic_test_and_set_bit_mask(unsigned int mask, volatile unsigned char *p)
1901 +{
1902 - ((unsigned char *) addr)[nr >> 3] |= (1U << (nr & 7));
1903 + unsigned long flags;
1904 + unsigned int res;
1905 +
1906 + local_irq_save(flags);
1907 + res = *p;
1908 + *p = res | mask;
1909 + local_irq_restore(flags);
1910 +
1911 + return res & mask;
1912 +}
1914 -extern void clear_bit(int nr, volatile void * addr);
1915 +static inline int
1916 +____atomic_test_and_clear_bit_mask(unsigned int mask, volatile unsigned char *p)
1917 +{
1918 + unsigned long flags;
1919 + unsigned int res;
1920 +
1921 + local_irq_save(flags);
1922 + res = *p;
1923 + *p = res & ~mask;
1924 + local_irq_restore(flags);
1925 +
1926 + return res & mask;
1927 +}
1929 -static inline void __clear_bit(int nr, volatile void *addr)
1930 +static inline int
1931 +____atomic_test_and_change_bit_mask(unsigned int mask, volatile unsigned char *p)
1932 +{
1933 - ((unsigned char *) addr)[nr >> 3] &= ~(1U << (nr & 7));
1934 + unsigned long flags;
1935 + unsigned int res;
1936 +
1937 + local_irq_save(flags);
1938 + res = *p;
1939 + *p = res ^ mask;
1940 + local_irq_restore(flags);
1941 +
1942 + return res & mask;
1943 +}
1945 -extern void change_bit(int nr, volatile void * addr);
1947 + * Now the non-atomic variants. We let the compiler handle all optimisations
1950 +static inline void ____nonatomic_set_bit(int nr, volatile void *p)
1951 +{
1952 + ((unsigned char *) p)[nr >> 3] |= (1U << (nr & 7));
1953 +}
1955 -static inline void __change_bit(int nr, volatile void *addr)
1956 +static inline void ____nonatomic_clear_bit(int nr, volatile void *p)
1957 +{
1958 - ((unsigned char *) addr)[nr >> 3] ^= (1U << (nr & 7));
1959 + ((unsigned char *) p)[nr >> 3] &= ~(1U << (nr & 7));
1960 +}
1962 -extern int test_and_set_bit(int nr, volatile void * addr);
1963 +static inline void ____nonatomic_change_bit(int nr, volatile void *p)
1964 +{
1965 + ((unsigned char *) p)[nr >> 3] ^= (1U << (nr & 7));
1966 +}
1968 -static inline int __test_and_set_bit(int nr, volatile void *addr)
1969 +static inline int ____nonatomic_test_and_set_bit(int nr, volatile void *p)
1971 unsigned int mask = 1 << (nr & 7);
1972 unsigned int oldval;
1974 - oldval = ((unsigned char *) addr)[nr >> 3];
1975 - ((unsigned char *) addr)[nr >> 3] = oldval | mask;
1976 + oldval = ((unsigned char *) p)[nr >> 3];
1977 + ((unsigned char *) p)[nr >> 3] = oldval | mask;
1978 return oldval & mask;
1981 -extern int test_and_clear_bit(int nr, volatile void * addr);
1983 -static inline int __test_and_clear_bit(int nr, volatile void *addr)
1984 +static inline int ____nonatomic_test_and_clear_bit(int nr, volatile void *p)
1986 unsigned int mask = 1 << (nr & 7);
1987 unsigned int oldval;
1989 - oldval = ((unsigned char *) addr)[nr >> 3];
1990 - ((unsigned char *) addr)[nr >> 3] = oldval & ~mask;
1991 + oldval = ((unsigned char *) p)[nr >> 3];
1992 + ((unsigned char *) p)[nr >> 3] = oldval & ~mask;
1993 return oldval & mask;
1996 -extern int test_and_change_bit(int nr, volatile void * addr);
1998 -static inline int __test_and_change_bit(int nr, volatile void *addr)
1999 +static inline int ____nonatomic_test_and_change_bit(int nr, volatile void *p)
2001 unsigned int mask = 1 << (nr & 7);
2002 unsigned int oldval;
2004 - oldval = ((unsigned char *) addr)[nr >> 3];
2005 - ((unsigned char *) addr)[nr >> 3] = oldval ^ mask;
2006 + oldval = ((unsigned char *) p)[nr >> 3];
2007 + ((unsigned char *) p)[nr >> 3] = oldval ^ mask;
2008 return oldval & mask;
2011 -extern int find_first_zero_bit(void * addr, unsigned size);
2012 -extern int find_next_zero_bit(void * addr, int size, int offset);
2015 * This routine doesn't need to be atomic.
2017 -static inline int test_bit(int nr, const void * addr)
2018 +static inline int ____test_bit(int nr, const void * p)
2020 - return (((unsigned char *) addr)[nr >> 3] >> (nr & 7)) & 1;
2021 + return (((volatile unsigned char *) p)[nr >> 3] >> (nr & 7)) & 1;
2025 + * A note about Endian-ness.
2026 + * -------------------------
2028 + * When the ARM is put into big endian mode via CR15, the processor
2029 + * merely swaps the order of bytes within words, thus:
2031 + * ------------ physical data bus bits -----------
2032 + * D31 ... D24 D23 ... D16 D15 ... D8 D7 ... D0
2033 + * little byte 3 byte 2 byte 1 byte 0
2034 + * big byte 0 byte 1 byte 2 byte 3
2036 + * This means that reading a 32-bit word at address 0 returns the same
2037 + * value irrespective of the endian mode bit.
2039 + * Peripheral devices should be connected with the data bus reversed in
2040 + * "Big Endian" mode. ARM Application Note 61 is applicable, and is
2041 + * available from http://www.arm.com/.
2043 + * The following assumes that the data bus connectivity for big endian
2044 + * mode has been followed.
2046 + * Note that bit 0 is defined to be 32-bit word bit 0, not byte 0 bit 0.
2050 + * Little endian assembly bitops. nr = 0 -> byte 0 bit 0.
2052 +extern void _set_bit_le(int nr, volatile void * p);
2053 +extern void _clear_bit_le(int nr, volatile void * p);
2054 +extern void _change_bit_le(int nr, volatile void * p);
2055 +extern int _test_and_set_bit_le(int nr, volatile void * p);
2056 +extern int _test_and_clear_bit_le(int nr, volatile void * p);
2057 +extern int _test_and_change_bit_le(int nr, volatile void * p);
2058 +extern int _find_first_zero_bit_le(void * p, unsigned size);
2059 +extern int _find_next_zero_bit_le(void * p, int size, int offset);
2062 + * Big endian assembly bitops. nr = 0 -> byte 3 bit 0.
2064 +extern void _set_bit_be(int nr, volatile void * p);
2065 +extern void _clear_bit_be(int nr, volatile void * p);
2066 +extern void _change_bit_be(int nr, volatile void * p);
2067 +extern int _test_and_set_bit_be(int nr, volatile void * p);
2068 +extern int _test_and_clear_bit_be(int nr, volatile void * p);
2069 +extern int _test_and_change_bit_be(int nr, volatile void * p);
2070 +extern int _find_first_zero_bit_be(void * p, unsigned size);
2071 +extern int _find_next_zero_bit_be(void * p, int size, int offset);
2075 + * The __* form of bitops are non-atomic and may be reordered.
2077 +#define ATOMIC_BITOP_LE(name,nr,p) \
2078 + (__builtin_constant_p(nr) ? \
2079 + ____atomic_##name##_mask(1 << ((nr) & 7), \
2080 + ((unsigned char *)(p)) + ((nr) >> 3)) : \
2081 + _##name##_le(nr,p))
2083 +#define ATOMIC_BITOP_BE(name,nr,p) \
2084 + (__builtin_constant_p(nr) ? \
2085 + ____atomic_##name##_mask(1 << ((nr) & 7), \
2086 + ((unsigned char *)(p)) + (((nr) >> 3) ^ 3)) : \
2087 + _##name##_be(nr,p))
2089 +#define NONATOMIC_BITOP_LE(name,nr,p) \
2090 + (____nonatomic_##name(nr, p))
2092 +#define NONATOMIC_BITOP_BE(name,nr,p) \
2093 + (____nonatomic_##name(nr ^ 0x18, p))
2097 + * These are the little endian, atomic definitions.
2099 +#define set_bit(nr,p) ATOMIC_BITOP_LE(set_bit,nr,p)
2100 +#define clear_bit(nr,p) ATOMIC_BITOP_LE(clear_bit,nr,p)
2101 +#define change_bit(nr,p) ATOMIC_BITOP_LE(change_bit,nr,p)
2102 +#define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p)
2103 +#define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p)
2104 +#define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p)
2105 +#define test_bit(nr,p) ____test_bit(nr,p)
2106 +#define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz)
2107 +#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off)
2110 + * These are the little endian, non-atomic definitions.
2112 +#define __set_bit(nr,p) NONATOMIC_BITOP_LE(set_bit,nr,p)
2113 +#define __clear_bit(nr,p) NONATOMIC_BITOP_LE(clear_bit,nr,p)
2114 +#define __change_bit(nr,p) NONATOMIC_BITOP_LE(change_bit,nr,p)
2115 +#define __test_and_set_bit(nr,p) NONATOMIC_BITOP_LE(test_and_set_bit,nr,p)
2116 +#define __test_and_clear_bit(nr,p) NONATOMIC_BITOP_LE(test_and_clear_bit,nr,p)
2117 +#define __test_and_change_bit(nr,p) NONATOMIC_BITOP_LE(test_and_change_bit,nr,p)
2118 +#define __test_bit(nr,p) ____test_bit(nr,p)
2123 + * These are the big endian, atomic definitions.
2125 +#define set_bit(nr,p) ATOMIC_BITOP_BE(set_bit,nr,p)
2126 +#define clear_bit(nr,p) ATOMIC_BITOP_BE(clear_bit,nr,p)
2127 +#define change_bit(nr,p) ATOMIC_BITOP_BE(change_bit,nr,p)
2128 +#define test_and_set_bit(nr,p) ATOMIC_BITOP_BE(test_and_set_bit,nr,p)
2129 +#define test_and_clear_bit(nr,p) ATOMIC_BITOP_BE(test_and_clear_bit,nr,p)
2130 +#define test_and_change_bit(nr,p) ATOMIC_BITOP_BE(test_and_change_bit,nr,p)
2131 +#define test_bit(nr,p) ____test_bit((nr) ^ 0x18, p)
2132 +#define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz)
2133 +#define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off)
2136 + * These are the big endian, non-atomic definitions.
2138 +#define __set_bit(nr,p) NONATOMIC_BITOP_BE(set_bit,nr,p)
2139 +#define __clear_bit(nr,p) NONATOMIC_BITOP_BE(clear_bit,nr,p)
2140 +#define __change_bit(nr,p) NONATOMIC_BITOP_BE(change_bit,nr,p)
2141 +#define __test_and_set_bit(nr,p) NONATOMIC_BITOP_BE(test_and_set_bit,nr,p)
2142 +#define __test_and_clear_bit(nr,p) NONATOMIC_BITOP_BE(test_and_clear_bit,nr,p)
2143 +#define __test_and_change_bit(nr,p) NONATOMIC_BITOP_BE(test_and_change_bit,nr,p)
2144 +#define __test_bit(nr,p) ____test_bit((nr) ^ 0x18, p)
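/*
 * Illustration, not part of the patch: in big endian mode word bit 0
 * lives in byte 3, so XORing a bit number with 0x18 (24) flips the byte
 * index within the 32-bit word while leaving the bit-within-byte offset
 * alone: bit 0 -> byte 3 bit 0, bit 8 -> byte 2 bit 0, bit 26 -> byte 0
 * bit 2. A sketch of the index arithmetic:
 */
static inline unsigned int be_bit_index_sketch(unsigned int nr)
{
        unsigned int byte = (nr ^ 0x18) >> 3;   /* byte within the word */
        unsigned int bit = nr & 7;              /* bit within that byte */

        return (byte << 3) | bit;               /* == nr ^ 0x18 */
}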
2149 * ffz = Find First Zero in word. Undefined if no zero exists,
2150 * so code should check against ~0UL first..
2152 @@ -110,6 +302,29 @@
2156 + * __ffs = Find First Set bit in word. Undefined if no bit is set,
2157 + * so code should check against 0 first..
2159 +static inline unsigned long __ffs(unsigned long word)
2160 +{
2161 + int k;
2162 +
2163 + k = 31;
2164 + if (word & 0x0000ffff) { k -= 16; word <<= 16; }
2165 + if (word & 0x00ff0000) { k -= 8; word <<= 8; }
2166 + if (word & 0x0f000000) { k -= 4; word <<= 4; }
2167 + if (word & 0x30000000) { k -= 2; word <<= 2; }
2168 + if (word & 0x40000000) { k -= 1; }
2169 + return k;
2170 +}
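/*
 * Illustration, not part of the patch: the cascade above halves the
 * search window by shifting the low half up whenever it holds a set
 * bit. Tracing word = 0x80: the 0x0000ffff test hits (k = 15, word
 * becomes 0x00800000), the 0x00ff0000 test hits (k = 7, word becomes
 * 0x80000000), the remaining tests miss, so k == 7 == __ffs(0x80).
 * The same result via the lowest-set-bit identity, using
 * generic_hweight32() from <linux/bitops.h>:
 */
static inline unsigned long ffs_via_popcount_sketch(unsigned long x)
{
        /* (x & -x) isolates the lowest set bit; count the ones below it. */
        return generic_hweight32((x & -x) - 1);
}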
2173 + * fls: find last bit set.
2176 +#define fls(x) generic_fls(x)
2179 * ffs: find first bit set. This is defined the same way as
2180 * the libc and compiler builtin ffs routines, therefore
2181 * differs in spirit from the above ffz (man ffs).
2182 @@ -118,6 +333,22 @@
2183 #define ffs(x) generic_ffs(x)
2186 + * Find first bit set in a 168-bit bitmap, where the first
2187 + * 128 bits are unlikely to be set.
2188 + */
2189 +static inline int sched_find_first_bit(unsigned long *b)
2190 +{
2191 + unsigned long v;
2192 + unsigned int off;
2193 +
2194 + for (off = 0; v = b[off], off < 4; off++) {
2195 + if (unlikely(v))
2196 + break;
2197 + }
2198 + return __ffs(v) + off * 32;
2199 +}
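/*
 * Illustration, not part of the patch: the comma expression above loads
 * v = b[off] on every evaluation of the loop condition, including the
 * final one that fails with off == 4, so v already holds b[4] when none
 * of the first four words had a bit set. Spelled out without the comma
 * operator:
 */
static inline int sched_find_first_bit_sketch(unsigned long *b)
{
        unsigned int off;

        for (off = 0; off < 4; off++)
                if (b[off])
                        return __ffs(b[off]) + off * 32;
        return __ffs(b[4]) + 128;
}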
2202 * hweightN: returns the hamming weight (i.e. the number
2203 * of bits set) of a N-bit word
2205 @@ -126,18 +357,25 @@
2206 #define hweight16(x) generic_hweight16(x)
2207 #define hweight8(x) generic_hweight8(x)
2209 -#define ext2_set_bit test_and_set_bit
2210 -#define ext2_clear_bit test_and_clear_bit
2211 -#define ext2_test_bit test_bit
2212 -#define ext2_find_first_zero_bit find_first_zero_bit
2213 -#define ext2_find_next_zero_bit find_next_zero_bit
2215 -/* Bitmap functions for the minix filesystem. */
2216 -#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
2217 -#define minix_set_bit(nr,addr) set_bit(nr,addr)
2218 -#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
2219 -#define minix_test_bit(nr,addr) test_bit(nr,addr)
2220 -#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
2222 + * Ext2 is defined to use little-endian byte ordering.
2223 + * These do not need to be atomic.
2225 +#define ext2_set_bit(nr,p) NONATOMIC_BITOP_LE(test_and_set_bit,nr,p)
2226 +#define ext2_clear_bit(nr,p) NONATOMIC_BITOP_LE(test_and_clear_bit,nr,p)
2227 +#define ext2_test_bit(nr,p) __test_bit(nr,p)
2228 +#define ext2_find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz)
2229 +#define ext2_find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off)
2232 + * Minix is defined to use little-endian byte ordering.
2233 + * These do not need to be atomic.
2235 +#define minix_set_bit(nr,p) NONATOMIC_BITOP_LE(set_bit,nr,p)
2236 +#define minix_test_bit(nr,p) __test_bit(nr,p)
2237 +#define minix_test_and_set_bit(nr,p) NONATOMIC_BITOP_LE(test_and_set_bit,nr,p)
2238 +#define minix_test_and_clear_bit(nr,p) NONATOMIC_BITOP_LE(test_and_clear_bit,nr,p)
2239 +#define minix_find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz)
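/*
 * Illustration, not part of the patch: ext2 and minix define their
 * bitmaps as little-endian byte streams, so bit 8 must land in byte 1
 * whatever the CPU endianness. Addressing bytes directly gives that for
 * free, which is exactly what the LE accessors above do; a portable,
 * non-atomic sketch of ext2_set_bit():
 */
static inline int ext2_set_bit_sketch(int nr, void *addr)
{
        unsigned char *p = (unsigned char *) addr + (nr >> 3);
        unsigned char mask = 1 << (nr & 7);
        int oldbit = (*p & mask) != 0;

        *p |= mask;
        return oldbit;
}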
2241 #endif /* __KERNEL__ */
2243 diff -urN linux-2.4.24.org/include/asm-cris/bitops.h linux-2.4.24/include/asm-cris/bitops.h
2244 --- linux-2.4.24.org/include/asm-cris/bitops.h 2004-02-04 20:48:26.679494929 +0100
2245 +++ linux-2.4.24/include/asm-cris/bitops.h 2004-02-04 20:52:53.595977130 +0100
2247 /* We use generic_ffs so get it; include guards resolve the possible
2248 mutual inclusion. */
2249 #include <linux/bitops.h>
2250 +#include <linux/compiler.h>
2253 * Some hacks to defeat gcc over-optimizations..
2255 #define set_bit(nr, addr) (void)test_and_set_bit(nr, addr)
2258 +#define __set_bit(nr, addr) (void)__test_and_set_bit(nr, addr)
2261 * clear_bit - Clears a bit in memory
2264 #define clear_bit(nr, addr) (void)test_and_clear_bit(nr, addr)
2267 +#define __clear_bit(nr, addr) (void)__test_and_clear_bit(nr, addr)
2270 * change_bit - Toggle a bit in memory
2271 * @nr: Bit to change
2273 * It also implies a memory barrier.
2276 -extern __inline__ int test_and_set_bit(int nr, void *addr)
2277 +extern inline int test_and_set_bit(int nr, void *addr)
2279 unsigned int mask, retval;
2280 unsigned long flags;
2281 @@ -119,6 +124,18 @@
2285 +extern inline int __test_and_set_bit(int nr, void *addr)
2286 +{
2287 + unsigned int mask, retval;
2288 + unsigned int *adr = (unsigned int *)addr;
2289 +
2290 + adr += nr >> 5;
2291 + mask = 1 << (nr & 0x1f);
2292 + retval = (mask & *adr) != 0;
2293 + *adr |= mask;
2294 + return retval;
2295 +}
2298 * clear_bit() doesn't provide any barrier for the compiler.
2301 * It also implies a memory barrier.
2304 -extern __inline__ int test_and_clear_bit(int nr, void *addr)
2305 +extern inline int test_and_clear_bit(int nr, void *addr)
2307 unsigned int mask, retval;
2308 unsigned long flags;
2310 * but actually fail. You must protect multiple accesses with a lock.
2313 -extern __inline__ int __test_and_clear_bit(int nr, void *addr)
2314 +extern inline int __test_and_clear_bit(int nr, void *addr)
2316 unsigned int mask, retval;
2317 unsigned int *adr = (unsigned int *)addr;
2319 * It also implies a memory barrier.
2322 -extern __inline__ int test_and_change_bit(int nr, void *addr)
2323 +extern inline int test_and_change_bit(int nr, void *addr)
2325 unsigned int mask, retval;
2326 unsigned long flags;
2329 /* WARNING: non atomic and it can be reordered! */
2331 -extern __inline__ int __test_and_change_bit(int nr, void *addr)
2332 +extern inline int __test_and_change_bit(int nr, void *addr)
2334 unsigned int mask, retval;
2335 unsigned int *adr = (unsigned int *)addr;
2337 * This routine doesn't need to be atomic.
2340 -extern __inline__ int test_bit(int nr, const void *addr)
2341 +extern inline int test_bit(int nr, const void *addr)
2344 unsigned int *adr = (unsigned int *)addr;
2346 * number. They differ in that the first function also inverts all bits
2349 -extern __inline__ unsigned long cris_swapnwbrlz(unsigned long w)
2350 +extern inline unsigned long cris_swapnwbrlz(unsigned long w)
2352 /* Let's just say we return the result in the same register as the
2353 input. Saying we clobber the input but can return the result
2358 -extern __inline__ unsigned long cris_swapwbrlz(unsigned long w)
2359 +extern inline unsigned long cris_swapwbrlz(unsigned long w)
2362 __asm__ ("swapwbr %0 \n\t"
2364 * ffz = Find First Zero in word. Undefined if no zero exists,
2365 * so code should check against ~0UL first..
2367 -extern __inline__ unsigned long ffz(unsigned long w)
2368 +extern inline unsigned long ffz(unsigned long w)
2370 /* The generic_ffs function is used to avoid the asm when the
2371 argument is a constant. */
2373 * Somewhat like ffz but the equivalent of generic_ffs: in contrast to
2374 * ffz we return the first one-bit *plus one*.
2376 -extern __inline__ unsigned long kernel_ffs(unsigned long w)
2377 +extern inline unsigned long kernel_ffs(unsigned long w)
2379 /* The generic_ffs function is used to avoid the asm when the
2380 argument is a constant. */
2382 * @offset: The bitnumber to start searching at
2383 * @size: The maximum size to search
2385 -extern __inline__ int find_next_zero_bit (void * addr, int size, int offset)
2386 +extern inline int find_next_zero_bit (void * addr, int size, int offset)
2388 unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
2389 unsigned long result = offset & ~31UL;
2390 @@ -375,7 +392,45 @@
2391 #define minix_test_bit(nr,addr) test_bit(nr,addr)
2392 #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
2394 -#endif /* __KERNEL__ */
2396 +/* TODO: see below */
2397 +#define sched_find_first_zero_bit(addr) find_first_zero_bit(addr, 168)
2400 +/* TODO: left out pending where to put it.. (there are .h dependencies) */
2403 + * Every architecture must define this function. It's the fastest
2404 + * way of searching a 168-bit bitmap where the first 128 bits are
2405 + * unlikely to be set. It's guaranteed that at least one of the 168
2406 + * bits is cleared.
2409 +#if MAX_RT_PRIO != 128 || MAX_PRIO != 168
2410 +# error update this function.
2413 +#define MAX_RT_PRIO 128
2414 +#define MAX_PRIO 168
2417 +static inline int sched_find_first_zero_bit(char *bitmap)
2418 +{
2419 + unsigned int *b = (unsigned int *)bitmap;
2420 + unsigned int rt;
2421 +
2422 + rt = b[0] & b[1] & b[2] & b[3];
2423 + if (unlikely(rt != 0xffffffff))
2424 + return find_first_zero_bit(bitmap, MAX_RT_PRIO);
2425 +
2426 + if (b[4] != 0xffffffff)
2427 + return ffz(b[4]) + MAX_RT_PRIO;
2428 + return ffz(b[5]) + 32 + MAX_RT_PRIO;
2429 +}
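/*
 * Illustration, not part of the patch: ANDing the first four words is
 * 0xffffffff only if all 128 RT-priority bits are set; any zero bit
 * anywhere clears the corresponding bit of rt, so a single compare
 * decides whether the RT range needs the slow scan. E.g. with
 * b[2] == ~0x100 the AND is ~0x100 != 0xffffffff, and
 * find_first_zero_bit() then locates bit 72. The same idea for just two
 * words:
 */
static inline int first_zero_of_two_words_sketch(unsigned int w0, unsigned int w1)
{
        if ((w0 & w1) != 0xffffffff) {  /* some zero bit in the first 64 */
                if (w0 != 0xffffffff)
                        return ffz(w0);
                return ffz(w1) + 32;
        }
        return 64;                      /* no zero bit in either word */
}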
2434 +#endif /* __KERNEL__ */
2436 #endif /* _CRIS_BITOPS_H */
2437 diff -urN linux-2.4.24.org/include/asm-generic/bitops.h linux-2.4.24/include/asm-generic/bitops.h
2438 --- linux-2.4.24.org/include/asm-generic/bitops.h 2004-02-04 20:47:40.855026441 +0100
2439 +++ linux-2.4.24/include/asm-generic/bitops.h 2004-02-04 20:52:53.630969851 +0100
2441 return ((mask & *addr) != 0);
2445 + * fls: find last bit set.
2448 +#define fls(x) generic_fls(x)
2453 diff -urN linux-2.4.24.org/include/asm-i386/bitops.h linux-2.4.24/include/asm-i386/bitops.h
2454 --- linux-2.4.24.org/include/asm-i386/bitops.h 2004-02-04 20:47:40.983999614 +0100
2455 +++ linux-2.4.24/include/asm-i386/bitops.h 2004-02-04 20:52:53.655964653 +0100
2459 #include <linux/config.h>
2460 +#include <linux/compiler.h>
2463 * These have to be done with inline assembly: that way the bit-setting
2469 +static __inline__ void __clear_bit(int nr, volatile void * addr)
2470 +{
2471 + __asm__ __volatile__(
2472 + "btrl %1,%0"
2473 + :"=m" (ADDR)
2474 + :"Ir" (nr));
2475 +}
2476 #define smp_mb__before_clear_bit() barrier()
2477 #define smp_mb__after_clear_bit() barrier()
2479 @@ -284,6 +293,34 @@
2483 + * find_first_bit - find the first set bit in a memory region
2484 + * @addr: The address to start the search at
2485 + * @size: The maximum size to search
2487 + * Returns the bit-number of the first set bit, not the number of the byte
2488 + * containing a bit.
2489 + */
2490 +static __inline__ int find_first_bit(void * addr, unsigned size)
2491 +{
2492 + int d0, d1;
2493 + int res;
2494 +
2495 + /* This looks at memory. Mark it volatile to tell gcc not to move it around */
2496 + __asm__ __volatile__(
2497 + "xorl %%eax,%%eax\n\t"
2498 + "repe; scasl\n\t"
2499 + "jz 1f\n\t"
2500 + "leal -4(%%edi),%%edi\n\t"
2501 + "bsfl (%%edi),%%eax\n"
2502 + "1:\tsubl %%ebx,%%edi\n\t"
2503 + "shll $3,%%edi\n\t"
2504 + "addl %%edi,%%eax"
2505 + :"=a" (res), "=&c" (d0), "=&D" (d1)
2506 + :"1" ((size + 31) >> 5), "2" (addr), "b" (addr));
2507 + return res;
2508 +}
2511 * find_next_zero_bit - find the first zero bit in a memory region
2512 * @addr: The address to base the search on
2513 * @offset: The bitnumber to start searching at
2518 - * Look for zero in first byte
2519 + * Look for zero in the first 32 bits.
2521 __asm__("bsfl %1,%0\n\t"
2523 @@ -317,6 +354,39 @@
2527 + * find_next_bit - find the first set bit in a memory region
2528 + * @addr: The address to base the search on
2529 + * @offset: The bitnumber to start searching at
2530 + * @size: The maximum size to search
2531 + */
2532 +static __inline__ int find_next_bit (void * addr, int size, int offset)
2533 +{
2534 + unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
2535 + int set = 0, bit = offset & 31, res;
2536 +
2537 + if (bit) {
2538 + /*
2539 + * Look for nonzero in the first 32 bits:
2540 + */
2541 + __asm__("bsfl %1,%0\n\t"
2542 + "jne 1f\n\t"
2543 + "movl $32, %0\n"
2544 + "1:"
2545 + : "=r" (set)
2546 + : "r" (*p >> bit));
2547 + if (set < (32 - bit))
2548 + return set + offset;
2549 + set = 32 - bit;
2550 + p++;
2551 + }
2552 + /*
2553 + * No set bit yet, search remaining full words for a bit
2554 + */
2555 + res = find_first_bit (p, size - 32 * (p - (unsigned long *) addr));
2556 + return (offset + set + res);
2557 +}
2560 * ffz - find first zero in word.
2561 * @word: The word to search
2563 @@ -330,8 +400,41 @@
2568 + * __ffs - find first bit in word.
2569 + * @word: The word to search
2570 + * Undefined if no bit exists, so code should check against 0 first.
2571 + */
2572 +static __inline__ unsigned long __ffs(unsigned long word)
2573 +{
2574 + __asm__("bsfl %1,%0"
2575 + :"=r" (word)
2576 + :"rm" (word));
2577 + return word;
2578 +}
2579 +#define fls(x) generic_fls(x)
2584 + * Every architecture must define this function. It's the fastest
2585 + * way of searching a 140-bit bitmap where the first 100 bits are
2586 + * unlikely to be set. It's guaranteed that at least one of the 140
2587 + * bits is set.
2588 + */
2589 +static inline int sched_find_first_bit(unsigned long *b)
2590 +{
2591 + if (unlikely(b[0]))
2592 + return __ffs(b[0]);
2593 + if (unlikely(b[1]))
2594 + return __ffs(b[1]) + 32;
2595 + if (unlikely(b[2]))
2596 + return __ffs(b[2]) + 64;
2597 + if (b[3])
2598 + return __ffs(b[3]) + 96;
2599 + return __ffs(b[4]) + 128;
2600 +}
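/*
 * Illustration, not part of the patch: the O(1) scheduler keeps one bit
 * per priority in a 140-bit map -- priorities 0..99 are real-time
 * (usually empty, hence the unlikely() hints) and 100..139 timeshared.
 * Locating a priority's word and bit is plain division:
 */
static inline void prio_to_word_bit_sketch(int prio, int *word, int *bit)
{
        *word = prio / 32;      /* e.g. prio 120 -> word 3 */
        *bit = prio % 32;       /* e.g. prio 120 -> bit 24 */
}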
2603 * ffs - find first bit set
2604 * @x: the word to search
2605 diff -urN linux-2.4.24.org/include/asm-i386/mmu_context.h linux-2.4.24/include/asm-i386/mmu_context.h
2606 --- linux-2.4.24.org/include/asm-i386/mmu_context.h 2004-02-04 20:47:41.355922254 +0100
2607 +++ linux-2.4.24/include/asm-i386/mmu_context.h 2004-02-04 20:57:00.389646201 +0100
2610 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
2612 - if (prev != next) {
2613 + if (likely(prev != next)) {
2614 /* stop flush ipis for the previous mm */
2615 clear_bit(cpu, &prev->cpu_vm_mask);
2618 /* load_LDT, if either the previous or next thread
2619 * has a non-default LDT.
2621 - if (next->context.size+prev->context.size)
2622 + if (unlikely(next->context.size+prev->context.size))
2623 load_LDT(&next->context);
2626 diff -urN linux-2.4.24.org/include/asm-i386/processor.h linux-2.4.24/include/asm-i386/processor.h
2627 --- linux-2.4.24.org/include/asm-i386/processor.h 2004-02-04 20:47:40.967003150 +0100
2628 +++ linux-2.4.24/include/asm-i386/processor.h 2004-02-04 20:52:53.702954879 +0100
2631 #define cpu_relax() rep_nop()
2633 +#define ARCH_HAS_SMP_BALANCE
2635 /* Prefetch instructions for Pentium III and AMD Athlon */
2636 #if defined(CONFIG_MPENTIUMIII) || defined (CONFIG_MPENTIUM4)
2638 diff -urN linux-2.4.24.org/include/asm-i386/smp_balance.h linux-2.4.24/include/asm-i386/smp_balance.h
2639 --- linux-2.4.24.org/include/asm-i386/smp_balance.h 1970-01-01 01:00:00.000000000 +0100
2640 +++ linux-2.4.24/include/asm-i386/smp_balance.h 2004-02-04 20:52:53.705954255 +0100
2642 +#ifndef _ASM_SMP_BALANCE_H
2643 +#define _ASM_SMP_BALANCE_H
2646 + * We have an architecture-specific SMP load balancer to improve
2647 + * scheduling behavior on hyperthreaded CPUs. Since only P4s have
2648 + * HT, maybe this should be conditional on CONFIG_MPENTIUM4...
2653 + * Find any idle processor package (i.e. both virtual processors are idle)
2655 +static inline int find_idle_package(int this_cpu)
2656 +{
2657 + int i;
2658 +
2659 + this_cpu = cpu_number_map(this_cpu);
2660 +
2661 + for (i = (this_cpu + 1) % smp_num_cpus;
2662 + i != this_cpu;
2663 + i = (i + 1) % smp_num_cpus) {
2664 + int physical = cpu_logical_map(i);
2665 + int sibling = cpu_sibling_map[physical];
2666 +
2667 + if (idle_cpu(physical) && idle_cpu(sibling))
2668 + return i;
2669 + }
2670 + return -1; /* not found */
2671 +}
2673 +static inline int arch_reschedule_idle_override(task_t * p, int idle)
2674 +{
2675 + if (unlikely(smp_num_siblings > 1) && !idle_cpu(cpu_sibling_map[idle])) {
2676 + int true_idle = find_idle_package(idle);
2677 + if (true_idle >= 0) {
2678 + if (likely(p->cpus_allowed & (1UL << true_idle)))
2679 + return true_idle;
2680 +
2681 + true_idle = cpu_sibling_map[true_idle];
2682 + if (p->cpus_allowed & (1UL << true_idle))
2683 + return true_idle;
2684 + }
2685 + }
2686 +
2687 + return idle;
2688 +}
2691 +static inline int arch_load_balance(int this_cpu, int idle)
2692 +{
2693 + /* Special hack for hyperthreading */
2694 + if (unlikely(smp_num_siblings > 1 && idle == 2 && !idle_cpu(cpu_sibling_map[this_cpu]))) {
2695 + int found;
2696 + struct runqueue *rq_target;
2697 +
2698 + if ((found = find_idle_package(this_cpu)) >= 0 ) {
2699 + rq_target = cpu_rq(found);
2700 + resched_task(rq_target->idle);
2701 + return 1;
2702 + }
2703 + }
2704 + return 0;
2705 +}
2706 +
2707 +#endif /* _ASM_SMP_BALANCE_H */
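/*
 * Illustration, not part of the patch: on a hyperthreaded P4 the two
 * logical CPUs of a package share execution resources, so a package
 * whose sibling is busy is a worse wakeup target than a fully idle one.
 * cpu_sibling_map[] pairs each logical CPU with its hyperthread twin,
 * and the test find_idle_package() applies per package is simply:
 */
static inline int package_is_idle_sketch(int physical_cpu)
{
        return idle_cpu(physical_cpu) &&
               idle_cpu(cpu_sibling_map[physical_cpu]);
}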
2708 diff -urN linux-2.4.24.org/include/asm-i386/smp.h linux-2.4.24/include/asm-i386/smp.h
2709 --- linux-2.4.24.org/include/asm-i386/smp.h 2004-02-04 20:47:41.153964261 +0100
2710 +++ linux-2.4.24/include/asm-i386/smp.h 2004-02-04 20:52:53.733948432 +0100
2712 extern void smp_flush_tlb(void);
2713 extern void smp_message_irq(int cpl, void *dev_id, struct pt_regs *regs);
2714 extern void smp_send_reschedule(int cpu);
2715 +extern void smp_send_reschedule_all(void);
2716 extern void smp_invalidate_rcv(void); /* Process an NMI */
2717 extern void (*mtrr_hook) (void);
2718 extern void zap_low_mappings (void);
2720 * so this is correct in the x86 case.
2723 -#define smp_processor_id() (current->processor)
2724 +#define smp_processor_id() (current->cpu)
2726 static __inline int hard_smp_processor_id(void)
2730 #define NO_PROC_ID 0xFF /* No processor magic marker */
2733 - * This magic constant controls our willingness to transfer
2734 - * a process across CPUs. Such a transfer incurs misses on the L1
2735 - * cache, and on a P6 or P5 with multiple L2 caches L2 hits. My
2736 - * gut feeling is this will vary by board in value. For a board
2737 - * with separate L2 cache it probably depends also on the RSS, and
2738 - * for a board with shared L2 cache it ought to decay fast as other
2739 - * processes are run.
2742 -#define PROC_CHANGE_PENALTY 15 /* Schedule penalty */
2746 diff -urN linux-2.4.24.org/include/asm-i386/system.h linux-2.4.24/include/asm-i386/system.h
2747 --- linux-2.4.24.org/include/asm-i386/system.h 2004-02-04 20:47:40.963003981 +0100
2748 +++ linux-2.4.24/include/asm-i386/system.h 2004-02-04 20:52:53.759943026 +0100
2750 struct task_struct; /* one of the stranger aspects of C forward declarations.. */
2751 extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
2753 -#define prepare_to_switch() do { } while(0)
2754 #define switch_to(prev,next,last) do { \
2755 asm volatile("pushl %%esi\n\t" \
2758 "movl %%esp,%0\n\t" /* save ESP */ \
2759 - "movl %3,%%esp\n\t" /* restore ESP */ \
2760 + "movl %2,%%esp\n\t" /* restore ESP */ \
2761 "movl $1f,%1\n\t" /* save EIP */ \
2762 - "pushl %4\n\t" /* restore EIP */ \
2763 + "pushl %3\n\t" /* restore EIP */ \
2764 "jmp __switch_to\n" \
2769 - :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
2771 + :"=m" (prev->thread.esp),"=m" (prev->thread.eip) \
2772 :"m" (next->thread.esp),"m" (next->thread.eip), \
2773 - "a" (prev), "d" (next), \
2775 + "a" (prev), "d" (next)); \
2778 #define _set_base(addr,base) do { unsigned long __pr; \
2779 diff -urN linux-2.4.24.org/include/asm-ia64/bitops.h linux-2.4.24/include/asm-ia64/bitops.h
2780 --- linux-2.4.24.org/include/asm-ia64/bitops.h 2004-02-04 20:48:16.659579072 +0100
2781 +++ linux-2.4.24/include/asm-ia64/bitops.h 2004-02-04 20:52:53.793935955 +0100
2784 * Copyright (C) 1998-2003 Hewlett-Packard Co
2785 * David Mosberger-Tang <davidm@hpl.hp.com>
2787 + * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64 O(1)
2791 #include <linux/types.h>
2796 + * __clear_bit - Clears a bit in memory (non-atomic version)
2798 +static __inline__ void
2799 +__clear_bit (int nr, volatile void *addr)
2800 +{
2801 + volatile __u32 *p = (__u32 *) addr + (nr >> 5);
2802 + __u32 m = 1 << (nr & 31);
2803 + *p &= ~m;
2804 +}
2807 * change_bit - Toggle a bit in memory
2809 * @addr: Address to start counting from
2810 @@ -266,12 +280,11 @@
2814 - * ffz - find the first zero bit in a memory region
2815 - * @x: The address to start the search at
2816 + * ffz - find the first zero bit in a long word
2817 + * @x: The long word to find the bit in
2819 - * Returns the bit-number (0..63) of the first (least significant) zero bit, not
2820 - * the number of the byte containing a bit. Undefined if no zero exists, so
2821 - * code should check against ~0UL first...
2822 + * Returns the bit-number (0..63) of the first (least significant) zero bit. Undefined if
2823 + * no zero exists, so code should check against ~0UL first...
2825 static inline unsigned long
2826 ffz (unsigned long x)
2827 @@ -297,6 +310,21 @@
2832 + * __ffs - find first bit in word.
2833 + * @x: The word to search
2835 + * Undefined if no bit exists, so code should check against 0 first.
2837 +static __inline__ unsigned long
2838 +__ffs (unsigned long x)
2839 +{
2840 + unsigned long result;
2842 + __asm__ ("popcnt %0=%1" : "=r" (result) : "r" ((x - 1) & ~x));
2843 + return result;
2844 +}
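/*
 * Illustration, not part of the patch: for x != 0, (x - 1) & ~x sets
 * exactly the bits below the lowest set bit of x and clears the rest,
 * so popcnt of that mask is the index of the bit. E.g. x = 0x28:
 * x - 1 = 0x27, (x - 1) & ~x = 0x7, popcnt = 3 = __ffs(0x28). The same
 * identity with a portable bit-count loop:
 */
static inline unsigned long ffs_mask_identity_sketch(unsigned long x)
{
        unsigned long below = (x - 1) & ~x;     /* ones below the lowest set bit */
        unsigned long count = 0;

        while (below) {                         /* portable popcount */
                count += below & 1;
                below >>= 1;
        }
        return count;                           /* undefined for x == 0, like __ffs */
}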
2849 @@ -313,6 +341,12 @@
2850 return exp - 0xffff;
2853 +static inline int
2854 +fls (int x)
2855 +{
2856 + return ia64_fls((unsigned int) x);
2857 +}
2860 * ffs: find first bit set. This is defined the same way as the libc and compiler builtin
2861 * ffs routines, therefore differs in spirit from the above ffz (man ffs): it operates on
2862 @@ -385,8 +419,53 @@
2864 #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
2867 + * Find next bit in a bitmap reasonably efficiently..
2868 + */
2869 +static inline unsigned long
2870 +find_next_bit (void *addr, unsigned long size, unsigned long offset)
2871 +{
2872 + unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
2873 + unsigned long result = offset & ~63UL;
2874 + unsigned long tmp;
2875 +
2876 + if (offset >= size)
2877 + return size;
2878 + size -= result;
2879 + offset &= 63UL;
2880 + if (offset) {
2881 + tmp = *(p++);
2882 + tmp &= ~0UL << offset;
2883 + if (size < 64)
2884 + goto found_first;
2885 + if (tmp)
2886 + goto found_middle;
2887 + size -= 64;
2888 + result += 64;
2889 + }
2890 + while (size & ~63UL) {
2891 + if ((tmp = *(p++)))
2892 + goto found_middle;
2893 + result += 64;
2894 + size -= 64;
2895 + }
2896 + if (!size)
2897 + return result;
2898 + tmp = *p;
2899 +found_first:
2900 + tmp &= ~0UL >> (64-size);
2901 + if (tmp == 0UL) /* Are any bits set? */
2902 + return result + size; /* Nope. */
2903 +found_middle:
2904 + return result + __ffs(tmp);
2905 +}
2907 +#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
2911 +#define __clear_bit(nr, addr) clear_bit(nr, addr)
2913 #define ext2_set_bit test_and_set_bit
2914 #define ext2_clear_bit test_and_clear_bit
2915 #define ext2_test_bit test_bit
2916 @@ -400,6 +479,16 @@
2917 #define minix_test_bit(nr,addr) test_bit(nr,addr)
2918 #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
2920 +static __inline__ unsigned long
2921 +sched_find_first_bit (unsigned long *b)
2922 +{
2923 + if (unlikely(b[0]))
2924 + return __ffs(b[0]);
2925 + if (unlikely(b[1]))
2926 + return 64 + __ffs(b[1]);
2927 + return __ffs(b[2]) + 128;
2928 +}
2930 #endif /* __KERNEL__ */
2932 #endif /* _ASM_IA64_BITOPS_H */
2933 diff -urN linux-2.4.24.org/include/asm-m68k/bitops.h linux-2.4.24/include/asm-m68k/bitops.h
2934 --- linux-2.4.24.org/include/asm-m68k/bitops.h 2004-02-04 20:47:47.882564706 +0100
2935 +++ linux-2.4.24/include/asm-m68k/bitops.h 2004-02-04 20:52:53.798934916 +0100
2937 (__builtin_constant_p(nr) ? \
2938 __constant_clear_bit(nr, vaddr) : \
2939 __generic_clear_bit(nr, vaddr))
2940 +#define __clear_bit(nr,vaddr) clear_bit(nr,vaddr)
2942 static inline void __constant_clear_bit(int nr, volatile void *vaddr)
2944 @@ -238,6 +239,28 @@
2948 +#define __ffs(x) (ffs(x) - 1)
2952 + * Every architecture must define this function. It's the fastest
2953 + * way of searching a 140-bit bitmap where the first 100 bits are
2954 + * unlikely to be set. It's guaranteed that at least one of the 140
2955 + * bits is set.
2956 + */
2957 +static inline int sched_find_first_bit(unsigned long *b)
2958 +{
2959 + if (unlikely(b[0]))
2960 + return __ffs(b[0]);
2961 + if (unlikely(b[1]))
2962 + return __ffs(b[1]) + 32;
2963 + if (unlikely(b[2]))
2964 + return __ffs(b[2]) + 64;
2965 + if (b[3])
2966 + return __ffs(b[3]) + 96;
2967 + return __ffs(b[4]) + 128;
2968 +}
2972 * hweightN: returns the hamming weight (i.e. the number
2973 diff -urN linux-2.4.24.org/include/asm-mips/bitops.h linux-2.4.24/include/asm-mips/bitops.h
2974 --- linux-2.4.24.org/include/asm-mips/bitops.h 2004-02-04 20:47:43.266524847 +0100
2975 +++ linux-2.4.24/include/asm-mips/bitops.h 2004-02-04 20:52:53.820930341 +0100
2978 #ifdef CONFIG_CPU_HAS_LLSC
2980 +#include <asm/mipsregs.h>
2983 * These functions for MIPS ISA > 1 are interrupt and SMP proof and
2984 * interrupt friendly
2985 @@ -593,21 +595,30 @@
2987 * Undefined if no zero exists, so code should check against ~0UL first.
2989 -static __inline__ unsigned long ffz(unsigned long word)
2990 +extern __inline__ unsigned long ffz(unsigned long word)
2993 + unsigned int __res;
2994 + unsigned int mask = 1;
2997 - s = 16; if (word << 16 != 0) s = 0; b += s; word >>= s;
2998 - s = 8; if (word << 24 != 0) s = 0; b += s; word >>= s;
2999 - s = 4; if (word << 28 != 0) s = 0; b += s; word >>= s;
3000 - s = 2; if (word << 30 != 0) s = 0; b += s; word >>= s;
3001 - s = 1; if (word << 31 != 0) s = 0; b += s;
3003 + ".set\tnoreorder\n\t"
3006 + "1:\tand\t$1,%2,%1\n\t"
3014 + : "=&r" (__res), "=r" (mask)
3015 + : "r" (word), "1" (mask)
3026 diff -urN linux-2.4.24.org/include/asm-mips64/bitops.h linux-2.4.24/include/asm-mips64/bitops.h
3027 --- linux-2.4.24.org/include/asm-mips64/bitops.h 2004-02-04 20:48:21.702530138 +0100
3028 +++ linux-2.4.24/include/asm-mips64/bitops.h 2004-02-04 20:52:53.873919319 +0100
3031 #include <asm/system.h>
3032 #include <asm/sgidefs.h>
3033 +#include <asm/mipsregs.h>
3036 * set_bit - Atomically set a bit in memory
3038 * Note that @nr may be almost arbitrarily large; this function is not
3039 * restricted to acting on a single-word quantity.
3041 -static inline void set_bit(unsigned long nr, volatile void *addr)
3042 +extern __inline__ void
3043 +set_bit(unsigned long nr, volatile void *addr)
3045 unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
3048 * If it's called on the same region of memory simultaneously, the effect
3049 * may be that only one operation succeeds.
3051 -static inline void __set_bit(int nr, volatile void * addr)
3052 +extern __inline__ void __set_bit(int nr, volatile void * addr)
3054 unsigned long * m = ((unsigned long *) addr) + (nr >> 6);
3057 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
3058 * in order to ensure changes are visible on other processors.
3060 -static inline void clear_bit(unsigned long nr, volatile void *addr)
3061 +extern __inline__ void
3062 +clear_bit(unsigned long nr, volatile void *addr)
3064 unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
3067 * Note that @nr may be almost arbitrarily large; this function is not
3068 * restricted to acting on a single-word quantity.
3070 -static inline void change_bit(unsigned long nr, volatile void *addr)
3071 +extern __inline__ void
3072 +change_bit(unsigned long nr, volatile void *addr)
3074 unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
3077 * If it's called on the same region of memory simultaneously, the effect
3078 * may be that only one operation succeeds.
3080 -static inline void __change_bit(int nr, volatile void * addr)
3081 +extern __inline__ void __change_bit(int nr, volatile void * addr)
3083 unsigned long * m = ((unsigned long *) addr) + (nr >> 6);
3086 * This operation is atomic and cannot be reordered.
3087 * It also implies a memory barrier.
3089 -static inline unsigned long test_and_set_bit(unsigned long nr,
3090 - volatile void *addr)
3091 +extern __inline__ unsigned long
3092 +test_and_set_bit(unsigned long nr, volatile void *addr)
3094 unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
3095 unsigned long temp, res;
3097 * If two examples of this operation race, one can appear to succeed
3098 * but actually fail. You must protect multiple accesses with a lock.
3100 -static inline int __test_and_set_bit(int nr, volatile void *addr)
3101 +extern __inline__ int
3102 +__test_and_set_bit(int nr, volatile void * addr)
3104 unsigned long mask, retval;
3105 long *a = (unsigned long *) addr;
3107 * This operation is atomic and cannot be reordered.
3108 * It also implies a memory barrier.
3110 -static inline unsigned long test_and_clear_bit(unsigned long nr,
3111 - volatile void *addr)
3112 +extern __inline__ unsigned long
3113 +test_and_clear_bit(unsigned long nr, volatile void *addr)
3115 unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
3116 unsigned long temp, res;
3118 * If two examples of this operation race, one can appear to succeed
3119 * but actually fail. You must protect multiple accesses with a lock.
3121 -static inline int __test_and_clear_bit(int nr, volatile void * addr)
3122 +extern __inline__ int
3123 +__test_and_clear_bit(int nr, volatile void * addr)
3125 unsigned long mask, retval;
3126 unsigned long *a = (unsigned long *) addr;
3128 * This operation is atomic and cannot be reordered.
3129 * It also implies a memory barrier.
3131 -static inline unsigned long test_and_change_bit(unsigned long nr,
3132 - volatile void *addr)
3133 +extern __inline__ unsigned long
3134 +test_and_change_bit(unsigned long nr, volatile void *addr)
3136 unsigned long *m = ((unsigned long *) addr) + (nr >> 6);
3137 unsigned long temp, res;
3139 * If two examples of this operation race, one can appear to succeed
3140 * but actually fail. You must protect multiple accesses with a lock.
3142 -static inline int __test_and_change_bit(int nr, volatile void *addr)
3143 +extern __inline__ int
3144 +__test_and_change_bit(int nr, volatile void * addr)
3146 unsigned long mask, retval;
3147 unsigned long *a = (unsigned long *) addr;
3149 * @nr: bit number to test
3150 * @addr: Address to start counting from
3152 -static inline int test_bit(int nr, volatile void * addr)
3153 +extern __inline__ unsigned long
3154 +test_bit(int nr, volatile void * addr)
3156 return 1UL & (((const volatile unsigned long *) addr)[nr >> SZLONG_LOG] >> (nr & SZLONG_MASK));
3158 @@ -313,19 +321,20 @@
3160 * Undefined if no zero exists, so code should check against ~0UL first.
3162 -static __inline__ unsigned long ffz(unsigned long word)
3163 +extern __inline__ unsigned long ffz(unsigned long word)
3169 - s = 32; if (word << 32 != 0) s = 0; b += s; word >>= s;
3170 - s = 16; if (word << 48 != 0) s = 0; b += s; word >>= s;
3171 - s = 8; if (word << 56 != 0) s = 0; b += s; word >>= s;
3172 - s = 4; if (word << 60 != 0) s = 0; b += s; word >>= s;
3173 - s = 2; if (word << 62 != 0) s = 0; b += s; word >>= s;
3174 - s = 1; if (word << 63 != 0) s = 0; b += s;
3176 + if (word & 0x00000000ffffffffUL) { k -= 32; word <<= 32; }
3177 + if (word & 0x0000ffff00000000UL) { k -= 16; word <<= 16; }
3178 + if (word & 0x00ff000000000000UL) { k -= 8; word <<= 8; }
3179 + if (word & 0x0f00000000000000UL) { k -= 4; word <<= 4; }
3180 + if (word & 0x3000000000000000UL) { k -= 2; word <<= 2; }
3181 + if (word & 0x4000000000000000UL) { k -= 1; }
3182 +
3183 + return k;
3184 +}
3189 * @offset: The bitnumber to start searching at
3190 * @size: The maximum size to search
3192 -static inline unsigned long find_next_zero_bit(void *addr, unsigned long size,
3193 - unsigned long offset)
3194 +extern __inline__ unsigned long
3195 +find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
3197 unsigned long *p = ((unsigned long *) addr) + (offset >> SZLONG_LOG);
3198 unsigned long result = offset & ~SZLONG_MASK;
3200 #define hweight16(x) generic_hweight16(x)
3201 #define hweight8(x) generic_hweight8(x)
3203 -static inline int __test_and_set_le_bit(unsigned long nr, void * addr)
3204 +extern __inline__ int
3205 +__test_and_set_le_bit(unsigned long nr, void * addr)
3207 unsigned char *ADDR = (unsigned char *) addr;
3213 -static inline int __test_and_clear_le_bit(unsigned long nr, void * addr)
3214 +extern __inline__ int
3215 +__test_and_clear_le_bit(unsigned long nr, void * addr)
3217 unsigned char *ADDR = (unsigned char *) addr;
3223 -static inline int test_le_bit(unsigned long nr, const void * addr)
3224 +extern __inline__ int
3225 +test_le_bit(unsigned long nr, const void * addr)
3227 const unsigned char *ADDR = (const unsigned char *) addr;
3233 -static inline unsigned long find_next_zero_le_bit(void *addr,
3234 +extern inline unsigned long find_next_zero_le_bit(void *addr,
3235 unsigned long size, unsigned long offset)
3237 unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
3238 diff -urN linux-2.4.24.org/include/asm-ppc/bitops.h linux-2.4.24/include/asm-ppc/bitops.h
3239 --- linux-2.4.24.org/include/asm-ppc/bitops.h 2004-02-04 20:47:57.992461840 +0100
3240 +++ linux-2.4.24/include/asm-ppc/bitops.h 2004-02-04 20:52:53.902913289 +0100
3242 #define _PPC_BITOPS_H
3244 #include <linux/config.h>
3245 +#include <linux/compiler.h>
3246 #include <asm/byteorder.h>
3247 #include <asm/atomic.h>
3250 * These used to be if'd out here because using : "cc" as a constraint
3251 * resulted in errors from egcs. Things appear to be OK with gcc-2.95.
3253 -static __inline__ void set_bit(int nr, volatile void * addr)
3254 +static __inline__ void set_bit(int nr, volatile unsigned long * addr)
3257 unsigned long mask = 1 << (nr & 0x1f);
3260 * non-atomic version
3262 -static __inline__ void __set_bit(int nr, volatile void *addr)
3263 +static __inline__ void __set_bit(int nr, volatile unsigned long *addr)
3265 unsigned long mask = 1 << (nr & 0x1f);
3266 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
3268 #define smp_mb__before_clear_bit() smp_mb()
3269 #define smp_mb__after_clear_bit() smp_mb()
3271 -static __inline__ void clear_bit(int nr, volatile void *addr)
3272 +static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
3275 unsigned long mask = 1 << (nr & 0x1f);
3278 * non-atomic version
3280 -static __inline__ void __clear_bit(int nr, volatile void *addr)
3281 +static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
3283 unsigned long mask = 1 << (nr & 0x1f);
3284 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
3289 -static __inline__ void change_bit(int nr, volatile void *addr)
3290 +static __inline__ void change_bit(int nr, volatile unsigned long *addr)
3293 unsigned long mask = 1 << (nr & 0x1f);
3296 * non-atomic version
3298 -static __inline__ void __change_bit(int nr, volatile void *addr)
3299 +static __inline__ void __change_bit(int nr, volatile unsigned long *addr)
3301 unsigned long mask = 1 << (nr & 0x1f);
3302 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
3305 * test_and_*_bit do imply a memory barrier (?)
3307 -static __inline__ int test_and_set_bit(int nr, volatile void *addr)
3308 +static __inline__ int test_and_set_bit(int nr, volatile unsigned long *addr)
3310 unsigned int old, t;
3311 unsigned int mask = 1 << (nr & 0x1f);
3314 * non-atomic version
3316 -static __inline__ int __test_and_set_bit(int nr, volatile void *addr)
3317 +static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr)
3319 unsigned long mask = 1 << (nr & 0x1f);
3320 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
3322 return (old & mask) != 0;
3325 -static __inline__ int test_and_clear_bit(int nr, volatile void *addr)
3326 +static __inline__ int test_and_clear_bit(int nr, volatile unsigned long *addr)
3328 unsigned int old, t;
3329 unsigned int mask = 1 << (nr & 0x1f);
3332 * non-atomic version
3334 -static __inline__ int __test_and_clear_bit(int nr, volatile void *addr)
3335 +static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
3337 unsigned long mask = 1 << (nr & 0x1f);
3338 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
3340 return (old & mask) != 0;
3343 -static __inline__ int test_and_change_bit(int nr, volatile void *addr)
3344 +static __inline__ int test_and_change_bit(int nr, volatile unsigned long *addr)
3346 unsigned int old, t;
3347 unsigned int mask = 1 << (nr & 0x1f);
3350 * non-atomic version
3352 -static __inline__ int __test_and_change_bit(int nr, volatile void *addr)
3353 +static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr)
3355 unsigned long mask = 1 << (nr & 0x1f);
3356 unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
3358 return (old & mask) != 0;
3361 -static __inline__ int test_bit(int nr, __const__ volatile void *addr)
3362 +static __inline__ int test_bit(int nr, __const__ volatile unsigned long *addr)
3364 __const__ unsigned int *p = (__const__ unsigned int *) addr;
3369 /* Return the bit position of the most significant 1 bit in a word */
3370 -static __inline__ int __ilog2(unsigned int x)
3371 +static __inline__ int __ilog2(unsigned long x)
3375 @@ -234,13 +235,18 @@
3379 -static __inline__ int ffz(unsigned int x)
3380 +static __inline__ int ffz(unsigned long x)
3384 return __ilog2(x & -x);
3387 +static inline int __ffs(unsigned long x)
3388 +{
3389 + return __ilog2(x & -x);
3390 +}
3393 * ffs: find first bit set. This is defined the same way as
3394 * the libc and compiler builtin ffs routines, therefore
3395 @@ -252,6 +258,18 @@
3399 + * fls: find last (most-significant) bit set.
3400 + * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
3402 +static __inline__ int fls(unsigned int x)
3403 +{
3404 + int lz;
3405 +
3406 + asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
3407 + return 32 - lz;
3408 +}
3411 * hweightN: returns the hamming weight (i.e. the number
3412 * of bits set) of a N-bit word
3414 @@ -261,13 +279,86 @@
3415 #define hweight8(x) generic_hweight8(x)
3418 + * Find the first bit set in a 140-bit bitmap.
3419 + * The first 100 bits are unlikely to be set.
3421 +static inline int sched_find_first_bit(unsigned long *b)
3422 +{
3423 + if (unlikely(b[0]))
3424 + return __ffs(b[0]);
3425 + if (unlikely(b[1]))
3426 + return __ffs(b[1]) + 32;
3427 + if (unlikely(b[2]))
3428 + return __ffs(b[2]) + 64;
3429 + if (b[3])
3430 + return __ffs(b[3]) + 96;
3431 + return __ffs(b[4]) + 128;
3432 +}
3435 + * find_next_bit - find the next set bit in a memory region
3436 + * @addr: The address to base the search on
3437 + * @offset: The bitnumber to start searching at
3438 + * @size: The maximum size to search
3440 +static __inline__ unsigned long find_next_bit(unsigned long *addr,
3441 + unsigned long size, unsigned long offset)
3443 + unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
3444 + unsigned int result = offset & ~31UL;
3447 + if (offset >= size)
3453 + tmp &= ~0UL << offset;
3457 + goto found_middle;
3461 + while (size >= 32) {
3462 + if ((tmp = *p++) != 0)
3463 + goto found_middle;
3472 + tmp &= ~0UL >> (32 - size);
3473 + if (tmp == 0UL) /* Are any bits set? */
3474 + return result + size; /* Nope. */
3476 + return result + __ffs(tmp);
3480 + * find_first_bit - find the first set bit in a memory region
3481 + * @addr: The address to start the search at
3482 + * @size: The maximum size to search
3484 + * Returns the bit-number of the first set bit, not the number of the byte
3485 + * containing a bit.
3487 +#define find_first_bit(addr, size) \
3488 + find_next_bit((addr), (size), 0)
3491 * This implementation of find_{first,next}_zero_bit was stolen from
3492 * Linus' asm-alpha/bitops.h.
3494 #define find_first_zero_bit(addr, size) \
3495 find_next_zero_bit((addr), (size), 0)
3497 -static __inline__ unsigned long find_next_zero_bit(void * addr,
3498 +static __inline__ unsigned long find_next_zero_bit(unsigned long * addr,
3499 unsigned long size, unsigned long offset)
3501 unsigned int * p = ((unsigned int *) addr) + (offset >> 5);
3506 -#define ext2_set_bit(nr, addr) __test_and_set_bit((nr) ^ 0x18, addr)
3507 -#define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 0x18, addr)
3508 +#define ext2_set_bit(nr, addr) __test_and_set_bit((nr) ^ 0x18, (unsigned long *)(addr))
3509 +#define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 0x18, (unsigned long *)(addr))
3511 static __inline__ int ext2_test_bit(int nr, __const__ void * addr)
3513 diff -urN linux-2.4.24.org/include/asm-ppc/smp.h linux-2.4.24/include/asm-ppc/smp.h
3514 --- linux-2.4.24.org/include/asm-ppc/smp.h 2004-02-04 20:47:58.116436054 +0100
3515 +++ linux-2.4.24/include/asm-ppc/smp.h 2004-02-04 20:52:53.906912457 +0100
3517 #define cpu_logical_map(cpu) (cpu)
3518 #define cpu_number_map(x) (x)
3520 -#define smp_processor_id() (current->processor)
3521 +#define smp_processor_id() (current->cpu)
3523 extern int smp_hw_index[NR_CPUS];
3524 #define hard_smp_processor_id() (smp_hw_index[smp_processor_id()])
3525 diff -urN linux-2.4.24.org/include/asm-ppc64/bitops.h linux-2.4.24/include/asm-ppc64/bitops.h
3526 --- linux-2.4.24.org/include/asm-ppc64/bitops.h 2004-02-04 20:47:31.682934246 +0100
3527 +++ linux-2.4.24/include/asm-ppc64/bitops.h 2004-02-04 20:52:53.961901020 +0100
3529 #define smp_mb__before_clear_bit() smp_mb()
3530 #define smp_mb__after_clear_bit() smp_mb()
3532 -static __inline__ int test_bit(unsigned long nr, __const__ volatile void *addr)
3533 +static __inline__ int test_bit(unsigned long nr, __const__ volatile unsigned long *addr)
3535 return (1UL & (((__const__ long *) addr)[nr >> 6] >> (nr & 63)));
3538 -static __inline__ void set_bit(unsigned long nr, volatile void *addr)
3539 +static __inline__ void set_bit(unsigned long nr, volatile unsigned long *addr)
3542 unsigned long mask = 1UL << (nr & 0x3f);
3547 -static __inline__ void clear_bit(unsigned long nr, volatile void *addr)
3548 +static __inline__ void clear_bit(unsigned long nr, volatile unsigned long *addr)
3551 unsigned long mask = 1UL << (nr & 0x3f);
3556 -static __inline__ void change_bit(unsigned long nr, volatile void *addr)
3557 +static __inline__ void change_bit(unsigned long nr, volatile unsigned long *addr)
3560 unsigned long mask = 1UL << (nr & 0x3f);
3565 -static __inline__ int test_and_set_bit(unsigned long nr, volatile void *addr)
3566 +static __inline__ int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
3568 unsigned long old, t;
3569 unsigned long mask = 1UL << (nr & 0x3f);
3571 return (old & mask) != 0;
3574 -static __inline__ int test_and_clear_bit(unsigned long nr, volatile void *addr)
3575 +static __inline__ int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
3577 unsigned long old, t;
3578 unsigned long mask = 1UL << (nr & 0x3f);
3580 return (old & mask) != 0;
3583 -static __inline__ int test_and_change_bit(unsigned long nr, volatile void *addr)
3584 +static __inline__ int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
3586 unsigned long old, t;
3587 unsigned long mask = 1UL << (nr & 0x3f);
3590 * non-atomic versions
3592 -static __inline__ void __set_bit(unsigned long nr, volatile void *addr)
3593 +static __inline__ void __set_bit(unsigned long nr, volatile unsigned long *addr)
3595 unsigned long mask = 1UL << (nr & 0x3f);
3596 unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
3601 -static __inline__ void __clear_bit(unsigned long nr, volatile void *addr)
3602 +static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long *addr)
3604 unsigned long mask = 1UL << (nr & 0x3f);
3605 unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
3610 -static __inline__ void __change_bit(unsigned long nr, volatile void *addr)
3611 +static __inline__ void __change_bit(unsigned long nr, volatile unsigned long *addr)
3613 unsigned long mask = 1UL << (nr & 0x3f);
3614 unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
3619 -static __inline__ int __test_and_set_bit(unsigned long nr, volatile void *addr)
3620 +static __inline__ int __test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
3622 unsigned long mask = 1UL << (nr & 0x3f);
3623 unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
3625 return (old & mask) != 0;
3628 -static __inline__ int __test_and_clear_bit(unsigned long nr, volatile void *addr)
3629 +static __inline__ int __test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
3631 unsigned long mask = 1UL << (nr & 0x3f);
3632 unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
3634 return (old & mask) != 0;
3637 -static __inline__ int __test_and_change_bit(unsigned long nr, volatile void *addr)
3638 +static __inline__ int __test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
3640 unsigned long mask = 1UL << (nr & 0x3f);
3641 unsigned long *p = ((unsigned long *)addr) + (nr >> 6);
3642 diff -urN linux-2.4.24.org/include/asm-s390/bitops.h linux-2.4.24/include/asm-s390/bitops.h
3643 --- linux-2.4.24.org/include/asm-s390/bitops.h 2004-02-04 20:48:24.809883809 +0100
3644 +++ linux-2.4.24/include/asm-s390/bitops.h 2004-02-04 20:52:53.990894989 +0100
3645 @@ -47,272 +47,217 @@
3646 extern const char _oi_bitmap[];
3647 extern const char _ni_bitmap[];
3648 extern const char _zb_findmap[];
3649 +extern const char _sb_findmap[];
3653 * SMP safe set_bit routine based on compare and swap (CS)
3655 -static __inline__ void set_bit_cs(int nr, volatile void * addr)
3656 +static inline void set_bit_cs(int nr, volatile void *ptr)
3658 - unsigned long bits, mask;
3659 - __asm__ __volatile__(
3660 + unsigned long addr, old, new, mask;
3662 + addr = (unsigned long) ptr;
3664 - " lhi %2,3\n" /* CS must be aligned on 4 byte b. */
3665 - " nr %2,%1\n" /* isolate last 2 bits of address */
3666 - " xr %1,%2\n" /* make addr % 4 == 0 */
3668 - " ar %0,%2\n" /* add alignement to bitnr */
3669 + nr += (addr & 3) << 3; /* add alignment to bit number */
3670 + addr ^= addr & 3; /* align address to 4 */
3673 - " nr %2,%0\n" /* make shift value */
3677 - " la %1,0(%0,%1)\n" /* calc. address for CS */
3678 - " sll %3,0(%2)\n" /* make OR mask */
3680 - "0: lr %2,%0\n" /* CS loop starts here */
3681 - " or %2,%3\n" /* set bit */
3682 - " cs %0,%2,0(%1)\n"
3684 - : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask) :
3685 - : "cc", "memory" );
3686 + addr += (nr ^ (nr & 31)) >> 3; /* calculate address for CS */
3687 + mask = 1UL << (nr & 31); /* make OR mask */
3692 + " cs %0,%1,0(%4)\n"
3694 + : "=&d" (old), "=&d" (new), "+m" (*(unsigned int *) addr)
3695 + : "d" (mask), "a" (addr)
3700 * SMP safe clear_bit routine based on compare and swap (CS)
3702 -static __inline__ void clear_bit_cs(int nr, volatile void * addr)
3703 +static inline void clear_bit_cs(int nr, volatile void *ptr)
3705 - static const int minusone = -1;
3706 - unsigned long bits, mask;
3707 - __asm__ __volatile__(
3708 + unsigned long addr, old, new, mask;
3710 + addr = (unsigned long) ptr;
3712 - " lhi %2,3\n" /* CS must be aligned on 4 byte b. */
3713 - " nr %2,%1\n" /* isolate last 2 bits of address */
3714 - " xr %1,%2\n" /* make addr % 4 == 0 */
3716 - " ar %0,%2\n" /* add alignement to bitnr */
3717 + nr += (addr & 3) << 3; /* add alignment to bit number */
3718 + addr ^= addr & 3; /* align address to 4 */
3721 - " nr %2,%0\n" /* make shift value */
3725 - " la %1,0(%0,%1)\n" /* calc. address for CS */
3727 - " x %3,%4\n" /* make AND mask */
3729 - "0: lr %2,%0\n" /* CS loop starts here */
3730 - " nr %2,%3\n" /* clear bit */
3731 - " cs %0,%2,0(%1)\n"
3733 - : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask)
3734 - : "m" (minusone) : "cc", "memory" );
3735 + addr += (nr ^ (nr & 31)) >> 3; /* calculate address for CS */
3736 + mask = ~(1UL << (nr & 31)); /* make AND mask */
3741 + " cs %0,%1,0(%4)\n"
3743 + : "=&d" (old), "=&d" (new), "+m" (*(unsigned int *) addr)
3744 + : "d" (mask), "a" (addr)
3749 * SMP safe change_bit routine based on compare and swap (CS)
3751 -static __inline__ void change_bit_cs(int nr, volatile void * addr)
3752 +static inline void change_bit_cs(int nr, volatile void *ptr)
3754 - unsigned long bits, mask;
3755 - __asm__ __volatile__(
3756 + unsigned long addr, old, new, mask;
3758 + addr = (unsigned long) ptr;
3760 - " lhi %2,3\n" /* CS must be aligned on 4 byte b. */
3761 - " nr %2,%1\n" /* isolate last 2 bits of address */
3762 - " xr %1,%2\n" /* make addr % 4 == 0 */
3764 - " ar %0,%2\n" /* add alignement to bitnr */
3765 + nr += (addr & 3) << 3; /* add alignment to bit number */
3766 + addr ^= addr & 3; /* align address to 4 */
3769 - " nr %2,%0\n" /* make shift value */
3773 - " la %1,0(%0,%1)\n" /* calc. address for CS */
3774 - " sll %3,0(%2)\n" /* make XR mask */
3776 - "0: lr %2,%0\n" /* CS loop starts here */
3777 - " xr %2,%3\n" /* change bit */
3778 - " cs %0,%2,0(%1)\n"
3780 - : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask) :
3781 - : "cc", "memory" );
3782 + addr += (nr ^ (nr & 31)) >> 3; /* calculate address for CS */
3783 + mask = 1UL << (nr & 31); /* make XOR mask */
3788 + " cs %0,%1,0(%4)\n"
3790 + : "=&d" (old), "=&d" (new), "+m" (*(unsigned int *) addr)
3791 + : "d" (mask), "a" (addr)
3796 * SMP safe test_and_set_bit routine based on compare and swap (CS)
3798 -static __inline__ int test_and_set_bit_cs(int nr, volatile void * addr)
3799 +static inline int test_and_set_bit_cs(int nr, volatile void *ptr)
3801 - unsigned long bits, mask;
3802 - __asm__ __volatile__(
3803 + unsigned long addr, old, new, mask;
3805 + addr = (unsigned long) ptr;
3807 - " lhi %2,3\n" /* CS must be aligned on 4 byte b. */
3808 - " nr %2,%1\n" /* isolate last 2 bits of address */
3809 - " xr %1,%2\n" /* make addr % 4 == 0 */
3811 - " ar %0,%2\n" /* add alignement to bitnr */
3812 + addr ^= addr & 3; /* align address to 4 */
3813 + nr += (addr & 3) << 3; /* add alignment to bit number */
3816 - " nr %2,%0\n" /* make shift value */
3820 - " la %1,0(%0,%1)\n" /* calc. address for CS */
3821 - " sll %3,0(%2)\n" /* make OR mask */
3823 - "0: lr %2,%0\n" /* CS loop starts here */
3824 - " or %2,%3\n" /* set bit */
3825 - " cs %0,%2,0(%1)\n"
3827 - " nr %0,%3\n" /* isolate old bit */
3828 - : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask) :
3829 - : "cc", "memory" );
3831 + addr += (nr ^ (nr & 31)) >> 3; /* calculate address for CS */
3832 + mask = 1UL << (nr & 31); /* make OR/test mask */
3837 + " cs %0,%1,0(%4)\n"
3839 + : "=&d" (old), "=&d" (new), "+m" (*(unsigned int *) addr)
3840 + : "d" (mask), "a" (addr)
3842 + return (old & mask) != 0;
3846 * SMP safe test_and_clear_bit routine based on compare and swap (CS)
3848 -static __inline__ int test_and_clear_bit_cs(int nr, volatile void * addr)
3849 +static inline int test_and_clear_bit_cs(int nr, volatile void *ptr)
3851 - static const int minusone = -1;
3852 - unsigned long bits, mask;
3853 - __asm__ __volatile__(
3854 + unsigned long addr, old, new, mask;
3856 + addr = (unsigned long) ptr;
3858 - " lhi %2,3\n" /* CS must be aligned on 4 byte b. */
3859 - " nr %2,%1\n" /* isolate last 2 bits of address */
3860 - " xr %1,%2\n" /* make addr % 4 == 0 */
3862 - " ar %0,%2\n" /* add alignement to bitnr */
3863 + addr ^= addr & 3; /* align address to 4 */
3864 + nr += (addr & 3) << 3; /* add alignment to bit number */
3867 - " nr %2,%0\n" /* make shift value */
3871 - " la %1,0(%0,%1)\n" /* calc. address for CS */
3874 - " x %3,%4\n" /* make AND mask */
3875 - "0: lr %2,%0\n" /* CS loop starts here */
3876 - " nr %2,%3\n" /* clear bit */
3877 - " cs %0,%2,0(%1)\n"
3880 - " nr %0,%3\n" /* isolate old bit */
3881 - : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask)
3882 - : "m" (minusone) : "cc", "memory" );
3884 + addr += (nr ^ (nr & 31)) >> 3; /* calculate address for CS */
3885 + mask = ~(1UL << (nr & 31)); /* make AND mask */
3890 + " cs %0,%1,0(%4)\n"
3892 + : "=&d" (old), "=&d" (new), "+m" (*(unsigned int *) addr)
3893 + : "d" (mask), "a" (addr)
3895 + return (old ^ new) != 0;
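A subtlety worth spelling out (annotation, not patch text): test_and_clear_bit_cs reports the old bit as (old ^ new) != 0. Since new is old AND mask, and the mask clears exactly one bit, old ^ new isolates that one bit. A self-contained check of the identity:

#include <assert.h>

int main(void)
{
	unsigned long bit = 1UL << 7;
	unsigned long mask = ~bit;		/* AND mask, as in the patch */
	unsigned long old, newv;

	old = 0xffUL; newv = old & mask;	/* bit 7 was set */
	assert((old ^ newv) == bit);

	old = 0x01UL; newv = old & mask;	/* bit 7 was clear */
	assert((old ^ newv) == 0);
	return 0;
}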
3899 * SMP safe test_and_change_bit routine based on compare and swap (CS)
3901 -static __inline__ int test_and_change_bit_cs(int nr, volatile void * addr)
3902 +static inline int test_and_change_bit_cs(int nr, volatile void *ptr)
3904 - unsigned long bits, mask;
3905 - __asm__ __volatile__(
3906 + unsigned long addr, old, new, mask;
3908 + addr = (unsigned long) ptr;
3910 - " lhi %2,3\n" /* CS must be aligned on 4 byte b. */
3911 - " nr %2,%1\n" /* isolate last 2 bits of address */
3912 - " xr %1,%2\n" /* make addr % 4 == 0 */
3914 - " ar %0,%2\n" /* add alignement to bitnr */
3915 + addr ^= addr & 3; /* align address to 4 */
3916 + nr += (addr & 3) << 3; /* add alignment to bit number */
3919 - " nr %2,%0\n" /* make shift value */
3923 - " la %1,0(%0,%1)\n" /* calc. address for CS */
3924 - " sll %3,0(%2)\n" /* make OR mask */
3926 - "0: lr %2,%0\n" /* CS loop starts here */
3927 - " xr %2,%3\n" /* change bit */
3928 - " cs %0,%2,0(%1)\n"
3930 - " nr %0,%3\n" /* isolate old bit */
3931 - : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask) :
3932 - : "cc", "memory" );
3934 + addr += (nr ^ (nr & 31)) >> 3; /* calculate address for CS */
3935 + mask = 1UL << (nr & 31); /* make XOR mask */
3940 + " cs %0,%1,0(%4)\n"
3942 + : "=&d" (old), "=&d" (new), "+m" (*(unsigned int *) addr)
3943 + : "d" (mask), "a" (addr)
3945 + return (old & mask) != 0;
3947 #endif /* CONFIG_SMP */
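The non-SMP routines that follow operate on single bytes rather than words. On big-endian s390 with LSB-0 bit numbering, bit nr lives in byte ((nr >> 3) ^ 3) of its 32-bit word, which the patch computes as (nr ^ 24) >> 3; the two forms are identical because 24 is 3 << 3 and the XOR never touches the low three bits. A sketch that verifies the identity:

#include <assert.h>

int main(void)
{
	int nr;

	for (nr = 0; nr < 1024; nr++)
		assert(((nr ^ 24) >> 3) == ((nr >> 3) ^ 3));
	return 0;
}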
3950 * fast, non-SMP set_bit routine
3952 -static __inline__ void __set_bit(int nr, volatile void * addr)
3953 +static inline void __set_bit(int nr, volatile void *ptr)
3955 - unsigned long reg1, reg2;
3956 - __asm__ __volatile__(
3962 - " la %1,0(%1,%3)\n"
3963 - " la %0,0(%0,%4)\n"
3964 - " oc 0(1,%1),0(%0)"
3965 - : "=&a" (reg1), "=&a" (reg2)
3966 - : "r" (nr), "a" (addr), "a" (&_oi_bitmap) : "cc", "memory" );
3969 -static __inline__ void
3970 -__constant_set_bit(const int nr, volatile void * addr)
3974 - __asm__ __volatile__ ("la 1,%0\n\t"
3976 - : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
3977 - : : "1", "cc", "memory");
3980 - __asm__ __volatile__ ("la 1,%0\n\t"
3982 - : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
3983 - : : "1", "cc", "memory" );
3986 - __asm__ __volatile__ ("la 1,%0\n\t"
3988 - : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
3989 - : : "1", "cc", "memory" );
3992 - __asm__ __volatile__ ("la 1,%0\n\t"
3994 - : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
3995 - : : "1", "cc", "memory" );
3998 - __asm__ __volatile__ ("la 1,%0\n\t"
4000 - : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
4001 - : : "1", "cc", "memory" );
4004 - __asm__ __volatile__ ("la 1,%0\n\t"
4006 - : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
4007 - : : "1", "cc", "memory" );
4010 - __asm__ __volatile__ ("la 1,%0\n\t"
4012 - : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
4013 - : : "1", "cc", "memory" );
4016 - __asm__ __volatile__ ("la 1,%0\n\t"
4018 - : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
4019 - : : "1", "cc", "memory" );
4022 + unsigned long addr;
4024 + addr = (unsigned long) ptr + ((nr ^ 24) >> 3);
4025 + asm volatile("oc 0(1,%1),0(%2)"
4026 + : "+m" (*(char *) addr)
4027 + : "a" (addr), "a" (_oi_bitmap + (nr & 7))
4032 +__constant_set_bit(const int nr, volatile void *ptr)
4034 + unsigned long addr;
4036 + addr = ((unsigned long) ptr) + ((nr >> 3) ^ 3);
4039 + asm volatile ("oi 0(%1),0x01"
4040 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
4043 + asm volatile ("oi 0(%1),0x02"
4044 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
4047 + asm volatile ("oi 0(%1),0x04"
4048 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
4051 + asm volatile ("oi 0(%1),0x08"
4052 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
4055 + asm volatile ("oi 0(%1),0x10"
4056 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
4059 + asm volatile ("oi 0(%1),0x20"
4060 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
4063 + asm volatile ("oi 0(%1),0x40"
4064 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
4067 + asm volatile ("oi 0(%1),0x80"
4068 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
4073 #define set_bit_simple(nr,addr) \
4074 @@ -323,76 +268,58 @@
4076 * fast, non-SMP clear_bit routine
4078 -static __inline__ void
4079 -__clear_bit(int nr, volatile void * addr)
4081 +__clear_bit(int nr, volatile void *ptr)
4083 - unsigned long reg1, reg2;
4084 - __asm__ __volatile__(
4090 - " la %1,0(%1,%3)\n"
4091 - " la %0,0(%0,%4)\n"
4092 - " nc 0(1,%1),0(%0)"
4093 - : "=&a" (reg1), "=&a" (reg2)
4094 - : "r" (nr), "a" (addr), "a" (&_ni_bitmap) : "cc", "memory" );
4097 -static __inline__ void
4098 -__constant_clear_bit(const int nr, volatile void * addr)
4102 - __asm__ __volatile__ ("la 1,%0\n\t"
4104 - : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
4105 - : : "1", "cc", "memory" );
4108 - __asm__ __volatile__ ("la 1,%0\n\t"
4110 - : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
4111 - : : "1", "cc", "memory" );
4114 - __asm__ __volatile__ ("la 1,%0\n\t"
4116 - : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
4117 - : : "1", "cc", "memory" );
4120 - __asm__ __volatile__ ("la 1,%0\n\t"
4122 - : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
4123 - : : "1", "cc", "memory" );
4126 - __asm__ __volatile__ ("la 1,%0\n\t"
4128 - : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
4129 - : : "cc", "memory" );
4132 - __asm__ __volatile__ ("la 1,%0\n\t"
4134 - : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
4135 - : : "1", "cc", "memory" );
4138 - __asm__ __volatile__ ("la 1,%0\n\t"
4140 - : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
4141 - : : "1", "cc", "memory" );
4144 - __asm__ __volatile__ ("la 1,%0\n\t"
4146 - : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
4147 - : : "1", "cc", "memory" );
4150 + unsigned long addr;
4152 + addr = (unsigned long) ptr + ((nr ^ 24) >> 3);
4153 + asm volatile("nc 0(1,%1),0(%2)"
4154 + : "+m" (*(char *) addr)
4155 + : "a" (addr), "a" (_ni_bitmap + (nr & 7))
4160 +__constant_clear_bit(const int nr, volatile void *ptr)
4162 + unsigned long addr;
4164 + addr = ((unsigned long) ptr) + ((nr >> 3) ^ 3);
4167 + asm volatile ("ni 0(%1),0xFE"
4168 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
4171 + asm volatile ("ni 0(%1),0xFD"
4172 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
4175 + asm volatile ("ni 0(%1),0xFB"
4176 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
4179 + asm volatile ("ni 0(%1),0xF7"
4180 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
4183 + asm volatile ("ni 0(%1),0xEF"
4184 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
4187 + asm volatile ("ni 0(%1),0xDF"
4188 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
4191 + asm volatile ("ni 0(%1),0xBF"
4192 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
4195 + asm volatile ("ni 0(%1),0x7F"
4196 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
4201 #define clear_bit_simple(nr,addr) \
4202 @@ -403,75 +330,57 @@
4204 * fast, non-SMP change_bit routine
4206 -static __inline__ void __change_bit(int nr, volatile void * addr)
4207 +static inline void __change_bit(int nr, volatile void *ptr)
4209 - unsigned long reg1, reg2;
4210 - __asm__ __volatile__(
4216 - " la %1,0(%1,%3)\n"
4217 - " la %0,0(%0,%4)\n"
4218 - " xc 0(1,%1),0(%0)"
4219 - : "=&a" (reg1), "=&a" (reg2)
4220 - : "r" (nr), "a" (addr), "a" (&_oi_bitmap) : "cc", "memory" );
4223 -static __inline__ void
4224 -__constant_change_bit(const int nr, volatile void * addr)
4228 - __asm__ __volatile__ ("la 1,%0\n\t"
4230 - : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
4231 - : : "cc", "memory" );
4234 - __asm__ __volatile__ ("la 1,%0\n\t"
4236 - : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
4237 - : : "cc", "memory" );
4240 - __asm__ __volatile__ ("la 1,%0\n\t"
4242 - : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
4243 - : : "cc", "memory" );
4246 - __asm__ __volatile__ ("la 1,%0\n\t"
4248 - : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
4249 - : : "cc", "memory" );
4252 - __asm__ __volatile__ ("la 1,%0\n\t"
4254 - : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
4255 - : : "cc", "memory" );
4258 - __asm__ __volatile__ ("la 1,%0\n\t"
4260 - : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
4261 - : : "1", "cc", "memory" );
4264 - __asm__ __volatile__ ("la 1,%0\n\t"
4266 - : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
4267 - : : "1", "cc", "memory" );
4270 - __asm__ __volatile__ ("la 1,%0\n\t"
4272 - : "=m" (*((volatile char *) addr + ((nr>>3)^3)))
4273 - : : "1", "cc", "memory" );
4276 + unsigned long addr;
4278 + addr = (unsigned long) ptr + ((nr ^ 24) >> 3);
4279 + asm volatile("xc 0(1,%1),0(%2)"
4280 + : "+m" (*(char *) addr)
4281 + : "a" (addr), "a" (_oi_bitmap + (nr & 7))
4286 +__constant_change_bit(const int nr, volatile void *ptr)
4288 + unsigned long addr;
4290 + addr = ((unsigned long) ptr) + ((nr >> 3) ^ 3);
4293 + asm volatile ("xi 0(%1),0x01"
4294 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
4297 + asm volatile ("xi 0(%1),0x02"
4298 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
4301 + asm volatile ("xi 0(%1),0x04"
4302 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
4305 + asm volatile ("xi 0(%1),0x08"
4306 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
4309 + asm volatile ("xi 0(%1),0x10"
4310 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
4313 + asm volatile ("xi 0(%1),0x20"
4314 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
4317 + asm volatile ("xi 0(%1),0x40"
4318 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
4321 + asm volatile ("xi 0(%1),0x80"
4322 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
4327 #define change_bit_simple(nr,addr) \
4328 @@ -482,74 +391,54 @@
4330 * fast, non-SMP test_and_set_bit routine
4332 -static __inline__ int test_and_set_bit_simple(int nr, volatile void * addr)
4333 +static inline int test_and_set_bit_simple(int nr, volatile void *ptr)
4335 - unsigned long reg1, reg2;
4337 - __asm__ __volatile__(
4343 - " la %1,0(%1,%4)\n"
4346 - " la %2,0(%2,%5)\n"
4347 - " oc 0(1,%1),0(%2)"
4348 - : "=d&" (oldbit), "=&a" (reg1), "=&a" (reg2)
4349 - : "r" (nr), "a" (addr), "a" (&_oi_bitmap) : "cc", "memory" );
4350 - return oldbit & 1;
4351 + unsigned long addr;
4354 + addr = (unsigned long) ptr + ((nr ^ 24) >> 3);
4355 + ch = *(unsigned char *) addr;
4356 + asm volatile("oc 0(1,%1),0(%2)"
4357 + : "+m" (*(char *) addr)
4358 + : "a" (addr), "a" (_oi_bitmap + (nr & 7))
4360 + return (ch >> (nr & 7)) & 1;
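Restated in plain C (a sketch, not patch code): the _simple test-and-ops read the target byte before applying the OR/AND/XOR through the mask tables, then derive the old bit from that saved byte; nothing here is atomic, so the caller supplies any needed locking.

/* Hypothetical C equivalent of test_and_set_bit_simple; big-endian
 * byte layout as above, _oi_bitmap[nr & 7] replaced by a shift. */
static int test_and_set_bit_simple_sketch(int nr, unsigned char *base)
{
	unsigned char *p = base + ((nr ^ 24) >> 3);
	unsigned char ch = *p;			/* old byte value */

	*p = ch | (unsigned char)(1u << (nr & 7));
	return (ch >> (nr & 7)) & 1;		/* previous bit state */
}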
4362 #define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y)
4365 * fast, non-SMP test_and_clear_bit routine
4367 -static __inline__ int test_and_clear_bit_simple(int nr, volatile void * addr)
4368 +static inline int test_and_clear_bit_simple(int nr, volatile void *ptr)
4370 - unsigned long reg1, reg2;
4372 + unsigned long addr;
4375 - __asm__ __volatile__(
4381 - " la %1,0(%1,%4)\n"
4384 - " la %2,0(%2,%5)\n"
4385 - " nc 0(1,%1),0(%2)"
4386 - : "=d&" (oldbit), "=&a" (reg1), "=&a" (reg2)
4387 - : "r" (nr), "a" (addr), "a" (&_ni_bitmap) : "cc", "memory" );
4388 - return oldbit & 1;
4389 + addr = (unsigned long) ptr + ((nr ^ 24) >> 3);
4390 + ch = *(unsigned char *) addr;
4391 + asm volatile("nc 0(1,%1),0(%2)"
4392 + : "+m" (*(char *) addr)
4393 + : "a" (addr), "a" (_ni_bitmap + (nr & 7))
4395 + return (ch >> (nr & 7)) & 1;
4397 #define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y)
4400 * fast, non-SMP test_and_change_bit routine
4402 -static __inline__ int test_and_change_bit_simple(int nr, volatile void * addr)
4403 +static inline int test_and_change_bit_simple(int nr, volatile void *ptr)
4405 - unsigned long reg1, reg2;
4407 + unsigned long addr;
4410 - __asm__ __volatile__(
4416 - " la %1,0(%1,%4)\n"
4419 - " la %2,0(%2,%5)\n"
4420 - " xc 0(1,%1),0(%2)"
4421 - : "=d&" (oldbit), "=&a" (reg1), "=&a" (reg2)
4422 - : "r" (nr), "a" (addr), "a" (&_oi_bitmap) : "cc", "memory" );
4423 - return oldbit & 1;
4424 + addr = (unsigned long) ptr + ((nr ^ 24) >> 3);
4425 + ch = *(unsigned char *) addr;
4426 + asm volatile("xc 0(1,%1),0(%2)"
4427 + : "+m" (*(char *) addr)
4428 + : "a" (addr), "a" (_oi_bitmap + (nr & 7))
4430 + return (ch >> (nr & 7)) & 1;
4432 #define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y)
4434 @@ -574,25 +463,17 @@
4435 * This routine doesn't need to be atomic.
4438 -static __inline__ int __test_bit(int nr, volatile void * addr)
4439 +static inline int __test_bit(int nr, volatile void *ptr)
4441 - unsigned long reg1, reg2;
4443 + unsigned long addr;
4446 - __asm__ __volatile__(
4452 - " ic %0,0(%2,%4)\n"
4454 - : "=d&" (oldbit), "=&a" (reg1), "=&a" (reg2)
4455 - : "r" (nr), "a" (addr) : "cc" );
4456 - return oldbit & 1;
4457 + addr = (unsigned long) ptr + ((nr ^ 24) >> 3);
4458 + ch = *(unsigned char *) addr;
4459 + return (ch >> (nr & 7)) & 1;
4462 -static __inline__ int __constant_test_bit(int nr, volatile void * addr) {
4463 +static inline int __constant_test_bit(int nr, volatile void * addr) {
4464 return (((volatile char *) addr)[(nr>>3)^3] & (1<<(nr&7))) != 0;
4469 * Find-bit routines..
4471 -static __inline__ int find_first_zero_bit(void * addr, unsigned size)
4472 +static inline int find_first_zero_bit(void * addr, unsigned size)
4474 unsigned long cmp, count;
4476 @@ -642,7 +523,45 @@
4477 return (res < size) ? res : size;
4480 -static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
4481 +static inline int find_first_bit(void * addr, unsigned size)
4483 + unsigned long cmp, count;
4488 + __asm__(" slr %1,%1\n"
4493 + "0: c %1,0(%0,%4)\n"
4499 + "1: l %2,0(%0,%4)\n"
4502 + " tml %2,0xffff\n"
4506 + "2: tml %2,0x00ff\n"
4511 + " ic %2,0(%2,%5)\n"
4514 + : "=&a" (res), "=&d" (cmp), "=&a" (count)
4515 + : "a" (size), "a" (addr), "a" (&_sb_findmap) : "cc" );
4516 + return (res < size) ? res : size;
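For reference, the contract of the new find_first_bit in portable C (a sketch assuming 32-bit unsigned long, as on 31-bit s390; the asm above additionally skips all-zero words a full word at a time before consulting the _sb_findmap table):

static unsigned int find_first_bit_sketch(const unsigned long *addr,
					  unsigned int size)
{
	unsigned int nr;

	for (nr = 0; nr < size; nr++)
		if (addr[nr >> 5] & (1UL << (nr & 31)))
			return nr;
	return size;			/* no set bit found */
}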
4519 +static inline int find_next_zero_bit (void * addr, int size, int offset)
4521 unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
4522 unsigned long bitvec, reg;
4523 @@ -680,11 +599,49 @@
4524 return (offset + res);
4527 +static inline int find_next_bit (void * addr, int size, int offset)
4529 + unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
4530 + unsigned long bitvec, reg;
4531 + int set, bit = offset & 31, res;
4535 + * Look for set bit in first word
4537 + bitvec = (*p) >> bit;
4538 + __asm__(" slr %0,%0\n"
4540 + " tml %1,0xffff\n"
4544 + "0: tml %1,0x00ff\n"
4549 + " ic %1,0(%1,%3)\n"
4551 + : "=&d" (set), "+a" (bitvec), "=&d" (reg)
4552 + : "a" (&_sb_findmap) : "cc" );
4553 + if (set < (32 - bit))
4554 + return set + offset;
4555 + offset += 32 - bit;
4559 + * No set bit yet, search remaining full words for a bit
4561 + res = find_first_bit (p, size - 32 * (p - (unsigned long *) addr));
4562 + return (offset + res);
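Callers normally pair the two routines to enumerate every set bit. A self-contained sketch of that pattern (the helper mimics the contract of returning size when nothing further is found):

#include <stdio.h>

static int find_next_bit_sketch(const unsigned int *map, int size, int off)
{
	for (; off < size; off++)
		if (map[off >> 5] & (1u << (off & 31)))
			return off;
	return size;
}

int main(void)
{
	unsigned int map[2] = { 0x90, 0x1 };	/* bits 4, 7 and 32 set */
	int bit;

	for (bit = find_next_bit_sketch(map, 64, 0); bit < 64;
	     bit = find_next_bit_sketch(map, 64, bit + 1))
		printf("bit %d\n", bit);	/* prints 4, 7, 32 */
	return 0;
}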
4566 * ffz = Find First Zero in word. Undefined if no zero exists,
4567 * so code should check against ~0UL first.
4569 -static __inline__ unsigned long ffz(unsigned long word)
4570 +static inline unsigned long ffz(unsigned long word)
4574 @@ -708,40 +665,109 @@
4578 + * __ffs = find first set bit in word. Undefined if no set bit exists,
4579 + * so code should check against 0UL first.
4581 +static inline unsigned long __ffs(unsigned long word)
4583 + unsigned long reg, result;
4585 + __asm__(" slr %0,%0\n"
4587 + " tml %1,0xffff\n"
4591 + "0: tml %1,0x00ff\n"
4596 + " ic %1,0(%1,%3)\n"
4598 + : "=&d" (result), "+a" (word), "=&d" (reg)
4599 + : "a" (&_sb_findmap) : "cc" );
4604 + * Every architecture must define this function. It's the fastest
4605 + * way of searching a 140-bit bitmap where the first 100 bits are
4606 + * unlikely to be set. It's guaranteed that at least one of the 140
4607 + * bits is set.
4609 +static inline int sched_find_first_bit(unsigned long *b)
4611 + return find_first_bit(b, 140);
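The 140 bits are the O(1) scheduler's priority range (MAX_PRIO = 140: priorities 0 to 99 for real-time tasks, 100 to 139 for timeshare tasks); the scheduler keeps one bit set per priority level with runnable tasks and picks the next queue via the lowest set bit. A standalone sketch of that use (names and layout illustrative, not the scheduler's actual structures):

#include <assert.h>

/* 140-bit priority bitmap in 32-bit words; lowest set bit = best prio. */
static int sched_find_first_bit_sketch(const unsigned long *b)
{
	int nr;

	for (nr = 0; nr < 140; nr++)
		if (b[nr >> 5] & (1UL << (nr & 31)))
			return nr;
	return 140;
}

int main(void)
{
	unsigned long bitmap[5] = { 0 };

	bitmap[3] = 1UL << 24;		/* one runnable task at priority 120 */
	assert(sched_find_first_bit_sketch(bitmap) == 120);
	return 0;
}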
4615 * ffs: find first bit set. This is defined the same way as
4616 * the libc and compiler builtin ffs routines, therefore
4617 * differs in spirit from the above ffz (man ffs).
4620 -extern int __inline__ ffs (int x)
4621 +extern int inline ffs (int x)
4628 - __asm__(" slr %0,%0\n"
4629 - " tml %1,0xffff\n"
4631 + __asm__(" tml %1,0xffff\n"
4636 "0: tml %1,0x00ff\n"
4641 "1: tml %1,0x000f\n"
4646 "2: tml %1,0x0003\n"
4651 "3: tml %1,0x0001\n"
4655 : "=&d" (r), "+d" (x) : : "cc" );
4661 + * fls: find last bit set.
4663 +extern __inline__ int fls(int x)
4669 + __asm__(" tmh %1,0xffff\n"
4673 + "0: tmh %1,0xff00\n"
4677 + "1: tmh %1,0xf000\n"
4681 + "2: tmh %1,0xc000\n"
4685 + "3: tmh %1,0x8000\n"
4689 + : "+d" (r), "+d" (x) : : "cc" );
4695 #define ext2_set_bit(nr, addr) test_and_set_bit((nr)^24, addr)
4696 #define ext2_clear_bit(nr, addr) test_and_clear_bit((nr)^24, addr)
4697 #define ext2_test_bit(nr, addr) test_bit((nr)^24, addr)
4698 -static __inline__ int ext2_find_first_zero_bit(void *vaddr, unsigned size)
4699 +static inline int ext2_find_first_zero_bit(void *vaddr, unsigned size)
4701 unsigned long cmp, count;
4704 return (res < size) ? res : size;
4707 -static __inline__ int
4709 ext2_find_next_zero_bit(void *vaddr, unsigned size, unsigned offset)
4711 unsigned long *addr = vaddr;
4712 diff -urN linux-2.4.24.org/include/asm-s390x/bitops.h linux-2.4.24/include/asm-s390x/bitops.h
4713 --- linux-2.4.24.org/include/asm-s390x/bitops.h 2004-02-04 20:48:28.470122479 +0100
4714 +++ linux-2.4.24/include/asm-s390x/bitops.h 2004-02-04 20:52:54.030886671 +0100
4715 @@ -51,271 +51,220 @@
4716 extern const char _oi_bitmap[];
4717 extern const char _ni_bitmap[];
4718 extern const char _zb_findmap[];
4719 +extern const char _sb_findmap[];
4723 * SMP safe set_bit routine based on compare and swap (CS)
4725 -static __inline__ void set_bit_cs(unsigned long nr, volatile void * addr)
4726 +static inline void set_bit_cs(unsigned long nr, volatile void *ptr)
4728 - unsigned long bits, mask;
4729 - __asm__ __volatile__(
4730 + unsigned long addr, old, new, mask;
4732 + addr = (unsigned long) ptr;
4734 - " lghi %2,7\n" /* CS must be aligned on 4 byte b. */
4735 - " ngr %2,%1\n" /* isolate last 2 bits of address */
4736 - " xgr %1,%2\n" /* make addr % 4 == 0 */
4738 - " agr %0,%2\n" /* add alignement to bitnr */
4739 + addr ^= addr & 7; /* align address to 8 */
4740 + nr += (addr & 7) << 3; /* add alignment to bit number */
4743 - " nr %2,%0\n" /* make shift value */
4747 - " la %1,0(%0,%1)\n" /* calc. address for CS */
4748 - " sllg %3,%3,0(%2)\n" /* make OR mask */
4750 - "0: lgr %2,%0\n" /* CS loop starts here */
4751 - " ogr %2,%3\n" /* set bit */
4752 - " csg %0,%2,0(%1)\n"
4754 - : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask) :
4755 - : "cc", "memory" );
4756 + addr += (nr ^ (nr & 63)) >> 3; /* calculate address for CS */
4757 + mask = 1UL << (nr & 63); /* make OR mask */
4762 + " csg %0,%1,0(%4)\n"
4764 + : "=&d" (old), "=&d" (new), "+m" (*(unsigned long *) addr)
4765 + : "d" (mask), "a" (addr)
4770 * SMP safe clear_bit routine based on compare and swap (CS)
4772 -static __inline__ void clear_bit_cs(unsigned long nr, volatile void * addr)
4773 +static inline void clear_bit_cs(unsigned long nr, volatile void *ptr)
4775 - unsigned long bits, mask;
4776 - __asm__ __volatile__(
4777 + unsigned long addr, old, new, mask;
4779 + addr = (unsigned long) ptr;
4781 - " lghi %2,7\n" /* CS must be aligned on 4 byte b. */
4782 - " ngr %2,%1\n" /* isolate last 2 bits of address */
4783 - " xgr %1,%2\n" /* make addr % 4 == 0 */
4785 - " agr %0,%2\n" /* add alignement to bitnr */
4786 + addr ^= addr & 7; /* align address to 8 */
4787 + nr += (addr & 7) << 3; /* add alignment to bit number */
4790 - " nr %2,%0\n" /* make shift value */
4794 - " la %1,0(%0,%1)\n" /* calc. address for CS */
4796 - " rllg %3,%3,0(%2)\n" /* make AND mask */
4798 - "0: lgr %2,%0\n" /* CS loop starts here */
4799 - " ngr %2,%3\n" /* clear bit */
4800 - " csg %0,%2,0(%1)\n"
4802 - : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask) :
4803 - : "cc", "memory" );
4804 + addr += (nr ^ (nr & 63)) >> 3; /* calculate address for CS */
4805 + mask = ~(1UL << (nr & 63)); /* make AND mask */
4810 + " csg %0,%1,0(%4)\n"
4812 + : "=&d" (old), "=&d" (new), "+m" (*(unsigned long *) addr)
4813 + : "d" (mask), "a" (addr)
4818 * SMP safe change_bit routine based on compare and swap (CS)
4820 -static __inline__ void change_bit_cs(unsigned long nr, volatile void * addr)
4821 +static inline void change_bit_cs(unsigned long nr, volatile void *ptr)
4823 - unsigned long bits, mask;
4824 - __asm__ __volatile__(
4825 + unsigned long addr, old, new, mask;
4827 + addr = (unsigned long) ptr;
4829 - " lghi %2,7\n" /* CS must be aligned on 4 byte b. */
4830 - " ngr %2,%1\n" /* isolate last 2 bits of address */
4831 - " xgr %1,%2\n" /* make addr % 4 == 0 */
4833 - " agr %0,%2\n" /* add alignement to bitnr */
4834 + addr ^= addr & 7; /* align address to 8 */
4835 + nr += (addr & 7) << 3; /* add alignment to bit number */
4838 - " nr %2,%0\n" /* make shift value */
4842 - " la %1,0(%0,%1)\n" /* calc. address for CS */
4843 - " sllg %3,%3,0(%2)\n" /* make XR mask */
4845 - "0: lgr %2,%0\n" /* CS loop starts here */
4846 - " xgr %2,%3\n" /* change bit */
4847 - " csg %0,%2,0(%1)\n"
4849 - : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask) :
4850 - : "cc", "memory" );
4851 + addr += (nr ^ (nr & 63)) >> 3; /* calculate address for CS */
4852 + mask = 1UL << (nr & 63); /* make XOR mask */
4857 + " csg %0,%1,0(%4)\n"
4859 + : "=&d" (old), "=&d" (new), "+m" (*(unsigned long *) addr)
4860 + : "d" (mask), "a" (addr)
4865 * SMP safe test_and_set_bit routine based on compare and swap (CS)
4867 -static __inline__ int
4868 -test_and_set_bit_cs(unsigned long nr, volatile void * addr)
4870 +test_and_set_bit_cs(unsigned long nr, volatile void *ptr)
4872 - unsigned long bits, mask;
4873 - __asm__ __volatile__(
4874 + unsigned long addr, old, new, mask;
4876 + addr = (unsigned long) ptr;
4878 - " lghi %2,7\n" /* CS must be aligned on 4 byte b. */
4879 - " ngr %2,%1\n" /* isolate last 2 bits of address */
4880 - " xgr %1,%2\n" /* make addr % 4 == 0 */
4882 - " agr %0,%2\n" /* add alignement to bitnr */
4883 + addr ^= addr & 7; /* align address to 8 */
4884 + nr += (addr & 7) << 3; /* add alignment to bit number */
4887 - " nr %2,%0\n" /* make shift value */
4891 - " la %1,0(%0,%1)\n" /* calc. address for CS */
4892 - " sllg %3,%3,0(%2)\n" /* make OR mask */
4894 - "0: lgr %2,%0\n" /* CS loop starts here */
4895 - " ogr %2,%3\n" /* set bit */
4896 - " csg %0,%2,0(%1)\n"
4898 - " ngr %0,%3\n" /* isolate old bit */
4899 - : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask) :
4900 - : "cc", "memory" );
4902 + addr += (nr ^ (nr & 63)) >> 3; /* calculate address for CS */
4903 + mask = 1UL << (nr & 63); /* make OR/test mask */
4908 + " csg %0,%1,0(%4)\n"
4910 + : "=&d" (old), "=&d" (new), "+m" (*(unsigned long *) addr)
4911 + : "d" (mask), "a" (addr)
4913 + return (old & mask) != 0;
4917 * SMP safe test_and_clear_bit routine based on compare and swap (CS)
4919 -static __inline__ int
4920 -test_and_clear_bit_cs(unsigned long nr, volatile void * addr)
4922 +test_and_clear_bit_cs(unsigned long nr, volatile void *ptr)
4924 - unsigned long bits, mask;
4925 - __asm__ __volatile__(
4926 + unsigned long addr, old, new, mask;
4928 + addr = (unsigned long) ptr;
4930 - " lghi %2,7\n" /* CS must be aligned on 4 byte b. */
4931 - " ngr %2,%1\n" /* isolate last 2 bits of address */
4932 - " xgr %1,%2\n" /* make addr % 4 == 0 */
4934 - " agr %0,%2\n" /* add alignement to bitnr */
4935 + addr ^= addr & 7; /* align address to 8 */
4936 + nr += (addr & 7) << 3; /* add alignment to bit number */
4939 - " nr %2,%0\n" /* make shift value */
4943 - " la %1,0(%0,%1)\n" /* calc. address for CS */
4944 - " rllg %3,%3,0(%2)\n" /* make AND mask */
4946 - "0: lgr %2,%0\n" /* CS loop starts here */
4947 - " ngr %2,%3\n" /* clear bit */
4948 - " csg %0,%2,0(%1)\n"
4950 - " xgr %0,%2\n" /* isolate old bit */
4951 - : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask) :
4952 - : "cc", "memory" );
4954 + addr += (nr ^ (nr & 63)) >> 3; /* calculate address for CS */
4955 + mask = ~(1UL << (nr & 63)); /* make AND mask */
4960 + " csg %0,%1,0(%4)\n"
4962 + : "=&d" (old), "=&d" (new), "+m" (*(unsigned long *) addr)
4963 + : "d" (mask), "a" (addr)
4965 + return (old ^ new) != 0;
4969 * SMP safe test_and_change_bit routine based on compare and swap (CS)
4971 -static __inline__ int
4972 -test_and_change_bit_cs(unsigned long nr, volatile void * addr)
4974 +test_and_change_bit_cs(unsigned long nr, volatile void *ptr)
4976 - unsigned long bits, mask;
4977 - __asm__ __volatile__(
4978 + unsigned long addr, old, new, mask;
4980 + addr = (unsigned long) ptr;
4982 - " lghi %2,7\n" /* CS must be aligned on 4 byte b. */
4983 - " ngr %2,%1\n" /* isolate last 2 bits of address */
4984 - " xgr %1,%2\n" /* make addr % 4 == 0 */
4986 - " agr %0,%2\n" /* add alignement to bitnr */
4987 + addr ^= addr & 7; /* align address to 8 */
4988 + nr += (addr & 7) << 3; /* add alignment to bit number */
4991 - " nr %2,%0\n" /* make shift value */
4995 - " la %1,0(%0,%1)\n" /* calc. address for CS */
4996 - " sllg %3,%3,0(%2)\n" /* make OR mask */
4998 - "0: lgr %2,%0\n" /* CS loop starts here */
4999 - " xgr %2,%3\n" /* change bit */
5000 - " csg %0,%2,0(%1)\n"
5002 - " ngr %0,%3\n" /* isolate old bit */
5003 - : "+a" (nr), "+a" (addr), "=&a" (bits), "=&d" (mask) :
5004 - : "cc", "memory" );
5006 + addr += (nr ^ (nr & 63)) >> 3; /* calculate address for CS */
5007 + mask = 1UL << (nr & 63); /* make XOR mask */
5012 + " csg %0,%1,0(%4)\n"
5014 + : "=&d" (old), "=&d" (new), "+m" (*(unsigned long *) addr)
5015 + : "d" (mask), "a" (addr)
5017 + return (old & mask) != 0;
5019 #endif /* CONFIG_SMP */
5022 * fast, non-SMP set_bit routine
5024 -static __inline__ void __set_bit(unsigned long nr, volatile void * addr)
5025 +static inline void __set_bit(unsigned long nr, volatile void *ptr)
5027 - unsigned long reg1, reg2;
5028 - __asm__ __volatile__(
5034 - " la %1,0(%1,%3)\n"
5035 - " la %0,0(%0,%4)\n"
5036 - " oc 0(1,%1),0(%0)"
5037 - : "=&a" (reg1), "=&a" (reg2)
5038 - : "a" (nr), "a" (addr), "a" (&_oi_bitmap) : "cc", "memory" );
5041 -static __inline__ void
5042 -__constant_set_bit(const unsigned long nr, volatile void * addr)
5046 - __asm__ __volatile__ ("la 1,%0\n\t"
5048 - : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
5049 - : : "1", "cc", "memory");
5052 - __asm__ __volatile__ ("la 1,%0\n\t"
5054 - : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
5055 - : : "1", "cc", "memory" );
5058 - __asm__ __volatile__ ("la 1,%0\n\t"
5060 - : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
5061 - : : "1", "cc", "memory" );
5064 - __asm__ __volatile__ ("la 1,%0\n\t"
5066 - : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
5067 - : : "1", "cc", "memory" );
5070 - __asm__ __volatile__ ("la 1,%0\n\t"
5072 - : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
5073 - : : "1", "cc", "memory" );
5076 - __asm__ __volatile__ ("la 1,%0\n\t"
5078 - : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
5079 - : : "1", "cc", "memory" );
5082 - __asm__ __volatile__ ("la 1,%0\n\t"
5084 - : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
5085 - : : "1", "cc", "memory" );
5088 - __asm__ __volatile__ ("la 1,%0\n\t"
5090 - : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
5091 - : : "1", "cc", "memory" );
5094 + unsigned long addr;
5096 + addr = (unsigned long) ptr + ((nr ^ 56) >> 3);
5097 + asm volatile("oc 0(1,%1),0(%2)"
5098 + : "+m" (*(char *) addr)
5099 + : "a" (addr), "a" (_oi_bitmap + (nr & 7))
5104 +__constant_set_bit(const unsigned long nr, volatile void *ptr)
5106 + unsigned long addr;
5108 + addr = ((unsigned long) ptr) + ((nr >> 3) ^ 7);
5111 + asm volatile ("oi 0(%1),0x01"
5112 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
5115 + asm volatile ("oi 0(%1),0x02"
5116 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
5119 + asm volatile ("oi 0(%1),0x04"
5120 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
5123 + asm volatile ("oi 0(%1),0x08"
5124 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
5127 + asm volatile ("oi 0(%1),0x10"
5128 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
5131 + asm volatile ("oi 0(%1),0x20"
5132 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
5135 + asm volatile ("oi 0(%1),0x40"
5136 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
5139 + asm volatile ("oi 0(%1),0x80"
5140 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
5145 #define set_bit_simple(nr,addr) \
5146 @@ -326,76 +275,58 @@
5148 * fast, non-SMP clear_bit routine
5150 -static __inline__ void
5151 -__clear_bit(unsigned long nr, volatile void * addr)
5153 +__clear_bit(unsigned long nr, volatile void *ptr)
5155 - unsigned long reg1, reg2;
5156 - __asm__ __volatile__(
5162 - " la %1,0(%1,%3)\n"
5163 - " la %0,0(%0,%4)\n"
5164 - " nc 0(1,%1),0(%0)"
5165 - : "=&a" (reg1), "=&a" (reg2)
5166 - : "d" (nr), "a" (addr), "a" (&_ni_bitmap) : "cc", "memory" );
5169 -static __inline__ void
5170 -__constant_clear_bit(const unsigned long nr, volatile void * addr)
5174 - __asm__ __volatile__ ("la 1,%0\n\t"
5176 - : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
5177 - : : "1", "cc", "memory" );
5180 - __asm__ __volatile__ ("la 1,%0\n\t"
5182 - : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
5183 - : : "1", "cc", "memory" );
5186 - __asm__ __volatile__ ("la 1,%0\n\t"
5188 - : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
5189 - : : "1", "cc", "memory" );
5192 - __asm__ __volatile__ ("la 1,%0\n\t"
5194 - : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
5195 - : : "1", "cc", "memory" );
5198 - __asm__ __volatile__ ("la 1,%0\n\t"
5200 - : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
5201 - : : "cc", "memory" );
5204 - __asm__ __volatile__ ("la 1,%0\n\t"
5206 - : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
5207 - : : "1", "cc", "memory" );
5210 - __asm__ __volatile__ ("la 1,%0\n\t"
5212 - : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
5213 - : : "1", "cc", "memory" );
5216 - __asm__ __volatile__ ("la 1,%0\n\t"
5218 - : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
5219 - : : "1", "cc", "memory" );
5222 + unsigned long addr;
5224 + addr = (unsigned long) ptr + ((nr ^ 56) >> 3);
5225 + asm volatile("nc 0(1,%1),0(%2)"
5226 + : "+m" (*(char *) addr)
5227 + : "a" (addr), "a" (_ni_bitmap + (nr & 7))
5232 +__constant_clear_bit(const unsigned long nr, volatile void *ptr)
5234 + unsigned long addr;
5236 + addr = ((unsigned long) ptr) + ((nr >> 3) ^ 7);
5239 + asm volatile ("ni 0(%1),0xFE"
5240 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
5243 + asm volatile ("ni 0(%1),0xFD"
5244 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
5247 + asm volatile ("ni 0(%1),0xFB"
5248 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
5251 + asm volatile ("ni 0(%1),0xF7"
5252 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
5255 + asm volatile ("ni 0(%1),0xEF"
5256 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
5259 + asm volatile ("ni 0(%1),0xDF"
5260 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
5263 + asm volatile ("ni 0(%1),0xBF"
5264 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
5267 + asm volatile ("ni 0(%1),0x7F"
5268 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
5273 #define clear_bit_simple(nr,addr) \
5274 @@ -406,75 +337,57 @@
5276 * fast, non-SMP change_bit routine
5278 -static __inline__ void __change_bit(unsigned long nr, volatile void * addr)
5279 +static inline void __change_bit(unsigned long nr, volatile void *ptr)
5281 - unsigned long reg1, reg2;
5282 - __asm__ __volatile__(
5288 - " la %1,0(%1,%3)\n"
5289 - " la %0,0(%0,%4)\n"
5290 - " xc 0(1,%1),0(%0)"
5291 - : "=&a" (reg1), "=&a" (reg2)
5292 - : "d" (nr), "a" (addr), "a" (&_oi_bitmap) : "cc", "memory" );
5295 -static __inline__ void
5296 -__constant_change_bit(const unsigned long nr, volatile void * addr)
5300 - __asm__ __volatile__ ("la 1,%0\n\t"
5302 - : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
5303 - : : "cc", "memory" );
5306 - __asm__ __volatile__ ("la 1,%0\n\t"
5308 - : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
5309 - : : "cc", "memory" );
5312 - __asm__ __volatile__ ("la 1,%0\n\t"
5314 - : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
5315 - : : "cc", "memory" );
5318 - __asm__ __volatile__ ("la 1,%0\n\t"
5320 - : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
5321 - : : "cc", "memory" );
5324 - __asm__ __volatile__ ("la 1,%0\n\t"
5326 - : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
5327 - : : "cc", "memory" );
5330 - __asm__ __volatile__ ("la 1,%0\n\t"
5332 - : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
5333 - : : "1", "cc", "memory" );
5336 - __asm__ __volatile__ ("la 1,%0\n\t"
5338 - : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
5339 - : : "1", "cc", "memory" );
5342 - __asm__ __volatile__ ("la 1,%0\n\t"
5344 - : "=m" (*((volatile char *) addr + ((nr>>3)^7)))
5345 - : : "1", "cc", "memory" );
5348 + unsigned long addr;
5350 + addr = (unsigned long) ptr + ((nr ^ 56) >> 3);
5351 + asm volatile("xc 0(1,%1),0(%2)"
5352 + : "+m" (*(char *) addr)
5353 + : "a" (addr), "a" (_oi_bitmap + (nr & 7))
5358 +__constant_change_bit(const unsigned long nr, volatile void *ptr)
5360 + unsigned long addr;
5362 + addr = ((unsigned long) ptr) + ((nr >> 3) ^ 7);
5365 + asm volatile ("xi 0(%1),0x01"
5366 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
5369 + asm volatile ("xi 0(%1),0x02"
5370 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
5373 + asm volatile ("xi 0(%1),0x04"
5374 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
5377 + asm volatile ("xi 0(%1),0x08"
5378 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
5381 + asm volatile ("xi 0(%1),0x10"
5382 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
5385 + asm volatile ("xi 0(%1),0x20"
5386 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
5389 + asm volatile ("xi 0(%1),0x40"
5390 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
5393 + asm volatile ("xi 0(%1),0x80"
5394 + : "+m" (*(char *) addr) : "a" (addr) : "cc" );
5399 #define change_bit_simple(nr,addr) \
5400 @@ -485,77 +398,57 @@
5402 * fast, non-SMP test_and_set_bit routine
5404 -static __inline__ int
5405 -test_and_set_bit_simple(unsigned long nr, volatile void * addr)
5407 +test_and_set_bit_simple(unsigned long nr, volatile void *ptr)
5409 - unsigned long reg1, reg2;
5411 - __asm__ __volatile__(
5417 - " la %1,0(%1,%4)\n"
5420 - " la %2,0(%2,%5)\n"
5421 - " oc 0(1,%1),0(%2)"
5422 - : "=&d" (oldbit), "=&a" (reg1), "=&a" (reg2)
5423 - : "d" (nr), "a" (addr), "a" (&_oi_bitmap) : "cc", "memory" );
5424 - return oldbit & 1;
5425 + unsigned long addr;
5428 + addr = (unsigned long) ptr + ((nr ^ 56) >> 3);
5429 + ch = *(unsigned char *) addr;
5430 + asm volatile("oc 0(1,%1),0(%2)"
5431 + : "+m" (*(char *) addr)
5432 + : "a" (addr), "a" (_oi_bitmap + (nr & 7))
5434 + return (ch >> (nr & 7)) & 1;
5436 #define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y)
5439 * fast, non-SMP test_and_clear_bit routine
5441 -static __inline__ int
5442 -test_and_clear_bit_simple(unsigned long nr, volatile void * addr)
5444 +test_and_clear_bit_simple(unsigned long nr, volatile void *ptr)
5446 - unsigned long reg1, reg2;
5448 + unsigned long addr;
5451 - __asm__ __volatile__(
5457 - " la %1,0(%1,%4)\n"
5460 - " la %2,0(%2,%5)\n"
5461 - " nc 0(1,%1),0(%2)"
5462 - : "=&d" (oldbit), "=&a" (reg1), "=&a" (reg2)
5463 - : "d" (nr), "a" (addr), "a" (&_ni_bitmap) : "cc", "memory" );
5464 - return oldbit & 1;
5465 + addr = (unsigned long) ptr + ((nr ^ 56) >> 3);
5466 + ch = *(unsigned char *) addr;
5467 + asm volatile("nc 0(1,%1),0(%2)"
5468 + : "+m" (*(char *) addr)
5469 + : "a" (addr), "a" (_ni_bitmap + (nr & 7))
5471 + return (ch >> (nr & 7)) & 1;
5473 #define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y)
5476 * fast, non-SMP test_and_change_bit routine
5478 -static __inline__ int
5479 -test_and_change_bit_simple(unsigned long nr, volatile void * addr)
5481 +test_and_change_bit_simple(unsigned long nr, volatile void *ptr)
5483 - unsigned long reg1, reg2;
5485 + unsigned long addr;
5488 - __asm__ __volatile__(
5494 - " la %1,0(%1,%4)\n"
5497 - " la %2,0(%2,%5)\n"
5498 - " xc 0(1,%1),0(%2)"
5499 - : "=&d" (oldbit), "=&a" (reg1), "=&a" (reg2)
5500 - : "d" (nr), "a" (addr), "a" (&_oi_bitmap) : "cc", "memory" );
5501 - return oldbit & 1;
5502 + addr = (unsigned long) ptr + ((nr ^ 56) >> 3);
5503 + ch = *(unsigned char *) addr;
5504 + asm volatile("xc 0(1,%1),0(%2)"
5505 + : "+m" (*(char *) addr)
5506 + : "a" (addr), "a" (_oi_bitmap + (nr & 7))
5508 + return (ch >> (nr & 7)) & 1;
5510 #define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y)
5512 @@ -580,26 +473,18 @@
5513 * This routine doesn't need to be atomic.
5516 -static __inline__ int __test_bit(unsigned long nr, volatile void * addr)
5517 +static inline int __test_bit(unsigned long nr, volatile void *ptr)
5519 - unsigned long reg1, reg2;
5521 + unsigned long addr;
5524 - __asm__ __volatile__(
5530 - " ic %0,0(%2,%4)\n"
5532 - : "=&d" (oldbit), "=&a" (reg1), "=&a" (reg2)
5533 - : "d" (nr), "a" (addr) : "cc" );
5534 - return oldbit & 1;
5535 + addr = (unsigned long) ptr + ((nr ^ 56) >> 3);
5536 + ch = *(unsigned char *) addr;
5537 + return (ch >> (nr & 7)) & 1;
5540 -static __inline__ int
5541 -__constant_test_bit(unsigned long nr, volatile void * addr) {
5543 +__constant_test_bit(unsigned long nr, volatile void *addr) {
5544 return (((volatile char *) addr)[(nr>>3)^7] & (1<<(nr&7))) != 0;
5549 * Find-bit routines..
5551 -static __inline__ unsigned long
5552 +static inline unsigned long
5553 find_first_zero_bit(void * addr, unsigned long size)
5555 unsigned long res, cmp, count;
5556 @@ -653,7 +538,49 @@
5557 return (res < size) ? res : size;
5560 -static __inline__ unsigned long
5561 +static inline unsigned long
5562 +find_first_bit(void * addr, unsigned long size)
5564 + unsigned long res, cmp, count;
5568 + __asm__(" slgr %1,%1\n"
5573 + "0: cg %1,0(%0,%4)\n"
5579 + "1: lg %2,0(%0,%4)\n"
5584 + " srlg %2,%2,32\n"
5585 + "2: lghi %1,0xff\n"
5586 + " tmll %2,0xffff\n"
5590 + "3: tmll %2,0x00ff\n"
5595 + " ic %2,0(%2,%5)\n"
5598 + : "=&a" (res), "=&d" (cmp), "=&a" (count)
5599 + : "a" (size), "a" (addr), "a" (&_sb_findmap) : "cc" );
5600 + return (res < size) ? res : size;
5603 +static inline unsigned long
5604 find_next_zero_bit (void * addr, unsigned long size, unsigned long offset)
5606 unsigned long * p = ((unsigned long *) addr) + (offset >> 6);
5607 @@ -697,14 +624,56 @@
5608 return (offset + res);
5611 +static inline unsigned long
5612 +find_next_bit (void * addr, unsigned long size, unsigned long offset)
5614 + unsigned long * p = ((unsigned long *) addr) + (offset >> 6);
5615 + unsigned long bitvec, reg;
5616 + unsigned long set, bit = offset & 63, res;
5620 + * Look for set bit in first word
5622 + bitvec = (*p) >> bit;
5623 + __asm__(" slgr %0,%0\n"
5627 + " srlg %1,%1,32\n"
5628 + "0: lghi %2,0xff\n"
5629 + " tmll %1,0xffff\n"
5632 + " srlg %1,%1,16\n"
5633 + "1: tmll %1,0x00ff\n"
5638 + " ic %1,0(%1,%3)\n"
5640 + : "=&d" (set), "+a" (bitvec), "=&d" (reg)
5641 + : "a" (&_sb_findmap) : "cc" );
5642 + if (set < (64 - bit))
5643 + return set + offset;
5644 + offset += 64 - bit;
5648 + * No set bit yet, search remaining full words for a bit
5650 + res = find_first_bit (p, size - 64 * (p - (unsigned long *) addr));
5651 + return (offset + res);
5655 * ffz = Find First Zero in word. Undefined if no zero exists,
5656 * so code should check against ~0UL first.
5658 -static __inline__ unsigned long ffz(unsigned long word)
5659 +static inline unsigned long ffz(unsigned long word)
5661 - unsigned long reg;
5663 + unsigned long reg, result;
5665 __asm__(" lhi %2,-1\n"
5667 @@ -730,40 +699,112 @@
5671 + * __ffs = find first set bit in word. Undefined if no set bit exists,
5672 + * so code should check against 0UL first.
5674 +static inline unsigned long __ffs (unsigned long word)
5676 + unsigned long reg, result;
5678 + __asm__(" slgr %0,%0\n"
5682 + " srlg %1,%1,32\n"
5683 + "0: lghi %2,0xff\n"
5684 + " tmll %1,0xffff\n"
5687 + " srlg %1,%1,16\n"
5688 + "1: tmll %1,0x00ff\n"
5693 + " ic %1,0(%1,%3)\n"
5695 + : "=&d" (result), "+a" (word), "=&d" (reg)
5696 + : "a" (&_sb_findmap) : "cc" );
5701 + * Every architecture must define this function. It's the fastest
5702 + * way of searching a 140-bit bitmap where the first 100 bits are
5703 + * unlikely to be set. It's guaranteed that at least one of the 140
5704 + * bits is set.
5706 +static inline int sched_find_first_bit(unsigned long *b)
5708 + return find_first_bit(b, 140);
5712 * ffs: find first bit set. This is defined the same way as
5713 * the libc and compiler builtin ffs routines, therefore
5714 * differs in spirit from the above ffz (man ffs).
5717 -extern int __inline__ ffs (int x)
5718 +extern int inline ffs (int x)
5725 - __asm__(" slr %0,%0\n"
5726 - " tml %1,0xffff\n"
5728 + __asm__(" tml %1,0xffff\n"
5733 "0: tml %1,0x00ff\n"
5738 "1: tml %1,0x000f\n"
5743 "2: tml %1,0x0003\n"
5748 "3: tml %1,0x0001\n"
5752 : "=&d" (r), "+d" (x) : : "cc" );
5758 + * fls: find last bit set.
5760 +extern __inline__ int fls(int x)
5766 + __asm__(" tmh %1,0xffff\n"
5770 + "0: tmh %1,0xff00\n"
5774 + "1: tmh %1,0xf000\n"
5778 + "2: tmh %1,0xc000\n"
5782 + "3: tmh %1,0x8000\n"
5786 + : "+d" (r), "+d" (x) : : "cc" );
5792 #define ext2_set_bit(nr, addr) test_and_set_bit((nr)^56, addr)
5793 #define ext2_clear_bit(nr, addr) test_and_clear_bit((nr)^56, addr)
5794 #define ext2_test_bit(nr, addr) test_bit((nr)^56, addr)
5795 -static __inline__ unsigned long
5796 +static inline unsigned long
5797 ext2_find_first_zero_bit(void *vaddr, unsigned long size)
5799 unsigned long res, cmp, count;
5801 return (res < size) ? res : size;
5804 -static __inline__ unsigned long
5805 +static inline unsigned long
5806 ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset)
5808 unsigned long *addr = vaddr;
5809 diff -urN linux-2.4.24.org/include/asm-sparc/bitops.h linux-2.4.24/include/asm-sparc/bitops.h
5810 --- linux-2.4.24.org/include/asm-sparc/bitops.h 2004-02-04 20:47:50.760965997 +0100
5811 +++ linux-2.4.24/include/asm-sparc/bitops.h 2004-02-04 20:52:54.074877521 +0100
5812 @@ -231,6 +231,57 @@
5817 + * __ffs - find first set bit in word.
5818 + * @word: The word to search
5820 + * Undefined if no bit exists, so code should check against 0 first.
5822 +static __inline__ int __ffs(unsigned long word)
5826 + if ((word & 0xffff) == 0) {
5830 + if ((word & 0xff) == 0) {
5834 + if ((word & 0xf) == 0) {
5838 + if ((word & 0x3) == 0) {
5842 + if ((word & 0x1) == 0)
5848 + * Every architecture must define this function. It's the fastest
5849 + * way of searching a 140-bit bitmap where the first 100 bits are
5850 + * unlikely to be set. It's guaranteed that at least one of the 140
5851 + * bits is set.
5853 +static __inline__ int sched_find_first_bit(unsigned long *b)
5856 + if (unlikely(b[0]))
5857 + return __ffs(b[0]);
5858 + if (unlikely(b[1]))
5859 + return __ffs(b[1]) + 32;
5860 + if (unlikely(b[2]))
5861 + return __ffs(b[2]) + 64;
5863 + return __ffs(b[3]) + 96;
5864 + return __ffs(b[4]) + 128;
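The sparc variant unrolls the scan across the five 32-bit words that hold 140 bits, with unlikely() on the early words because the first 100 (real-time) priorities are rarely occupied. A standalone check of the unrolled logic (a sketch assuming 32-bit unsigned long, as on sparc):

#include <assert.h>

static int ffs32(unsigned long w)		/* stand-in for __ffs */
{
	int nr = 0;

	while (!(w & 1UL)) {
		w >>= 1;
		nr++;
	}
	return nr;
}

static int unrolled(const unsigned long *b)	/* mirrors the code above */
{
	if (b[0]) return ffs32(b[0]);
	if (b[1]) return ffs32(b[1]) + 32;
	if (b[2]) return ffs32(b[2]) + 64;
	if (b[3]) return ffs32(b[3]) + 96;
	return ffs32(b[4]) + 128;
}

int main(void)
{
	unsigned long b[5] = { 0, 0, 0, 0, 1UL << 10 };	/* only bit 138 */

	assert(unrolled(b) == 138);
	return 0;
}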
5868 * ffs: find first bit set. This is defined the same way as
5869 * the libc and compiler builtin ffs routines, therefore
5870 @@ -296,6 +347,32 @@
5871 #define find_first_zero_bit(addr, size) \
5872 find_next_zero_bit((addr), (size), 0)
5875 + * find_next_bit - find the next set bit in a memory region
5876 + * @addr: The address to base the search on
5877 + * @offset: The bitnumber to start searching at
5878 + * @size: The maximum size to search
5880 + * Added for the scheduler; not intended for general use.
5882 +static inline int find_next_bit(unsigned long *addr, int size, int offset)
5884 + unsigned long *p = addr + (offset >> 5);
5885 + int num = offset & ~0x1f;
5886 + unsigned long word;
5889 + word &= ~((1 << (offset & 0x1f)) - 1);
5890 + while (num < size) {
5892 + return __ffs(word) + num;
5900 static inline int test_le_bit(int nr, __const__ void * addr)
5902 __const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
5903 diff -urN linux-2.4.24.org/include/asm-sparc/system.h linux-2.4.24/include/asm-sparc/system.h
5904 --- linux-2.4.24.org/include/asm-sparc/system.h 2004-02-04 20:47:50.644990120 +0100
5905 +++ linux-2.4.24/include/asm-sparc/system.h 2004-02-04 20:52:54.110870035 +0100
5908 * SWITCH_ENTER and SWITCH_DO_LAZY_FPU do not work yet (e.g. SMP does not work)
5910 -#define prepare_to_switch() do { \
5911 +#define prepare_arch_switch(rq, next) do { \
5912 __asm__ __volatile__( \
5913 ".globl\tflush_patch_switch\nflush_patch_switch:\n\t" \
5914 "save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
5916 "save %sp, -0x40, %sp\n\t" \
5917 "restore; restore; restore; restore; restore; restore; restore"); \
5919 +#define finish_arch_switch(rq, next) do{ }while(0)
5920 +#define task_running(rq, p) ((rq)->curr == (p))
5922 /* Much care has gone into this code, do not touch it.
5924 diff -urN linux-2.4.24.org/include/asm-sparc64/bitops.h linux-2.4.24/include/asm-sparc64/bitops.h
5925 --- linux-2.4.24.org/include/asm-sparc64/bitops.h 2004-02-04 20:48:02.155595906 +0100
5926 +++ linux-2.4.24/include/asm-sparc64/bitops.h 2004-02-04 20:52:54.137864420 +0100
5930 * bitops.h: Bit string operations on the V9.
5932 * Copyright 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
5934 #ifndef _SPARC64_BITOPS_H
5935 #define _SPARC64_BITOPS_H
5937 +#include <linux/compiler.h>
5938 #include <asm/byteorder.h>
5940 -extern long ___test_and_set_bit(unsigned long nr, volatile void *addr);
5941 -extern long ___test_and_clear_bit(unsigned long nr, volatile void *addr);
5942 -extern long ___test_and_change_bit(unsigned long nr, volatile void *addr);
5943 +extern long ___test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
5944 +extern long ___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
5945 +extern long ___test_and_change_bit(unsigned long nr, volatile unsigned long *addr);
5947 #define test_and_set_bit(nr,addr) ({___test_and_set_bit(nr,addr)!=0;})
5948 #define test_and_clear_bit(nr,addr) ({___test_and_clear_bit(nr,addr)!=0;})
5949 @@ -21,109 +22,132 @@
5950 #define change_bit(nr,addr) ((void)___test_and_change_bit(nr,addr))
5952 /* "non-atomic" versions... */
5953 -#define __set_bit(X,Y) \
5954 -do { unsigned long __nr = (X); \
5955 - long *__m = ((long *) (Y)) + (__nr >> 6); \
5956 - *__m |= (1UL << (__nr & 63)); \
5958 -#define __clear_bit(X,Y) \
5959 -do { unsigned long __nr = (X); \
5960 - long *__m = ((long *) (Y)) + (__nr >> 6); \
5961 - *__m &= ~(1UL << (__nr & 63)); \
5963 -#define __change_bit(X,Y) \
5964 -do { unsigned long __nr = (X); \
5965 - long *__m = ((long *) (Y)) + (__nr >> 6); \
5966 - *__m ^= (1UL << (__nr & 63)); \
5968 -#define __test_and_set_bit(X,Y) \
5969 -({ unsigned long __nr = (X); \
5970 - long *__m = ((long *) (Y)) + (__nr >> 6); \
5971 - long __old = *__m; \
5972 - long __mask = (1UL << (__nr & 63)); \
5973 - *__m = (__old | __mask); \
5974 - ((__old & __mask) != 0); \
5976 -#define __test_and_clear_bit(X,Y) \
5977 -({ unsigned long __nr = (X); \
5978 - long *__m = ((long *) (Y)) + (__nr >> 6); \
5979 - long __old = *__m; \
5980 - long __mask = (1UL << (__nr & 63)); \
5981 - *__m = (__old & ~__mask); \
5982 - ((__old & __mask) != 0); \
5984 -#define __test_and_change_bit(X,Y) \
5985 -({ unsigned long __nr = (X); \
5986 - long *__m = ((long *) (Y)) + (__nr >> 6); \
5987 - long __old = *__m; \
5988 - long __mask = (1UL << (__nr & 63)); \
5989 - *__m = (__old ^ __mask); \
5990 - ((__old & __mask) != 0); \
5993 +static __inline__ void __set_bit(int nr, volatile unsigned long *addr)
5995 + volatile unsigned long *m = addr + (nr >> 6);
5997 + *m |= (1UL << (nr & 63));
6000 +static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
6002 + volatile unsigned long *m = addr + (nr >> 6);
6004 + *m &= ~(1UL << (nr & 63));
6007 +static __inline__ void __change_bit(int nr, volatile unsigned long *addr)
6009 + volatile unsigned long *m = addr + (nr >> 6);
6011 + *m ^= (1UL << (nr & 63));
6014 +static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr)
6016 + volatile unsigned long *m = addr + (nr >> 6);
6018 + long mask = (1UL << (nr & 63));
6020 + *m = (old | mask);
6021 + return ((old & mask) != 0);
6024 +static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
6026 + volatile unsigned long *m = addr + (nr >> 6);
6028 + long mask = (1UL << (nr & 63));
6030 + *m = (old & ~mask);
6031 + return ((old & mask) != 0);
6034 +static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr)
6036 + volatile unsigned long *m = addr + (nr >> 6);
6038 + long mask = (1UL << (nr & 63));
6040 + *m = (old ^ mask);
6041 + return ((old & mask) != 0);
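Converting these sparc64 helpers from macros to static inline functions buys real type checking: the bitmap argument must now be, or be cast to, unsigned long *, which is exactly why the ext2_* and minix_* wrappers further down gain explicit casts. A sketch of the caller-side effect (my_set_bit stands in for __set_bit; 64-bit long assumed, as on sparc64):

#include <assert.h>

static inline void my_set_bit(int nr, volatile unsigned long *addr)
{
	volatile unsigned long *m = addr + (nr >> 6);

	*m |= (1UL << (nr & 63));
}

int main(void)
{
	unsigned long map[2] = { 0, 0 };
	void *opaque = map;		/* e.g. a filesystem buffer */

	/* the cast is now required; the old macros accepted any pointer */
	my_set_bit(70, (unsigned long *)opaque);
	assert(map[1] == (1UL << 6));	/* bit 70 = word 1, bit 6 */
	return 0;
}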
6044 #define smp_mb__before_clear_bit() do { } while(0)
6045 #define smp_mb__after_clear_bit() do { } while(0)
6047 -extern __inline__ int test_bit(int nr, __const__ void *addr)
6048 +static __inline__ int test_bit(int nr, __const__ volatile unsigned long *addr)
6050 - return (1UL & (((__const__ long *) addr)[nr >> 6] >> (nr & 63))) != 0UL;
6051 + return (1UL & ((addr)[nr >> 6] >> (nr & 63))) != 0UL;
6054 /* The easy/cheese version for now. */
6055 -extern __inline__ unsigned long ffz(unsigned long word)
6056 +static __inline__ unsigned long ffz(unsigned long word)
6058 unsigned long result;
6060 -#ifdef ULTRA_HAS_POPULATION_COUNT /* Thanks for nothing Sun... */
6061 - __asm__ __volatile__(
6064 -" xnor %0, %%g1, %%g2\n"
6066 -"1: " : "=&r" (result)
6070 -#if 1 /* def EASY_CHEESE_VERSION */
6077 - unsigned long tmp;
6082 - tmp = ~word & -~word;
6083 - if (!(unsigned)tmp) {
6087 - if (!(unsigned short)tmp) {
6091 - if (!(unsigned char)tmp) {
6095 + * __ffs - find first bit in word.
6096 + * @word: The word to search
6098 + * Undefined if no bit exists, so code should check against 0 first.
6100 +static __inline__ unsigned long __ffs(unsigned long word)
6102 + unsigned long result = 0;
6104 + while (!(word & 1UL)) {
6108 - if (tmp & 0xf0) result += 4;
6109 - if (tmp & 0xcc) result += 2;
6110 - if (tmp & 0xaa) result ++;
6117 + * fls: find last bit set.
6120 +#define fls(x) generic_fls(x)
6125 + * Every architecture must define this function. It's the fastest
6126 + * way of searching a 140-bit bitmap where the first 100 bits are
6127 + * unlikely to be set. It's guaranteed that at least one of the 140
6128 + * bits is set.
6130 +static inline int sched_find_first_bit(unsigned long *b)
6132 + if (unlikely(b[0]))
6133 + return __ffs(b[0]);
6134 + if (unlikely(((unsigned int)b[1])))
6135 + return __ffs(b[1]) + 64;
6137 + return __ffs(b[1] >> 32) + 96;
6138 + return __ffs(b[2]) + 128;
6142 * ffs: find first bit set. This is defined the same way as
6143 * the libc and compiler builtin ffs routines, therefore
6144 * differs in spirit from the above ffz (man ffs).
6147 -#define ffs(x) generic_ffs(x)
6148 +static __inline__ int ffs(int x)
6152 + return __ffs((unsigned long)x) + 1;
6156 * hweightN: returns the hamming weight (i.e. the number
6159 #ifdef ULTRA_HAS_POPULATION_COUNT
6161 -extern __inline__ unsigned int hweight32(unsigned int w)
6162 +static __inline__ unsigned int hweight32(unsigned int w)
6170 -extern __inline__ unsigned int hweight16(unsigned int w)
6171 +static __inline__ unsigned int hweight16(unsigned int w)
6179 -extern __inline__ unsigned int hweight8(unsigned int w)
6180 +static __inline__ unsigned int hweight8(unsigned int w)
6184 @@ -165,14 +189,69 @@
6186 #endif /* __KERNEL__ */
6189 + * find_next_bit - find the next set bit in a memory region
6190 + * @addr: The address to base the search on
6191 + * @offset: The bitnumber to start searching at
6192 + * @size: The maximum size to search
6194 +static __inline__ unsigned long find_next_bit(unsigned long *addr, unsigned long size, unsigned long offset)
6196 + unsigned long *p = addr + (offset >> 6);
6197 + unsigned long result = offset & ~63UL;
6198 + unsigned long tmp;
6200 + if (offset >= size)
6206 + tmp &= (~0UL << offset);
6210 + goto found_middle;
6214 + while (size & ~63UL) {
6215 + if ((tmp = *(p++)))
6216 + goto found_middle;
6225 + tmp &= (~0UL >> (64 - size));
6226 + if (tmp == 0UL) /* Are any bits set? */
6227 + return result + size; /* Nope. */
6229 + return result + __ffs(tmp);
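The routine has the usual three phases: mask off bits below offset in the first word, scan whole words, then mask off bits at or above size in the tail word. The two masks deserve spelling out (a sketch with 64-bit words, as on sparc64):

#include <assert.h>

int main(void)
{
	unsigned long tmp = ~0UL;
	unsigned long offset = 10, size = 50;

	tmp &= (~0UL << offset);	/* drop bits 0..offset-1 */
	assert((tmp & ((1UL << 10) - 1)) == 0);

	tmp &= (~0UL >> (64 - size));	/* drop bits size..63 */
	assert((tmp >> 50) == 0);
	return 0;
}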
6233 + * find_first_bit - find the first set bit in a memory region
6234 + * @addr: The address to start the search at
6235 + * @size: The maximum size to search
6237 + * Returns the bit-number of the first set bit, not the number of the byte
6238 + * containing a bit.
6240 +#define find_first_bit(addr, size) \
6241 + find_next_bit((addr), (size), 0)
6243 /* find_next_zero_bit() finds the first zero bit in a bit string of length
6244 * 'size' bits, starting the search at bit 'offset'. This is largely based
6245 * on Linus's ALPHA routines, which are pretty portable BTW.
6248 -extern __inline__ unsigned long find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
6249 +static __inline__ unsigned long find_next_zero_bit(unsigned long *addr, unsigned long size, unsigned long offset)
6251 - unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
6252 + unsigned long *p = addr + (offset >> 6);
6253 unsigned long result = offset & ~63UL;
6256 @@ -211,15 +290,15 @@
6257 #define find_first_zero_bit(addr, size) \
6258 find_next_zero_bit((addr), (size), 0)
6260 -extern long ___test_and_set_le_bit(int nr, volatile void *addr);
6261 -extern long ___test_and_clear_le_bit(int nr, volatile void *addr);
6262 +extern long ___test_and_set_le_bit(int nr, volatile unsigned long *addr);
6263 +extern long ___test_and_clear_le_bit(int nr, volatile unsigned long *addr);
6265 #define test_and_set_le_bit(nr,addr) ({___test_and_set_le_bit(nr,addr)!=0;})
6266 #define test_and_clear_le_bit(nr,addr) ({___test_and_clear_le_bit(nr,addr)!=0;})
6267 #define set_le_bit(nr,addr) ((void)___test_and_set_le_bit(nr,addr))
6268 #define clear_le_bit(nr,addr) ((void)___test_and_clear_le_bit(nr,addr))
6270 -extern __inline__ int test_le_bit(int nr, __const__ void * addr)
6271 +static __inline__ int test_le_bit(int nr, __const__ unsigned long * addr)
6274 __const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
6276 #define find_first_zero_le_bit(addr, size) \
6277 find_next_zero_le_bit((addr), (size), 0)
6279 -extern __inline__ unsigned long find_next_zero_le_bit(void *addr, unsigned long size, unsigned long offset)
6280 +static __inline__ unsigned long find_next_zero_le_bit(unsigned long *addr, unsigned long size, unsigned long offset)
6282 - unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
6283 + unsigned long *p = addr + (offset >> 6);
6284 unsigned long result = offset & ~63UL;
6287 @@ -271,18 +350,22 @@
6291 -#define ext2_set_bit test_and_set_le_bit
6292 -#define ext2_clear_bit test_and_clear_le_bit
6293 -#define ext2_test_bit test_le_bit
6294 -#define ext2_find_first_zero_bit find_first_zero_le_bit
6295 -#define ext2_find_next_zero_bit find_next_zero_le_bit
6296 +#define ext2_set_bit(nr,addr) test_and_set_le_bit((nr),(unsigned long *)(addr))
6297 +#define ext2_clear_bit(nr,addr) test_and_clear_le_bit((nr),(unsigned long *)(addr))
6298 +#define ext2_test_bit(nr,addr) test_le_bit((nr),(unsigned long *)(addr))
6299 +#define ext2_find_first_zero_bit(addr, size) \
6300 + find_first_zero_le_bit((unsigned long *)(addr), (size))
6301 +#define ext2_find_next_zero_bit(addr, size, off) \
6302 + find_next_zero_le_bit((unsigned long *)(addr), (size), (off))
6304 /* Bitmap functions for the minix filesystem. */
6305 -#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
6306 -#define minix_set_bit(nr,addr) set_bit(nr,addr)
6307 -#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
6308 -#define minix_test_bit(nr,addr) test_bit(nr,addr)
6309 -#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
6310 +#define minix_test_and_set_bit(nr,addr) test_and_set_bit((nr),(unsigned long *)(addr))
6311 +#define minix_set_bit(nr,addr) set_bit((nr),(unsigned long *)(addr))
6312 +#define minix_test_and_clear_bit(nr,addr) \
6313 + test_and_clear_bit((nr),(unsigned long *)(addr))
6314 +#define minix_test_bit(nr,addr) test_bit((nr),(unsigned long *)(addr))
6315 +#define minix_find_first_zero_bit(addr,size) \
6316 + find_first_zero_bit((unsigned long *)(addr),(size))
6318 #endif /* __KERNEL__ */
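
The find_next_bit()/find_first_bit() pair added above scans 64-bit words:
whole zero words are skipped, the first partial word is masked below the
start offset, the last word is masked past 'size', and __ffs() indexes the
lowest set bit of the surviving word. A minimal userspace sketch of the same
semantics, assuming 64-bit longs and using GCC's __builtin_ctzl() in place
of the kernel's __ffs():

#include <stdio.h>

#define BITS_PER_LONG 64

/* Index of the lowest set bit; stands in for the kernel's __ffs(). */
static unsigned long demo_ffs(unsigned long w)
{
	return (unsigned long)__builtin_ctzl(w);
}

static unsigned long demo_find_next_bit(const unsigned long *addr,
					unsigned long size,
					unsigned long offset)
{
	const unsigned long *p = addr + (offset / BITS_PER_LONG);
	unsigned long result = offset & ~(unsigned long)(BITS_PER_LONG - 1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= BITS_PER_LONG - 1;
	if (offset) {
		tmp = *p++;
		tmp &= ~0UL << offset;		/* drop bits below 'offset' */
		if (size < BITS_PER_LONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}
	while (size >= BITS_PER_LONG) {		/* skip whole zero words */
		if ((tmp = *p++))
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	tmp &= ~0UL >> (BITS_PER_LONG - size);	/* drop bits past 'size' */
	if (tmp == 0UL)
		return result + size;		/* no set bit in range */
found_middle:
	return result + demo_ffs(tmp);
}

int main(void)
{
	unsigned long map[2] = { 1UL << 5, 1UL << 3 };	/* bits 5 and 67 set */

	printf("%lu\n", demo_find_next_bit(map, 128, 0));	/* 5 */
	printf("%lu\n", demo_find_next_bit(map, 128, 6));	/* 67 */
	printf("%lu\n", demo_find_next_bit(map, 128, 68));	/* 128: none */
	return 0;
}
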
6320 diff -urN linux-2.4.24.org/include/asm-sparc64/smp.h linux-2.4.24/include/asm-sparc64/smp.h
6321 --- linux-2.4.24.org/include/asm-sparc64/smp.h 2004-02-04 20:48:01.767676594 +0100
6322 +++ linux-2.4.24/include/asm-sparc64/smp.h 2004-02-04 20:52:54.175856518 +0100
6327 -#define smp_processor_id() (current->processor)
6328 +#define smp_processor_id() (current->cpu)
6330 /* This needn't do anything as we do not sleep the cpu
6331 * inside of the idler task, so an interrupt is not needed
6332 diff -urN linux-2.4.24.org/include/asm-sparc64/system.h linux-2.4.24/include/asm-sparc64/system.h
6333 --- linux-2.4.24.org/include/asm-sparc64/system.h 2004-02-04 20:48:01.898649351 +0100
6334 +++ linux-2.4.24/include/asm-sparc64/system.h 2004-02-04 20:52:54.208849656 +0100
6335 @@ -154,7 +154,18 @@
6337 #define flush_user_windows flushw_user
6338 #define flush_register_windows flushw_all
6339 -#define prepare_to_switch flushw_all
6341 +#define prepare_arch_schedule(prev) task_lock(prev)
6342 +#define finish_arch_schedule(prev) task_unlock(prev)
6343 +#define prepare_arch_switch(rq, next) \
6344 +do { spin_lock(&(next)->switch_lock); \
6345 + spin_unlock(&(rq)->lock); \
6349 +#define finish_arch_switch(rq, prev) \
6350 +do { spin_unlock_irq(&(prev)->switch_lock); \
6353 #ifndef CONFIG_DEBUG_SPINLOCK
6354 #define CHECK_LOCKS(PREV) do { } while(0)
6355 diff -urN linux-2.4.24.org/include/linux/bitops.h linux-2.4.24/include/linux/bitops.h
6356 --- linux-2.4.24.org/include/linux/bitops.h 2004-02-04 20:47:38.725469391 +0100
6357 +++ linux-2.4.24/include/linux/bitops.h 2004-02-04 20:52:54.244842170 +0100
6359 #ifndef _LINUX_BITOPS_H
6360 #define _LINUX_BITOPS_H
6363 + * fls: find last bit set.
6366 +extern __inline__ int generic_fls(int x)
6372 + if (!(x & 0xffff0000u)) {
6376 + if (!(x & 0xff000000u)) {
6380 + if (!(x & 0xf0000000u)) {
6384 + if (!(x & 0xc0000000u)) {
6388 + if (!(x & 0x80000000u)) {
6396 * ffs: find first bit set. This is defined the same way as
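
The generic_fls() hunk above shows the five-stage binary search but elides
the shift/adjust arithmetic in each branch. A userspace sketch of the
standard body (assumed to match the elided lines; fls(0) == 0, fls(1) == 1,
fls(0x80000000) == 32):

#include <stdio.h>

static int demo_fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	/* each test halves the remaining window and adjusts the result */
	if (!(x & 0xffff0000u)) { x <<= 16; r -= 16; }
	if (!(x & 0xff000000u)) { x <<= 8;  r -= 8;  }
	if (!(x & 0xf0000000u)) { x <<= 4;  r -= 4;  }
	if (!(x & 0xc0000000u)) { x <<= 2;  r -= 2;  }
	if (!(x & 0x80000000u)) { x <<= 1;  r -= 1;  }
	return r;
}

int main(void)
{
	printf("%d %d %d\n", demo_fls(0), demo_fls(1), demo_fls(0x80000000u));
	/* prints: 0 1 32 */
	return 0;
}
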
6397 diff -urN linux-2.4.24.org/include/linux/kernel_stat.h linux-2.4.24/include/linux/kernel_stat.h
6398 --- linux-2.4.24.org/include/linux/kernel_stat.h 2004-02-04 20:47:34.063439098 +0100
6399 +++ linux-2.4.24/include/linux/kernel_stat.h 2004-02-04 20:52:54.297831148 +0100
6401 #elif !defined(CONFIG_ARCH_S390)
6402 unsigned int irqs[NR_CPUS][NR_IRQS];
6404 - unsigned int context_swtch;
6407 extern struct kernel_stat kstat;
6408 diff -urN linux-2.4.24.org/include/linux/sched.h linux-2.4.24/include/linux/sched.h
6409 --- linux-2.4.24.org/include/linux/sched.h 2004-02-04 20:47:32.755711107 +0100
6410 +++ linux-2.4.24/include/linux/sched.h 2004-02-04 20:52:54.755735907 +0100
6412 extern unsigned long event;
6414 #include <linux/config.h>
6415 +#include <linux/compiler.h>
6416 #include <linux/binfmts.h>
6417 #include <linux/threads.h>
6418 #include <linux/kernel.h>
6420 #include <asm/mmu.h>
6422 #include <linux/smp.h>
6423 -#include <linux/tty.h>
6424 +//#include <linux/tty.h>
6425 #include <linux/sem.h>
6426 #include <linux/signal.h>
6427 #include <linux/securebits.h>
6429 #define CT_TO_SECS(x) ((x) / HZ)
6430 #define CT_TO_USECS(x) (((x) % HZ) * 1000000/HZ)
6432 -extern int nr_running, nr_threads;
6433 +extern int nr_threads;
6434 extern int last_pid;
6435 +extern unsigned long nr_running(void);
6436 +extern unsigned long nr_uninterruptible(void);
6438 -#include <linux/fs.h>
6439 +//#include <linux/fs.h>
6440 #include <linux/time.h>
6441 #include <linux/param.h>
6442 #include <linux/resource.h>
6443 @@ -109,12 +112,6 @@
6444 #define SCHED_FIFO 1
6448 - * This is an additional bit set when we want to
6449 - * yield the CPU for one re-schedule..
6451 -#define SCHED_YIELD 0x10
6453 struct sched_param {
6456 @@ -132,17 +129,21 @@
6459 extern rwlock_t tasklist_lock;
6460 -extern spinlock_t runqueue_lock;
6461 extern spinlock_t mmlist_lock;
6463 +typedef struct task_struct task_t;
6465 extern void sched_init(void);
6466 -extern void init_idle(void);
6467 +extern void init_idle(task_t *idle, int cpu);
6468 extern void show_state(void);
6469 extern void cpu_init (void);
6470 extern void trap_init(void);
6471 extern void update_process_times(int user);
6472 -extern void update_one_process(struct task_struct *p, unsigned long user,
6473 +extern void update_one_process(task_t *p, unsigned long user,
6474 unsigned long system, int cpu);
6475 +extern void scheduler_tick(int user_tick, int system);
6476 +extern void migration_init(void);
6477 +extern unsigned long cache_decay_ticks;
6479 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
6480 extern signed long FASTCALL(schedule_timeout(signed long timeout));
6481 @@ -152,6 +153,28 @@
6482 extern void flush_scheduled_tasks(void);
6483 extern int start_context_thread(void);
6484 extern int current_is_keventd(void);
6485 +extern void FASTCALL(sched_exit(task_t * p));
6486 +extern int FASTCALL(idle_cpu(int cpu));
6489 + * Priority of a process goes from 0..MAX_PRIO-1, valid RT
6490 + * priority is 0..MAX_RT_PRIO-1, and SCHED_OTHER tasks are
6491 + * in the range MAX_RT_PRIO..MAX_PRIO-1. Priority values
6492 + * are inverted: lower p->prio value means higher priority.
6494 + * The MAX_USER_RT_PRIO value allows the actual maximum
6495 + * RT priority to be separate from the value exported to
6496 + * user-space. This allows kernel threads to set their
6497 + * priority to a value higher than any user task. Note:
6498 + * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
6500 + * Both values are configurable at compile-time.
6503 +#define MAX_USER_RT_PRIO 100
6504 +#define MAX_RT_PRIO MAX_USER_RT_PRIO
6506 +#define MAX_PRIO (MAX_RT_PRIO + 40)
6509 extern void set_cpus_allowed(struct task_struct *p, unsigned long new_mask);
6511 extern struct user_struct root_user;
6512 #define INIT_USER (&root_user)
6514 +typedef struct prio_array prio_array_t;
6516 struct task_struct {
6518 * offsets of these are hardcoded elsewhere - touch with care
6519 @@ -297,35 +322,26 @@
6521 int lock_depth; /* Lock depth */
6524 - * offset 32 begins here on 32-bit platforms. We keep
6525 - * all fields in a single cacheline that are needed for
6526 - * the goodness() loop in schedule().
6530 - unsigned long policy;
6531 - struct mm_struct *mm;
6534 - * cpus_runnable is ~0 if the process is not running on any
6535 - * CPU. It's (1 << cpu) if it's running on a CPU. This mask
6536 - * is updated under the runqueue lock.
6538 - * To determine whether a process might run on a CPU, this
6539 - * mask is AND-ed with cpus_allowed.
6541 - unsigned long cpus_runnable, cpus_allowed;
6543 - * (only the 'next' pointer fits into the cacheline, but
6544 - * that's just fine.)
6545 + * offset 32 begins here on 32-bit platforms.
6548 + int prio, static_prio;
6549 struct list_head run_list;
6550 - unsigned long sleep_time;
6551 + prio_array_t *array;
6553 - struct task_struct *next_task, *prev_task;
6554 - struct mm_struct *active_mm;
6555 + unsigned long sleep_avg;
6556 + unsigned long sleep_timestamp;
6558 + unsigned long policy;
6559 + unsigned long cpus_allowed;
6560 + unsigned int time_slice, first_time_slice;
6562 + task_t *next_task, *prev_task;
6564 + struct mm_struct *mm, *active_mm;
6565 struct list_head local_pages;
6567 unsigned int allocation_order, nr_local_pages;
6570 @@ -348,12 +364,12 @@
6571 * older sibling, respectively. (p->father can be replaced with
6574 - struct task_struct *p_opptr, *p_pptr, *p_cptr, *p_ysptr, *p_osptr;
6575 + task_t *p_opptr, *p_pptr, *p_cptr, *p_ysptr, *p_osptr;
6576 struct list_head thread_group;
6578 /* PID hash table linkage. */
6579 - struct task_struct *pidhash_next;
6580 - struct task_struct **pidhash_pprev;
6581 + task_t *pidhash_next;
6582 + task_t **pidhash_pprev;
6584 wait_queue_head_t wait_chldexit; /* for wait4() */
6585 struct completion *vfork_done; /* for vfork() */
6588 /* Protection of (de-)allocation: mm, files, fs, tty */
6589 spinlock_t alloc_lock;
6590 +/* context-switch lock */
6591 + spinlock_t switch_lock;
6593 /* journalling filesystem info */
6595 @@ -454,9 +472,15 @@
6597 #define _STK_LIM (8*1024*1024)
6599 -#define DEF_COUNTER (10*HZ/100) /* 100 ms time slice */
6600 -#define MAX_COUNTER (20*HZ/100)
6601 -#define DEF_NICE (0)
6603 +extern void set_cpus_allowed(task_t *p, unsigned long new_mask);
6605 +#define set_cpus_allowed(p, new_mask) do { } while (0)
6608 +extern void set_user_nice(task_t *p, long nice);
6609 +extern int task_prio(task_t *p);
6610 +extern int task_nice(task_t *p);
6612 extern void yield(void);
6614 @@ -477,14 +501,14 @@
6615 addr_limit: KERNEL_DS, \
6616 exec_domain: &default_exec_domain, \
6618 - counter: DEF_COUNTER, \
6620 + prio: MAX_PRIO-20, \
6621 + static_prio: MAX_PRIO-20, \
6622 policy: SCHED_OTHER, \
6623 + cpus_allowed: ~0UL, \
6625 active_mm: &init_mm, \
6626 - cpus_runnable: ~0UL, \
6627 - cpus_allowed: ~0UL, \
6628 run_list: LIST_HEAD_INIT(tsk.run_list), \
6634 pending: { NULL, &tsk.pending.head, {{0}}}, \
6636 alloc_lock: SPIN_LOCK_UNLOCKED, \
6637 + switch_lock: SPIN_LOCK_UNLOCKED, \
6638 journal_info: NULL, \
6641 @@ -518,24 +543,23 @@
6645 - struct task_struct task;
6647 unsigned long stack[INIT_TASK_SIZE/sizeof(long)];
6650 extern union task_union init_task_union;
6652 extern struct mm_struct init_mm;
6653 -extern struct task_struct *init_tasks[NR_CPUS];
6655 /* PID hashing. (shouldn't this be dynamic?) */
6656 #define PIDHASH_SZ (4096 >> 2)
6657 -extern struct task_struct *pidhash[PIDHASH_SZ];
6658 +extern task_t *pidhash[PIDHASH_SZ];
6660 #define pid_hashfn(x) ((((x) >> 8) ^ (x)) & (PIDHASH_SZ - 1))
6662 -static inline void hash_pid(struct task_struct *p)
6663 +static inline void hash_pid(task_t *p)
6665 - struct task_struct **htable = &pidhash[pid_hashfn(p->pid)];
6666 + task_t **htable = &pidhash[pid_hashfn(p->pid)];
6668 if((p->pidhash_next = *htable) != NULL)
6669 (*htable)->pidhash_pprev = &p->pidhash_next;
6670 @@ -543,16 +567,16 @@
6671 p->pidhash_pprev = htable;
6674 -static inline void unhash_pid(struct task_struct *p)
6675 +static inline void unhash_pid(task_t *p)
6678 p->pidhash_next->pidhash_pprev = p->pidhash_pprev;
6679 *p->pidhash_pprev = p->pidhash_next;
6682 -static inline struct task_struct *find_task_by_pid(int pid)
6683 +static inline task_t *find_task_by_pid(int pid)
6685 - struct task_struct *p, **htable = &pidhash[pid_hashfn(pid)];
6686 + task_t *p, **htable = &pidhash[pid_hashfn(pid)];
6688 for(p = *htable; p && p->pid != pid; p = p->pidhash_next)
6690 @@ -560,19 +584,6 @@
6694 -#define task_has_cpu(tsk) ((tsk)->cpus_runnable != ~0UL)
6696 -static inline void task_set_cpu(struct task_struct *tsk, unsigned int cpu)
6698 - tsk->processor = cpu;
6699 - tsk->cpus_runnable = 1UL << cpu;
6702 -static inline void task_release_cpu(struct task_struct *tsk)
6704 - tsk->cpus_runnable = ~0UL;
6707 /* per-UID process charging. */
6708 extern struct user_struct * alloc_uid(uid_t);
6709 extern void free_uid(struct user_struct *);
6710 @@ -600,47 +611,50 @@
6711 extern void FASTCALL(interruptible_sleep_on(wait_queue_head_t *q));
6712 extern long FASTCALL(interruptible_sleep_on_timeout(wait_queue_head_t *q,
6713 signed long timeout));
6714 -extern int FASTCALL(wake_up_process(struct task_struct * tsk));
6715 +extern int FASTCALL(wake_up_process(task_t * p));
6716 +extern void FASTCALL(wake_up_forked_process(task_t * p));
6718 #define wake_up(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
6719 #define wake_up_nr(x, nr) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
6720 #define wake_up_all(x) __wake_up((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 0)
6721 -#define wake_up_sync(x) __wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1)
6722 -#define wake_up_sync_nr(x, nr) __wake_up_sync((x),TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, nr)
6723 #define wake_up_interruptible(x) __wake_up((x),TASK_INTERRUPTIBLE, 1)
6724 #define wake_up_interruptible_nr(x, nr) __wake_up((x),TASK_INTERRUPTIBLE, nr)
6725 #define wake_up_interruptible_all(x) __wake_up((x),TASK_INTERRUPTIBLE, 0)
6726 -#define wake_up_interruptible_sync(x) __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
6727 -#define wake_up_interruptible_sync_nr(x, nr) __wake_up_sync((x),TASK_INTERRUPTIBLE, nr)
6729 +#define wake_up_interruptible_sync(x) __wake_up_sync((x),TASK_INTERRUPTIBLE, 1)
6731 +#define wake_up_interruptible_sync(x) __wake_up((x),TASK_INTERRUPTIBLE, 1)
6734 asmlinkage long sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru);
6736 extern int in_group_p(gid_t);
6737 extern int in_egroup_p(gid_t);
6739 extern void proc_caches_init(void);
6740 -extern void flush_signals(struct task_struct *);
6741 -extern void flush_signal_handlers(struct task_struct *);
6742 +extern void flush_signals(task_t *);
6743 +extern void flush_signal_handlers(task_t *);
6744 extern void sig_exit(int, int, struct siginfo *);
6745 extern int dequeue_signal(sigset_t *, siginfo_t *);
6746 extern void block_all_signals(int (*notifier)(void *priv), void *priv,
6748 extern void unblock_all_signals(void);
6749 -extern int send_sig_info(int, struct siginfo *, struct task_struct *);
6750 -extern int force_sig_info(int, struct siginfo *, struct task_struct *);
6751 +extern int send_sig_info(int, struct siginfo *, task_t *);
6752 +extern int force_sig_info(int, struct siginfo *, task_t *);
6753 extern int kill_pg_info(int, struct siginfo *, pid_t);
6754 extern int kill_sl_info(int, struct siginfo *, pid_t);
6755 extern int kill_proc_info(int, struct siginfo *, pid_t);
6756 -extern void notify_parent(struct task_struct *, int);
6757 -extern void do_notify_parent(struct task_struct *, int);
6758 -extern void force_sig(int, struct task_struct *);
6759 -extern int send_sig(int, struct task_struct *, int);
6760 +extern void notify_parent(task_t *, int);
6761 +extern void do_notify_parent(task_t *, int);
6762 +extern void force_sig(int, task_t *);
6763 +extern int send_sig(int, task_t *, int);
6764 extern int kill_pg(pid_t, int, int);
6765 extern int kill_sl(pid_t, int, int);
6766 extern int kill_proc(pid_t, int, int);
6767 extern int do_sigaction(int, const struct k_sigaction *, struct k_sigaction *);
6768 extern int do_sigaltstack(const stack_t *, stack_t *, unsigned long);
6770 -static inline int signal_pending(struct task_struct *p)
6771 +static inline int signal_pending(task_t *p)
6773 return (p->sigpending != 0);
6776 This is required every time the blocked sigset_t changes.
6777 All callers should have t->sigmask_lock. */
6779 -static inline void recalc_sigpending(struct task_struct *t)
6780 +static inline void recalc_sigpending(task_t *t)
6782 t->sigpending = has_pending_signals(&t->pending.signal, &t->blocked);
6784 @@ -786,16 +800,17 @@
6785 extern int expand_fdset(struct files_struct *, int nr);
6786 extern void free_fdset(fd_set *, int);
6788 -extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
6789 +extern int copy_thread(int, unsigned long, unsigned long, unsigned long, task_t *, struct pt_regs *);
6790 extern void flush_thread(void);
6791 extern void exit_thread(void);
6793 -extern void exit_mm(struct task_struct *);
6794 -extern void exit_files(struct task_struct *);
6795 -extern void exit_sighand(struct task_struct *);
6796 +extern void exit_mm(task_t *);
6797 +extern void exit_files(task_t *);
6798 +extern void exit_sighand(task_t *);
6800 extern void reparent_to_init(void);
6801 extern void daemonize(void);
6802 +extern task_t *child_reaper;
6804 extern int do_execve(char *, char **, char **, struct pt_regs *);
6805 extern int do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long);
6808 extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
6810 +extern void wait_task_inactive(task_t * p);
6811 +extern void kick_if_running(task_t * p);
6813 #define __wait_event(wq, condition) \
6815 wait_queue_t __wait; \
6816 @@ -887,27 +905,12 @@
6817 for (task = next_thread(current) ; task != current ; task = next_thread(task))
6819 #define next_thread(p) \
6820 - list_entry((p)->thread_group.next, struct task_struct, thread_group)
6821 + list_entry((p)->thread_group.next, task_t, thread_group)
6823 #define thread_group_leader(p) (p->pid == p->tgid)
6825 -static inline void del_from_runqueue(struct task_struct * p)
6826 +static inline void unhash_process(task_t *p)
6829 - p->sleep_time = jiffies;
6830 - list_del(&p->run_list);
6831 - p->run_list.next = NULL;
6834 -static inline int task_on_runqueue(struct task_struct *p)
6836 - return (p->run_list.next != NULL);
6839 -static inline void unhash_process(struct task_struct *p)
6841 - if (task_on_runqueue(p))
6842 - out_of_line_bug();
6843 write_lock_irq(&tasklist_lock);
6846 @@ -917,12 +920,12 @@
6849 /* Protects ->fs, ->files, ->mm, and synchronises with wait4(). Nests inside tasklist_lock */
6850 -static inline void task_lock(struct task_struct *p)
6851 +static inline void task_lock(task_t *p)
6853 spin_lock(&p->alloc_lock);
6856 -static inline void task_unlock(struct task_struct *p)
6857 +static inline void task_unlock(task_t *p)
6859 spin_unlock(&p->alloc_lock);
6861 @@ -946,6 +949,26 @@
6865 +static inline void set_need_resched(void)
6867 + current->need_resched = 1;
6870 +static inline void clear_need_resched(void)
6872 + current->need_resched = 0;
6875 +static inline void set_tsk_need_resched(task_t *tsk)
6877 + tsk->need_resched = 1;
6880 +static inline void clear_tsk_need_resched(task_t *tsk)
6882 + tsk->need_resched = 0;
6885 static inline int need_resched(void)
6887 return (unlikely(current->need_resched));
6891 #endif /* __KERNEL__ */
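
The new sched.h constants split one inverted 0..139 priority space: 0..99
for real-time tasks, 100..139 for SCHED_OTHER, with a lower p->prio meaning
a higher priority. A userspace sketch of the layout, borrowing the
NICE_TO_PRIO()/PRIO_TO_NICE() mapping that the kernel/sched.c hunk later in
this patch defines:

#include <stdio.h>

#define MAX_USER_RT_PRIO	100
#define MAX_RT_PRIO		MAX_USER_RT_PRIO
#define MAX_PRIO		(MAX_RT_PRIO + 40)

/* from the kernel/sched.c hunk of this patch */
#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)

int main(void)
{
	printf("RT priorities:  0..%d\n", MAX_RT_PRIO - 1);		/* 0..99 */
	printf("SCHED_OTHER:    %d..%d\n", MAX_RT_PRIO, MAX_PRIO - 1);	/* 100..139 */
	printf("nice -20 -> prio %d\n", NICE_TO_PRIO(-20));		/* 100 */
	printf("nice   0 -> prio %d\n", NICE_TO_PRIO(0));		/* 120 */
	printf("nice +19 -> prio %d\n", NICE_TO_PRIO(19));		/* 139 */
	printf("prio 115 -> nice %d\n", PRIO_TO_NICE(115));		/* -5 */
	return 0;
}
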
6894 diff -urN linux-2.4.24.org/include/linux/smp_balance.h linux-2.4.24/include/linux/smp_balance.h
6895 --- linux-2.4.24.org/include/linux/smp_balance.h 1970-01-01 01:00:00.000000000 +0100
6896 +++ linux-2.4.24/include/linux/smp_balance.h 2004-02-04 20:52:54.758735283 +0100
6898 +#ifndef _LINUX_SMP_BALANCE_H
6899 +#define _LINUX_SMP_BALANCE_H
6902 + * per-architecture load balancing logic, e.g. for hyperthreading
6905 +#ifdef ARCH_HAS_SMP_BALANCE
6906 +#include <asm/smp_balance.h>
6908 +#define arch_load_balance(x, y) (0)
6909 +#define arch_reschedule_idle_override(x, idle) (idle)
6912 +#endif /* _LINUX_SMP_BALANCE_H */
6913 diff -urN linux-2.4.24.org/include/linux/smp.h linux-2.4.24/include/linux/smp.h
6914 --- linux-2.4.24.org/include/linux/smp.h 2004-02-04 20:47:38.184581896 +0100
6915 +++ linux-2.4.24/include/linux/smp.h 2004-02-04 20:52:54.806725301 +0100
6917 #define cpu_number_map(cpu) 0
6918 #define smp_call_function(func,info,retry,wait) ({ 0; })
6919 #define cpu_online_map 1
6920 +static inline void smp_send_reschedule(int cpu) { }
6921 +static inline void smp_send_reschedule_all(void) { }
6926 + * Common definitions:
6928 +#define cpu() smp_processor_id()
6931 diff -urN linux-2.4.24.org/include/linux/wait.h linux-2.4.24/include/linux/wait.h
6932 --- linux-2.4.24.org/include/linux/wait.h 2004-02-04 20:47:33.472562001 +0100
6933 +++ linux-2.4.24/include/linux/wait.h 2004-02-04 20:52:54.861713864 +0100
6935 # define wq_write_lock_irq write_lock_irq
6936 # define wq_write_lock_irqsave write_lock_irqsave
6937 # define wq_write_unlock_irqrestore write_unlock_irqrestore
6938 +# define wq_write_unlock_irq write_unlock_irq
6939 # define wq_write_unlock write_unlock
6941 # define wq_lock_t spinlock_t
6943 # define wq_write_lock_irq spin_lock_irq
6944 # define wq_write_lock_irqsave spin_lock_irqsave
6945 # define wq_write_unlock_irqrestore spin_unlock_irqrestore
6946 +# define wq_write_unlock_irq spin_unlock_irq
6947 # define wq_write_unlock spin_unlock
6950 diff -urN linux-2.4.24.org/init/main.c linux-2.4.24/init/main.c
6951 --- linux-2.4.24.org/init/main.c 2004-02-04 20:47:26.630985058 +0100
6952 +++ linux-2.4.24/init/main.c 2004-02-04 20:52:54.909703882 +0100
6954 extern void setup_arch(char **);
6955 extern void cpu_idle(void);
6957 -unsigned long wait_init_idle;
6961 #ifdef CONFIG_X86_LOCAL_APIC
6962 @@ -303,34 +301,24 @@
6963 APIC_init_uniprocessor();
6966 -#define smp_init() do { } while (0)
6967 +#define smp_init() do { } while (0)
6973 /* Called by boot processor to activate the rest. */
6974 static void __init smp_init(void)
6976 /* Get other processors into their bootup holding patterns. */
6978 - wait_init_idle = cpu_online_map;
6979 - clear_bit(current->processor, &wait_init_idle); /* Don't wait on me! */
6981 smp_threads_ready=1;
6984 - /* Wait for the other cpus to set up their idle processes */
6985 - printk("Waiting on wait_init_idle (map = 0x%lx)\n", wait_init_idle);
6986 - while (wait_init_idle) {
6990 - printk("All processors have done init_idle\n");
6997 * We need to finalize in a non-__init function or else race conditions
6998 * between the root thread and the init thread may cause start_kernel to
7001 kernel_thread(init, NULL, CLONE_FS | CLONE_FILES | CLONE_SIGNAL);
7003 - current->need_resched = 1;
7010 * Activate the first processor.
7013 printk("POSIX conformance testing by UNIFIX\n");
7015 + init_idle(current, smp_processor_id());
7017 * We count on the initial thread going ok
7018 * Like idlers init is an unlocked kernel thread, which will
7019 @@ -465,6 +453,10 @@
7021 static void __init do_basic_setup(void)
7023 + /* Start the per-CPU migration threads */
7029 * Tell the world that we're going to be the grim
7030 diff -urN linux-2.4.24.org/kernel/capability.c linux-2.4.24/kernel/capability.c
7031 --- linux-2.4.24.org/kernel/capability.c 2004-02-04 20:47:27.302845310 +0100
7032 +++ linux-2.4.24/kernel/capability.c 2004-02-04 20:52:54.945696396 +0100
7034 #include <linux/mm.h>
7035 #include <asm/uaccess.h>
7037 +unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */
7039 kernel_cap_t cap_bset = CAP_INIT_EFF_SET;
7041 /* Note: never hold tasklist_lock while spinning for this one */
7042 diff -urN linux-2.4.24.org/kernel/exit.c linux-2.4.24/kernel/exit.c
7043 --- linux-2.4.24.org/kernel/exit.c 2004-02-04 20:47:27.240858204 +0100
7044 +++ linux-2.4.24/kernel/exit.c 2004-02-04 20:52:54.951695148 +0100
7047 static void release_task(struct task_struct * p)
7049 - if (p != current) {
7054 - * Wait to make sure the process isn't on the
7055 - * runqueue (active on some other CPU still)
7059 - if (!task_has_cpu(p))
7065 - } while (task_has_cpu(p));
7068 + wait_task_inactive(p);
7070 - atomic_dec(&p->user->processes);
7071 - free_uid(p->user);
7072 - unhash_process(p);
7074 - release_thread(p);
7075 - current->cmin_flt += p->min_flt + p->cmin_flt;
7076 - current->cmaj_flt += p->maj_flt + p->cmaj_flt;
7077 - current->cnswap += p->nswap + p->cnswap;
7079 - * Potentially available timeslices are retrieved
7080 - * here - this way the parent does not get penalized
7081 - * for creating too many processes.
7083 - * (this cannot be used to artificially 'generate'
7084 - * timeslices, because any timeslice recovered here
7085 - * was given away by the parent in the first place.)
7087 - current->counter += p->counter;
7088 - if (current->counter >= MAX_COUNTER)
7089 - current->counter = MAX_COUNTER;
7091 - free_task_struct(p);
7093 - printk("task releasing itself\n");
7095 + atomic_dec(&p->user->processes);
7096 + free_uid(p->user);
7097 + unhash_process(p);
7099 + release_thread(p);
7100 + current->cmin_flt += p->min_flt + p->cmin_flt;
7101 + current->cmaj_flt += p->maj_flt + p->cmaj_flt;
7102 + current->cnswap += p->nswap + p->cnswap;
7105 + free_task_struct(p);
7109 @@ -150,6 +123,79 @@
7114 + * reparent_to_init() - Reparent the calling kernel thread to the init task.
7116 + * If a kernel thread is launched as a result of a system call, or if
7117 + * it ever exits, it should generally reparent itself to init so that
7118 + * it is correctly cleaned up on exit.
7120 + * The various task state such as scheduling policy and priority may have
7121 + * been inherited from a user process, so we reset them to sane values here.
7123 + * NOTE that reparent_to_init() gives the caller full capabilities.
7125 +void reparent_to_init(void)
7127 + write_lock_irq(&tasklist_lock);
7129 + /* Reparent to init */
7130 + REMOVE_LINKS(current);
7131 + current->p_pptr = child_reaper;
7132 + current->p_opptr = child_reaper;
7133 + SET_LINKS(current);
7135 + /* Set the exit signal to SIGCHLD so we signal init on exit */
7136 + current->exit_signal = SIGCHLD;
7138 + current->ptrace = 0;
7139 + if ((current->policy == SCHED_OTHER) && (task_nice(current) < 0))
7140 + set_user_nice(current, 0);
7141 + /* cpus_allowed? */
7142 + /* rt_priority? */
7144 + current->cap_effective = CAP_INIT_EFF_SET;
7145 + current->cap_inheritable = CAP_INIT_INH_SET;
7146 + current->cap_permitted = CAP_FULL_SET;
7147 + current->keep_capabilities = 0;
7148 + memcpy(current->rlim, init_task.rlim, sizeof(*(current->rlim)));
7149 + current->user = INIT_USER;
7151 + write_unlock_irq(&tasklist_lock);
7155 + * Put all the gunge required to become a kernel thread without
7156 + * attached user resources in one place where it belongs.
7159 +void daemonize(void)
7161 + struct fs_struct *fs;
7165 + * If we were started as a result of loading a module, close all of the
7166 + * user space pages. We don't need them, and if we didn't close them
7167 + * they would be locked into memory.
7171 + current->session = 1;
7172 + current->pgrp = 1;
7173 + current->tty = NULL;
7175 + /* Become as one with the init task */
7177 + exit_fs(current); /* current->fs->count--; */
7178 + fs = init_task.fs;
7180 + atomic_inc(&fs->count);
7181 + exit_files(current);
7182 + current->files = init_task.files;
7183 + atomic_inc(&current->files->count);
7187 * When we die, we re-parent all our children.
7188 * Try to give them to another thread in our thread
7190 /* Make sure we're not reparenting to ourselves */
7191 p->p_opptr = child_reaper;
7193 + p->first_time_slice = 0;
7194 if (p->pdeath_signal) send_sig(p->pdeath_signal, p, 0);
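
reparent_to_init() and daemonize() added above package the boilerplate a 2.4
kernel thread runs at startup: drop the user context it was forked with,
rehome under init so exit cleanup works, and reset inherited scheduling and
capability state. A kernel-context sketch of the calling pattern they serve
(hypothetical thread function, not part of the patch; assumes the usual 2.4
<linux/sched.h> declarations):

/* Hypothetical 2.4-style kernel thread using the helpers added above. */
static int my_kthread(void *unused)
{
	daemonize();		/* detach: session/pgrp/tty, borrow init's fs+files */
	reparent_to_init();	/* rehome under init, reset sched/capability state */
	strcpy(current->comm, "mykthread");

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);		/* nap for about a second */
		if (signal_pending(current))
			break;
		/* ... periodic work ... */
	}
	return 0;
}

/* started elsewhere with:
 *	kernel_thread(my_kthread, NULL, CLONE_FS | CLONE_FILES);
 */
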
7197 diff -urN linux-2.4.24.org/kernel/fork.c linux-2.4.24/kernel/fork.c
7198 --- linux-2.4.24.org/kernel/fork.c 2004-02-04 20:47:26.750960103 +0100
7199 +++ linux-2.4.24/kernel/fork.c 2004-02-04 20:52:54.987687662 +0100
7202 /* The idle threads do not count.. */
7207 unsigned long total_forks; /* Handle normal Linux uptimes. */
7210 struct task_struct *pidhash[PIDHASH_SZ];
7212 +rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED; /* outer */
7214 void add_wait_queue(wait_queue_head_t *q, wait_queue_t * wait)
7216 unsigned long flags;
7218 if (p->pid == 0 && current->pid != 0)
7219 goto bad_fork_cleanup;
7221 - p->run_list.next = NULL;
7222 - p->run_list.prev = NULL;
7225 init_waitqueue_head(&p->wait_chldexit);
7226 p->vfork_done = NULL;
7228 init_completion(&vfork);
7230 spin_lock_init(&p->alloc_lock);
7231 + spin_lock_init(&p->switch_lock);
7234 init_sigpending(&p->pending);
7235 @@ -727,11 +726,11 @@
7239 - p->cpus_runnable = ~0UL;
7240 - p->processor = current->processor;
7242 /* ?? should we just memset this ?? */
7243 for(i = 0; i < smp_num_cpus; i++)
7244 - p->per_cpu_utime[i] = p->per_cpu_stime[i] = 0;
7245 + p->per_cpu_utime[cpu_logical_map(i)] =
7246 + p->per_cpu_stime[cpu_logical_map(i)] = 0;
7247 spin_lock_init(&p->sigmask_lock);
7250 @@ -769,15 +768,27 @@
7251 p->pdeath_signal = 0;
7254 - * "share" dynamic priority between parent and child, thus the
7255 - * total amount of dynamic priorities in the system doesn't change,
7256 - * more scheduling fairness. This is only important in the first
7257 - * timeslice, on the long run the scheduling behaviour is unchanged.
7259 - p->counter = (current->counter + 1) >> 1;
7260 - current->counter >>= 1;
7261 - if (!current->counter)
7262 - current->need_resched = 1;
7263 + * Share the timeslice between parent and child, thus the
7264 + * total amount of pending timeslices in the system doesn't change,
7265 + * resulting in more scheduling fairness.
7268 + if (!current->time_slice)
7270 + p->time_slice = (current->time_slice + 1) >> 1;
7271 + current->time_slice >>= 1;
7272 + p->first_time_slice = 1;
7273 + if (!current->time_slice) {
7275 + * This case is rare; it happens when the parent has only
7276 + * a single jiffy left from its timeslice. Taking the
7277 + * runqueue lock is not a problem.
7279 + current->time_slice = 1;
7280 + scheduler_tick(0,0);
7282 + p->sleep_timestamp = jiffies;
7286 * Ok, add it to the run-queues and make it
7287 @@ -813,11 +824,16 @@
7289 if (p->ptrace & PT_PTRACED)
7290 send_sig(SIGSTOP, p, 1);
7292 - wake_up_process(p); /* do this last */
7293 + wake_up_forked_process(p); /* do this last */
7295 if (clone_flags & CLONE_VFORK)
7296 wait_for_completion(&vfork);
7299 + * Let the child process run first, to avoid most of the
7300 + * COW overhead when the child exec()s afterwards.
7302 + current->need_resched = 1;
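
The fork.c hunk above carries the old "share the counter" idea over to
time_slice: the child takes half of the parent's remaining slice rounded up,
the parent keeps the rest, so a fork never mints new CPU time. The
arithmetic, modelled in userspace:

#include <stdio.h>

int main(void)
{
	unsigned int parent_slice = 21;		/* ticks left on the parent */
	unsigned int child_slice;

	child_slice = (parent_slice + 1) >> 1;	/* child: half, rounded up */
	parent_slice >>= 1;			/* parent keeps the rest */

	/* 21 -> child 11 + parent 10: still 21 ticks in total */
	printf("child %u, parent %u\n", child_slice, parent_slice);
	return 0;
}
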
7306 diff -urN linux-2.4.24.org/kernel/ksyms.c linux-2.4.24/kernel/ksyms.c
7307 --- linux-2.4.24.org/kernel/ksyms.c 2004-02-04 20:47:26.747960727 +0100
7308 +++ linux-2.4.24/kernel/ksyms.c 2004-02-04 20:52:54.992686623 +0100
7310 /* process management */
7311 EXPORT_SYMBOL(complete_and_exit);
7312 EXPORT_SYMBOL(__wake_up);
7313 -EXPORT_SYMBOL(__wake_up_sync);
7314 EXPORT_SYMBOL(wake_up_process);
7315 EXPORT_SYMBOL(sleep_on);
7316 EXPORT_SYMBOL(sleep_on_timeout);
7319 EXPORT_SYMBOL(yield);
7320 EXPORT_SYMBOL(__cond_resched);
7321 +EXPORT_SYMBOL(set_user_nice);
7322 +EXPORT_SYMBOL(nr_context_switches);
7323 EXPORT_SYMBOL(jiffies);
7324 EXPORT_SYMBOL(xtime);
7325 EXPORT_SYMBOL(do_gettimeofday);
7329 EXPORT_SYMBOL(kstat);
7330 -EXPORT_SYMBOL(nr_running);
7333 EXPORT_SYMBOL(panic);
7334 diff -urN linux-2.4.24.org/kernel/printk.c linux-2.4.24/kernel/printk.c
7335 --- linux-2.4.24.org/kernel/printk.c 2004-02-04 20:47:26.744961351 +0100
7336 +++ linux-2.4.24/kernel/printk.c 2004-02-04 20:52:55.015681840 +0100
7338 #include <linux/module.h>
7339 #include <linux/interrupt.h> /* For in_interrupt() */
7340 #include <linux/config.h>
7341 +#include <linux/delay.h>
7343 #include <asm/uaccess.h>
7345 diff -urN linux-2.4.24.org/kernel/ptrace.c linux-2.4.24/kernel/ptrace.c
7346 --- linux-2.4.24.org/kernel/ptrace.c 2004-02-04 20:47:26.776954696 +0100
7347 +++ linux-2.4.24/kernel/ptrace.c 2004-02-04 20:52:55.029678928 +0100
7349 if (child->state != TASK_STOPPED)
7352 - /* Make sure the child gets off its CPU.. */
7355 - if (!task_has_cpu(child))
7357 - task_unlock(child);
7359 - if (child->state != TASK_STOPPED)
7363 - } while (task_has_cpu(child));
7365 - task_unlock(child);
7366 + wait_task_inactive(child);
7370 diff -urN linux-2.4.24.org/kernel/sched.c linux-2.4.24/kernel/sched.c
7371 --- linux-2.4.24.org/kernel/sched.c 2004-02-04 20:47:26.741961975 +0100
7372 +++ linux-2.4.24/kernel/sched.c 2004-02-04 20:52:55.076669155 +0100
7375 * Kernel scheduler and related syscalls
7377 - * Copyright (C) 1991, 1992 Linus Torvalds
7378 + * Copyright (C) 1991-2002 Linus Torvalds
7380 * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and
7381 * make semaphores SMP safe
7382 * 1998-11-19 Implemented schedule_timeout() and related stuff
7383 * by Andrea Arcangeli
7384 - * 1998-12-28 Implemented better SMP scheduling by Ingo Molnar
7385 + * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar:
7386 + * hybrid priority-list and round-robin design with
7387 + * an array-switch method of distributing timeslices
7388 + * and per-CPU runqueues. Additional code by Davide
7389 + * Libenzi, Robert Love, and Rusty Russell.
7393 - * 'sched.c' is the main kernel file. It contains scheduling primitives
7394 - * (sleep_on, wakeup, schedule etc) as well as a number of simple system
7395 - * call functions (type getpid()), which just extract a field from
7399 -#include <linux/config.h>
7400 #include <linux/mm.h>
7401 -#include <linux/init.h>
7402 -#include <linux/smp_lock.h>
7403 #include <linux/nmi.h>
7404 #include <linux/interrupt.h>
7405 -#include <linux/kernel_stat.h>
7406 -#include <linux/completion.h>
7407 -#include <linux/prefetch.h>
7408 -#include <linux/compiler.h>
7410 +#include <linux/init.h>
7411 #include <asm/uaccess.h>
7412 +#include <linux/smp_lock.h>
7413 #include <asm/mmu_context.h>
7415 -extern void timer_bh(void);
7416 -extern void tqueue_bh(void);
7417 -extern void immediate_bh(void);
7418 +#include <linux/kernel_stat.h>
7419 +#include <linux/completion.h>
7422 - * scheduler variables
7424 + * Convert user-nice values [ -20 ... 0 ... 19 ]
7425 + * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
7428 +#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
7429 +#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
7430 +#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio)
7432 -unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */
7434 -extern void mem_use(void);
7436 + * 'User priority' is the nice value converted to something we
7437 + * can work with better when scaling various scheduler parameters,
7438 + * it's a [ 0 ... 39 ] range.
7440 +#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
7441 +#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
7442 +#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
7445 - * Scheduling quanta.
7446 + * These are the 'tuning knobs' of the scheduler:
7448 - * NOTE! The unix "nice" value influences how long a process
7449 - * gets. The nice value ranges from -20 to +19, where a -20
7450 - * is a "high-priority" task, and a "+10" is a low-priority
7453 - * We want the time-slice to be around 50ms or so, so this
7454 - * calculation depends on the value of HZ.
7457 -#define TICK_SCALE(x) ((x) >> 2)
7459 -#define TICK_SCALE(x) ((x) >> 1)
7461 -#define TICK_SCALE(x) (x)
7463 -#define TICK_SCALE(x) ((x) << 1)
7465 -#define TICK_SCALE(x) ((x) << 2)
7468 -#define NICE_TO_TICKS(nice) (TICK_SCALE(20-(nice))+1)
7470 + * Minimum timeslice is 10 msecs, default timeslice is 150 msecs,
7471 + * maximum timeslice is 300 msecs. Timeslices get refilled after
7474 +#define MIN_TIMESLICE ( 10 * HZ / 1000)
7475 +#define MAX_TIMESLICE (300 * HZ / 1000)
7476 +#define CHILD_PENALTY 50
7477 +#define PARENT_PENALTY 100
7478 +#define PRIO_BONUS_RATIO 25
7479 +#define INTERACTIVE_DELTA 2
7480 +#define MAX_SLEEP_AVG (2*HZ)
7481 +#define STARVATION_LIMIT (2*HZ)
7484 - * Init task must be ok at boot for the ix86 as we will check its signals
7485 - * via the SMP irq return path.
7488 -struct task_struct * init_tasks[NR_CPUS] = {&init_task, };
7489 + * If a task is 'interactive' then we reinsert it in the active
7490 + * array after it has expired its current timeslice. (it will not
7491 + * continue to run immediately; it will still round-robin with
7492 + * other interactive tasks.)
7494 + * This part scales the interactivity limit depending on niceness.
7496 + * We scale it linearly, offset by the INTERACTIVE_DELTA delta.
7497 + * Here are a few examples of different nice levels:
7499 + * TASK_INTERACTIVE(-20): [1,1,1,1,1,1,1,1,1,0,0]
7500 + * TASK_INTERACTIVE(-10): [1,1,1,1,1,1,1,0,0,0,0]
7501 + * TASK_INTERACTIVE( 0): [1,1,1,1,0,0,0,0,0,0,0]
7502 + * TASK_INTERACTIVE( 10): [1,1,0,0,0,0,0,0,0,0,0]
7503 + * TASK_INTERACTIVE( 19): [0,0,0,0,0,0,0,0,0,0,0]
7505 + * (the X axis represents the possible -5 ... 0 ... +5 dynamic
7506 + * priority range a task can explore, a value of '1' means the
7507 + * task is rated interactive.)
7509 + * I.e. nice +19 tasks can never get 'interactive' enough to be
7510 + * reinserted into the active array. And only heavily CPU-bound nice -20
7511 + * tasks will be expired. Default nice 0 tasks are somewhere in between:
7512 + * it takes some effort for them to get interactive, but it's not
7516 +#define SCALE(v1,v1_max,v2_max) \
7517 + (v1) * (v2_max) / (v1_max)
7520 + (SCALE(TASK_NICE(p), 40, MAX_USER_PRIO*PRIO_BONUS_RATIO/100) + \
7521 + INTERACTIVE_DELTA)
7523 +#define TASK_INTERACTIVE(p) \
7524 + ((p)->prio <= (p)->static_prio - DELTA(p))
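
The interactivity table above follows mechanically from DELTA(): a task
counts as interactive while its dynamic priority sits at least DELTA(p)
below its static priority. A userspace sketch reproducing the nice 0 row,
using this patch's values (MAX_USER_PRIO 40, PRIO_BONUS_RATIO 25,
INTERACTIVE_DELTA 2) and parameterizing DELTA() by nice instead of by task:

#include <stdio.h>

#define MAX_RT_PRIO		100
#define MAX_PRIO		(MAX_RT_PRIO + 40)
#define MAX_USER_PRIO		(MAX_PRIO - MAX_RT_PRIO)	/* 40 */
#define PRIO_BONUS_RATIO	25
#define INTERACTIVE_DELTA	2

#define SCALE(v1,v1_max,v2_max)	((v1) * (v2_max) / (v1_max))
#define DELTA(nice) \
	(SCALE(nice, 40, MAX_USER_PRIO*PRIO_BONUS_RATIO/100) + INTERACTIVE_DELTA)

int main(void)
{
	int nice = 0;
	int static_prio = MAX_RT_PRIO + nice + 20;	/* 120 for nice 0 */
	int bonus;

	/* walk the -5 ... +5 dynamic-priority range, print 1 if interactive */
	for (bonus = -5; bonus <= 5; bonus++)
		printf("%d", static_prio + bonus <= static_prio - DELTA(nice));
	printf("\n");	/* prints 11110000000, the TASK_INTERACTIVE(0) row */
	return 0;
}
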
7527 - * The tasklist_lock protects the linked list of processes.
7529 - * The runqueue_lock locks the parts that actually access
7530 - * and change the run-queues, and have to be interrupt-safe.
7532 - * If both locks are to be concurrently held, the runqueue_lock
7533 - * nests inside the tasklist_lock.
7534 + * TASK_TIMESLICE scales user-nice values [ -20 ... 19 ]
7535 + * to time slice values.
7537 - * task->alloc_lock nests inside tasklist_lock.
7538 + * The higher a process's priority, the bigger timeslices
7539 + * it gets during one round of execution. But even the lowest
7540 + * priority process gets MIN_TIMESLICE worth of execution time.
7542 -spinlock_t runqueue_lock __cacheline_aligned = SPIN_LOCK_UNLOCKED; /* inner */
7543 -rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED; /* outer */
7545 -static LIST_HEAD(runqueue_head);
7546 +#define TASK_TIMESLICE(p) (MIN_TIMESLICE + \
7547 + ((MAX_TIMESLICE - MIN_TIMESLICE) * (MAX_PRIO-1-(p)->static_prio)/39))
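
With HZ=100 the constants above make TASK_TIMESLICE() run linearly from one
tick (10 ms) at nice +19 up to 30 ticks (300 ms) at nice -20. A userspace
check of the endpoints and the nice 0 default:

#include <stdio.h>

#define HZ		100
#define MAX_RT_PRIO	100
#define MAX_PRIO	(MAX_RT_PRIO + 40)
#define MIN_TIMESLICE	( 10 * HZ / 1000)	/* 1 tick  = 10 ms */
#define MAX_TIMESLICE	(300 * HZ / 1000)	/* 30 ticks = 300 ms */

static int task_timeslice(int static_prio)
{
	return MIN_TIMESLICE +
		(MAX_TIMESLICE - MIN_TIMESLICE) * (MAX_PRIO - 1 - static_prio) / 39;
}

int main(void)
{
	printf("nice -20: %d ticks\n", task_timeslice(100));	/* 30 */
	printf("nice   0: %d ticks\n", task_timeslice(120));	/* 15 */
	printf("nice +19: %d ticks\n", task_timeslice(139));	/* 1 */
	return 0;
}
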
7550 - * We align per-CPU scheduling data on cacheline boundaries,
7551 - * to prevent cacheline ping-pong.
7552 + * These are the runqueue data structures:
7555 - struct schedule_data {
7556 - struct task_struct * curr;
7557 - cycles_t last_schedule;
7559 - char __pad [SMP_CACHE_BYTES];
7560 -} aligned_data [NR_CPUS] __cacheline_aligned = { {{&init_task,0}}};
7562 -#define cpu_curr(cpu) aligned_data[(cpu)].schedule_data.curr
7563 -#define last_schedule(cpu) aligned_data[(cpu)].schedule_data.last_schedule
7564 +#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
7566 -struct kernel_stat kstat;
7567 -extern struct task_struct *child_reaper;
7568 +typedef struct runqueue runqueue_t;
7571 +struct prio_array {
7573 + unsigned long bitmap[BITMAP_SIZE];
7574 + struct list_head queue[MAX_PRIO];
7577 -#define idle_task(cpu) (init_tasks[cpu_number_map(cpu)])
7578 -#define can_schedule(p,cpu) \
7579 - ((p)->cpus_runnable & (p)->cpus_allowed & (1UL << cpu))
7581 + * This is the main, per-CPU runqueue data structure.
7583 + * Locking rule: code that wants to lock multiple runqueues (such as
7584 + * the load balancing or the process migration code) must acquire
7585 + * the locks in ascending &runqueue address order.
7589 + unsigned long nr_running, nr_switches, expired_timestamp;
7590 + task_t *curr, *idle;
7591 + prio_array_t *active, *expired, arrays[2];
7592 + long nr_uninterruptible;
7595 + int prev_nr_running[NR_CPUS];
7596 + task_t *migration_thread;
7597 + struct list_head migration_queue;
7599 +} ____cacheline_aligned;
7602 +static struct runqueue runqueues[NR_CPUS] __cacheline_aligned;
7604 -#define idle_task(cpu) (&init_task)
7605 -#define can_schedule(p,cpu) (1)
7606 +#define cpu_rq(cpu) (runqueues + (cpu))
7607 +#define this_rq() cpu_rq(smp_processor_id())
7608 +#define task_rq(p) cpu_rq((p)->cpu)
7609 +#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
7610 +#define rt_task(p) ((p)->prio < MAX_RT_PRIO)
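
The prio_array bitmap plus one list per priority level is what makes the
pick-next path O(1): a single find-first-set over the 140-bit bitmap names
the best non-empty list, and its head runs next. A toy userspace model of
that lookup (uses __builtin_ctzl() where the kernel uses its optimized
bit-search helpers):

#include <stdio.h>

#define MAX_PRIO	140
#define BITS_PER_LONG	(8 * (int)sizeof(long))
#define BITMAP_WORDS	((MAX_PRIO + BITS_PER_LONG - 1) / BITS_PER_LONG)

static int pick_next_prio(const unsigned long *bitmap)
{
	int i;

	for (i = 0; i < BITMAP_WORDS; i++)
		if (bitmap[i])
			return i * BITS_PER_LONG + __builtin_ctzl(bitmap[i]);
	return MAX_PRIO;	/* all lists empty: only the idle task is left */
}

int main(void)
{
	unsigned long bitmap[BITMAP_WORDS] = { 0 };

	/* priorities 101 and 120 have runnable tasks queued */
	bitmap[120 / BITS_PER_LONG] |= 1UL << (120 % BITS_PER_LONG);
	bitmap[101 / BITS_PER_LONG] |= 1UL << (101 % BITS_PER_LONG);

	/* lowest index == highest priority wins: prints 101 */
	printf("next prio: %d\n", pick_next_prio(bitmap));
	return 0;
}
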
7613 + * Default context-switch locking:
7615 +#ifndef prepare_arch_switch
7616 +# define prepare_arch_switch(rq, next) do { } while(0)
7617 +# define finish_arch_switch(rq, prev) spin_unlock_irq(&(rq)->lock)
7620 -void scheduling_functions_start_here(void) { }
7623 - * This is the function that decides how desirable a process is..
7624 - * You can weigh different processes against each other depending
7625 - * on what CPU they've run on lately etc to try to handle cache
7626 - * and TLB miss penalties.
7629 - * -1000: never select this
7630 - * 0: out of time, recalculate counters (but it might still be
7632 - * +ve: "goodness" value (the larger, the better)
7633 - * +1000: realtime process, select this.
7634 + * task_rq_lock - lock the runqueue a given task resides on and disable
7635 + * interrupts. Note the ordering: we can safely lookup the task_rq without
7636 + * explicitly disabling preemption.
7639 -static inline int goodness(struct task_struct * p, int this_cpu, struct mm_struct *this_mm)
7640 +static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
7645 - * select the current process after every other
7646 - * runnable process, but before the idle thread.
7647 - * Also, dont trigger a counter recalculation.
7650 - if (p->policy & SCHED_YIELD)
7652 + struct runqueue *rq;
7655 - * Non-RT process - normal case first.
7657 - if (p->policy == SCHED_OTHER) {
7659 - * Give the process a first-approximation goodness value
7660 - * according to the number of clock-ticks it has left.
7662 - * Don't do any other calculations if the time slice is
7665 - weight = p->counter;
7670 - /* Give a largish advantage to the same processor... */
7671 - /* (this is equivalent to penalizing other processors) */
7672 - if (p->processor == this_cpu)
7673 - weight += PROC_CHANGE_PENALTY;
7676 - /* .. and a slight advantage to the current MM */
7677 - if (p->mm == this_mm || !p->mm)
7679 - weight += 20 - p->nice;
7683 + spin_lock_irqsave(&rq->lock, *flags);
7684 + if (unlikely(rq != task_rq(p))) {
7685 + spin_unlock_irqrestore(&rq->lock, *flags);
7686 + goto repeat_lock_task;
7692 - * Realtime process, select the first one on the
7693 - * runqueue (taking priorities within processes
7696 - weight = 1000 + p->rt_priority;
7699 +static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
7701 + spin_unlock_irqrestore(&rq->lock, *flags);
7705 - * the 'goodness value' of replacing a process on a given CPU.
7706 - * positive value means 'replace', zero or negative means 'dont'.
7707 + * Adding/removing a task to/from a priority array:
7709 -static inline int preemption_goodness(struct task_struct * prev, struct task_struct * p, int cpu)
7710 +static inline void dequeue_task(struct task_struct *p, prio_array_t *array)
7712 - return goodness(p, cpu, prev->active_mm) - goodness(prev, cpu, prev->active_mm);
7713 + array->nr_active--;
7714 + list_del(&p->run_list);
7715 + if (list_empty(array->queue + p->prio))
7716 + __clear_bit(p->prio, array->bitmap);
7720 - * This is ugly, but reschedule_idle() is very timing-critical.
7721 - * We are called with the runqueue spinlock held and we must
7722 - * not claim the tasklist_lock.
7724 -static FASTCALL(void reschedule_idle(struct task_struct * p));
7725 +#define enqueue_task(p, array) __enqueue_task(p, array, NULL)
7726 +static inline void __enqueue_task(struct task_struct *p, prio_array_t *array, task_t * parent)
7729 + list_add_tail(&p->run_list, array->queue + p->prio);
7730 + __set_bit(p->prio, array->bitmap);
7733 + list_add_tail(&p->run_list, &parent->run_list);
7734 + array = p->array = parent->array;
7736 + array->nr_active++;
7739 -static void reschedule_idle(struct task_struct * p)
7740 +static inline int effective_prio(task_t *p)
7743 - int this_cpu = smp_processor_id();
7744 - struct task_struct *tsk, *target_tsk;
7745 - int cpu, best_cpu, i, max_prio;
7746 - cycles_t oldest_idle;
7750 - * shortcut if the woken up task's last CPU is
7752 + * Here we scale the actual sleep average [0 .... MAX_SLEEP_AVG]
7753 + * into the -5 ... 0 ... +5 bonus/penalty range.
7755 + * We use 25% of the full 0...39 priority range so that:
7757 + * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
7758 + * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
7760 + * Both properties are important to certain workloads.
7762 - best_cpu = p->processor;
7763 - if (can_schedule(p, best_cpu)) {
7764 - tsk = idle_task(best_cpu);
7765 - if (cpu_curr(best_cpu) == tsk) {
7769 - * If need_resched == -1 then we can skip sending
7770 - * the IPI altogether, tsk->need_resched is
7771 - * actively watched by the idle thread.
7773 - need_resched = tsk->need_resched;
7774 - tsk->need_resched = 1;
7775 - if ((best_cpu != this_cpu) && !need_resched)
7776 - smp_send_reschedule(best_cpu);
7780 + bonus = MAX_USER_PRIO*PRIO_BONUS_RATIO*p->sleep_avg/MAX_SLEEP_AVG/100 -
7781 + MAX_USER_PRIO*PRIO_BONUS_RATIO/100/2;
7784 - * We know that the preferred CPU has a cache-affine current
7785 - * process, lets try to find a new idle CPU for the woken-up
7786 - * process. Select the least recently active idle CPU. (that
7787 - * one will have the least active cache context.) Also find
7788 - * the executing process which has the least priority.
7790 - oldest_idle = (cycles_t) -1;
7791 - target_tsk = NULL;
7793 + prio = p->static_prio - bonus;
7794 + if (prio < MAX_RT_PRIO)
7795 + prio = MAX_RT_PRIO;
7796 + if (prio > MAX_PRIO-1)
7797 + prio = MAX_PRIO-1;
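
The bonus above maps sleep_avg linearly onto -5..+5 (25% of the 40-level
user priority range, per the comment) and is subtracted from static_prio,
then clamped to the SCHED_OTHER band. A userspace evaluation for a nice 0
task, assuming HZ=100 so that MAX_SLEEP_AVG (2*HZ) is 200 ticks:

#include <stdio.h>

#define HZ			100
#define MAX_RT_PRIO		100
#define MAX_PRIO		(MAX_RT_PRIO + 40)
#define MAX_USER_PRIO		(MAX_PRIO - MAX_RT_PRIO)	/* 40 */
#define PRIO_BONUS_RATIO	25
#define MAX_SLEEP_AVG		(2*HZ)

static int demo_effective_prio(int static_prio, int sleep_avg)
{
	int bonus, prio;

	bonus = MAX_USER_PRIO*PRIO_BONUS_RATIO*sleep_avg/MAX_SLEEP_AVG/100 -
		MAX_USER_PRIO*PRIO_BONUS_RATIO/100/2;

	prio = static_prio - bonus;
	if (prio < MAX_RT_PRIO)		/* clamp into the SCHED_OTHER band */
		prio = MAX_RT_PRIO;
	if (prio > MAX_PRIO-1)
		prio = MAX_PRIO-1;
	return prio;
}

int main(void)
{
	/* a nice 0 task has static_prio 120 */
	printf("%d\n", demo_effective_prio(120, 0));			/* 125: CPU hog */
	printf("%d\n", demo_effective_prio(120, MAX_SLEEP_AVG/2));	/* 120: neutral */
	printf("%d\n", demo_effective_prio(120, MAX_SLEEP_AVG));	/* 115: interactive */
	return 0;
}
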
7801 - for (i = 0; i < smp_num_cpus; i++) {
7802 - cpu = cpu_logical_map(i);
7803 - if (!can_schedule(p, cpu))
7805 - tsk = cpu_curr(cpu);
7806 +#define activate_task(p, rq) __activate_task(p, rq, NULL)
7807 +static inline void __activate_task(task_t *p, runqueue_t *rq, task_t * parent)
7809 + unsigned long sleep_time = jiffies - p->sleep_timestamp;
7810 + prio_array_t *array = rq->active;
7812 + if (!parent && !rt_task(p) && sleep_time) {
7814 - * We use the first available idle CPU. This creates
7815 - * a priority list between idle CPUs, but this is not
7817 + * This code gives a bonus to interactive tasks. We update
7818 + * an 'average sleep time' value here, based on
7819 + * sleep_timestamp. The more time a task spends sleeping,
7820 + * the higher the average gets - and the higher the priority
7821 + * boost gets as well.
7823 - if (tsk == idle_task(cpu)) {
7824 -#if defined(__i386__) && defined(CONFIG_SMP)
7826 - * Check if two siblings are idle in the same
7827 - * physical package. Use them if found.
7829 - if (smp_num_siblings == 2) {
7830 - if (cpu_curr(cpu_sibling_map[cpu]) ==
7831 - idle_task(cpu_sibling_map[cpu])) {
7832 - oldest_idle = last_schedule(cpu);
7839 - if (last_schedule(cpu) < oldest_idle) {
7840 - oldest_idle = last_schedule(cpu);
7844 - if (oldest_idle == (cycles_t)-1) {
7845 - int prio = preemption_goodness(tsk, p, cpu);
7847 - if (prio > max_prio) {
7856 - if (oldest_idle != (cycles_t)-1) {
7857 - best_cpu = tsk->processor;
7858 - goto send_now_idle;
7860 - tsk->need_resched = 1;
7861 - if (tsk->processor != this_cpu)
7862 - smp_send_reschedule(tsk->processor);
7863 + p->sleep_timestamp = jiffies;
7864 + p->sleep_avg += sleep_time;
7865 + if (p->sleep_avg > MAX_SLEEP_AVG)
7866 + p->sleep_avg = MAX_SLEEP_AVG;
7867 + p->prio = effective_prio(p);
7871 + __enqueue_task(p, array, parent);
7876 - int this_cpu = smp_processor_id();
7877 - struct task_struct *tsk;
7878 +static inline void deactivate_task(struct task_struct *p, runqueue_t *rq)
7881 + if (p->state == TASK_UNINTERRUPTIBLE)
7882 + rq->nr_uninterruptible++;
7883 + dequeue_task(p, p->array);
7887 +static inline void resched_task(task_t *p)
7892 - tsk = cpu_curr(this_cpu);
7893 - if (preemption_goodness(tsk, p, this_cpu) > 0)
7894 - tsk->need_resched = 1;
7895 + need_resched = p->need_resched;
7896 + set_tsk_need_resched(p);
7897 + if (!need_resched && (p->cpu != smp_processor_id()))
7898 + smp_send_reschedule(p->cpu);
7900 + set_tsk_need_resched(p);
7909 - * This has to add the process to the _end_ of the
7910 - * run-queue, not the beginning. The goodness value will
7911 - * determine whether this process will run next. This is
7912 - * important to get SCHED_FIFO and SCHED_RR right, where
7913 - * a process that is either pre-empted or its time slice
7914 - * has expired, should be moved to the tail of the run
7915 - * queue for its priority - Bhavesh Davda
7916 + * Wait for a process to unschedule. This is used by the exit() and
7919 -static inline void add_to_runqueue(struct task_struct * p)
7920 +void wait_task_inactive(task_t * p)
7922 - list_add_tail(&p->run_list, &runqueue_head);
7924 + unsigned long flags;
7929 + if (unlikely(rq->curr == p)) {
7934 + rq = task_rq_lock(p, &flags);
7935 + if (unlikely(rq->curr == p)) {
7936 + task_rq_unlock(rq, &flags);
7939 + task_rq_unlock(rq, &flags);
7942 -static inline void move_last_runqueue(struct task_struct * p)
7944 + * Kick the remote CPU if the task is running currently,
7945 + * this code is used by the signal code to signal tasks
7946 + * which are in user-mode as quickly as possible.
7948 + * (Note that we do this lockless - if the task does anything
7949 + * while the message is in flight then it will notice the
7950 + * sigpending condition anyway.)
7952 +void kick_if_running(task_t * p)
7954 - list_del(&p->run_list);
7955 - list_add_tail(&p->run_list, &runqueue_head);
7956 + if (p == task_rq(p)->curr && p->cpu != smp_processor_id())
7962 +static int FASTCALL(reschedule_idle(task_t * p));
7963 +static void FASTCALL(load_balance(runqueue_t *this_rq, int idle));
7968 * Wake up a process. Put it on the run-queue if it's not
7969 @@ -345,429 +338,721 @@
7970 * progress), and as such you're allowed to do the simpler
7971 * "current->state = TASK_RUNNING" to mark yourself runnable
7972 * without the overhead of this.
7974 + * returns failure only if the task is already active.
7976 -static inline int try_to_wake_up(struct task_struct * p, int synchronous)
7977 +static int try_to_wake_up(task_t * p, int sync)
7979 unsigned long flags;
7984 + int migrated_to_idle = 0;
7990 + rq = task_rq_lock(p, &flags);
7991 + old_state = p->state;
7994 + if (likely(rq->curr != p)) {
7996 + if (unlikely(sync)) {
7997 + if (p->cpu != smp_processor_id() &&
7998 + p->cpus_allowed & (1UL << smp_processor_id())) {
7999 + p->cpu = smp_processor_id();
8000 + goto migrated_task;
8003 + if (reschedule_idle(p))
8004 + goto migrated_task;
8008 + if (old_state == TASK_UNINTERRUPTIBLE)
8009 + rq->nr_uninterruptible--;
8010 + activate_task(p, rq);
8011 + if (p->prio < rq->curr->prio)
8012 + resched_task(rq->curr);
8015 + p->state = TASK_RUNNING;
8019 - * We want the common case fall through straight, thus the goto.
8020 + * Subtle: we can load_balance only here (before unlock)
8021 + * because it can internally drop the lock. Claim
8022 + * that the cpu is running so it will be a light rebalance;
8023 + * if this cpu goes idle soon, schedule() will trigger the
8024 + * idle rebalancing by itself.
8026 - spin_lock_irqsave(&runqueue_lock, flags);
8027 - p->state = TASK_RUNNING;
8028 - if (task_on_runqueue(p))
8030 - add_to_runqueue(p);
8031 - if (!synchronous || !(p->cpus_allowed & (1UL << smp_processor_id())))
8032 - reschedule_idle(p);
8035 - spin_unlock_irqrestore(&runqueue_lock, flags);
8036 + if (success && migrated_to_idle)
8037 + load_balance(rq, 0);
8040 + task_rq_unlock(rq, &flags);
8046 + task_rq_unlock(rq, &flags);
8047 + migrated_to_idle = 1;
8048 + goto repeat_lock_task;
8052 -inline int wake_up_process(struct task_struct * p)
8053 +int wake_up_process(task_t * p)
8055 return try_to_wake_up(p, 0);
8058 -static void process_timeout(unsigned long __data)
8059 +void wake_up_forked_process(task_t * p)
8061 - struct task_struct * p = (struct task_struct *) __data;
8063 + task_t * parent = current;
8065 - wake_up_process(p);
8068 + spin_lock_irq(&rq->lock);
8071 - * schedule_timeout - sleep until timeout
8072 - * @timeout: timeout value in jiffies
8074 - * Make the current task sleep until @timeout jiffies have
8075 - * elapsed. The routine will return immediately unless
8076 - * the current task state has been set (see set_current_state()).
8078 - * You can set the task state as follows -
8080 - * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
8081 - * pass before the routine returns. The routine will return 0
8083 - * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
8084 - * delivered to the current task. In this case the remaining time
8085 - * in jiffies will be returned, or 0 if the timer expired in time
8087 - * The current task state is guaranteed to be TASK_RUNNING when this
8088 - * routine returns.
8090 - * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
8091 - * the CPU away without a bound on the timeout. In this case the return
8092 - * value will be %MAX_SCHEDULE_TIMEOUT.
8094 - * In all cases the return value is guaranteed to be non-negative.
8096 -signed long schedule_timeout(signed long timeout)
8098 - struct timer_list timer;
8099 - unsigned long expire;
8100 + p->state = TASK_RUNNING;
8101 + if (likely(!rt_task(p) && parent->array)) {
8103 + * We decrease the sleep average of forked
8104 + * children, to keep max-interactive tasks
8105 + * from forking tasks that are max-interactive.
8106 + * CHILD_PENALTY is set to 50% since we have
8107 + * no clue if this is still an interactive
8108 + * task like the parent or if this will be a
8109 + * cpu bound task. The parent isn't touched
8110 + * as we don't make assumptions about the parent
8111 + * changing behaviour after the child is forked.
8113 + parent->sleep_avg = parent->sleep_avg * PARENT_PENALTY / 100;
8114 + p->sleep_avg = p->sleep_avg * CHILD_PENALTY / 100;
8118 - case MAX_SCHEDULE_TIMEOUT:
8120 - * These two special cases are useful to be comfortable
8121 - * in the caller. Nothing more. We could take
8122 - * MAX_SCHEDULE_TIMEOUT from one of the negative value
8123 - * but I' d like to return a valid offset (>=0) to allow
8124 - * the caller to do everything it want with the retval.
8125 + * For its first schedule keep the child at the same
8126 + * priority (i.e. in the same list) as the parent,
8127 + * activate_forked_task() will take care to put the
8128 + * child in front of the parent (lifo) to guarantee a
8129 + * schedule-child-first behaviour after fork.
8134 + p->prio = parent->prio;
8137 - * Another bit of PARANOID. Note that the retval will be
8138 - * 0 since no piece of kernel is supposed to do a check
8139 - * for a negative retval of schedule_timeout() (since it
8140 - * should never happens anyway). You just have the printk()
8141 - * that will tell you if something is gone wrong and where.
8142 + * Take the usual wakeup path if it's RT or if
8143 + * it's a child of the first idle task (during boot
8148 - printk(KERN_ERR "schedule_timeout: wrong timeout "
8149 - "value %lx from %p\n", timeout,
8150 - __builtin_return_address(0));
8151 - current->state = TASK_RUNNING;
8154 + p->prio = effective_prio(p);
8158 - expire = timeout + jiffies;
8159 + p->cpu = smp_processor_id();
8160 + __activate_task(p, rq, parent);
8161 + spin_unlock_irq(&rq->lock);
8164 - init_timer(&timer);
8165 - timer.expires = expire;
8166 - timer.data = (unsigned long) current;
8167 - timer.function = process_timeout;
8169 + * Potentially available exiting-child timeslices are
8170 + * retrieved here - this way the parent does not get
8171 + * penalized for creating too many processes.
8173 + * (this cannot be used to 'generate' timeslices
8174 + * artificially, because any timeslice recovered here
8175 + * was given away by the parent in the first place.)
8177 +void sched_exit(task_t * p)
8180 + if (p->first_time_slice) {
8181 + current->time_slice += p->time_slice;
8182 + if (unlikely(current->time_slice > MAX_TIMESLICE))
8183 + current->time_slice = MAX_TIMESLICE;
8188 - add_timer(&timer);
8190 - del_timer_sync(&timer);
8192 +asmlinkage void schedule_tail(task_t *prev)
8194 + finish_arch_switch(this_rq(), prev);
8198 +static inline task_t * context_switch(task_t *prev, task_t *next)
8200 + struct mm_struct *mm = next->mm;
8201 + struct mm_struct *oldmm = prev->active_mm;
8203 - timeout = expire - jiffies;
8204 + if (unlikely(!mm)) {
8205 + next->active_mm = oldmm;
8206 + atomic_inc(&oldmm->mm_count);
8207 + enter_lazy_tlb(oldmm, next, smp_processor_id());
8209 + switch_mm(oldmm, mm, next, smp_processor_id());
8212 - return timeout < 0 ? 0 : timeout;
8213 + if (unlikely(!prev->mm)) {
8214 + prev->active_mm = NULL;
8218 + /* Here we just switch the register state and the stack. */
8219 + switch_to(prev, next, prev);
8225 - * schedule_tail() is getting called from the fork return path. This
8226 - * cleans up all remaining scheduler things, without impacting the
8229 -static inline void __schedule_tail(struct task_struct *prev)
8230 +unsigned long nr_running(void)
8234 + unsigned long i, sum = 0;
8237 - * prev->policy can be written from here only before `prev'
8238 - * can be scheduled (before setting prev->cpus_runnable to ~0UL).
8239 - * Of course it must also be read before allowing prev
8240 - * to be rescheduled, but since the write depends on the read
8241 - * to complete, wmb() is enough. (the spin_lock() acquired
8242 - * before setting cpus_runnable is not enough because the spin_lock()
8243 - * common code semantics allows code outside the critical section
8244 - * to enter inside the critical section)
8246 - policy = prev->policy;
8247 - prev->policy = policy & ~SCHED_YIELD;
8249 + for (i = 0; i < smp_num_cpus; i++)
8250 + sum += cpu_rq(cpu_logical_map(i))->nr_running;
8253 - * fast path falls through. We have to clear cpus_runnable before
8254 - * checking prev->state to avoid a wakeup race. Protect against
8255 - * the task exiting early.
8258 - task_release_cpu(prev);
8260 - if (prev->state == TASK_RUNNING)
8261 - goto needs_resched;
8266 - task_unlock(prev); /* Synchronise here with release_task() if prev is TASK_ZOMBIE */
8268 +/* Note: the per-cpu information is useful only to get the cumulative result */
8269 +unsigned long nr_uninterruptible(void)
8271 + unsigned long i, sum = 0;
8274 - * Slow path - we 'push' the previous process and
8275 - * reschedule_idle() will attempt to find a new
8276 - * processor for it. (but it might preempt the
8277 - * current process as well.) We must take the runqueue
8278 - * lock and re-check prev->state to be correct. It might
8279 - * still happen that this process has a preemption
8280 - * 'in progress' already - but this is not a problem and
8281 - * might happen in other circumstances as well.
8285 - unsigned long flags;
8286 + for (i = 0; i < smp_num_cpus; i++)
8287 + sum += cpu_rq(cpu_logical_map(i))->nr_uninterruptible;
8290 - * Avoid taking the runqueue lock in cases where
8291 - * no preemption-check is necessery:
8293 - if ((prev == idle_task(smp_processor_id())) ||
8294 - (policy & SCHED_YIELD))
8299 - spin_lock_irqsave(&runqueue_lock, flags);
8300 - if ((prev->state == TASK_RUNNING) && !task_has_cpu(prev))
8301 - reschedule_idle(prev);
8302 - spin_unlock_irqrestore(&runqueue_lock, flags);
8306 - prev->policy &= ~SCHED_YIELD;
8307 -#endif /* CONFIG_SMP */
8308 +unsigned long nr_context_switches(void)
8310 + unsigned long i, sum = 0;
8312 + for (i = 0; i < smp_num_cpus; i++)
8313 + sum += cpu_rq(cpu_logical_map(i))->nr_switches;
8318 -asmlinkage void schedule_tail(struct task_struct *prev)
8319 +inline int idle_cpu(int cpu)
8321 - __schedule_tail(prev);
8322 + return cpu_curr(cpu) == cpu_rq(cpu)->idle;
8327 - * 'schedule()' is the scheduler function. It's a very simple and nice
8328 - * scheduler: it's not perfect, but certainly works for most things.
8330 - * The goto is "interesting".
8332 - * NOTE!! Task 0 is the 'idle' task, which gets called when no other
8333 - * tasks can run. It can not be killed, and it cannot sleep. The 'state'
8334 - * information in task[0] is never used.
8335 + * Lock the busiest runqueue as well; this_rq is locked already.
8336 + * Recalculate nr_running if we have to drop the runqueue lock.
8338 -asmlinkage void schedule(void)
8339 +static inline unsigned int double_lock_balance(runqueue_t *this_rq,
8340 + runqueue_t *busiest, int this_cpu, int idle, unsigned int nr_running)
8342 - struct schedule_data * sched_data;
8343 - struct task_struct *prev, *next, *p;
8344 - struct list_head *tmp;
8346 + if (unlikely(!spin_trylock(&busiest->lock))) {
8347 + if (busiest < this_rq) {
8348 + spin_unlock(&this_rq->lock);
8349 + spin_lock(&busiest->lock);
8350 + spin_lock(&this_rq->lock);
8351 + /* Need to recalculate nr_running */
8352 + if (idle || (this_rq->nr_running > this_rq->prev_nr_running[this_cpu]))
8353 + nr_running = this_rq->nr_running;
8355 + nr_running = this_rq->prev_nr_running[this_cpu];
8357 + spin_lock(&busiest->lock);
8359 + return nr_running;
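The trylock-and-reorder dance above avoids ABBA deadlock between two runqueue locks; the underlying rule is simply to take the lower-addressed lock first whenever both are needed. A minimal standalone sketch of that rule (the lock_pair() helper is hypothetical, not part of the patch, and assumes the two locks are distinct):

	#include <linux/spinlock.h>

	/* Hypothetical helper: acquire two distinct spinlocks in a globally
	 * consistent (address) order, so two CPUs can never each hold one
	 * lock while waiting for the other. */
	static inline void lock_pair(spinlock_t *a, spinlock_t *b)
	{
		if (a < b) {
			spin_lock(a);
			spin_lock(b);
		} else {
			spin_lock(b);
			spin_lock(a);
		}
	}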
8363 + * Move a task from a remote runqueue to the local runqueue.
8364 + * Both runqueues must be locked.
8366 +static inline int pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p, runqueue_t *this_rq, int this_cpu)
8370 - spin_lock_prefetch(&runqueue_lock);
8371 + dequeue_task(p, src_array);
8372 + src_rq->nr_running--;
8373 + p->cpu = this_cpu;
8374 + this_rq->nr_running++;
8375 + enqueue_task(p, this_rq->active);
8377 + * Note that idle threads have a prio of MAX_PRIO, so this
8378 + * test is always true for them.
8380 + if (p->prio < this_rq->curr->prio)
8383 - BUG_ON(!current->active_mm);
8386 - this_cpu = prev->processor;
8390 - if (unlikely(in_interrupt())) {
8391 - printk("Scheduling in interrupt\n");
8393 +static inline int idle_cpu_reschedule(task_t * p, int cpu)
8395 + if (unlikely(!(p->cpus_allowed & (1UL << cpu))))
8397 + return idle_cpu(cpu);
8400 +#include <linux/smp_balance.h>
8402 +static int reschedule_idle(task_t * p)
8404 + int p_cpu = p->cpu, i;
8406 + if (idle_cpu(p_cpu))
8409 + p_cpu = cpu_number_map(p_cpu);
8411 + for (i = (p_cpu + 1) % smp_num_cpus;
8413 + i = (i + 1) % smp_num_cpus) {
8414 + int physical = cpu_logical_map(i);
8416 + if (idle_cpu_reschedule(p, physical)) {
8417 + physical = arch_reschedule_idle_override(p, physical);
8418 + p->cpu = physical;
8423 - release_kernel_lock(prev, this_cpu);
8428 + * Current runqueue is empty, or rebalance tick: if there is an
8429 + * imbalance (current runqueue is too short) then pull from
8430 + * busiest runqueue(s).
8432 + * We call this with the current runqueue locked,
8435 +static void load_balance(runqueue_t *this_rq, int idle)
8437 + int imbalance, nr_running, load, max_load,
8438 + idx, i, this_cpu = this_rq - runqueues;
8440 + runqueue_t *busiest, *rq_src;
8441 + prio_array_t *array;
8442 + struct list_head *head, *curr;
8446 - * 'sched_data' is protected by the fact that we can run
8447 - * only one process per CPU.
8448 + * Handle architecture-specific balancing, such as hyperthreading.
8450 - sched_data = & aligned_data[this_cpu].schedule_data;
8451 + if (arch_load_balance(this_cpu, idle))
8454 - spin_lock_irq(&runqueue_lock);
8457 + * We search all runqueues to find the busiest one.
8458 + * We do this lockless to reduce cache-bouncing overhead;
8459 + * we re-check the 'best' source CPU later on again, with
8462 + * We fend off statistical fluctuations in runqueue lengths by
8463 + * saving the runqueue length during the previous load-balancing
8464 + * operation and using the smaller of the current and saved lengths.
8465 + * If a runqueue stays long for a sustained amount of time then
8466 + * we recognize it and pull tasks from it.
8468 + * The 'current runqueue length' is a statistical maximum variable;
8469 + * for that one we take the longer value, to avoid fluctuations in
8470 + * the other direction. So for a load-balance to happen we need a
8471 + * stable long runqueue on the target CPU and a stable short
8472 + * runqueue on the local CPU.
8474 + * We make an exception if this CPU is about to become idle - in
8475 + * that case we are less picky about moving a task across CPUs and
8476 + * take what can be taken.
8478 + if (idle || (this_rq->nr_running > this_rq->prev_nr_running[this_cpu]))
8479 + nr_running = this_rq->nr_running;
8481 + nr_running = this_rq->prev_nr_running[this_cpu];
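To make the asymmetry concrete (invented numbers; the source-side branch in the loop below uses the opposite, min-style comparison):

	/* Sketch of the two comparisons: the local queue counts as the
	 * *longer* of its current and saved lengths, a candidate source
	 * queue as the *shorter*, so a momentary spike on either side
	 * cannot trigger a migration. */
	static int smoothing_example(void)
	{
		int cur = 3, saved = 2;
		int local_len = (cur > saved) ? cur : saved;		/* 3 */
		int src_cur = 9, src_saved = 4;
		int src_load = (src_cur < src_saved) ? src_cur : src_saved; /* 4 */

		return src_load - local_len;	/* raw gap fed into the imbalance check */
	}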
8483 - /* move an exhausted RR process to be last.. */
8484 - if (unlikely(prev->policy == SCHED_RR))
8485 - if (!prev->counter) {
8486 - prev->counter = NICE_TO_TICKS(prev->nice);
8487 - move_last_runqueue(prev);
8491 + for (i = 0; i < smp_num_cpus; i++) {
8492 + int logical = cpu_logical_map(i);
8494 - switch (prev->state) {
8495 - case TASK_INTERRUPTIBLE:
8496 - if (signal_pending(prev)) {
8497 - prev->state = TASK_RUNNING;
8501 - del_from_runqueue(prev);
8502 - case TASK_RUNNING:;
8503 + rq_src = cpu_rq(logical);
8504 + if (idle || (rq_src->nr_running < this_rq->prev_nr_running[logical]))
8505 + load = rq_src->nr_running;
8507 + load = this_rq->prev_nr_running[logical];
8508 + this_rq->prev_nr_running[logical] = rq_src->nr_running;
8510 + if ((load > max_load) && (rq_src != this_rq)) {
8515 - prev->need_resched = 0;
8517 + if (likely(!busiest))
8520 + imbalance = (max_load - nr_running) / 2;
8522 + /* It needs at least a ~25% imbalance to trigger balancing. */
8523 + if (!idle && (imbalance < (max_load + 3)/4))
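Worked numbers for this trigger (illustrative only):

	/* max_load == 8, nr_running == 4:
	 *   imbalance = (8 - 4) / 2 = 2, threshold = (8 + 3) / 4 = 2;
	 *   2 < 2 is false, so a busy CPU proceeds to pull.
	 * max_load == 6, nr_running == 4:
	 *   imbalance = 1 < threshold 2, so the queues are left alone. */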
8527 - * this is the scheduler proper:
8528 + * Make sure nothing significant changed since we checked the
8529 + * runqueue length.
8531 + if (double_lock_balance(this_rq, busiest, this_cpu, idle, nr_running) > nr_running ||
8532 + busiest->nr_running < max_load)
8533 + goto out_unlock_retry;
8537 - * Default process to select..
8538 + * We first consider expired tasks. Those will likely not be
8539 + * executed in the near future, and they are most likely to
8540 + * be cache-cold, thus switching CPUs has the least effect
8543 - next = idle_task(this_cpu);
8545 - list_for_each(tmp, &runqueue_head) {
8546 - p = list_entry(tmp, struct task_struct, run_list);
8547 - if (can_schedule(p, this_cpu)) {
8548 - int weight = goodness(p, this_cpu, prev->active_mm);
8550 - c = weight, next = p;
8551 + if (busiest->expired->nr_active)
8552 + array = busiest->expired;
8554 + array = busiest->active;
8558 + /* Start searching at priority 0: */
8562 + idx = sched_find_first_bit(array->bitmap);
8564 + idx = find_next_bit(array->bitmap, MAX_PRIO, idx);
8565 + if (idx == MAX_PRIO) {
8566 + if (array == busiest->expired) {
8567 + array = busiest->active;
8573 - /* Do we need to re-calculate counters? */
8574 - if (unlikely(!c)) {
8575 - struct task_struct *p;
8577 - spin_unlock_irq(&runqueue_lock);
8578 - read_lock(&tasklist_lock);
8580 - p->counter = (p->counter >> 1) + NICE_TO_TICKS(p->nice);
8581 - read_unlock(&tasklist_lock);
8582 - spin_lock_irq(&runqueue_lock);
8583 - goto repeat_schedule;
8584 + head = array->queue + idx;
8585 + curr = head->prev;
8587 + tmp = list_entry(curr, task_t, run_list);
8590 + * We do not migrate tasks that are:
8591 + * 1) running (obviously), or
8592 + * 2) cannot be migrated to this CPU due to cpus_allowed, or
8593 + * 3) are cache-hot on their current CPU.
8596 +#define CAN_MIGRATE_TASK(p,rq,this_cpu) \
8597 + ((jiffies - (p)->sleep_timestamp > cache_decay_ticks) && \
8598 + ((p) != (rq)->curr) && \
8599 + ((p)->cpus_allowed & (1UL << (this_cpu))))
8601 + curr = curr->prev;
8603 + if (!CAN_MIGRATE_TASK(tmp, busiest, this_cpu)) {
8609 + resched |= pull_task(busiest, array, tmp, this_rq, this_cpu);
8610 + if (--imbalance > 0) {
8617 + spin_unlock(&busiest->lock);
8619 + resched_task(this_rq->curr);
8622 + spin_unlock(&busiest->lock);
8627 - * from this point on nothing can prevent us from
8628 - * switching to the next task, save this fact in
8631 - sched_data->curr = next;
8632 - task_set_cpu(next, this_cpu);
8633 - spin_unlock_irq(&runqueue_lock);
8635 - if (unlikely(prev == next)) {
8636 - /* We won't go through the normal tail, so do this by hand */
8637 - prev->policy &= ~SCHED_YIELD;
8638 - goto same_process;
8640 + * Either the idle_cpu_tick() or the busy_cpu_tick() function
8641 + * gets called every timer tick, on every CPU. Our balancing
8642 + * frequency and balancing aggressiveness depend on whether the CPU is
8645 + * busy-rebalance every 250 msecs, idle-rebalance every 100 msecs.
8647 +#define BUSY_REBALANCE_TICK (HZ/4 ?: 1)
8648 +#define IDLE_REBALANCE_TICK (HZ/10 ?: 1)
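With the common 2.4-era default of HZ == 100 (an assumption; HZ is architecture-configurable), these work out as:

	/* BUSY_REBALANCE_TICK = 100/4  = 25 ticks = 250 msecs
	 * IDLE_REBALANCE_TICK = 100/10 = 10 ticks = 100 msecs
	 * The GNU 'x ?: 1' form substitutes 1 if the division truncates
	 * to 0, guaranteeing a non-zero rebalance interval. */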
8650 +static inline void idle_tick(void)
8652 + if (unlikely(time_before_eq(this_rq()->last_jiffy + IDLE_REBALANCE_TICK, jiffies))) {
8653 + spin_lock(&this_rq()->lock);
8654 + load_balance(this_rq(), 1);
8655 + spin_unlock(&this_rq()->lock);
8656 + this_rq()->last_jiffy = jiffies;
8662 - * maintain the per-process 'last schedule' value.
8663 - * (this has to be recalculated even if we reschedule to
8664 - * the same process) Currently this is only used on SMP,
8665 - * and it's approximate, so we do not have to maintain
8666 - * it while holding the runqueue spinlock.
8668 - sched_data->last_schedule = get_cycles();
8672 - * We drop the scheduler lock early (it's a global spinlock),
8673 - * thus we have to lock the previous process from getting
8674 - * rescheduled during switch_to().
8677 + * We place interactive tasks back into the active array, if possible.
8679 + * To guarantee that this does not starve expired tasks we ignore the
8680 + * interactivity of a task if the first expired task had to wait more
8681 + * than a 'reasonable' amount of time. This deadline timeout is
8682 + * load-dependent, as the frequency of array switches decreases with
8683 + * increasing number of running tasks:
8685 +#define EXPIRED_STARVING(rq) \
8686 + ((rq)->expired_timestamp && \
8687 + (jiffies - (rq)->expired_timestamp >= \
8688 + STARVATION_LIMIT * ((rq)->nr_running) + 1))
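Worked example (STARVATION_LIMIT is defined elsewhere in the patch; assume one second's worth of jiffies, i.e. HZ, purely for the arithmetic):

	/* With nr_running == 4, the deadline is HZ * 4 + 1 jiffies,
	 * roughly 4 seconds. Once the first expired task has waited that
	 * long, EXPIRED_STARVING(rq) becomes true and interactive tasks
	 * stop being reinserted into the active array, so the pending
	 * array switch can finally happen. */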
8690 -#endif /* CONFIG_SMP */
8692 + * This function gets called by the timer code, with HZ frequency.
8693 + * We call it with interrupts disabled.
8695 +void scheduler_tick(int user_tick, int system)
8697 + int cpu = smp_processor_id();
8698 + runqueue_t *rq = this_rq();
8699 + task_t *p = current;
8701 - kstat.context_swtch++;
8703 - * there are 3 processes which are affected by a context switch:
8705 - * prev == .... ==> (last => next)
8707 - * It's the 'much more previous' 'prev' that is on next's stack,
8708 - * but prev is set to (the just run) 'last' process by switch_to().
8709 - * This might sound slightly confusing but makes tons of sense.
8711 - prepare_to_switch();
8713 - struct mm_struct *mm = next->mm;
8714 - struct mm_struct *oldmm = prev->active_mm;
8716 - BUG_ON(next->active_mm);
8717 - next->active_mm = oldmm;
8718 - atomic_inc(&oldmm->mm_count);
8719 - enter_lazy_tlb(oldmm, next, this_cpu);
8721 - BUG_ON(next->active_mm != mm);
8722 - switch_mm(oldmm, mm, next, this_cpu);
8723 + if (p == rq->idle) {
8724 + if (local_bh_count(cpu) || local_irq_count(cpu) > 1)
8725 + kstat.per_cpu_system[cpu] += system;
8731 + if (TASK_NICE(p) > 0)
8732 + kstat.per_cpu_nice[cpu] += user_tick;
8734 + kstat.per_cpu_user[cpu] += user_tick;
8735 + kstat.per_cpu_system[cpu] += system;
8737 + /* Task might have expired already, but not scheduled off yet */
8738 + if (p->array != rq->active) {
8739 + set_tsk_need_resched(p);
8742 + spin_lock(&rq->lock);
8743 + if (unlikely(rt_task(p))) {
8745 + * RR tasks need a special form of timeslice management.
8746 + * FIFO tasks have no timeslices.
8748 + if ((p->policy == SCHED_RR) && !--p->time_slice) {
8749 + p->time_slice = TASK_TIMESLICE(p);
8750 + p->first_time_slice = 0;
8751 + set_tsk_need_resched(p);
8753 + /* put it at the end of the queue: */
8754 + dequeue_task(p, rq->active);
8755 + enqueue_task(p, rq->active);
8760 + * The task was running during this tick - update the
8761 + * time slice counter and the sleep average. Note: we
8762 + * do not update a process's priority until it either
8763 + * goes to sleep or uses up its timeslice. This makes
8764 + * it possible for interactive tasks to use up their
8765 + * timeslices at their highest priority levels.
8769 + if (!--p->time_slice) {
8770 + dequeue_task(p, rq->active);
8771 + set_tsk_need_resched(p);
8772 + p->prio = effective_prio(p);
8773 + p->time_slice = TASK_TIMESLICE(p);
8774 + p->first_time_slice = 0;
8776 + if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
8777 + if (!rq->expired_timestamp)
8778 + rq->expired_timestamp = jiffies;
8779 + enqueue_task(p, rq->expired);
8781 + enqueue_task(p, rq->active);
8785 + if (unlikely(time_before_eq(this_rq()->last_jiffy + BUSY_REBALANCE_TICK, jiffies))) {
8786 + load_balance(rq, 0);
8787 + rq->last_jiffy = jiffies;
8790 + spin_unlock(&rq->lock);
8793 +void scheduling_functions_start_here(void) { }
8796 + * 'schedule()' is the main scheduler function.
8798 +asmlinkage void schedule(void)
8800 + task_t *prev, *next;
8802 + prio_array_t *array;
8803 + struct list_head *queue;
8806 + if (unlikely(in_interrupt()))
8810 - prev->active_mm = NULL;
8816 + release_kernel_lock(prev, smp_processor_id());
8817 + prev->sleep_timestamp = jiffies;
8818 + spin_lock_irq(&rq->lock);
8820 + switch (prev->state) {
8821 + case TASK_INTERRUPTIBLE:
8822 + if (unlikely(signal_pending(prev))) {
8823 + prev->state = TASK_RUNNING;
8827 + deactivate_task(prev, rq);
8828 + case TASK_RUNNING:
8834 + if (unlikely(!rq->nr_running)) {
8836 + load_balance(rq, 2);
8837 + rq->last_jiffy = jiffies;
8838 + if (rq->nr_running)
8839 + goto pick_next_task;
8842 + rq->expired_timestamp = 0;
8843 + goto switch_tasks;
8847 - * This just switches the register state and the
8850 - switch_to(prev, next, prev);
8851 - __schedule_tail(prev);
8852 + array = rq->active;
8853 + if (unlikely(!array->nr_active)) {
8855 + * Switch the active and expired arrays.
8857 + rq->active = rq->expired;
8858 + rq->expired = array;
8859 + array = rq->active;
8860 + rq->expired_timestamp = 0;
8863 + idx = sched_find_first_bit(array->bitmap);
8864 + queue = array->queue + idx;
8865 + next = list_entry(queue->next, task_t, run_list);
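sched_find_first_bit() is what makes this pick O(1): it finds the first set bit of the priority bitmap in a few word-sized operations. A conceptually equivalent (but unoptimized) sketch using generic bitops; the helper name here is ours, not the kernel's:

	#include <asm/bitops.h>

	/* Walks the bitmap bit by bit; the real helper does this a word
	 * at a time. The always-set delimiter bit at MAX_PRIO (see
	 * sched_init() below) guarantees the loop terminates. */
	static inline int conceptual_first_bit(unsigned long *bitmap)
	{
		int idx = 0;

		while (!test_bit(idx, bitmap))
			idx++;
		return idx;
	}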
8869 + clear_tsk_need_resched(prev);
8871 + if (likely(prev != next)) {
8872 + rq->nr_switches++;
8875 + prepare_arch_switch(rq, next);
8876 + prev = context_switch(prev, next);
8879 + finish_arch_switch(rq, prev);
8881 + spin_unlock_irq(&rq->lock);
8884 reacquire_kernel_lock(current);
8885 - if (current->need_resched)
8886 - goto need_resched_back;
8888 + if (need_resched())
8889 + goto need_resched;
8893 - * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just wake everything
8894 - * up. If it's an exclusive wakeup (nr_exclusive == small +ve number) then we wake all the
8895 - * non-exclusive tasks and one exclusive task.
8896 + * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
8897 + * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
8898 + * number) then we wake all the non-exclusive tasks and one exclusive task.
8900 * There are circumstances in which we can try to wake a task which has already
8901 - * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns zero
8902 - * in this (rare) case, and we handle it by contonuing to scan the queue.
8903 + * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
8904 + * zero in this (rare) case, and we handle it by continuing to scan the queue.
8906 -static inline void __wake_up_common (wait_queue_head_t *q, unsigned int mode,
8907 - int nr_exclusive, const int sync)
8908 +static inline void __wake_up_common(wait_queue_head_t *q, unsigned int mode, int nr_exclusive, int sync)
8910 struct list_head *tmp;
8911 - struct task_struct *p;
8913 - CHECK_MAGIC_WQHEAD(q);
8914 - WQ_CHECK_LIST_HEAD(&q->task_list);
8916 - list_for_each(tmp,&q->task_list) {
8917 - unsigned int state;
8918 - wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);
8919 + unsigned int state;
8920 + wait_queue_t *curr;
8923 - CHECK_MAGIC(curr->__magic);
8924 + list_for_each(tmp, &q->task_list) {
8925 + curr = list_entry(tmp, wait_queue_t, task_list);
8928 - if (state & mode) {
8929 - WQ_NOTE_WAKER(curr);
8930 - if (try_to_wake_up(p, sync) && (curr->flags&WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
8931 + if ((state & mode) && try_to_wake_up(p, sync) &&
8932 + ((curr->flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive))
8938 -void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr)
8939 +void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
8942 - unsigned long flags;
8943 - wq_read_lock_irqsave(&q->lock, flags);
8944 - __wake_up_common(q, mode, nr, 0);
8945 - wq_read_unlock_irqrestore(&q->lock, flags);
8947 + unsigned long flags;
8952 + wq_read_lock_irqsave(&q->lock, flags);
8953 + __wake_up_common(q, mode, nr_exclusive, 0);
8954 + wq_read_unlock_irqrestore(&q->lock, flags);
8957 -void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr)
8960 +void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
8963 - unsigned long flags;
8964 - wq_read_lock_irqsave(&q->lock, flags);
8965 - __wake_up_common(q, mode, nr, 1);
8966 - wq_read_unlock_irqrestore(&q->lock, flags);
8968 + unsigned long flags;
8973 + wq_read_lock_irqsave(&q->lock, flags);
8974 + if (likely(nr_exclusive))
8975 + __wake_up_common(q, mode, nr_exclusive, 1);
8977 + __wake_up_common(q, mode, nr_exclusive, 0);
8978 + wq_read_unlock_irqrestore(&q->lock, flags);
8983 void complete(struct completion *x)
8985 unsigned long flags;
8987 - spin_lock_irqsave(&x->wait.lock, flags);
8988 + wq_write_lock_irqsave(&x->wait.lock, flags);
8990 __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE, 1, 0);
8991 - spin_unlock_irqrestore(&x->wait.lock, flags);
8992 + wq_write_unlock_irqrestore(&x->wait.lock, flags);
8995 void wait_for_completion(struct completion *x)
8997 - spin_lock_irq(&x->wait.lock);
8998 + wq_write_lock_irq(&x->wait.lock);
9000 DECLARE_WAITQUEUE(wait, current);
9002 @@ -775,14 +1060,14 @@
9003 __add_wait_queue_tail(&x->wait, &wait);
9005 __set_current_state(TASK_UNINTERRUPTIBLE);
9006 - spin_unlock_irq(&x->wait.lock);
9007 + wq_write_unlock_irq(&x->wait.lock);
9009 - spin_lock_irq(&x->wait.lock);
9010 + wq_write_lock_irq(&x->wait.lock);
9012 __remove_wait_queue(&x->wait, &wait);
9015 - spin_unlock_irq(&x->wait.lock);
9016 + wq_write_unlock_irq(&x->wait.lock);
9019 #define SLEEP_ON_VAR \
9020 @@ -850,43 +1135,40 @@
9022 void scheduling_functions_end_here(void) { }
9026 - * set_cpus_allowed() - change a given task's processor affinity
9027 - * @p: task to bind
9028 - * @new_mask: bitmask of allowed processors
9030 - * Upon return, the task is running on a legal processor. Note the caller
9031 - * must have a valid reference to the task: it must not exit() prematurely.
9032 - * This call can sleep; do not hold locks on call.
9034 -void set_cpus_allowed(struct task_struct *p, unsigned long new_mask)
9035 +void set_user_nice(task_t *p, long nice)
9037 - new_mask &= cpu_online_map;
9038 - BUG_ON(!new_mask);
9040 - p->cpus_allowed = new_mask;
9041 + unsigned long flags;
9042 + prio_array_t *array;
9045 + if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
9048 - * If the task is on a no-longer-allowed processor, we need to move
9049 - * it. If the task is not current, then set need_resched and send
9050 - * its processor an IPI to reschedule.
9051 + * We have to be careful: if called from sys_setpriority(),
9052 + * the task might be in the middle of scheduling on another CPU.
9054 - if (!(p->cpus_runnable & p->cpus_allowed)) {
9055 - if (p != current) {
9056 - p->need_resched = 1;
9057 - smp_send_reschedule(p->processor);
9059 + rq = task_rq_lock(p, &flags);
9061 + p->static_prio = NICE_TO_PRIO(nice);
9066 + dequeue_task(p, array);
9067 + p->static_prio = NICE_TO_PRIO(nice);
9068 + p->prio = NICE_TO_PRIO(nice);
9070 + enqueue_task(p, array);
9072 - * Wait until we are on a legal processor. If the task is
9073 - * current, then we should be on a legal processor the next
9074 - * time we reschedule. Otherwise, we need to wait for the IPI.
9075 + * If the task is running and has either lowered or raised
9076 + * its priority, then reschedule its CPU:
9078 - while (!(p->cpus_runnable & p->cpus_allowed))
9080 + if (p == rq->curr)
9081 + resched_task(rq->curr);
9084 + task_rq_unlock(rq, &flags);
9086 -#endif /* CONFIG_SMP */
9090 @@ -898,7 +1180,7 @@
9092 asmlinkage long sys_nice(int increment)
9098 * Setpriority might change our priority at the same moment.
9099 @@ -914,32 +1196,46 @@
9103 - newprio = current->nice + increment;
9104 - if (newprio < -20)
9108 - current->nice = newprio;
9109 + nice = PRIO_TO_NICE(current->static_prio) + increment;
9114 + set_user_nice(current, nice);
9120 -static inline struct task_struct *find_process_by_pid(pid_t pid)
9122 + * This is the priority value as seen by users in /proc
9124 + * RT tasks are offset by -200. Normal tasks are centered
9125 + * around 0, value goes from -16 to +15.
9127 +int task_prio(task_t *p)
9129 - struct task_struct *tsk = current;
9130 + return p->prio - MAX_USER_RT_PRIO;
9134 - tsk = find_task_by_pid(pid);
9136 +int task_nice(task_t *p)
9138 + return TASK_NICE(p);
9141 +static inline task_t *find_process_by_pid(pid_t pid)
9143 + return pid ? find_task_by_pid(pid) : current;
9146 -static int setscheduler(pid_t pid, int policy,
9147 - struct sched_param *param)
9148 +static int setscheduler(pid_t pid, int policy, struct sched_param *param)
9150 struct sched_param lp;
9151 - struct task_struct *p;
9152 + prio_array_t *array;
9153 + unsigned long flags;
9159 if (!param || pid < 0)
9160 @@ -953,14 +1249,19 @@
9161 * We play safe to avoid deadlocks.
9163 read_lock_irq(&tasklist_lock);
9164 - spin_lock(&runqueue_lock);
9166 p = find_process_by_pid(pid);
9172 + goto out_unlock_tasklist;
9175 + * To be able to change p->policy safely, the appropriate
9176 + * runqueue lock must be held.
9178 + rq = task_rq_lock(p, &flags);
9183 @@ -969,40 +1270,48 @@
9184 policy != SCHED_OTHER)
9190 - * Valid priorities for SCHED_FIFO and SCHED_RR are 1..99, valid
9191 - * priority for SCHED_OTHER is 0.
9192 + * Valid priorities for SCHED_FIFO and SCHED_RR are
9193 + * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_OTHER is 0.
9196 - if (lp.sched_priority < 0 || lp.sched_priority > 99)
9197 + if (lp.sched_priority < 0 || lp.sched_priority > MAX_USER_RT_PRIO-1)
9199 if ((policy == SCHED_OTHER) != (lp.sched_priority == 0))
9203 - if ((policy == SCHED_FIFO || policy == SCHED_RR) &&
9204 + if ((policy == SCHED_FIFO || policy == SCHED_RR) &&
9205 !capable(CAP_SYS_NICE))
9207 if ((current->euid != p->euid) && (current->euid != p->uid) &&
9208 !capable(CAP_SYS_NICE))
9213 + deactivate_task(p, task_rq(p));
9216 p->rt_priority = lp.sched_priority;
9218 - current->need_resched = 1;
9219 + if (policy != SCHED_OTHER)
9220 + p->prio = MAX_USER_RT_PRIO-1 - p->rt_priority;
9222 + p->prio = p->static_prio;
9224 + activate_task(p, task_rq(p));
9227 - spin_unlock(&runqueue_lock);
9228 + task_rq_unlock(rq, &flags);
9229 +out_unlock_tasklist:
9230 read_unlock_irq(&tasklist_lock);
9236 -asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
9237 +asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
9238 struct sched_param *param)
9240 return setscheduler(pid, policy, param);
9241 @@ -1015,7 +1324,7 @@
9243 asmlinkage long sys_sched_getscheduler(pid_t pid)
9245 - struct task_struct *p;
9250 @@ -1026,7 +1335,7 @@
9251 read_lock(&tasklist_lock);
9252 p = find_process_by_pid(pid);
9254 - retval = p->policy & ~SCHED_YIELD;
9255 + retval = p->policy;
9256 read_unlock(&tasklist_lock);
9259 @@ -1035,7 +1344,7 @@
9261 asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param *param)
9263 - struct task_struct *p;
9265 struct sched_param lp;
9268 @@ -1066,42 +1375,64 @@
9270 asmlinkage long sys_sched_yield(void)
9273 - * Trick. sched_yield() first counts the number of truly
9274 - * 'pending' runnable processes, then returns if it's
9275 - * only the current processes. (This test does not have
9276 - * to be atomic.) In threaded applications this optimization
9277 - * gets triggered quite often.
9279 + runqueue_t *rq = this_rq();
9280 + prio_array_t *array;
9283 - int nr_pending = nr_running;
9284 + spin_lock_irq(&rq->lock);
9286 + if (unlikely(rq->nr_running == 1)) {
9287 + spin_unlock_irq(&rq->lock);
9293 + array = current->array;
9294 + if (unlikely(rt_task(current))) {
9295 + list_del(&current->run_list);
9296 + list_add_tail(&current->run_list, array->queue + current->prio);
9300 - // Subtract non-idle processes running on other CPUs.
9301 - for (i = 0; i < smp_num_cpus; i++) {
9302 - int cpu = cpu_logical_map(i);
9303 - if (aligned_data[cpu].schedule_data.curr != idle_task(cpu))
9305 + if (unlikely(array == rq->expired) && rq->active->nr_active)
9308 + list_del(&current->run_list);
9309 + if (!list_empty(array->queue + current->prio)) {
9310 + list_add(&current->run_list, array->queue[current->prio].next);
9314 - // on UP this process is on the runqueue as well
9319 + __clear_bit(current->prio, array->bitmap);
9320 + if (likely(array == rq->active) && array->nr_active == 1) {
9322 - * This process can only be rescheduled by us,
9323 - * so this is safe without any locking.
9324 + * We're the last task in the active queue so
9325 + * we must move ourselves to the expired array
9326 + * to avoid running again immediately.
9328 - if (current->policy == SCHED_OTHER)
9329 - current->policy |= SCHED_YIELD;
9330 - current->need_resched = 1;
9332 - spin_lock_irq(&runqueue_lock);
9333 - move_last_runqueue(current);
9334 - spin_unlock_irq(&runqueue_lock);
9335 + array->nr_active--;
9336 + array = rq->expired;
9337 + array->nr_active++;
9340 + i = sched_find_first_bit(array->bitmap);
9342 + BUG_ON(i == MAX_PRIO);
9343 + BUG_ON(i == current->prio && array == current->array);
9345 + if (array == current->array && i < current->prio)
9346 + i = current->prio;
9348 + current->array = array;
9349 + current->prio = i;
9351 + list_add(&current->run_list, array->queue[i].next);
9352 + __set_bit(i, array->bitmap);
9355 + spin_unlock_irq(&rq->lock);
9362 @@ -1113,14 +1444,13 @@
9366 - set_current_state(TASK_RUNNING);
9367 + __set_current_state(TASK_RUNNING);
9372 void __cond_resched(void)
9374 - set_current_state(TASK_RUNNING);
9375 + __set_current_state(TASK_RUNNING);
9379 @@ -1131,7 +1461,7 @@
9384 + ret = MAX_USER_RT_PRIO-1;
9388 @@ -1158,7 +1488,7 @@
9389 asmlinkage long sys_sched_rr_get_interval(pid_t pid, struct timespec *interval)
9392 - struct task_struct *p;
9394 int retval = -EINVAL;
9397 @@ -1168,8 +1498,8 @@
9398 read_lock(&tasklist_lock);
9399 p = find_process_by_pid(pid);
9401 - jiffies_to_timespec(p->policy & SCHED_FIFO ? 0 : NICE_TO_TICKS(p->nice),
9403 + jiffies_to_timespec(p->policy & SCHED_FIFO ?
9404 + 0 : TASK_TIMESLICE(p), &t);
9405 read_unlock(&tasklist_lock);
9407 retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
9408 @@ -1177,14 +1507,14 @@
9412 -static void show_task(struct task_struct * p)
9413 +static void show_task(task_t * p)
9415 unsigned long free = 0;
9417 static const char * stat_nam[] = { "R", "S", "D", "Z", "T", "W" };
9419 printk("%-13.13s ", p->comm);
9420 - state = p->state ? ffz(~p->state) + 1 : 0;
9421 + state = p->state ? __ffs(p->state) + 1 : 0;
9422 if (((unsigned) state) < sizeof(stat_nam)/sizeof(char *))
9423 printk(stat_nam[state]);
9425 @@ -1225,7 +1555,7 @@
9426 printk(" (NOTLB)\n");
9429 - extern void show_trace_task(struct task_struct *tsk);
9430 + extern void show_trace_task(task_t *tsk);
9434 @@ -1247,7 +1577,7 @@
9436 void show_state(void)
9438 - struct task_struct *p;
9441 #if (BITS_PER_LONG == 32)
9443 @@ -1270,128 +1600,280 @@
9444 read_unlock(&tasklist_lock);
9448 - * reparent_to_init() - Reparent the calling kernel thread to the init task.
9450 - * If a kernel thread is launched as a result of a system call, or if
9451 - * it ever exits, it should generally reparent itself to init so that
9452 - * it is correctly cleaned up on exit.
9454 + * double_rq_lock - safely lock two runqueues
9456 - * The various task state such as scheduling policy and priority may have
9457 - * been inherited fro a user process, so we reset them to sane values here.
9458 + * Note this does not disable interrupts like task_rq_lock,
9459 + * you need to do so manually before calling.
9461 +static inline void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
9464 + spin_lock(&rq1->lock);
9467 + spin_lock(&rq1->lock);
9468 + spin_lock(&rq2->lock);
9470 + spin_lock(&rq2->lock);
9471 + spin_lock(&rq1->lock);
9477 + * double_rq_unlock - safely unlock two runqueues
9479 - * NOTE that reparent_to_init() gives the caller full capabilities.
9480 + * Note this does not restore interrupts like task_rq_unlock,
9481 + * you need to do so manually after calling.
9483 -void reparent_to_init(void)
9484 +static inline void double_rq_unlock(runqueue_t *rq1, runqueue_t *rq2)
9486 - struct task_struct *this_task = current;
9487 + spin_unlock(&rq1->lock);
9489 + spin_unlock(&rq2->lock);
9492 - write_lock_irq(&tasklist_lock);
9493 +void __init init_idle(task_t *idle, int cpu)
9495 + runqueue_t *idle_rq = cpu_rq(cpu), *rq = cpu_rq(idle->cpu);
9496 + unsigned long flags;
9498 - /* Reparent to init */
9499 - REMOVE_LINKS(this_task);
9500 - this_task->p_pptr = child_reaper;
9501 - this_task->p_opptr = child_reaper;
9502 - SET_LINKS(this_task);
9503 + __save_flags(flags);
9505 + double_rq_lock(idle_rq, rq);
9507 + idle_rq->curr = idle_rq->idle = idle;
9508 + deactivate_task(idle, rq);
9509 + idle->array = NULL;
9510 + idle->prio = MAX_PRIO;
9511 + idle->state = TASK_RUNNING;
9513 + double_rq_unlock(idle_rq, rq);
9514 + set_tsk_need_resched(idle);
9515 + __restore_flags(flags);
9518 +extern void init_timervecs(void);
9519 +extern void timer_bh(void);
9520 +extern void tqueue_bh(void);
9521 +extern void immediate_bh(void);
9523 +void __init sched_init(void)
9528 + for (i = 0; i < NR_CPUS; i++) {
9529 + prio_array_t *array;
9531 - /* Set the exit signal to SIGCHLD so we signal init on exit */
9532 - this_task->exit_signal = SIGCHLD;
9534 + rq->active = rq->arrays;
9535 + rq->expired = rq->arrays + 1;
9536 + spin_lock_init(&rq->lock);
9538 + INIT_LIST_HEAD(&rq->migration_queue);
9541 - /* We also take the runqueue_lock while altering task fields
9542 - * which affect scheduling decisions */
9543 - spin_lock(&runqueue_lock);
9544 + for (j = 0; j < 2; j++) {
9545 + array = rq->arrays + j;
9546 + for (k = 0; k < MAX_PRIO; k++) {
9547 + INIT_LIST_HEAD(array->queue + k);
9548 + __clear_bit(k, array->bitmap);
9550 + // delimiter for bitsearch
9551 + __set_bit(MAX_PRIO, array->bitmap);
9555 + * We have to do a little magic to get the first
9556 + * process right in SMP mode.
9559 + rq->curr = current;
9560 + rq->idle = current;
9561 + current->cpu = smp_processor_id();
9562 + wake_up_process(current);
9564 - this_task->ptrace = 0;
9565 - this_task->nice = DEF_NICE;
9566 - this_task->policy = SCHED_OTHER;
9567 - /* cpus_allowed? */
9568 - /* rt_priority? */
9570 - this_task->cap_effective = CAP_INIT_EFF_SET;
9571 - this_task->cap_inheritable = CAP_INIT_INH_SET;
9572 - this_task->cap_permitted = CAP_FULL_SET;
9573 - this_task->keep_capabilities = 0;
9574 - memcpy(this_task->rlim, init_task.rlim, sizeof(*(this_task->rlim)));
9575 - switch_uid(INIT_USER);
9577 + init_bh(TIMER_BH, timer_bh);
9578 + init_bh(TQUEUE_BH, tqueue_bh);
9579 + init_bh(IMMEDIATE_BH, immediate_bh);
9581 - spin_unlock(&runqueue_lock);
9582 - write_unlock_irq(&tasklist_lock);
9584 + * The boot idle thread does lazy MMU switching as well:
9586 + atomic_inc(&init_mm.mm_count);
9587 + enter_lazy_tlb(&init_mm, current, smp_processor_id());
9593 - * Put all the gunge required to become a kernel thread without
9594 - * attached user resources in one place where it belongs.
9596 + * This is how migration works:
9598 + * 1) we queue a migration_req_t structure in the source CPU's
9599 + * runqueue and wake up that CPU's migration thread.
9600 + * 2) we wait on the request's completion => thread blocks.
9601 + * 3) migration thread wakes up (implicitly it forces the migrated
9602 + * thread off the CPU)
9603 + * 4) it gets the migration request and checks whether the migrated
9604 + * task is still in the wrong runqueue.
9605 + * 5) if it's in the wrong runqueue then the migration thread removes
9606 + * it and puts it into the right queue.
9607 + * 6) migration thread completes the request's completion.
9608 + * 7) we wake up and the migration is done.
9612 + struct list_head list;
9614 + struct completion done;
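From the caller's side the whole protocol collapses into one blocking call; a usage sketch (mask semantics as in this patch, one bit per CPU):

	/* Pin the calling kernel thread onto CPU 0. The call may sleep
	 * while waiting for the migration thread, so no spinlocks may be
	 * held, and the caller must hold a valid reference to the task. */
	set_cpus_allowed(current, 1UL << 0);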
9617 -void daemonize(void)
9619 + * Change a given task's CPU affinity. Migrate the process to a
9620 + * proper CPU and schedule it away if the CPU it's executing on
9621 + * is removed from the allowed bitmask.
9623 + * NOTE: the caller must have a valid reference to the task, the
9624 + * task must not exit() & deallocate itself prematurely. The
9625 + * call is not atomic; no spinlocks may be held.
9627 +void set_cpus_allowed(task_t *p, unsigned long new_mask)
9629 - struct fs_struct *fs;
9630 + unsigned long flags;
9631 + migration_req_t req;
9634 + new_mask &= cpu_online_map;
9638 + rq = task_rq_lock(p, &flags);
9639 + p->cpus_allowed = new_mask;
9641 - * If we were started as result of loading a module, close all of the
9642 - * user space pages. We don't need them, and if we didn't close them
9643 - * they would be locked into memory.
9644 + * Can the task run on the task's current CPU? If not then
9645 + * migrate the process off to a proper CPU.
9648 + if (new_mask & (1UL << p->cpu)) {
9649 + task_rq_unlock(rq, &flags);
9653 - current->session = 1;
9654 - current->pgrp = 1;
9655 - current->tty = NULL;
9657 + * If the task is not on a runqueue, then it is safe to
9658 + * simply update the task's cpu field.
9660 + if (!p->array && (p != rq->curr)) {
9661 + p->cpu = __ffs(p->cpus_allowed);
9662 + task_rq_unlock(rq, &flags);
9666 - /* Become as one with the init task */
9667 + init_completion(&req.done);
9669 + list_add(&req.list, &rq->migration_queue);
9670 + task_rq_unlock(rq, &flags);
9671 + wake_up_process(rq->migration_thread);
9673 - exit_fs(current); /* current->fs->count--; */
9674 - fs = init_task.fs;
9676 - atomic_inc(&fs->count);
9677 - exit_files(current);
9678 - current->files = init_task.files;
9679 - atomic_inc(&current->files->count);
9680 + wait_for_completion(&req.done);
9683 -extern unsigned long wait_init_idle;
9684 +static __initdata int master_migration_thread;
9686 -void __init init_idle(void)
9687 +static int migration_thread(void * bind_cpu)
9689 - struct schedule_data * sched_data;
9690 - sched_data = &aligned_data[smp_processor_id()].schedule_data;
9691 + int cpu = cpu_logical_map((int) (long) bind_cpu);
9692 + struct sched_param param = { sched_priority: MAX_RT_PRIO-1 };
9696 - if (current != &init_task && task_on_runqueue(current)) {
9697 - printk("UGH! (%d:%d) was on the runqueue, removing.\n",
9698 - smp_processor_id(), current->pid);
9699 - del_from_runqueue(current);
9701 + sigfillset(&current->blocked);
9702 + set_fs(KERNEL_DS);
9704 + * The first migration thread is started on the boot CPU; it
9705 + * migrates the other migration threads to their destination CPUs.
9707 + if (cpu != master_migration_thread) {
9708 + while (!cpu_rq(master_migration_thread)->migration_thread)
9710 + set_cpus_allowed(current, 1UL << cpu);
9712 - sched_data->curr = current;
9713 - sched_data->last_schedule = get_cycles();
9714 - clear_bit(current->processor, &wait_init_idle);
9716 + printk("migration_task %d on cpu=%d\n", cpu, smp_processor_id());
9717 + ret = setscheduler(0, SCHED_FIFO, &param);
9719 -extern void init_timervecs (void);
9721 + rq->migration_thread = current;
9723 -void __init sched_init(void)
9726 - * We have to do a little magic to get the first
9727 - * process right in SMP mode.
9729 - int cpu = smp_processor_id();
9731 + sprintf(current->comm, "migration_CPU%d", smp_processor_id());
9733 - init_task.processor = cpu;
9735 + runqueue_t *rq_src, *rq_dest;
9736 + struct list_head *head;
9737 + int cpu_src, cpu_dest;
9738 + migration_req_t *req;
9739 + unsigned long flags;
9742 - for(nr = 0; nr < PIDHASH_SZ; nr++)
9743 - pidhash[nr] = NULL;
9744 + spin_lock_irqsave(&rq->lock, flags);
9745 + head = &rq->migration_queue;
9746 + current->state = TASK_INTERRUPTIBLE;
9747 + if (list_empty(head)) {
9748 + spin_unlock_irqrestore(&rq->lock, flags);
9752 + req = list_entry(head->next, migration_req_t, list);
9753 + list_del_init(head->next);
9754 + spin_unlock_irqrestore(&rq->lock, flags);
9757 + cpu_dest = __ffs(p->cpus_allowed);
9758 + rq_dest = cpu_rq(cpu_dest);
9761 + rq_src = cpu_rq(cpu_src);
9763 + local_irq_save(flags);
9764 + double_rq_lock(rq_src, rq_dest);
9765 + if (p->cpu != cpu_src) {
9766 + double_rq_unlock(rq_src, rq_dest);
9767 + local_irq_restore(flags);
9770 + if (rq_src == rq) {
9771 + p->cpu = cpu_dest;
9773 + deactivate_task(p, rq_src);
9774 + activate_task(p, rq_dest);
9777 + double_rq_unlock(rq_src, rq_dest);
9778 + local_irq_restore(flags);
9781 + complete(&req->done);
9785 - init_bh(TIMER_BH, timer_bh);
9786 - init_bh(TQUEUE_BH, tqueue_bh);
9787 - init_bh(IMMEDIATE_BH, immediate_bh);
9788 +void __init migration_init(void)
9793 - * The boot idle thread does lazy MMU switching as well:
9795 - atomic_inc(&init_mm.mm_count);
9796 - enter_lazy_tlb(&init_mm, current, cpu);
9797 + master_migration_thread = smp_processor_id();
9798 + current->cpus_allowed = 1UL << master_migration_thread;
9800 + for (cpu = 0; cpu < smp_num_cpus; cpu++) {
9801 + if (kernel_thread(migration_thread, (void *) (long) cpu,
9802 + CLONE_FS | CLONE_FILES | CLONE_SIGNAL) < 0)
9805 + current->cpus_allowed = -1L;
9807 + for (cpu = 0; cpu < smp_num_cpus; cpu++)
9808 + while (!cpu_rq(cpu_logical_map(cpu))->migration_thread)
9809 + schedule_timeout(2);
9812 +#endif /* CONFIG_SMP */
9813 diff -urN linux-2.4.24.org/kernel/signal.c linux-2.4.24/kernel/signal.c
9814 --- linux-2.4.24.org/kernel/signal.c 2004-02-04 20:47:26.821945338 +0100
9815 +++ linux-2.4.24/kernel/signal.c 2004-02-04 20:52:55.082667907 +0100
9816 @@ -507,12 +507,9 @@
9817 * process of changing - but no harm is done by that
9818 * other than doing an extra (lightweight) IPI interrupt.
9820 - spin_lock(&runqueue_lock);
9821 - if (task_has_cpu(t) && t->processor != smp_processor_id())
9822 - smp_send_reschedule(t->processor);
9823 - spin_unlock(&runqueue_lock);
9824 -#endif /* CONFIG_SMP */
9826 + if ((t->state == TASK_RUNNING) && (t->cpu != cpu()))
9827 + kick_if_running(t);
9829 if (t->state & TASK_INTERRUPTIBLE) {
9832 diff -urN linux-2.4.24.org/kernel/softirq.c linux-2.4.24/kernel/softirq.c
9833 --- linux-2.4.24.org/kernel/softirq.c 2004-02-04 20:47:27.211864234 +0100
9834 +++ linux-2.4.24/kernel/softirq.c 2004-02-04 20:52:55.110662084 +0100
9835 @@ -364,13 +364,13 @@
9836 int cpu = cpu_logical_map(bind_cpu);
9839 - current->nice = 19;
9840 + set_user_nice(current, 19);
9841 sigfillset(&current->blocked);
9843 /* Migrate to the right CPU */
9844 - current->cpus_allowed = 1UL << cpu;
9845 - while (smp_processor_id() != cpu)
9847 + set_cpus_allowed(current, 1UL << cpu);
9851 sprintf(current->comm, "ksoftirqd_CPU%d", bind_cpu);
9857 -static __init int spawn_ksoftirqd(void)
9858 +__init int spawn_ksoftirqd(void)
9862 diff -urN linux-2.4.24.org/kernel/sys.c linux-2.4.24/kernel/sys.c
9863 --- linux-2.4.24.org/kernel/sys.c 2004-02-04 20:47:26.739962391 +0100
9864 +++ linux-2.4.24/kernel/sys.c 2004-02-04 20:52:55.139656054 +0100
9865 @@ -239,10 +239,10 @@
9867 if (error == -ESRCH)
9869 - if (niceval < p->nice && !capable(CAP_SYS_NICE))
9870 + if (niceval < task_nice(p) && !capable(CAP_SYS_NICE))
9873 - p->nice = niceval;
9874 + set_user_nice(p, niceval);
9876 read_unlock(&tasklist_lock);
9880 if (!proc_sel(p, which, who))
9882 - niceval = 20 - p->nice;
9883 + niceval = 20 - task_nice(p);
9884 if (niceval > retval)
9887 diff -urN linux-2.4.24.org/kernel/timer.c linux-2.4.24/kernel/timer.c
9888 --- linux-2.4.24.org/kernel/timer.c 2004-02-04 20:47:27.115884198 +0100
9889 +++ linux-2.4.24/kernel/timer.c 2004-02-04 20:52:55.155652727 +0100
9892 #include <asm/uaccess.h>
9894 +struct kernel_stat kstat;
9897 * Timekeeping variables
9899 @@ -598,25 +600,7 @@
9900 int cpu = smp_processor_id(), system = user_tick ^ 1;
9902 update_one_process(p, user_tick, system, cpu);
9904 - if (--p->counter <= 0) {
9907 - * SCHED_FIFO is priority preemption, so this is
9908 - * not the place to decide whether to reschedule a
9909 - * SCHED_FIFO task or not - Bhavesh Davda
9911 - if (p->policy != SCHED_FIFO) {
9912 - p->need_resched = 1;
9916 - kstat.per_cpu_nice[cpu] += user_tick;
9918 - kstat.per_cpu_user[cpu] += user_tick;
9919 - kstat.per_cpu_system[cpu] += system;
9920 - } else if (local_bh_count(cpu) || local_irq_count(cpu) > 1)
9921 - kstat.per_cpu_system[cpu] += system;
9922 + scheduler_tick(user_tick, system);
9926 @@ -624,17 +608,7 @@
9928 static unsigned long count_active_tasks(void)
9930 - struct task_struct *p;
9931 - unsigned long nr = 0;
9933 - read_lock(&tasklist_lock);
9934 - for_each_task(p) {
9935 - if ((p->state == TASK_RUNNING ||
9936 - (p->state & TASK_UNINTERRUPTIBLE)))
9939 - read_unlock(&tasklist_lock);
9941 + return (nr_running() + nr_uninterruptible()) * FIXED_1;
9945 @@ -827,6 +801,89 @@
9949 +static void process_timeout(unsigned long __data)
9951 + wake_up_process((task_t *)__data);
9955 + * schedule_timeout - sleep until timeout
9956 + * @timeout: timeout value in jiffies
9958 + * Make the current task sleep until @timeout jiffies have
9959 + * elapsed. The routine will return immediately unless
9960 + * the current task state has been set (see set_current_state()).
9962 + * You can set the task state as follows -
9964 + * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
9965 + * pass before the routine returns. The routine will return 0
9967 + * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
9968 + * delivered to the current task. In this case the remaining time
9969 + * in jiffies will be returned, or 0 if the timer expired in time
9971 + * The current task state is guaranteed to be TASK_RUNNING when this
9972 + * routine returns.
9974 + * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
9975 + * the CPU away without a bound on the timeout. In this case the return
9976 + * value will be %MAX_SCHEDULE_TIMEOUT.
9978 + * In all cases the return value is guaranteed to be non-negative.
9980 +signed long schedule_timeout(signed long timeout)
9982 + struct timer_list timer;
9983 + unsigned long expire;
9987 + case MAX_SCHEDULE_TIMEOUT:
9989 + * These two special cases are useful for the caller's
9990 + * convenience. Nothing more. We could take
9991 + * MAX_SCHEDULE_TIMEOUT from one of the negative values,
9992 + * but I'd like to return a valid offset (>=0) to allow
9993 + * the caller to do everything it wants with the retval.
9999 + * Another bit of paranoia. Note that the retval will be
10000 + * 0, since no piece of the kernel is supposed to check
10001 + * for a negative retval of schedule_timeout() (since it
10002 + * should never happen anyway). You just have the printk()
10003 + * that will tell you if something has gone wrong and where.
10007 + printk(KERN_ERR "schedule_timeout: wrong timeout "
10008 + "value %lx from %p\n", timeout,
10009 + __builtin_return_address(0));
10010 + current->state = TASK_RUNNING;
10015 + expire = timeout + jiffies;
10017 + init_timer(&timer);
10018 + timer.expires = expire;
10019 + timer.data = (unsigned long) current;
10020 + timer.function = process_timeout;
10022 + add_timer(&timer);
10024 + del_timer_sync(&timer);
10026 + timeout = expire - jiffies;
10029 + return timeout < 0 ? 0 : timeout;
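A typical caller of the routine documented above (sketch only):

	/* Sleep for up to one second; a signal can wake us early, in
	 * which case the unslept balance (in jiffies) is returned. */
	static signed long one_second_nap(void)
	{
		set_current_state(TASK_INTERRUPTIBLE);
		return schedule_timeout(HZ);
	}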
10032 /* Thread ID - the internal kernel "pid" */
10033 asmlinkage long sys_gettid(void)
10035 @@ -873,4 +930,3 @@
10040 diff -urN linux-2.4.24.org/mm/oom_kill.c linux-2.4.24/mm/oom_kill.c
10041 --- linux-2.4.24.org/mm/oom_kill.c 2004-02-04 20:47:28.626569974 +0100
10042 +++ linux-2.4.24/mm/oom_kill.c 2004-02-04 20:57:30.567369583 +0100
10044 * Niced processes are most likely less important, so double
10045 * their badness points.
10048 + if (task_nice(p) > 0)
10052 @@ -150,7 +150,7 @@
10053 * all the memory it needs. That way it should be able to
10054 * exit() and clear out its resources quickly...
10056 - p->counter = 5 * HZ;
10057 + p->time_slice = HZ;
10058 p->flags |= PF_MEMALLOC | PF_MEMDIE;
10060 /* This process has hardware access, be more careful. */
10061 diff -urN linux-2.4.24.org/net/bluetooth/bnep/core.c linux-2.4.24/net/bluetooth/bnep/core.c
10062 --- linux-2.4.24.org/net/bluetooth/bnep/core.c 2004-02-04 20:48:41.535404904 +0100
10063 +++ linux-2.4.24/net/bluetooth/bnep/core.c 2004-02-04 20:52:55.199643577 +0100
10064 @@ -460,7 +460,7 @@
10065 sigfillset(&current->blocked);
10066 flush_signals(current);
10068 - current->nice = -15;
10069 + set_user_nice(current, -15);
10073 diff -urN linux-2.4.24.org/net/bluetooth/cmtp/core.c linux-2.4.24/net/bluetooth/cmtp/core.c
10074 --- linux-2.4.24.org/net/bluetooth/cmtp/core.c 2004-02-04 20:48:41.311451486 +0100
10075 +++ linux-2.4.24/net/bluetooth/cmtp/core.c 2004-02-04 20:52:55.224638378 +0100
10076 @@ -298,7 +298,7 @@
10077 sigfillset(&current->blocked);
10078 flush_signals(current);
10080 - current->nice = -15;
10081 + set_user_nice(current, -15);