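O(1) scheduler interactivity fixes for the 2.4.20 -interactive tree:
->sleep_timestamp becomes ->last_run, MAX_TIMESLICE drops from 300 ms to
200 ms, MAX_SLEEP_AVG and STARVATION_LIMIT grow from 2*HZ to 10*HZ, sync
wakeups and freshly forked children are queued without forcing a
preemption, EXPIRED_STARVING short-circuits when STARVATION_LIMIT is 0,
and softirqd is woken before bottom halves are re-enabled.

The __activate_task() hunk credits a task with the time it spent off the
runqueue, minus one jiffy, and clamps the result at MAX_SLEEP_AVG.  Below
is a minimal user-space sketch of that bookkeeping (illustrative only:
HZ=100 is assumed, and struct fake_task / sleep_credit() are made-up names
that do not appear in the patch):

/* gcc -o sleep-avg-demo sleep-avg-demo.c */
#include <stdio.h>

#define HZ            100               /* assumed tick rate */
#define MAX_SLEEP_AVG (10 * HZ)         /* clamp introduced by the patch */

struct fake_task {
        unsigned long sleep_avg;        /* accumulated interactivity credit */
        unsigned long last_run;         /* jiffies when the task last ran */
};

/* Mirrors the __activate_task() hunk: credit only full ticks spent off
 * the runqueue, and never let the average exceed MAX_SLEEP_AVG. */
static void sleep_credit(struct fake_task *p, unsigned long jiffies)
{
        long sleep_time = jiffies - p->last_run - 1;

        if (sleep_time > 0) {
                p->last_run = jiffies;
                p->sleep_avg += sleep_time;
                if (p->sleep_avg > MAX_SLEEP_AVG)
                        p->sleep_avg = MAX_SLEEP_AVG;
        }
}

int main(void)
{
        struct fake_task t = { 0, 0 };

        sleep_credit(&t, 50);                   /* ~0.5 s asleep */
        printf("short sleep: sleep_avg=%lu\n", t.sleep_avg);

        sleep_credit(&t, 50 + 20 * HZ);         /* 20 s asleep: clamped */
        printf("long sleep:  sleep_avg=%lu (max %d)\n",
               t.sleep_avg, MAX_SLEEP_AVG);
        return 0;
}
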
diff -urN linux-2.4.20/include/linux/sched.h linux-2.4.20-interactive/include/linux/sched.h
--- linux-2.4.20/include/linux/sched.h Sun Mar 16 23:39:08 2003
+++ linux-2.4.20-interactive/include/linux/sched.h Sun Mar 16 23:40:18 2003
@@ -335,7 +335,7 @@
 prio_array_t *array;

 unsigned long sleep_avg;
- unsigned long sleep_timestamp;
+ unsigned long last_run;

 unsigned long policy;
 unsigned long cpus_allowed;
diff -urN linux-2.4.20/kernel/fork.c linux-2.4.20-interactive/kernel/fork.c
--- linux-2.4.20/kernel/fork.c Sun Mar 16 23:38:02 2003
+++ linux-2.4.20-interactive/kernel/fork.c Sun Mar 16 23:40:18 2003
@@ -724,7 +724,7 @@
 current->time_slice = 1;
 scheduler_tick(0,0);
 }
- p->sleep_timestamp = jiffies;
+ p->last_run = jiffies;
 __sti();

 /*
diff -urN linux-2.4.20/kernel/sched.c linux-2.4.20-interactive/kernel/sched.c
--- linux-2.4.20/kernel/sched.c Sun Mar 16 23:38:57 2003
+++ linux-2.4.20-interactive/kernel/sched.c Sun Mar 16 23:40:18 2003
@@ -47,18 +47,18 @@
 /*
 * These are the 'tuning knobs' of the scheduler:
 *
- * Minimum timeslice is 10 msecs, default timeslice is 150 msecs,
- * maximum timeslice is 300 msecs. Timeslices get refilled after
+ * Minimum timeslice is 10 msecs, default timeslice is 100 msecs,
+ * maximum timeslice is 200 msecs. Timeslices get refilled after
 * they expire.
 */
 #define MIN_TIMESLICE ( 10 * HZ / 1000)
-#define MAX_TIMESLICE (300 * HZ / 1000)
+#define MAX_TIMESLICE (200 * HZ / 1000)
 #define CHILD_PENALTY 50
 #define PARENT_PENALTY 100
 #define PRIO_BONUS_RATIO 25
 #define INTERACTIVE_DELTA 2
-#define MAX_SLEEP_AVG (2*HZ)
-#define STARVATION_LIMIT (2*HZ)
+#define MAX_SLEEP_AVG (10*HZ)
+#define STARVATION_LIMIT (10*HZ)

 /*
 * If a task is 'interactive' then we reinsert it in the active
@@ -239,24 +239,23 @@
 #define activate_task(p, rq) __activate_task(p, rq, NULL)
 static inline void __activate_task(task_t *p, runqueue_t *rq, task_t * parent)
 {
- unsigned long sleep_time = jiffies - p->sleep_timestamp;
- prio_array_t *array = rq->active;
+ long sleep_time = jiffies - p->last_run - 1;

- if (!parent && !rt_task(p) && sleep_time) {
+ if (!parent && !rt_task(p) && sleep_time > 0) {
 /*
 * This code gives a bonus to interactive tasks. We update
 * an 'average sleep time' value here, based on
- * sleep_timestamp. The more time a task spends sleeping,
+ * ->last_run. The more time a task spends sleeping,
 * the higher the average gets - and the higher the priority
 * boost gets as well.
 */
- p->sleep_timestamp = jiffies;
+ p->last_run = jiffies;
 p->sleep_avg += sleep_time;
 if (p->sleep_avg > MAX_SLEEP_AVG)
 p->sleep_avg = MAX_SLEEP_AVG;
 p->prio = effective_prio(p);
 }
- __enqueue_task(p, array, parent);
+ __enqueue_task(p, rq->active, parent);
 rq->nr_running++;
 }

@@ -374,9 +373,14 @@
 #endif
 if (old_state == TASK_UNINTERRUPTIBLE)
 rq->nr_uninterruptible--;
- activate_task(p, rq);
- if (p->prio < rq->curr->prio)
- resched_task(rq->curr);
+ if (sync) {
+ enqueue_task(p, rq->active);
+ rq->nr_running++;
+ } else {
+ activate_task(p, rq);
+ if (p->prio < rq->curr->prio)
+ resched_task(rq->curr);
+ }
 success = 1;
 }
 p->state = TASK_RUNNING;
@@ -453,7 +457,16 @@
 }

 p->cpu = smp_processor_id();
- __activate_task(p, rq, parent);
+ if (unlikely(!current->array)) {
+ __enqueue_task(p, rq->active, parent);
+ rq->nr_running++;
+ } else {
+ p->prio = current->prio;
+ list_add_tail(&p->run_list, &current->run_list);
+ p->array = current->array;
+ p->array->nr_active++;
+ rq->nr_running++;
+ }
 spin_unlock_irq(&rq->lock);
 }

@@ -586,6 +599,11 @@
 */
 if (p->prio < this_rq->curr->prio)
 resched = 1;
+ else {
+ if (p->prio == this_rq->curr->prio &&
+ p->time_slice > this_rq->curr->time_slice)
+ resched = 1;
+ }

 return resched;
 }
@@ -751,7 +769,7 @@
 */

 #define CAN_MIGRATE_TASK(p,rq,this_cpu) \
- ((jiffies - (p)->sleep_timestamp > cache_decay_ticks) && \
+ ((jiffies - (p)->last_run > cache_decay_ticks) && \
 ((p) != (rq)->curr) && \
 ((p)->cpus_allowed & (1UL << (this_cpu))))

@@ -813,9 +831,9 @@
 * increasing number of running tasks:
 */
 #define EXPIRED_STARVING(rq) \
- ((rq)->expired_timestamp && \
+ (STARVATION_LIMIT && ((rq)->expired_timestamp && \
 (jiffies - (rq)->expired_timestamp >= \
- STARVATION_LIMIT * ((rq)->nr_running) + 1))
+ STARVATION_LIMIT * ((rq)->nr_running) + 1)))

 /*
 * This function gets called by the timer code, with HZ frequency.
@@ -918,7 +936,7 @@
 rq = this_rq();

 release_kernel_lock(prev, smp_processor_id());
- prev->sleep_timestamp = jiffies;
+ prev->last_run = jiffies;
 spin_lock_irq(&rq->lock);

 switch (prev->state) {
@@ -1299,8 +1317,10 @@
 p->prio = MAX_USER_RT_PRIO-1 - p->rt_priority;
 else
 p->prio = p->static_prio;
- if (array)
- activate_task(p, task_rq(p));
+ if (array) {
+ enqueue_task(p, task_rq(p)->active);
+ task_rq(p)->nr_running++;
+ }

 out_unlock:
 task_rq_unlock(rq, &flags);
diff -urN linux-2.4.20/kernel/softirq.c linux-2.4.20-interactive/kernel/softirq.c
--- linux-2.4.20/kernel/softirq.c Sun Mar 16 23:38:03 2003
+++ linux-2.4.20-interactive/kernel/softirq.c Sun Mar 16 23:40:18 2003
@@ -99,10 +99,9 @@
 mask &= ~pending;
 goto restart;
 }
- __local_bh_enable();
-
 if (pending)
 wakeup_softirqd(cpu);
+ __local_bh_enable();
 }

 local_irq_restore(flags);