1 diff -Naur linux-2.6.13.3.old/include/linux/pkt_sched.h linux-2.6.13.3/include/linux/pkt_sched.h
2 --- linux-2.6.13.3.old/include/linux/pkt_sched.h        2005-10-23 17:39:16.000000000 -0700
3 +++ linux-2.6.13.3/include/linux/pkt_sched.h    2005-10-21 18:13:37.000000000 -0700
4 @@ -145,8 +145,35 @@
5   *
6   *     The only reason for this is efficiency, it is possible
7   *     to change these parameters in compile time.
8 + *     
9 + *     If you need to play with these values, use esfq instead.
10   */
11  
12 +/* ESFQ section */
13 +
14 +enum
15 +{
16 +        /* traditional */
17 +       TCA_SFQ_HASH_CLASSIC,
18 +       TCA_SFQ_HASH_DST,
19 +       TCA_SFQ_HASH_SRC,
20 +       TCA_SFQ_HASH_FWMARK,
21 +        /* direct */
22 +       TCA_SFQ_HASH_DSTDIR,
23 +       TCA_SFQ_HASH_SRCDIR,
24 +       TCA_SFQ_HASH_FWMARKDIR,
25 +};
26 +
27 +struct tc_esfq_qopt
28 +{
29 +       unsigned        quantum;        /* Bytes per round allocated to flow */
30 +       int             perturb_period; /* Period of hash perturbation */
31 +       __u32           limit;          /* Maximal packets in queue */
32 +       unsigned        divisor;        /* Hash divisor  */
33 +       unsigned        flows;          /* Maximal number of flows  */
34 +       unsigned        hash_kind;      /* Hash function to use for flow identification */
35 +};
36 +
37  /* RED section */
38  
39  enum
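For orientation, the tc_esfq_qopt structure added above is what userspace passes to the kernel in the TCA_OPTIONS attribute (esfq_dump() in sch_esfq.c below returns it the same way). A minimal C sketch of how such an option block might be filled follows; the values are illustrative only, and esfq_init() substitutes defaults (device MTU, divisor 1024, 128 flows) for any field left at zero:

	/* Hypothetical example only: filling the new qopt before handing it to
	 * the kernel as TCA_OPTIONS. Values are illustrative, not mandated defaults. */
	struct tc_esfq_qopt opt = {
		.quantum        = 1514,			/* bytes per round; 0 means "use the device MTU" */
		.perturb_period = 10,			/* seconds between hash perturbations (the kernel multiplies by HZ) */
		.limit          = 128,			/* maximal packets in queue (clamped to the number of flows) */
		.divisor        = 1024,			/* hash table size; the hash masks with (divisor - 1), so keep it a power of two */
		.flows          = 128,			/* maximal number of flows (slots) */
		.hash_kind      = TCA_SFQ_HASH_DST,	/* hash on destination IP address */
	};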
40 diff -Naur linux-2.6.13.3.old/net/sched/Kconfig linux-2.6.13.3/net/sched/Kconfig
41 --- linux-2.6.13.3.old/net/sched/Kconfig        2005-10-23 17:39:16.000000000 -0700
42 +++ linux-2.6.13.3/net/sched/Kconfig    2005-10-09 16:56:27.000000000 -0700
43 @@ -191,6 +191,28 @@
44           To compile this code as a module, choose M here: the
45           module will be called sch_sfq.
46  
47 +config NET_SCH_ESFQ
48 +       tristate "ESFQ queue"
49 +       depends on NET_SCHED
50 +       ---help---
51 +         Say Y here if you want to use the Enhanced Stochastic Fairness
52 +         Queueing (ESFQ) packet scheduling algorithm for some of your network
53 +         devices or as a leaf discipline for a classful qdisc such as HTB or
54 +         CBQ (see the top of <file:net/sched/sch_esfq.c> for details and
55 +         references to the SFQ algorithm).
56 +         
57 +         This is an enhanced SFQ version which allows you to control some
58 +         values hardcoded in the SFQ scheduler: queue depth, hash table size,
59 +         and queue limit.
60 +         
61 +         ESFQ also adds control to the hash function used to identify packet
62 +         flows. The original SFQ hashes by individual flow (TCP session or UDP
63 +         stream); ESFQ can hash by src or dst IP as well, which can be more
64 +         fair to users in some networking situations.
65 +         
66 +         To compile this code as a module, choose M here: the
67 +         module will be called sch_esfq.
68 +
69  config NET_SCH_TEQL
70         tristate "TEQL queue"
71         depends on NET_SCHED
72 diff -Naur linux-2.6.13.3.old/net/sched/Makefile linux-2.6.13.3/net/sched/Makefile
73 --- linux-2.6.13.3.old/net/sched/Makefile       2005-10-23 17:39:16.000000000 -0700
74 +++ linux-2.6.13.3/net/sched/Makefile   2005-10-09 16:56:27.000000000 -0700
75 @@ -23,6 +23,7 @@
76  obj-$(CONFIG_NET_SCH_INGRESS)  += sch_ingress.o 
77  obj-$(CONFIG_NET_SCH_DSMARK)   += sch_dsmark.o
78  obj-$(CONFIG_NET_SCH_SFQ)      += sch_sfq.o
79 +obj-$(CONFIG_NET_SCH_ESFQ)     += sch_esfq.o
80  obj-$(CONFIG_NET_SCH_TBF)      += sch_tbf.o
81  obj-$(CONFIG_NET_SCH_TEQL)     += sch_teql.o
82  obj-$(CONFIG_NET_SCH_PRIO)     += sch_prio.o
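The Kconfig and Makefile hunks above wire the new scheduler into the build: with the option enabled as a module, the obj-$(CONFIG_NET_SCH_ESFQ) rule builds sch_esfq.o as the sch_esfq module. Assuming no dependencies beyond NET_SCHED, the corresponding kernel .config line would simply be:

	CONFIG_NET_SCH_ESFQ=m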
83 diff -Naur linux-2.6.13.3.old/net/sched/sch_esfq.c linux-2.6.13.3/net/sched/sch_esfq.c
84 --- linux-2.6.13.3.old/net/sched/sch_esfq.c     1969-12-31 16:00:00.000000000 -0800
85 +++ linux-2.6.13.3/net/sched/sch_esfq.c 2005-10-23 17:32:19.000000000 -0700
86 @@ -0,0 +1,639 @@
87 +/*
88 + * net/sched/sch_esfq.c        Extended Stochastic Fairness Queueing discipline.
89 + *
90 + *             This program is free software; you can redistribute it and/or
91 + *             modify it under the terms of the GNU General Public License
92 + *             as published by the Free Software Foundation; either version
93 + *             2 of the License, or (at your option) any later version.
94 + *
95 + * Authors:    Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
96 + *
97 + * Changes:    Alexander Atanasov, <alex@ssi.bg>
98 + *             Added dynamic depth, limit, divisor, and hash_kind options.
99 + *             Added dst and src hashes.
100 + *
101 + *             Alexander Clouter, <alex@digriz.org.uk>
102 + *             Ported ESFQ to Linux 2.6.
103 + *
104 + *             Corey Hickey, <bugfood-c@fatooh.org>
105 + *             Maintenance of the Linux 2.6 port.
106 + *             Added fwmark hash (thanks to Robert Kurjata)
107 + *             Added direct hashing for src, dst, and fwmark.
108 + *             
109 + */
110 +
111 +#include <linux/config.h>
112 +#include <linux/module.h>
113 +#include <asm/uaccess.h>
114 +#include <asm/system.h>
115 +#include <linux/bitops.h>
116 +#include <linux/types.h>
117 +#include <linux/kernel.h>
118 +#include <linux/jiffies.h>
119 +#include <linux/string.h>
120 +#include <linux/mm.h>
121 +#include <linux/socket.h>
122 +#include <linux/sockios.h>
123 +#include <linux/in.h>
124 +#include <linux/errno.h>
125 +#include <linux/interrupt.h>
126 +#include <linux/if_ether.h>
127 +#include <linux/inet.h>
128 +#include <linux/netdevice.h>
129 +#include <linux/etherdevice.h>
130 +#include <linux/notifier.h>
131 +#include <linux/init.h>
132 +#include <net/ip.h>
133 +#include <linux/ipv6.h>
134 +#include <net/route.h>
135 +#include <linux/skbuff.h>
136 +#include <net/sock.h>
137 +#include <net/pkt_sched.h>
138 +
139 +
140 +/*     Stochastic Fairness Queueing algorithm.
141 +       For more comments, look at sch_sfq.c.
142 +       The difference is that you can change the limit, depth, and
143 +       hash table size, and choose among 7 hash types.
144 +       
145 +       classic:        same as in sch_sfq.c
146 +       dst:            destination IP address
147 +       src:            source IP address
148 +       fwmark:         netfilter mark value
149 +       dst_direct:
150 +       src_direct:
151 +       fwmark_direct:  direct hashing of the above sources
152 +       
153 +       TODO: 
154 +               make sfq_change work.
155 +*/
156 +
157 +
158 +/* This type should contain at least depth*2 values (depth is configurable in ESFQ) */
159 +typedef unsigned int esfq_index;
160 +
161 +struct esfq_head
162 +{
163 +       esfq_index      next;
164 +       esfq_index      prev;
165 +};
166 +
167 +struct esfq_sched_data
168 +{
169 +/* Parameters */
170 +       int             perturb_period;
171 +       unsigned        quantum;        /* Allotment per round: MUST BE >= MTU */
172 +       int             limit;
173 +       unsigned        depth;
174 +       unsigned        hash_divisor;
175 +       unsigned        hash_kind;
176 +/* Variables */
177 +       struct timer_list perturb_timer;
178 +       int             perturbation;
179 +       esfq_index      tail;           /* Index of current slot in round */
180 +       esfq_index      max_depth;      /* Maximal depth */
181 +
182 +       esfq_index      *ht;                    /* Hash table */
183 +       esfq_index      *next;                  /* Active slots link */
184 +       short           *allot;                 /* Current allotment per slot */
185 +       unsigned short  *hash;                  /* Hash value indexed by slots */
186 +       struct sk_buff_head     *qs;            /* Slot queue */
187 +       struct esfq_head        *dep;           /* Linked list of slots, indexed by depth */
188 +       unsigned        dyn_min;        /* For dynamic divisor adjustment; minimum value seen */
189 +       unsigned        dyn_max;        /*                                 maximum value seen */
190 +       unsigned        dyn_range;      /*                                 saved range */
191 +};
192 +
193 +static __inline__ unsigned esfq_hash_u32(struct esfq_sched_data *q,u32 h)
194 +{
195 +       int pert = q->perturbation;
196 +
197 +       if (pert)
198 +               h = (h<<pert) ^ (h>>(0x1F - pert));
199 +
200 +       h = ntohl(h) * 2654435761UL;
201 +       return h & (q->hash_divisor-1);
202 +}
203 +
204 +/* Hash input values directly into the "nearest" slot, taking into account the
205 + * range of input values seen. This is most useful when the hash table is at
206 + * least as large as the range of possible values. */
207 +static __inline__ unsigned esfq_hash_direct(struct esfq_sched_data *q, u32 h)
208 +{
209 +       /* adjust minimum and maximum */
210 +       if (h < q->dyn_min || h > q->dyn_max) {
211 +               q->dyn_min = h < q->dyn_min ? h : q->dyn_min;
212 +               q->dyn_max = h > q->dyn_max ? h : q->dyn_max;
213 +       
214 +               /* find new range */
215 +               if ((q->dyn_range = q->dyn_max - q->dyn_min) >= q->hash_divisor)
216 +                       printk(KERN_WARNING "ESFQ: (direct hash) Input range %u is larger than hash "
217 +                                       "table. See ESFQ README for details.\n", q->dyn_range);
218 +       }
219 +       
220 +       /* hash input values into slot numbers */
221 +       if (q->dyn_min == q->dyn_max)
222 +               return 0; /* only one value seen; avoid division by 0 */
223 +       else
224 +               return (h - q->dyn_min) * (q->hash_divisor - 1) / q->dyn_range;
225 +}
226 +
227 +static __inline__ unsigned esfq_fold_hash_classic(struct esfq_sched_data *q, u32 h, u32 h1)
228 +{
229 +       int pert = q->perturbation;
230 +
231 +       /* Have we any rotation primitives? If not, WHY? */
232 +       h ^= (h1<<pert) ^ (h1>>(0x1F - pert));
233 +       h ^= h>>10;
234 +       return h & (q->hash_divisor-1);
235 +}
236 +
237 +static unsigned esfq_hash(struct esfq_sched_data *q, struct sk_buff *skb)
238 +{
239 +       u32 h, h2;
240 +       u32 hs;
241 +       u32 nfm;
242 +
243 +       switch (skb->protocol) {
244 +       case __constant_htons(ETH_P_IP):
245 +       {
246 +               struct iphdr *iph = skb->nh.iph;
247 +               h = iph->daddr;
248 +               hs = iph->saddr;
249 +               nfm = skb->nfmark;
250 +               h2 = hs^iph->protocol;
251 +               if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
252 +                   (iph->protocol == IPPROTO_TCP ||
253 +                    iph->protocol == IPPROTO_UDP ||
254 +                    iph->protocol == IPPROTO_ESP))
255 +                       h2 ^= *(((u32*)iph) + iph->ihl);
256 +               break;
257 +       }
258 +       case __constant_htons(ETH_P_IPV6):
259 +       {
260 +               struct ipv6hdr *iph = skb->nh.ipv6h;
261 +               h = iph->daddr.s6_addr32[3];
262 +               hs = iph->saddr.s6_addr32[3];
263 +               nfm = skb->nfmark;
264 +               h2 = hs^iph->nexthdr;
265 +               if (iph->nexthdr == IPPROTO_TCP ||
266 +                   iph->nexthdr == IPPROTO_UDP ||
267 +                   iph->nexthdr == IPPROTO_ESP)
268 +                       h2 ^= *(u32*)&iph[1];
269 +               break;
270 +       }
271 +       default:
272 +               h = (u32)(unsigned long)skb->dst;
273 +               hs = (u32)(unsigned long)skb->sk;
274 +               nfm = skb->nfmark;
275 +               h2 = hs^skb->protocol;
276 +       }
277 +       switch(q->hash_kind)
278 +       {
279 +       case TCA_SFQ_HASH_CLASSIC:
280 +               return esfq_fold_hash_classic(q, h, h2);
281 +       case TCA_SFQ_HASH_DST:
282 +               return esfq_hash_u32(q,h);
283 +       case TCA_SFQ_HASH_DSTDIR:
284 +               return esfq_hash_direct(q, ntohl(h));
285 +       case TCA_SFQ_HASH_SRC:
286 +               return esfq_hash_u32(q,hs);
287 +       case TCA_SFQ_HASH_SRCDIR:
288 +               return esfq_hash_direct(q, ntohl(hs));
289 +#ifdef CONFIG_NETFILTER
290 +       case TCA_SFQ_HASH_FWMARK:
291 +               return esfq_hash_u32(q,nfm);
292 +       case TCA_SFQ_HASH_FWMARKDIR:
293 +               return esfq_hash_direct(q,nfm);
294 +#endif
295 +       default:
296 +               if (net_ratelimit())
297 +                       printk(KERN_WARNING "ESFQ: Unknown hash method. Falling back to classic.\n");
298 +       }
299 +       return esfq_fold_hash_classic(q, h, h2);
300 +}
301 +
302 +static inline void esfq_link(struct esfq_sched_data *q, esfq_index x)
303 +{
304 +       esfq_index p, n;
305 +       int d = q->qs[x].qlen + q->depth;
306 +
307 +       p = d;
308 +       n = q->dep[d].next;
309 +       q->dep[x].next = n;
310 +       q->dep[x].prev = p;
311 +       q->dep[p].next = q->dep[n].prev = x;
312 +}
313 +
314 +static inline void esfq_dec(struct esfq_sched_data *q, esfq_index x)
315 +{
316 +       esfq_index p, n;
317 +
318 +       n = q->dep[x].next;
319 +       p = q->dep[x].prev;
320 +       q->dep[p].next = n;
321 +       q->dep[n].prev = p;
322 +
323 +       if (n == p && q->max_depth == q->qs[x].qlen + 1)
324 +               q->max_depth--;
325 +
326 +       esfq_link(q, x);
327 +}
328 +
329 +static inline void esfq_inc(struct esfq_sched_data *q, esfq_index x)
330 +{
331 +       esfq_index p, n;
332 +       int d;
333 +
334 +       n = q->dep[x].next;
335 +       p = q->dep[x].prev;
336 +       q->dep[p].next = n;
337 +       q->dep[n].prev = p;
338 +       d = q->qs[x].qlen;
339 +       if (q->max_depth < d)
340 +               q->max_depth = d;
341 +
342 +       esfq_link(q, x);
343 +}
344 +
345 +static unsigned int esfq_drop(struct Qdisc *sch)
346 +{
347 +       struct esfq_sched_data *q = qdisc_priv(sch);
348 +       esfq_index d = q->max_depth;
349 +       struct sk_buff *skb;
350 +       unsigned int len;
351 +
352 +       /* Queue is full! Find the longest slot and
353 +          drop a packet from it */
354 +
355 +       if (d > 1) {
356 +               esfq_index x = q->dep[d+q->depth].next;
357 +               skb = q->qs[x].prev;
358 +               len = skb->len;
359 +               __skb_unlink(skb, &q->qs[x]);
360 +               kfree_skb(skb);
361 +               esfq_dec(q, x);
362 +               sch->q.qlen--;
363 +               sch->qstats.drops++;
364 +               return len;
365 +       }
366 +
367 +       if (d == 1) {
368 +               /* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
369 +               d = q->next[q->tail];
370 +               q->next[q->tail] = q->next[d];
371 +               q->allot[q->next[d]] += q->quantum;
372 +               skb = q->qs[d].prev;
373 +               len = skb->len;
374 +               __skb_unlink(skb, &q->qs[d]);
375 +               kfree_skb(skb);
376 +               esfq_dec(q, d);
377 +               sch->q.qlen--;
378 +               q->ht[q->hash[d]] = q->depth;
379 +               sch->qstats.drops++;
380 +               return len;
381 +       }
382 +
383 +       return 0;
384 +}
385 +
386 +static int
387 +esfq_enqueue(struct sk_buff *skb, struct Qdisc* sch)
388 +{
389 +       struct esfq_sched_data *q = qdisc_priv(sch);
390 +       unsigned hash = esfq_hash(q, skb);
391 +       unsigned depth = q->depth;
392 +       esfq_index x;
393 +
394 +       x = q->ht[hash];
395 +       if (x == depth) {
396 +               q->ht[hash] = x = q->dep[depth].next;
397 +               q->hash[x] = hash;
398 +       }
399 +       __skb_queue_tail(&q->qs[x], skb);
400 +       esfq_inc(q, x);
401 +       if (q->qs[x].qlen == 1) {               /* The flow is new */
402 +               if (q->tail == depth) { /* It is the first flow */
403 +                       q->tail = x;
404 +                       q->next[x] = x;
405 +                       q->allot[x] = q->quantum;
406 +               } else {
407 +                       q->next[x] = q->next[q->tail];
408 +                       q->next[q->tail] = x;
409 +                       q->tail = x;
410 +               }
411 +       }
412 +       if (++sch->q.qlen < q->limit-1) {
413 +               sch->bstats.bytes += skb->len;
414 +               sch->bstats.packets++;
415 +               return 0;
416 +       }
417 +
418 +       esfq_drop(sch);
419 +       return NET_XMIT_CN;
420 +}
421 +
422 +static int
423 +esfq_requeue(struct sk_buff *skb, struct Qdisc* sch)
424 +{
425 +       struct esfq_sched_data *q = qdisc_priv(sch);
426 +       unsigned hash = esfq_hash(q, skb);
427 +       unsigned depth = q->depth;
428 +       esfq_index x;
429 +
430 +       x = q->ht[hash];
431 +       if (x == depth) {
432 +               q->ht[hash] = x = q->dep[depth].next;
433 +               q->hash[x] = hash;
434 +       }
435 +       __skb_queue_head(&q->qs[x], skb);
436 +       esfq_inc(q, x);
437 +       if (q->qs[x].qlen == 1) {               /* The flow is new */
438 +               if (q->tail == depth) { /* It is the first flow */
439 +                       q->tail = x;
440 +                       q->next[x] = x;
441 +                       q->allot[x] = q->quantum;
442 +               } else {
443 +                       q->next[x] = q->next[q->tail];
444 +                       q->next[q->tail] = x;
445 +                       q->tail = x;
446 +               }
447 +       }
448 +       if (++sch->q.qlen < q->limit - 1) {
449 +               sch->qstats.requeues++;
450 +               return 0;
451 +       }
452 +
453 +       sch->qstats.drops++;
454 +       esfq_drop(sch);
455 +       return NET_XMIT_CN;
456 +}
457 +
458 +
459 +
460 +
461 +static struct sk_buff *
462 +esfq_dequeue(struct Qdisc* sch)
463 +{
464 +       struct esfq_sched_data *q = qdisc_priv(sch);
465 +       struct sk_buff *skb;
466 +       unsigned depth = q->depth;
467 +       esfq_index a, old_a;
468 +
469 +       /* No active slots */
470 +       if (q->tail == depth)
471 +               return NULL;
472 +       
473 +       a = old_a = q->next[q->tail];
474 +       
475 +       /* Grab packet */
476 +       skb = __skb_dequeue(&q->qs[a]);
477 +       esfq_dec(q, a);
478 +       sch->q.qlen--;
479 +       
480 +       /* Is the slot empty? */
481 +       if (q->qs[a].qlen == 0) {
482 +               q->ht[q->hash[a]] = depth;
483 +               a = q->next[a];
484 +               if (a == old_a) {
485 +                       q->tail = depth;
486 +                       return skb;
487 +               }
488 +               q->next[q->tail] = a;
489 +               q->allot[a] += q->quantum;
490 +       } else if ((q->allot[a] -= skb->len) <= 0) {
491 +               q->tail = a;
492 +               a = q->next[a];
493 +               q->allot[a] += q->quantum;
494 +       }
495 +       
496 +       return skb;
497 +}
498 +
499 +static void
500 +esfq_reset(struct Qdisc* sch)
501 +{
502 +       struct sk_buff *skb;
503 +
504 +       while ((skb = esfq_dequeue(sch)) != NULL)
505 +               kfree_skb(skb);
506 +}
507 +
508 +static void esfq_perturbation(unsigned long arg)
509 +{
510 +       struct Qdisc *sch = (struct Qdisc*)arg;
511 +       struct esfq_sched_data *q = qdisc_priv(sch);
512 +
513 +       q->perturbation = net_random()&0x1F;
514 +
515 +       if (q->perturb_period) {
516 +               q->perturb_timer.expires = jiffies + q->perturb_period;
517 +               add_timer(&q->perturb_timer);
518 +       }
519 +}
520 +
521 +static int esfq_change(struct Qdisc *sch, struct rtattr *opt)
522 +{
523 +       struct esfq_sched_data *q = qdisc_priv(sch);
524 +       struct tc_esfq_qopt *ctl = RTA_DATA(opt);
525 +       int old_perturb = q->perturb_period;
526 +       
527 +       if (opt->rta_len < RTA_LENGTH(sizeof(*ctl)))
528 +               return -EINVAL;
529 +       
530 +       sch_tree_lock(sch);
531 +       q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
532 +       q->perturb_period = ctl->perturb_period*HZ;
533 +//     q->hash_divisor = ctl->divisor;
534 +//     q->tail = q->limit = q->depth = ctl->flows;
535 +       
536 +       if (ctl->limit)
537 +               q->limit = min_t(u32, ctl->limit, q->depth);
538 +       
539 +       if (ctl->hash_kind) {
540 +               q->hash_kind = ctl->hash_kind;
541 +               if (q->hash_kind !=  TCA_SFQ_HASH_CLASSIC)
542 +                       q->perturb_period = 0;
543 +       }
544 +       
545 +       // is sch_tree_lock enough to do this ?
546 +       while (sch->q.qlen >= q->limit-1)
547 +               esfq_drop(sch);
548 +       
549 +       if (old_perturb)
550 +               del_timer(&q->perturb_timer);
551 +       if (q->perturb_period) {
552 +               q->perturb_timer.expires = jiffies + q->perturb_period;
553 +               add_timer(&q->perturb_timer);
554 +       } else {
555 +               q->perturbation = 0;
556 +       }
557 +       sch_tree_unlock(sch);
558 +       return 0;
559 +}
560 +
561 +static int esfq_init(struct Qdisc *sch, struct rtattr *opt)
562 +{
563 +       struct esfq_sched_data *q = qdisc_priv(sch);
564 +       struct tc_esfq_qopt *ctl;
565 +       esfq_index p = ~0UL/2;
566 +       int i;
567 +       
568 +       if (opt && opt->rta_len < RTA_LENGTH(sizeof(*ctl)))
569 +               return -EINVAL;
570 +
571 +       init_timer(&q->perturb_timer);
572 +       q->perturb_timer.data = (unsigned long)sch;
573 +       q->perturb_timer.function = esfq_perturbation;
574 +       q->perturbation = 0;
575 +       q->hash_kind = TCA_SFQ_HASH_CLASSIC;
576 +       q->max_depth = 0;
577 +       q->dyn_min = ~0U; /* maximum value for this type */
578 +       q->dyn_max = 0;  /* dyn_min/dyn_max will be set properly upon first packet */
579 +       if (opt == NULL) {
580 +               q->quantum = psched_mtu(sch->dev);
581 +               q->perturb_period = 0;
582 +               q->hash_divisor = 1024;
583 +               q->tail = q->limit = q->depth = 128;
584 +               
585 +       } else {
586 +               ctl = RTA_DATA(opt);
587 +               q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
588 +               q->perturb_period = ctl->perturb_period*HZ;
589 +               q->hash_divisor = ctl->divisor ? : 1024;
590 +               q->tail = q->limit = q->depth = ctl->flows ? : 128;
591 +               
592 +               if ( q->depth > p - 1 )
593 +                       return -EINVAL;
594 +               
595 +               if (ctl->limit)
596 +                       q->limit = min_t(u32, ctl->limit, q->depth);
597 +               
598 +               if (ctl->hash_kind) {
599 +                       q->hash_kind = ctl->hash_kind;
600 +               }
601 +               
602 +               if (q->perturb_period) {
603 +                       q->perturb_timer.expires = jiffies + q->perturb_period;
604 +                       add_timer(&q->perturb_timer);
605 +               }
606 +       }
607 +       
608 +       q->ht = kmalloc(q->hash_divisor*sizeof(esfq_index), GFP_KERNEL);
609 +       if (!q->ht)
610 +               goto err_case;
611 +               
612 +       q->dep = kmalloc((1+q->depth*2)*sizeof(struct esfq_head), GFP_KERNEL);
613 +       if (!q->dep)
614 +               goto err_case;
615 +       q->next = kmalloc(q->depth*sizeof(esfq_index), GFP_KERNEL);
616 +       if (!q->next)
617 +               goto err_case;
618 +       
619 +       q->allot = kmalloc(q->depth*sizeof(short), GFP_KERNEL);
620 +       if (!q->allot)
621 +               goto err_case;
622 +       q->hash = kmalloc(q->depth*sizeof(unsigned short), GFP_KERNEL);
623 +       if (!q->hash)
624 +               goto err_case;
625 +       q->qs = kmalloc(q->depth*sizeof(struct sk_buff_head), GFP_KERNEL);
626 +       if (!q->qs)
627 +               goto err_case;
628 +       
629 +       for (i=0; i< q->hash_divisor; i++)
630 +               q->ht[i] = q->depth;
631 +       for (i=0; i<q->depth; i++) {
632 +               skb_queue_head_init(&q->qs[i]);
633 +               q->dep[i+q->depth].next = i+q->depth;
634 +               q->dep[i+q->depth].prev = i+q->depth;
635 +       }
636 +       
637 +       for (i=0; i<q->depth; i++)
638 +               esfq_link(q, i);
639 +       return 0;
640 +err_case:
641 +       if (q->ht)
642 +               kfree(q->ht);
643 +       if (q->dep)
644 +               kfree(q->dep);
645 +       if (q->next)
646 +               kfree(q->next);
647 +       if (q->allot)
648 +               kfree(q->allot);
649 +       if (q->hash)
650 +               kfree(q->hash);
651 +       if (q->qs)
652 +               kfree(q->qs);
653 +       return -ENOBUFS;
654 +}
655 +
656 +static void esfq_destroy(struct Qdisc *sch)
657 +{
658 +       struct esfq_sched_data *q = qdisc_priv(sch);
659 +       del_timer(&q->perturb_timer);
660 +       if(q->ht)
661 +               kfree(q->ht);
662 +       if(q->dep)
663 +               kfree(q->dep);
664 +       if(q->next)
665 +               kfree(q->next);
666 +       if(q->allot)
667 +               kfree(q->allot);
668 +       if(q->hash)
669 +               kfree(q->hash);
670 +       if(q->qs)
671 +               kfree(q->qs);
672 +}
673 +
674 +static int esfq_dump(struct Qdisc *sch, struct sk_buff *skb)
675 +{
676 +       struct esfq_sched_data *q = qdisc_priv(sch);
677 +       unsigned char    *b = skb->tail;
678 +       struct tc_esfq_qopt opt;
679 +
680 +       opt.quantum = q->quantum;
681 +       opt.perturb_period = q->perturb_period/HZ;
682 +
683 +       opt.limit = q->limit;
684 +       opt.divisor = q->hash_divisor;
685 +       opt.flows = q->depth;
686 +       opt.hash_kind = q->hash_kind;
687 +
688 +       RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
689 +
690 +       return skb->len;
691 +
692 +rtattr_failure:
693 +       skb_trim(skb, b - skb->data);
694 +       return -1;
695 +}
696 +
697 +static struct Qdisc_ops esfq_qdisc_ops =
698 +{
699 +       .next           =       NULL,
700 +       .cl_ops         =       NULL,
701 +       .id             =       "esfq",
702 +       .priv_size      =       sizeof(struct esfq_sched_data),
703 +       .enqueue        =       esfq_enqueue,
704 +       .dequeue        =       esfq_dequeue,
705 +       .requeue        =       esfq_requeue,
706 +       .drop           =       esfq_drop,
707 +       .init           =       esfq_init,
708 +       .reset          =       esfq_reset,
709 +       .destroy        =       esfq_destroy,
710 +       .change         =       NULL, /* esfq_change - needs more work */
711 +       .dump           =       esfq_dump,
712 +       .owner          =       THIS_MODULE,
713 +};
714 +
715 +static int __init esfq_module_init(void)
716 +{
717 +       return register_qdisc(&esfq_qdisc_ops);
718 +}
719 +static void __exit esfq_module_exit(void) 
720 +{
721 +       unregister_qdisc(&esfq_qdisc_ops);
722 +}
723 +module_init(esfq_module_init)
724 +module_exit(esfq_module_exit)
725 +MODULE_LICENSE("GPL");
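The direct hash variants are one of the additions listed in the changelog above (see esfq_hash_direct()): instead of mixing bits and masking, they stretch the observed range of input values across the hash table. The following is a small, self-contained user-space sketch of that arithmetic, not part of the patch; dyn_min, dyn_max and divisor are plain variables standing in for the q->dyn_* fields and q->hash_divisor, and the kernel's "input range larger than hash table" warning is omitted:

	/* direct_hash_demo.c -- user-space mirror of the slot mapping in esfq_hash_direct().
	 * Build with: cc -o direct_hash_demo direct_hash_demo.c */
	#include <stdio.h>

	static unsigned divisor = 16;		/* stands in for q->hash_divisor */
	static unsigned dyn_min = ~0U;		/* stands in for q->dyn_min */
	static unsigned dyn_max = 0;		/* stands in for q->dyn_max */

	static unsigned hash_direct(unsigned h)
	{
		unsigned range;

		/* widen the observed input range, as the kernel code does */
		if (h < dyn_min)
			dyn_min = h;
		if (h > dyn_max)
			dyn_max = h;
		range = dyn_max - dyn_min;

		if (dyn_min == dyn_max)
			return 0;		/* only one value seen; avoid division by zero */
		return (h - dyn_min) * (divisor - 1) / range;
	}

	int main(void)
	{
		unsigned marks[] = { 100, 110, 101, 105 };	/* e.g. fwmark values */
		unsigned i;

		for (i = 0; i < sizeof(marks)/sizeof(marks[0]); i++)
			printf("input %u -> slot %u\n", marks[i], hash_direct(marks[i]));
		return 0;
	}

With a 16-slot table and inputs between 100 and 110, the four values land on slots 0, 15, 1 and 7: a narrow input range is spread across the table rather than being folded by a multiplicative hash, which is what makes the *_DIR hash kinds useful when the table is at least as large as the set of expected values.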