]> git.pld-linux.org Git - packages/kernel.git/blob - 01_linux-2.6.0-test11-esfq-imq.diff
- obsolete
[packages/kernel.git] / 01_linux-2.6.0-test11-esfq-imq.diff
1 diff -u -U 2 -r -N -d linux-2.6.0-test11.orig/drivers/net/Kconfig linux-2.6.0-test11/drivers/net/Kconfig
2 --- linux-2.6.0-test11.orig/drivers/net/Kconfig 2003-11-30 20:43:42.000000000 +0000
3 +++ linux-2.6.0-test11/drivers/net/Kconfig      2003-12-02 23:28:09.000000000 +0000
4 @@ -85,6 +85,20 @@
5           To compile this driver as a module, choose M here: the module
6           will be called eql.  If unsure, say N.
7  
8 +config IMQ
9 +       tristate "IMQ (intermediate queueing device) support"
10 +       depends on NETDEVICES && NETFILTER
11 +       ---help---
12 +         The imq device(s) is used as a placeholder for QoS queueing disciplines.
13 +         Every packet entering/leaving the ip stack can be directed through
14 +         the imq device where it's enqueued/dequeued to the attached qdisc.
15 +         This allows you to treat network devices as classes and distribute
16 +         bandwidth among them. Iptables is used to specify through which imq
17 +         device, if any, packets travel.
18 +         
19 +         To compile this driver as a module, choose M here: the module
20 +         will be called imq.  If unsure, say N.
21 +
22  config TUN
23         tristate "Universal TUN/TAP device driver support"
24         depends on NETDEVICES
25 diff -u -U 2 -r -N -d linux-2.6.0-test11.orig/drivers/net/Makefile linux-2.6.0-test11/drivers/net/Makefile
26 --- linux-2.6.0-test11.orig/drivers/net/Makefile        2003-11-30 20:43:42.000000000 +0000
27 +++ linux-2.6.0-test11/drivers/net/Makefile     2003-12-02 19:54:09.000000000 +0000
28 @@ -109,6 +109,7 @@
29  endif
30  
31  obj-$(CONFIG_DUMMY) += dummy.o
32 +obj-$(CONFIG_IMQ) += imq.o
33  obj-$(CONFIG_DE600) += de600.o
34  obj-$(CONFIG_DE620) += de620.o
35  obj-$(CONFIG_AT1500) += lance.o
36 diff -u -U 2 -r -N -d linux-2.6.0-test11.orig/drivers/net/imq.c linux-2.6.0-test11/drivers/net/imq.c
37 --- linux-2.6.0-test11.orig/drivers/net/imq.c   1970-01-01 01:00:00.000000000 +0100
38 +++ linux-2.6.0-test11/drivers/net/imq.c        2003-12-02 23:52:55.000000000 +0000
39 @@ -0,0 +1,321 @@
40 +/*
41 + *             Pseudo-driver for the intermediate queue device.
42 + *
43 + *             This program is free software; you can redistribute it and/or
44 + *             modify it under the terms of the GNU General Public License
45 + *             as published by the Free Software Foundation; either version
46 + *             2 of the License, or (at your option) any later version.
47 + *
48 + * Authors:    Patrick McHardy, <kaber@trash.net>
49 + *
50 + *            The first version was written by Martin Devera, <devik@cdi.cz>
51 + *
52 + * Credits:    Jan Rafaj <imq2t@cedric.vabo.cz>
53 + *              - Update patch to 2.4.21
54 + *             Sebastian Strollo <sstrollo@nortelnetworks.com>
55 + *              - Fix "Dead-loop on netdevice imq"-issue
56 + */
57 +
58 +#include <linux/kernel.h>
59 +#include <linux/module.h>
60 +#include <linux/config.h>
61 +#include <linux/skbuff.h>
62 +#include <linux/netdevice.h>
63 +#include <linux/rtnetlink.h>
64 +#include <linux/if_arp.h>
65 +#include <linux/netfilter.h>
66 +#include <linux/netfilter_ipv4.h>
67 +#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
68 +#include <linux/netfilter_ipv6.h>
69 +#endif
70 +#include <linux/imq.h>
71 +#include <net/pkt_sched.h>
72 +
73 +static nf_hookfn imq_nf_hook;
74 +
75 +static struct nf_hook_ops imq_ingress_ipv4 = {
76 +       .hook           = imq_nf_hook,
77 +       .owner          = THIS_MODULE,
78 +       .pf             = PF_INET,
79 +       .hooknum        = NF_IP_PRE_ROUTING,
80 +       .priority       = NF_IP_PRI_MANGLE + 1
81 +};
82 +
83 +static struct nf_hook_ops imq_egress_ipv4 = {
84 +       .hook           = imq_nf_hook,
85 +       .owner          = THIS_MODULE,
86 +       .pf             = PF_INET,
87 +       .hooknum        = NF_IP_POST_ROUTING,
88 +       .priority       = NF_IP_PRI_LAST
89 +};
90 +
91 +#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
92 +static struct nf_hook_ops imq_ingress_ipv6 = {
93 +       .hook           = imq_nf_hook,
94 +       .owner          = THIS_MODULE,
95 +       .pf             = PF_INET6,
96 +       .hooknum        = NF_IP6_PRE_ROUTING,
97 +       .priority       = NF_IP6_PRI_MANGLE + 1
98 +};
99 +
100 +static struct nf_hook_ops imq_egress_ipv6 = {
101 +       .hook           = imq_nf_hook,
102 +       .owner          = THIS_MODULE,
103 +       .pf             = PF_INET6,
104 +       .hooknum        = NF_IP6_POST_ROUTING,
105 +       .priority       = NF_IP6_PRI_LAST
106 +};
107 +#endif
108 +
109 +static unsigned int numdevs = 2;
110 +
111 +MODULE_PARM(numdevs, "i");
112 +MODULE_PARM_DESC(numdevs, "number of imq devices");
113 +
114 +static struct net_device *imq_devs;
115 +
116 +
117 +static struct net_device_stats *imq_get_stats(struct net_device *dev)
118 +{
119 +       return (struct net_device_stats *)dev->priv;
120 +}
121 +
122 +/* called for packets kfree'd in qdiscs at places other than enqueue */
123 +static void imq_skb_destructor(struct sk_buff *skb)
124 +{
125 +       struct nf_info *info = skb->nf_info;
126 +
127 +       if (info) {
128 +               if (info->indev)
129 +                       dev_put(info->indev);
130 +               if (info->outdev)
131 +                       dev_put(info->outdev);
132 +               kfree(info);
133 +       }
134 +}
135 +
136 +static int imq_dev_xmit(struct sk_buff *skb, struct net_device *dev)
137 +{
138 +       struct net_device_stats *stats = (struct net_device_stats*) dev->priv;
139 +       
140 +       stats->tx_bytes += skb->len;
141 +       stats->tx_packets++;
142 +
143 +       skb->imq_flags = 0;
144 +       skb->destructor = NULL;
145 +       
146 +       dev->trans_start = jiffies;
147 +       nf_reinject(skb, skb->nf_info, NF_ACCEPT);
148 +       return 0;
149 +}
150 +
151 +static int imq_nf_queue(struct sk_buff *skb, struct nf_info *info,
152 +                       void *data)
153 +{
154 +       struct net_device *dev;
155 +       struct net_device_stats *stats;
156 +       struct sk_buff *skb2 = NULL;
157 +       struct Qdisc *q;
158 +       unsigned int index = skb->imq_flags&IMQ_F_IFMASK;
159 +       int ret = -1;
160 +
161 +       if (index > numdevs) 
162 +               return -1;
163 +       
164 +       dev = imq_devs + index;
165 +       if (!(dev->flags & IFF_UP)) {
166 +               skb->imq_flags = 0;
167 +               nf_reinject(skb, info, NF_ACCEPT);
168 +               return 0;
169 +       }
170 +       dev->last_rx = jiffies;
171 +
172 +       if (skb->destructor) {
173 +               skb2 = skb;
174 +               skb = skb_clone(skb, GFP_ATOMIC);
175 +               if (!skb)
176 +                       return -1;
177 +       }
178 +       skb->nf_info = info;
179 +
180 +       stats = (struct net_device_stats *)dev->priv;
181 +       stats->rx_bytes+= skb->len;
182 +       stats->rx_packets++;
183 +       
184 +       spin_lock_bh(&dev->queue_lock);
185 +       q = dev->qdisc;
186 +       if (q->enqueue) {
187 +               q->enqueue(skb_get(skb), q);
188 +               if (skb_shared(skb)) {
189 +                       skb->destructor = imq_skb_destructor;
190 +                       kfree_skb(skb);
191 +                       ret = 0;
192 +               }
193 +       }
194 +       if (spin_is_locked(&dev->xmit_lock))
195 +               netif_schedule(dev);
196 +       else
197 +               qdisc_run(dev);
198 +       spin_unlock_bh(&dev->queue_lock);
199 +
200 +       if (skb2)
201 +               kfree_skb(ret ? skb : skb2);
202 +
203 +       return ret;
204 +}
205 +
206 +static unsigned int imq_nf_hook(unsigned int hook, struct sk_buff **pskb,
207 +                  const struct net_device *indev,
208 +                  const struct net_device *outdev,
209 +                  int (*okfn)(struct sk_buff *))
210 +{
211 +       if ((*pskb)->imq_flags & IMQ_F_ENQUEUE)
212 +               return NF_QUEUE;
213 +
214 +       return NF_ACCEPT;
215 +}
216 +
217 +
218 +static int __init imq_init_hooks(void)
219 +{
220 +       int err;
221 +
222 +       if ((err = nf_register_queue_handler(PF_INET, imq_nf_queue, NULL)))
223 +               goto err1;
224 +       if ((err = nf_register_hook(&imq_ingress_ipv4)))
225 +               goto err2;
226 +       if ((err = nf_register_hook(&imq_egress_ipv4)))
227 +               goto err3;
228 +#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
229 +       if ((err = nf_register_queue_handler(PF_INET6, imq_nf_queue, NULL)))
230 +               goto err4;
231 +       if ((err = nf_register_hook(&imq_ingress_ipv6)))
232 +               goto err5;
233 +       if ((err = nf_register_hook(&imq_egress_ipv6)))
234 +               goto err6;
235 +#endif
236 +       
237 +       return 0;
238 +       
239 +#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
240 +err6:
241 +       nf_unregister_hook(&imq_ingress_ipv6);
242 +err5:
243 +       nf_unregister_queue_handler(PF_INET6);
244 +err4:
245 +       nf_unregister_hook(&imq_egress_ipv4);
246 +#endif
247 +err3:
248 +       nf_unregister_hook(&imq_ingress_ipv4);
249 +err2:
250 +       nf_unregister_queue_handler(PF_INET);
251 +err1:
252 +       return err;
253 +}
254 +
255 +static void __exit imq_unhook(void)
256 +{
257 +       nf_unregister_hook(&imq_ingress_ipv4);
258 +       nf_unregister_hook(&imq_egress_ipv4);
259 +       nf_unregister_queue_handler(PF_INET);
260 +#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
261 +       nf_unregister_hook(&imq_ingress_ipv6);
262 +       nf_unregister_hook(&imq_egress_ipv6);
263 +       nf_unregister_queue_handler(PF_INET6);
264 +#endif
265 +}
266 +
267 +static int __init imq_dev_init(struct net_device *dev)
268 +{
269 +       dev->hard_start_xmit    = imq_dev_xmit;
270 +       dev->type               = ARPHRD_VOID;
271 +       dev->mtu                = 1500;
272 +       dev->tx_queue_len       = 30;
273 +       dev->flags              = IFF_NOARP;
274 +       dev->priv = kmalloc(sizeof(struct net_device_stats), GFP_KERNEL);
275 +       if (dev->priv == NULL)
276 +               return -ENOMEM;
277 +       memset(dev->priv, 0, sizeof(struct net_device_stats));
278 +       dev->get_stats          = imq_get_stats;
279 +
280 +       return 0;
281 +}
282 +
283 +static void imq_dev_uninit(struct net_device *dev)
284 +{
285 +       kfree(dev->priv);
286 +}
287 +
288 +static int __init imq_init_devs(void)
289 +{
290 +       struct net_device *dev;
291 +       int i;
292 +
293 +       if (!numdevs || numdevs > IMQ_MAX_DEVS) {
294 +               printk(KERN_ERR "numdevs has to be between 1 and %u\n",
295 +                      IMQ_MAX_DEVS);
296 +               return -EINVAL;
297 +       }
298 +
299 +       imq_devs = kmalloc(sizeof(struct net_device) * numdevs, GFP_KERNEL);
300 +       if (!imq_devs)
301 +               return -ENOMEM;
302 +       memset(imq_devs, 0, sizeof(struct net_device) * numdevs);
303 +
304 +       /* we start counting at zero */
305 +       numdevs--;
306 +
307 +       for (i = 0, dev = imq_devs; i <= numdevs; i++, dev++) {
308 +               SET_MODULE_OWNER(dev);
309 +               strcpy(dev->name, "imq%d");
310 +               dev->init   = imq_dev_init;
311 +               dev->uninit = imq_dev_uninit;
312 +
313 +               if (register_netdev(dev) < 0)
314 +                       goto err_register;
315 +       }
316 +       return 0;
317 +
318 +err_register:
319 +       for (; i; i--)
320 +               unregister_netdev(--dev);
321 +       kfree(imq_devs);
322 +       return -EIO;
323 +}
324 +
325 +static void imq_cleanup_devs(void)
326 +{
327 +       int i;
328 +       struct net_device *dev = imq_devs;
329 +       
330 +       for (i = 0; i <= numdevs; i++)
331 +               unregister_netdev(dev++);
332 +
333 +       kfree(imq_devs);
334 +}
335 +
336 +static int __init imq_init_module(void)
337 +{
338 +       int err;
339 +
340 +       if ((err = imq_init_devs()))
341 +               return err;
342 +       if ((err = imq_init_hooks())) {
343 +               imq_cleanup_devs();
344 +               return err;
345 +       }
346 +
347 +       printk(KERN_INFO "imq driver loaded.\n");
348 +
349 +       return 0;
350 +}
351 +
352 +static void __exit imq_cleanup_module(void)
353 +{
354 +       imq_unhook();
355 +       imq_cleanup_devs();
356 +}
357 +
358 +module_init(imq_init_module);
359 +module_exit(imq_cleanup_module);
360 +MODULE_LICENSE("GPL");
361 diff -u -U 2 -r -N -d linux-2.6.0-test11.orig/include/linux/imq.h linux-2.6.0-test11/include/linux/imq.h
362 --- linux-2.6.0-test11.orig/include/linux/imq.h 1970-01-01 01:00:00.000000000 +0100
363 +++ linux-2.6.0-test11/include/linux/imq.h      2003-12-02 19:54:09.000000000 +0000
364 @@ -0,0 +1,9 @@
365 +#ifndef _IMQ_H
366 +#define _IMQ_H
367 +
368 +#define IMQ_MAX_DEVS   16
369 +
370 +#define IMQ_F_IFMASK   0x7f
371 +#define IMQ_F_ENQUEUE  0x80
372 +
373 +#endif /* _IMQ_H */
374 diff -u -U 2 -r -N -d linux-2.6.0-test11.orig/include/linux/netfilter_ipv4/ipt_IMQ.h linux-2.6.0-test11/include/linux/netfilter_ipv4/ipt_IMQ.h
375 --- linux-2.6.0-test11.orig/include/linux/netfilter_ipv4/ipt_IMQ.h      1970-01-01 01:00:00.000000000 +0100
376 +++ linux-2.6.0-test11/include/linux/netfilter_ipv4/ipt_IMQ.h   2003-12-02 19:54:06.000000000 +0000
377 @@ -0,0 +1,8 @@
378 +#ifndef _IPT_IMQ_H
379 +#define _IPT_IMQ_H
380 +
381 +struct ipt_imq_info {
382 +       unsigned int todev;     /* target imq device */
383 +};
384 +
385 +#endif /* _IPT_IMQ_H */
386 diff -u -U 2 -r -N -d linux-2.6.0-test11.orig/include/linux/pkt_sched.h linux-2.6.0-test11/include/linux/pkt_sched.h
387 --- linux-2.6.0-test11.orig/include/linux/pkt_sched.h   2003-11-30 20:43:31.000000000 +0000
388 +++ linux-2.6.0-test11/include/linux/pkt_sched.h        2003-12-02 19:53:57.000000000 +0000
389 @@ -157,6 +157,13 @@
390  
391  /* SFQ section */
392  
393 +enum
394 +{
395 +       TCA_SFQ_HASH_CLASSIC,
396 +       TCA_SFQ_HASH_DST,
397 +       TCA_SFQ_HASH_SRC,
398 +};
399 +
400  struct tc_sfq_qopt
401  {
402         unsigned        quantum;        /* Bytes per round allocated to flow */
403 @@ -164,6 +171,7 @@
404         __u32           limit;          /* Maximal packets in queue */
405         unsigned        divisor;        /* Hash divisor  */
406         unsigned        flows;          /* Maximal number of flows  */
407 +       unsigned        hash_kind;      /* Hash function to use for flow identification */
408  };
409  
410  /*
411 @@ -173,6 +181,8 @@
412   *
413   *     The only reason for this is efficiency, it is possible
414   *     to change these parameters in compile time.
415 + *     
416 + *     If you need to play with these values use esfq.
417   */
418  
419  /* RED section */
420 diff -u -U 2 -r -N -d linux-2.6.0-test11.orig/include/linux/skbuff.h linux-2.6.0-test11/include/linux/skbuff.h
421 --- linux-2.6.0-test11.orig/include/linux/skbuff.h      2003-11-30 20:43:31.000000000 +0000
422 +++ linux-2.6.0-test11/include/linux/skbuff.h   2003-12-02 19:54:09.000000000 +0000
423 @@ -112,6 +112,9 @@
424  #endif
425  
426  #endif
427 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
428 +struct nf_info;
429 +#endif
430  
431  struct sk_buff_head {
432         /* These two members must be first. */
433 @@ -234,6 +237,7 @@
434                                 data_len,
435                                 csum;
436         unsigned char           local_df,
437 +                               imq_flags,
438                                 cloned,
439                                 pkt_type,
440                                 ip_summed;
441 @@ -261,6 +265,9 @@
442  #ifdef CONFIG_NET_SCHED
443         __u32                   tc_index;               /* traffic control index */
444  #endif
445 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
446 +       struct nf_info          *nf_info;
447 +#endif
448  
449         /* These elements must be at the end, see alloc_skb() for details.  */
450         unsigned int            truesize;
451 diff -u -U 2 -r -N -d linux-2.6.0-test11.orig/net/core/skbuff.c linux-2.6.0-test11/net/core/skbuff.c
452 --- linux-2.6.0-test11.orig/net/core/skbuff.c   2003-11-30 20:43:52.000000000 +0000
453 +++ linux-2.6.0-test11/net/core/skbuff.c        2003-12-02 19:54:09.000000000 +0000
454 @@ -152,6 +152,13 @@
455         skb_shinfo(skb)->tso_size = 0;
456         skb_shinfo(skb)->tso_segs = 0;
457         skb_shinfo(skb)->frag_list = NULL;
458 +
459 +/* probably doomed to failure */
460 +#if defined(CONFIG_IMQ) || defined (CONFIG_IMQ_MODULE)
461 +       skb->imq_flags = 0;
462 +       skb->nf_info = NULL;
463 +#endif
464 +                
465  out:
466         return skb;
467  nodata:
468 @@ -313,6 +320,10 @@
469  #ifdef CONFIG_NET_SCHED
470         C(tc_index);
471  #endif
472 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
473 +       C(imq_flags);
474 +       C(nf_info);
475 +#endif
476         C(truesize);
477         atomic_set(&n->users, 1);
478         C(head);
479 @@ -368,6 +379,10 @@
480  #ifdef CONFIG_NET_SCHED
481         new->tc_index   = old->tc_index;
482  #endif
483 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
484 +       new->imq_flags=old->imq_flags;
485 +       new->nf_info=old->nf_info;
486 +#endif
487         atomic_set(&new->users, 1);
488  }
489  
490 diff -u -U 2 -r -N -d linux-2.6.0-test11.orig/net/ipv4/netfilter/Kconfig linux-2.6.0-test11/net/ipv4/netfilter/Kconfig
491 --- linux-2.6.0-test11.orig/net/ipv4/netfilter/Kconfig  2003-11-30 20:43:52.000000000 +0000
492 +++ linux-2.6.0-test11/net/ipv4/netfilter/Kconfig       2003-12-02 19:54:06.000000000 +0000
493 @@ -501,6 +501,15 @@
494  
495           To compile it as a module, choose M here.  If unsure, say N.
496  
497 +config IP_NF_TARGET_IMQ
498 +       tristate "IMQ target support"
499 +       depends on IP_NF_IPTABLES
500 +       ---help---
501 +         This option adds a `IMQ' target which is used to specify if and
502 +         to which imq device packets should get enqueued/dequeued.
503 +
504 +         To compile it as a module, choose M here.  If unsure, say N.
505 +
506  config IP_NF_TARGET_TCPMSS
507         tristate "TCPMSS target support"
508         depends on IP_NF_IPTABLES
509 diff -u -U 2 -r -N -d linux-2.6.0-test11.orig/net/ipv4/netfilter/Makefile linux-2.6.0-test11/net/ipv4/netfilter/Makefile
510 --- linux-2.6.0-test11.orig/net/ipv4/netfilter/Makefile 2003-11-30 20:43:52.000000000 +0000
511 +++ linux-2.6.0-test11/net/ipv4/netfilter/Makefile      2003-12-02 19:54:06.000000000 +0000
512 @@ -72,6 +72,7 @@
513  obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o
514  obj-$(CONFIG_IP_NF_TARGET_DSCP) += ipt_DSCP.o
515  obj-$(CONFIG_IP_NF_TARGET_MARK) += ipt_MARK.o
516 +obj-$(CONFIG_IP_NF_TARGET_IMQ) += ipt_IMQ.o
517  obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
518  obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
519  obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o
520 diff -u -U 2 -r -N -d linux-2.6.0-test11.orig/net/ipv4/netfilter/ipt_IMQ.c linux-2.6.0-test11/net/ipv4/netfilter/ipt_IMQ.c
521 --- linux-2.6.0-test11.orig/net/ipv4/netfilter/ipt_IMQ.c        1970-01-01 01:00:00.000000000 +0100
522 +++ linux-2.6.0-test11/net/ipv4/netfilter/ipt_IMQ.c     2003-12-03 00:01:18.000000000 +0000
523 @@ -0,0 +1,76 @@
524 +/* This target marks packets to be enqueued to an imq device */
525 +#include <linux/module.h>
526 +#include <linux/skbuff.h>
527 +#include <linux/netfilter_ipv4/ip_tables.h>
528 +#include <linux/netfilter_ipv4/ipt_IMQ.h>
529 +#include <linux/imq.h>
530 +
531 +static unsigned int imq_target(struct sk_buff **pskb,
532 +                              const struct net_device *in,
533 +                              const struct net_device *out,
534 +                              unsigned int hooknum,
535 +                              const void *targinfo,
536 +                              void *userinfo)
537 +{
538 +       struct ipt_imq_info *mr = (struct ipt_imq_info*)targinfo;
539 +
540 +       (*pskb)->imq_flags = mr->todev | IMQ_F_ENQUEUE;
541 +       (*pskb)->nfcache |= NFC_ALTERED;
542 +
543 +       return IPT_CONTINUE;
544 +}
545 +
546 +static int imq_checkentry(const char *tablename,
547 +                         const struct ipt_entry *e,
548 +                         void *targinfo,
549 +                         unsigned int targinfosize,
550 +                         unsigned int hook_mask)
551 +{
552 +       struct ipt_imq_info *mr;
553 +
554 +       if (targinfosize != IPT_ALIGN(sizeof(struct ipt_imq_info))) {
555 +               printk(KERN_WARNING "IMQ: invalid targinfosize\n");
556 +               return 0;
557 +       }
558 +       mr = (struct ipt_imq_info*)targinfo;
559 +
560 +       if (strcmp(tablename, "mangle") != 0) {
561 +               printk(KERN_WARNING
562 +                      "IMQ: IMQ can only be called from \"mangle\" table, not \"%s\"\n",
563 +                      tablename);
564 +               return 0;
565 +       }
566 +       
567 +       if (mr->todev > IMQ_MAX_DEVS) {
568 +               printk(KERN_WARNING
569 +                      "IMQ: invalid device specified, highest is %u\n",
570 +                      IMQ_MAX_DEVS);
571 +               return 0;
572 +       }
573 +       
574 +       return 1;
575 +}
576 +
577 +static struct ipt_target ipt_imq_reg = {
578 +       .name           = "IMQ",
579 +       .target         = imq_target,
580 +       .checkentry     = imq_checkentry,
581 +       .me             = THIS_MODULE
582 +};
583 +
584 +static int __init init(void)
585 +{
586 +       if (ipt_register_target(&ipt_imq_reg))
587 +               return -EINVAL;
588 +
589 +       return 0;
590 +}
591 +
592 +static void __exit fini(void)
593 +{
594 +       ipt_unregister_target(&ipt_imq_reg);
595 +}
596 +
597 +module_init(init);
598 +module_exit(fini);
599 +MODULE_LICENSE("GPL");
600 diff -u -U 2 -r -N -d linux-2.6.0-test11.orig/net/sched/Kconfig linux-2.6.0-test11/net/sched/Kconfig
601 --- linux-2.6.0-test11.orig/net/sched/Kconfig   2003-11-30 20:43:54.000000000 +0000
602 +++ linux-2.6.0-test11/net/sched/Kconfig        2003-12-02 19:53:57.000000000 +0000
603 @@ -105,6 +105,24 @@
604           To compile this code as a module, choose M here: the
605           module will be called sch_sfq.
606  
607 +config NET_SCH_ESFQ
608 +       tristate "ESFQ queue"
609 +       depends on NET_SCHED
610 +       ---help---
611 +         Say Y here if you want to use the Enhanced Stochastic Fairness
612 +         Queueing (ESFQ) packet scheduling algorithm for some of your network
613 +         devices or as a leaf discipline for the CBQ scheduling algorithm (see
614 +         the top of <file:net/sched/sch_esfq.c> for details and references
615 +         about the SFQ algorithm).
616 +         
617 +         This is an enhanced SFQ version which allows you to control the
618 +         hardcoded values in the SFQ scheduler: queue depth, hash table size,
619 +         queues limit. Also adds control to the hash function used to identify
620 +         packet flows. Hash by src or dst ip and original sfq hash.
621 +         
622 +         To compile this code as a module, choose M here: the
623 +         module will be called sch_esfq.
624 +
625  config NET_SCH_TEQL
626         tristate "TEQL queue"
627         depends on NET_SCHED
628 diff -u -U 2 -r -N -d linux-2.6.0-test11.orig/net/sched/Makefile linux-2.6.0-test11/net/sched/Makefile
629 --- linux-2.6.0-test11.orig/net/sched/Makefile  2003-11-30 20:43:54.000000000 +0000
630 +++ linux-2.6.0-test11/net/sched/Makefile       2003-12-02 19:53:57.000000000 +0000
631 @@ -15,6 +15,7 @@
632  obj-$(CONFIG_NET_SCH_HFSC)     += sch_hfsc.o
633  obj-$(CONFIG_NET_SCH_HTB)      += sch_htb.o
634  obj-$(CONFIG_NET_SCH_SFQ)      += sch_sfq.o
635 +obj-$(CONFIG_NET_SCH_ESFQ)     += sch_esfq.o
636  obj-$(CONFIG_NET_SCH_RED)      += sch_red.o
637  obj-$(CONFIG_NET_SCH_TBF)      += sch_tbf.o
638  obj-$(CONFIG_NET_SCH_PRIO)     += sch_prio.o
639 diff -u -U 2 -r -N -d linux-2.6.0-test11.orig/net/sched/sch_api.c linux-2.6.0-test11/net/sched/sch_api.c
640 --- linux-2.6.0-test11.orig/net/sched/sch_api.c 2003-11-30 20:43:54.000000000 +0000
641 +++ linux-2.6.0-test11/net/sched/sch_api.c      2003-12-02 19:53:57.000000000 +0000
642 @@ -1235,6 +1235,9 @@
643  #ifdef CONFIG_NET_SCH_SFQ
644         INIT_QDISC(sfq);
645  #endif
646 +#ifdef CONFIG_NET_SCH_ESFQ
647 +       INIT_QDISC(esfq);
648 +#endif
649  #ifdef CONFIG_NET_SCH_TBF
650         INIT_QDISC(tbf);
651  #endif
652 diff -u -U 2 -r -N -d linux-2.6.0-test11.orig/net/sched/sch_esfq.c linux-2.6.0-test11/net/sched/sch_esfq.c
653 --- linux-2.6.0-test11.orig/net/sched/sch_esfq.c        1970-01-01 01:00:00.000000000 +0100
654 +++ linux-2.6.0-test11/net/sched/sch_esfq.c     2003-12-03 00:18:29.000000000 +0000
655 @@ -0,0 +1,588 @@
656 +/*
657 + * net/sched/sch_esfq.c        Extended Stochastic Fairness Queueing discipline.
658 + *
659 + *             This program is free software; you can redistribute it and/or
660 + *             modify it under the terms of the GNU General Public License
661 + *             as published by the Free Software Foundation; either version
662 + *             2 of the License, or (at your option) any later version.
663 + *
664 + * Authors:    Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
665 + *
666 + * Changes:    Alexander Atanasov, <alex@ssi.bg>
667 + *             Added dynamic depth,limit,divisor,hash_kind options.
668 + *             Added dst and src hashes.
669 + */
670 +
671 +#include <linux/config.h>
672 +#include <linux/module.h>
673 +#include <asm/uaccess.h>
674 +#include <asm/system.h>
675 +#include <asm/bitops.h>
676 +#include <linux/types.h>
677 +#include <linux/kernel.h>
678 +#include <linux/sched.h>
679 +#include <linux/string.h>
680 +#include <linux/mm.h>
681 +#include <linux/socket.h>
682 +#include <linux/sockios.h>
683 +#include <linux/in.h>
684 +#include <linux/errno.h>
685 +#include <linux/interrupt.h>
686 +#include <linux/if_ether.h>
687 +#include <linux/inet.h>
688 +#include <linux/netdevice.h>
689 +#include <linux/etherdevice.h>
690 +#include <linux/notifier.h>
691 +#include <linux/init.h>
692 +#include <net/ip.h>
693 +#include <linux/ipv6.h>
694 +#include <net/route.h>
695 +#include <linux/skbuff.h>
696 +#include <net/sock.h>
697 +#include <net/pkt_sched.h>
698 +
699 +
700 +/*     Stochastic Fairness Queuing algorithm.
701 +       For more comments look at sch_sfq.c.
702 +       The difference is that you can change limit, depth,
703 +       hash table size and choose 3 hash types.
704 +       
705 +       classic:        same as in sch_sfq.c
706 +       dst:            destination IP address
707 +       src:            source IP address
708 +       
709 +       TODO: 
710 +               make sfq_change work.
711 +*/
712 +
713 +
714 +/* This type should contain at least SFQ_DEPTH*2 values */
715 +typedef unsigned int esfq_index;
716 +
717 +struct esfq_head
718 +{
719 +       esfq_index      next;
720 +       esfq_index      prev;
721 +};
722 +
723 +struct esfq_sched_data
724 +{
725 +/* Parameters */
726 +       int             perturb_period;
727 +       unsigned        quantum;        /* Allotment per round: MUST BE >= MTU */
728 +       int             limit;
729 +       unsigned        depth;
730 +       unsigned        hash_divisor;
731 +       unsigned        hash_kind;
732 +/* Variables */
733 +       struct timer_list perturb_timer;
734 +       int             perturbation;
735 +       esfq_index      tail;           /* Index of current slot in round */
736 +       esfq_index      max_depth;      /* Maximal depth */
737 +
738 +       esfq_index      *ht;                    /* Hash table */
739 +       esfq_index      *next;                  /* Active slots link */
740 +       short           *allot;                 /* Current allotment per slot */
741 +       unsigned short  *hash;                  /* Hash value indexed by slots */
742 +       struct sk_buff_head     *qs;            /* Slot queue */
743 +       struct esfq_head        *dep;           /* Linked list of slots, indexed by depth */
744 +};
745 +
746 +static __inline__ unsigned esfq_hash_u32(struct esfq_sched_data *q,u32 h)
747 +{
748 +       int pert = q->perturbation;
749 +
750 +       if (pert)
751 +               h = (h<<pert) ^ (h>>(0x1F - pert));
752 +
753 +       h = ntohl(h) * 2654435761UL;
754 +       return h & (q->hash_divisor-1);
755 +}
756 +
757 +static __inline__ unsigned esfq_fold_hash_classic(struct esfq_sched_data *q, u32 h, u32 h1)
758 +{
759 +       int pert = q->perturbation;
760 +
761 +       /* Have we any rotation primitives? If not, WHY? */
762 +       h ^= (h1<<pert) ^ (h1>>(0x1F - pert));
763 +       h ^= h>>10;
764 +       return h & (q->hash_divisor-1);
765 +}
766 +
767 +#ifndef IPPROTO_ESP
768 +#define IPPROTO_ESP 50
769 +#endif
770 +
771 +static unsigned esfq_hash(struct esfq_sched_data *q, struct sk_buff *skb)
772 +{
773 +       u32 h, h2;
774 +       u32 hs;
775 +
776 +       switch (skb->protocol) {
777 +       case __constant_htons(ETH_P_IP):
778 +       {
779 +               struct iphdr *iph = skb->nh.iph;
780 +               h = iph->daddr;
781 +               hs = iph->saddr;
782 +               h2 = hs^iph->protocol;
783 +               if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
784 +                   (iph->protocol == IPPROTO_TCP ||
785 +                    iph->protocol == IPPROTO_UDP ||
786 +                    iph->protocol == IPPROTO_ESP))
787 +                       h2 ^= *(((u32*)iph) + iph->ihl);
788 +               break;
789 +       }
790 +       case __constant_htons(ETH_P_IPV6):
791 +       {
792 +               struct ipv6hdr *iph = skb->nh.ipv6h;
793 +               h = iph->daddr.s6_addr32[3];
794 +               hs = iph->saddr.s6_addr32[3];
795 +               h2 = hs^iph->nexthdr;
796 +               if (iph->nexthdr == IPPROTO_TCP ||
797 +                   iph->nexthdr == IPPROTO_UDP ||
798 +                   iph->nexthdr == IPPROTO_ESP)
799 +                       h2 ^= *(u32*)&iph[1];
800 +               break;
801 +       }
802 +       default:
803 +               h = (u32)(unsigned long)skb->dst;
804 +               hs = (u32)(unsigned long)skb->sk;
805 +               h2 = hs^skb->protocol;
806 +       }
807 +       switch(q->hash_kind)
808 +       {
809 +       case TCA_SFQ_HASH_CLASSIC:
810 +               return esfq_fold_hash_classic(q, h, h2);
811 +       case TCA_SFQ_HASH_DST:
812 +               return esfq_hash_u32(q,h);
813 +       case TCA_SFQ_HASH_SRC:
814 +               return esfq_hash_u32(q,hs);
815 +       default:
816 +               if (net_ratelimit())
817 +                       printk(KERN_DEBUG "esfq unknown hash method, fallback to classic\n");
818 +       }
819 +       return esfq_fold_hash_classic(q, h, h2);
820 +}
821 +
822 +extern __inline__ void esfq_link(struct esfq_sched_data *q, esfq_index x)
823 +{
824 +       esfq_index p, n;
825 +       int d = q->qs[x].qlen + q->depth;
826 +
827 +       p = d;
828 +       n = q->dep[d].next;
829 +       q->dep[x].next = n;
830 +       q->dep[x].prev = p;
831 +       q->dep[p].next = q->dep[n].prev = x;
832 +}
833 +
834 +extern __inline__ void esfq_dec(struct esfq_sched_data *q, esfq_index x)
835 +{
836 +       esfq_index p, n;
837 +
838 +       n = q->dep[x].next;
839 +       p = q->dep[x].prev;
840 +       q->dep[p].next = n;
841 +       q->dep[n].prev = p;
842 +
843 +       if (n == p && q->max_depth == q->qs[x].qlen + 1)
844 +               q->max_depth--;
845 +
846 +       esfq_link(q, x);
847 +}
848 +
849 +extern __inline__ void esfq_inc(struct esfq_sched_data *q, esfq_index x)
850 +{
851 +       esfq_index p, n;
852 +       int d;
853 +
854 +       n = q->dep[x].next;
855 +       p = q->dep[x].prev;
856 +       q->dep[p].next = n;
857 +       q->dep[n].prev = p;
858 +       d = q->qs[x].qlen;
859 +       if (q->max_depth < d)
860 +               q->max_depth = d;
861 +
862 +       esfq_link(q, x);
863 +}
864 +
865 +static unsigned int esfq_drop(struct Qdisc *sch)
866 +{
867 +       struct esfq_sched_data *q = (struct esfq_sched_data *)sch->data;
868 +       esfq_index d = q->max_depth;
869 +       struct sk_buff *skb;
870 +
871 +       /* Queue is full! Find the longest slot and
872 +          drop a packet from it */
873 +
874 +       if (d > 1) {
875 +               esfq_index x = q->dep[d+q->depth].next;
876 +               skb = q->qs[x].prev;
877 +               __skb_unlink(skb, &q->qs[x]);
878 +               kfree_skb(skb);
879 +               esfq_dec(q, x);
880 +               sch->q.qlen--;
881 +               sch->stats.drops++;
882 +               return 1;
883 +       }
884 +
885 +       if (d == 1) {
886 +               /* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
887 +               d = q->next[q->tail];
888 +               q->next[q->tail] = q->next[d];
889 +               q->allot[q->next[d]] += q->quantum;
890 +               skb = q->qs[d].prev;
891 +               __skb_unlink(skb, &q->qs[d]);
892 +               kfree_skb(skb);
893 +               esfq_dec(q, d);
894 +               sch->q.qlen--;
895 +               q->ht[q->hash[d]] = q->depth;
896 +               sch->stats.drops++;
897 +               return 1;
898 +       }
899 +
900 +       return 0;
901 +}
902 +
903 +static int
904 +esfq_enqueue(struct sk_buff *skb, struct Qdisc* sch)
905 +{
906 +       struct esfq_sched_data *q = (struct esfq_sched_data *)sch->data;
907 +       unsigned hash = esfq_hash(q, skb);
908 +       unsigned depth = q->depth;
909 +       esfq_index x;
910 +
911 +       x = q->ht[hash];
912 +       if (x == depth) {
913 +               q->ht[hash] = x = q->dep[depth].next;
914 +               q->hash[x] = hash;
915 +       }
916 +       __skb_queue_tail(&q->qs[x], skb);
917 +       esfq_inc(q, x);
918 +       if (q->qs[x].qlen == 1) {               /* The flow is new */
919 +               if (q->tail == depth) { /* It is the first flow */
920 +                       q->tail = x;
921 +                       q->next[x] = x;
922 +                       q->allot[x] = q->quantum;
923 +               } else {
924 +                       q->next[x] = q->next[q->tail];
925 +                       q->next[q->tail] = x;
926 +                       q->tail = x;
927 +               }
928 +       }
929 +       if (++sch->q.qlen < q->limit-1) {
930 +               sch->stats.bytes += skb->len;
931 +               sch->stats.packets++;
932 +               return 0;
933 +       }
934 +
935 +       esfq_drop(sch);
936 +       return NET_XMIT_CN;
937 +}
938 +
939 +static int
940 +esfq_requeue(struct sk_buff *skb, struct Qdisc* sch)
941 +{
942 +       struct esfq_sched_data *q = (struct esfq_sched_data *)sch->data;
943 +       unsigned hash = esfq_hash(q, skb);
944 +       unsigned depth = q->depth;
945 +       esfq_index x;
946 +
947 +       x = q->ht[hash];
948 +       if (x == depth) {
949 +               q->ht[hash] = x = q->dep[depth].next;
950 +               q->hash[x] = hash;
951 +       }
952 +       __skb_queue_head(&q->qs[x], skb);
953 +       esfq_inc(q, x);
954 +       if (q->qs[x].qlen == 1) {               /* The flow is new */
955 +               if (q->tail == depth) { /* It is the first flow */
956 +                       q->tail = x;
957 +                       q->next[x] = x;
958 +                       q->allot[x] = q->quantum;
959 +               } else {
960 +                       q->next[x] = q->next[q->tail];
961 +                       q->next[q->tail] = x;
962 +                       q->tail = x;
963 +               }
964 +       }
965 +       if (++sch->q.qlen < q->limit - 1)
966 +               return 0;
967 +
968 +       sch->stats.drops++;
969 +       esfq_drop(sch);
970 +       return NET_XMIT_CN;
971 +}
972 +
973 +
974 +
975 +
976 +static struct sk_buff *
977 +esfq_dequeue(struct Qdisc* sch)
978 +{
979 +       struct esfq_sched_data *q = (struct esfq_sched_data *)sch->data;
980 +       struct sk_buff *skb;
981 +       unsigned depth = q->depth;
982 +       esfq_index a, old_a;
983 +
984 +       /* No active slots */
985 +       if (q->tail == depth)
986 +               return NULL;
987 +       
988 +       a = old_a = q->next[q->tail];
989 +       
990 +       /* Grab packet */
991 +       skb = __skb_dequeue(&q->qs[a]);
992 +       esfq_dec(q, a);
993 +       sch->q.qlen--;
994 +       
995 +       /* Is the slot empty? */
996 +       if (q->qs[a].qlen == 0) {
997 +               a = q->next[a];
998 +               if (a == old_a) {
999 +                       q->tail = depth;
1000 +                       return skb;
1001 +               }
1002 +               q->next[q->tail] = a;
1003 +               q->allot[a] += q->quantum;
1004 +       } else if ((q->allot[a] -= skb->len) <= 0) {
1005 +               q->tail = a;
1006 +               a = q->next[a];
1007 +               q->allot[a] += q->quantum;
1008 +       }
1009 +       
1010 +       return skb;
1011 +}
1012 +
1013 +static void
1014 +esfq_reset(struct Qdisc* sch)
1015 +{
1016 +       struct sk_buff *skb;
1017 +
1018 +       while ((skb = esfq_dequeue(sch)) != NULL)
1019 +               kfree_skb(skb);
1020 +}
1021 +
1022 +static void esfq_perturbation(unsigned long arg)
1023 +{
1024 +       struct Qdisc *sch = (struct Qdisc*)arg;
1025 +       struct esfq_sched_data *q = (struct esfq_sched_data *)sch->data;
1026 +
1027 +       q->perturbation = net_random()&0x1F;
1028 +       q->perturb_timer.expires = jiffies + q->perturb_period;
1029 +
1030 +       if (q->perturb_period) {
1031 +               q->perturb_timer.expires = jiffies + q->perturb_period;
1032 +               add_timer(&q->perturb_timer);
1033 +       }
1034 +}
1035 +
1036 +static int esfq_change(struct Qdisc *sch, struct rtattr *opt)
1037 +{
1038 +       struct esfq_sched_data *q = (struct esfq_sched_data *)sch->data;
1039 +       struct tc_sfq_qopt *ctl = RTA_DATA(opt);
1040 +       int old_perturb = q->perturb_period;
1041 +       
1042 +       if (opt->rta_len < RTA_LENGTH(sizeof(*ctl)))
1043 +               return -EINVAL;
1044 +       
1045 +       sch_tree_lock(sch);
1046 +       q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
1047 +       q->perturb_period = ctl->perturb_period*HZ;
1048 +//     q->hash_divisor = ctl->divisor;
1049 +//     q->tail = q->limit = q->depth = ctl->flows;
1050 +       
1051 +       if (ctl->limit)
1052 +               q->limit = min_t(u32, ctl->limit, q->depth);
1053 +       
1054 +       if (ctl->hash_kind) {
1055 +               q->hash_kind = ctl->hash_kind;
1056 +               if (q->hash_kind !=  TCA_SFQ_HASH_CLASSIC)
1057 +                       q->perturb_period = 0;
1058 +       }
1059 +       
1060 +       // is sch_tree_lock enough to do this ?
1061 +       while (sch->q.qlen >= q->limit-1)
1062 +               esfq_drop(sch);
1063 +       
1064 +       if (old_perturb)
1065 +               del_timer(&q->perturb_timer);
1066 +       if (q->perturb_period) {
1067 +               q->perturb_timer.expires = jiffies + q->perturb_period;
1068 +               add_timer(&q->perturb_timer);
1069 +       } else {
1070 +               q->perturbation = 0;
1071 +       }
1072 +       sch_tree_unlock(sch);
1073 +       return 0;
1074 +}
1075 +
1076 +static int esfq_init(struct Qdisc *sch, struct rtattr *opt)
1077 +{
1078 +       struct esfq_sched_data *q = (struct esfq_sched_data *)sch->data;
1079 +       struct tc_sfq_qopt *ctl;
1080 +       esfq_index p = ~0UL/2;
1081 +       int i;
1082 +       
1083 +       if (opt && opt->rta_len < RTA_LENGTH(sizeof(*ctl)))
1084 +               return -EINVAL;
1085 +
1086 +       q->perturb_timer.data = (unsigned long)sch;
1087 +       q->perturb_timer.function = esfq_perturbation;
1088 +       init_timer(&q->perturb_timer);
1089 +       q->perturbation = 0;
1090 +       q->hash_kind = TCA_SFQ_HASH_CLASSIC;
1091 +       q->max_depth = 0;
1092 +       if (opt == NULL) {
1093 +               q->quantum = psched_mtu(sch->dev);
1094 +               q->perturb_period = 0;
1095 +               q->hash_divisor = 1024;
1096 +               q->tail = q->limit = q->depth = 128;
1097 +               
1098 +       } else {
1099 +               ctl = RTA_DATA(opt);
1100 +               q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
1101 +               q->perturb_period = ctl->perturb_period*HZ;
1102 +               q->hash_divisor = ctl->divisor ? : 1024;
1103 +               q->tail = q->limit = q->depth = ctl->flows ? : 128;
1104 +               
1105 +               if ( q->depth > p - 1 )
1106 +                       return -EINVAL;
1107 +               
1108 +               if (ctl->limit)
1109 +                       q->limit = min_t(u32, ctl->limit, q->depth);
1110 +               
1111 +               if (ctl->hash_kind) {
1112 +                       q->hash_kind = ctl->hash_kind;
1113 +               }
1114 +               
1115 +               if (q->perturb_period) {
1116 +                       q->perturb_timer.expires = jiffies + q->perturb_period;
1117 +                       add_timer(&q->perturb_timer);
1118 +               }
1119 +       }
1120 +       
1121 +       q->ht = kmalloc(q->hash_divisor*sizeof(esfq_index), GFP_KERNEL);
1122 +       if (!q->ht)
1123 +               goto err_case;
1124 +               
1125 +       q->dep = kmalloc((1+q->depth*2)*sizeof(struct esfq_head), GFP_KERNEL);
1126 +       if (!q->dep)
1127 +               goto err_case;
1128 +       q->next = kmalloc(q->depth*sizeof(esfq_index), GFP_KERNEL);
1129 +       if (!q->next)
1130 +               goto err_case;
1131 +       
1132 +       q->allot = kmalloc(q->depth*sizeof(short), GFP_KERNEL);
1133 +       if (!q->allot)
1134 +               goto err_case;
1135 +       q->hash = kmalloc(q->depth*sizeof(unsigned short), GFP_KERNEL);
1136 +       if (!q->hash)
1137 +               goto err_case;
1138 +       q->qs = kmalloc(q->depth*sizeof(struct sk_buff_head), GFP_KERNEL);
1139 +       if (!q->qs)
1140 +               goto err_case;
1141 +       
1142 +       for (i=0; i< q->hash_divisor; i++)
1143 +               q->ht[i] = q->depth;
1144 +       for (i=0; i<q->depth; i++) {
1145 +               skb_queue_head_init(&q->qs[i]);
1146 +               q->dep[i+q->depth].next = i+q->depth;
1147 +               q->dep[i+q->depth].prev = i+q->depth;
1148 +       }
1149 +       
1150 +       for (i=0; i<q->depth; i++)
1151 +               esfq_link(q, i);
1152 +       MOD_INC_USE_COUNT;
1153 +       return 0;
1154 +err_case:
1155 +       if (q->ht)
1156 +               kfree(q->ht);
1157 +       if (q->dep)
1158 +               kfree(q->dep);
1159 +       if (q->next)
1160 +               kfree(q->next);
1161 +       if (q->allot)
1162 +               kfree(q->allot);
1163 +       if (q->hash)
1164 +               kfree(q->hash);
1165 +       if (q->qs)
1166 +               kfree(q->qs);
1167 +       return -ENOBUFS;
1168 +}
1169 +
1170 +static void esfq_destroy(struct Qdisc *sch)
1171 +{
1172 +       struct esfq_sched_data *q = (struct esfq_sched_data *)sch->data;
1173 +       del_timer(&q->perturb_timer);
1174 +       if(q->ht)
1175 +               kfree(q->ht);
1176 +       if(q->dep)
1177 +               kfree(q->dep);
1178 +       if(q->next)
1179 +               kfree(q->next);
1180 +       if(q->allot)
1181 +               kfree(q->allot);
1182 +       if(q->hash)
1183 +               kfree(q->hash);
1184 +       if(q->qs)
1185 +               kfree(q->qs);
1186 +       MOD_DEC_USE_COUNT;
1187 +}
1188 +
1189 +static int esfq_dump(struct Qdisc *sch, struct sk_buff *skb)
1190 +{
1191 +       struct esfq_sched_data *q = (struct esfq_sched_data *)sch->data;
1192 +       unsigned char    *b = skb->tail;
1193 +       struct tc_sfq_qopt opt;
1194 +
1195 +       opt.quantum = q->quantum;
1196 +       opt.perturb_period = q->perturb_period/HZ;
1197 +
1198 +       opt.limit = q->limit;
1199 +       opt.divisor = q->hash_divisor;
1200 +       opt.flows = q->depth;
1201 +       opt.hash_kind = q->hash_kind;
1202 +
1203 +       RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
1204 +
1205 +       return skb->len;
1206 +
1207 +rtattr_failure:
1208 +       skb_trim(skb, b - skb->data);
1209 +       return -1;
1210 +}
1211 +
1212 +struct Qdisc_ops esfq_qdisc_ops =
1213 +{
1214 +       NULL,
1215 +       NULL,
1216 +       "esfq",
1217 +       sizeof(struct esfq_sched_data),
1218 +
1219 +       esfq_enqueue,
1220 +       esfq_dequeue,
1221 +       esfq_requeue,
1222 +       esfq_drop,
1223 +
1224 +       esfq_init,
1225 +       esfq_reset,
1226 +       esfq_destroy,
1227 +       NULL, /* esfq_change - needs more work */
1228 +
1229 +       esfq_dump,
1230 +};
1231 +
1232 +#ifdef MODULE
1233 +int init_module(void)
1234 +{
1235 +       return register_qdisc(&esfq_qdisc_ops);
1236 +}
1237 +
1238 +void cleanup_module(void) 
1239 +{
1240 +       unregister_qdisc(&esfq_qdisc_ops);
1241 +}
1242 +#endif
1243 +MODULE_LICENSE("GPL");
1244 diff -u -U 2 -r -N -d linux-2.6.0-test11.orig/net/sched/sch_generic.c linux-2.6.0-test11/net/sched/sch_generic.c
1245 --- linux-2.6.0-test11.orig/net/sched/sch_generic.c     2003-11-30 20:43:54.000000000 +0000
1246 +++ linux-2.6.0-test11/net/sched/sch_generic.c  2003-12-02 19:54:09.000000000 +0000
1247 @@ -30,6 +30,9 @@
1248  #include <linux/skbuff.h>
1249  #include <linux/rtnetlink.h>
1250  #include <linux/init.h>
1251 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1252 +#include <linux/imq.h>
1253 +#endif
1254  #include <net/sock.h>
1255  #include <net/pkt_sched.h>
1256  
1257 @@ -90,7 +93,11 @@
1258                         spin_unlock(&dev->queue_lock);
1259  
1260                         if (!netif_queue_stopped(dev)) {
1261 +#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
1262 +                               if (netdev_nit && !(skb->imq_flags & IMQ_F_ENQUEUE))
1263 +#else
1264                                 if (netdev_nit)
1265 +#endif
1266                                         dev_queue_xmit_nit(skb, dev);
1267  
1268                                 if (dev->hard_start_xmit(skb, dev) == 0) {
This page took 0.120165 seconds and 3 git commands to generate.