diff -uNr linux-2.6.4-rc1/drivers/net.orig/imq.c linux-2.6.4-rc1/drivers/net/imq.c
--- linux-2.6.4-rc1/drivers/net.orig/imq.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.4-rc1/drivers/net/imq.c	2004-03-03 03:43:30.262457760 +0100
@@ -0,0 +1,321 @@
+/*
+ * Pseudo-driver for the intermediate queue device.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Authors:	Patrick McHardy, <kaber@trash.net>
+ *
+ * The first version was written by Martin Devera, <devik@cdi.cz>
+ *
+ * Credits:	Jan Rafaj <imq2t@cedric.vabo.cz>
+ *		- Update patch to 2.4.21
+ *		Sebastian Strollo <sstrollo@nortelnetworks.com>
+ *		- Fix "Dead-loop on netdevice imq"-issue
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_arp.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#include <linux/netfilter_ipv6.h>
+#endif
+#include <linux/imq.h>
+#include <net/pkt_sched.h>
+
+static nf_hookfn imq_nf_hook;
+
+static struct nf_hook_ops imq_ingress_ipv4 = {
+	.hook		= imq_nf_hook,
+	.owner		= THIS_MODULE,
+	.pf		= PF_INET,
+	.hooknum	= NF_IP_PRE_ROUTING,
+	.priority	= NF_IP_PRI_MANGLE + 1
+};
+
+static struct nf_hook_ops imq_egress_ipv4 = {
+	.hook		= imq_nf_hook,
+	.owner		= THIS_MODULE,
+	.pf		= PF_INET,
+	.hooknum	= NF_IP_POST_ROUTING,
+	.priority	= NF_IP_PRI_LAST
+};
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static struct nf_hook_ops imq_ingress_ipv6 = {
+	.hook		= imq_nf_hook,
+	.owner		= THIS_MODULE,
+	.pf		= PF_INET6,
+	.hooknum	= NF_IP6_PRE_ROUTING,
+	.priority	= NF_IP6_PRI_MANGLE + 1
+};
+
+static struct nf_hook_ops imq_egress_ipv6 = {
+	.hook		= imq_nf_hook,
+	.owner		= THIS_MODULE,
+	.pf		= PF_INET6,
+	.hooknum	= NF_IP6_POST_ROUTING,
+	.priority	= NF_IP6_PRI_LAST
+};
+#endif
+
+static unsigned int numdevs = 2;
+
+MODULE_PARM(numdevs, "i");
+MODULE_PARM_DESC(numdevs, "number of imq devices");
+
+static struct net_device *imq_devs;
+
+
+static struct net_device_stats *imq_get_stats(struct net_device *dev)
+{
+	return (struct net_device_stats *)dev->priv;
+}
+
+/* called for packets kfree'd in qdiscs at places other than enqueue */
+static void imq_skb_destructor(struct sk_buff *skb)
+{
+	struct nf_info *info = skb->nf_info;
+
+	if (info) {
+		if (info->indev)
+			dev_put(info->indev);
+		if (info->outdev)
+			dev_put(info->outdev);
+		kfree(info);
+	}
+}
+
+static int imq_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct net_device_stats *stats = (struct net_device_stats *)dev->priv;
+
+	stats->tx_bytes += skb->len;
+	stats->tx_packets++;
+
+	skb->imq_flags = 0;
+	skb->destructor = NULL;
+
+	dev->trans_start = jiffies;
+	nf_reinject(skb, skb->nf_info, NF_ACCEPT);
+	return 0;
+}
+
+static int imq_nf_queue(struct sk_buff *skb, struct nf_info *info,
+			void *data)
+{
+	struct net_device *dev;
+	struct net_device_stats *stats;
+	struct sk_buff *skb2 = NULL;
+	struct Qdisc *q;
+	unsigned int index = skb->imq_flags & IMQ_F_IFMASK;
+	int ret = -1;
+
+	if (index > numdevs)
+		return -1;
+
+	dev = imq_devs + index;
+	if (!(dev->flags & IFF_UP)) {
+		skb->imq_flags = 0;
+		nf_reinject(skb, info, NF_ACCEPT);
+		return 0;
+	}
+	dev->last_rx = jiffies;
+
+	if (skb->destructor) {
+		skb2 = skb;
+		skb = skb_clone(skb, GFP_ATOMIC);
+		if (!skb)
+			return -1;
+	}
+	skb->nf_info = info;
+
+	stats = (struct net_device_stats *)dev->priv;
+	stats->rx_bytes += skb->len;
+	stats->rx_packets++;
+
+	spin_lock_bh(&dev->queue_lock);
+	q = dev->qdisc;
+	if (q->enqueue) {
+		q->enqueue(skb_get(skb), q);
+		if (skb_shared(skb)) {
+			skb->destructor = imq_skb_destructor;
+			kfree_skb(skb);
+			ret = 0;
+		}
+	}
+	if (spin_is_locked(&dev->xmit_lock))
+		netif_schedule(dev);
+	else
+		while (!netif_queue_stopped(dev) && (qdisc_restart(dev) < 0));
+	spin_unlock_bh(&dev->queue_lock);
+
+	if (skb2)
+		kfree_skb(ret ? skb : skb2);
+
+	return ret;
+}
+
+static unsigned int imq_nf_hook(unsigned int hook, struct sk_buff **pskb,
+				const struct net_device *indev,
+				const struct net_device *outdev,
+				int (*okfn)(struct sk_buff *))
+{
+	if ((*pskb)->imq_flags & IMQ_F_ENQUEUE)
+		return NF_QUEUE;
+
+	return NF_ACCEPT;
+}
+
+
+static int __init imq_init_hooks(void)
+{
+	int err;
+
+	if ((err = nf_register_queue_handler(PF_INET, imq_nf_queue, NULL)))
+		goto err1;
+	if ((err = nf_register_hook(&imq_ingress_ipv4)))
+		goto err2;
+	if ((err = nf_register_hook(&imq_egress_ipv4)))
+		goto err3;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	if ((err = nf_register_queue_handler(PF_INET6, imq_nf_queue, NULL)))
+		goto err4;
+	if ((err = nf_register_hook(&imq_ingress_ipv6)))
+		goto err5;
+	if ((err = nf_register_hook(&imq_egress_ipv6)))
+		goto err6;
+#endif
+
+	return 0;
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+err6:
+	nf_unregister_hook(&imq_ingress_ipv6);
+err5:
+	nf_unregister_queue_handler(PF_INET6);
+err4:
+	nf_unregister_hook(&imq_egress_ipv4);
+#endif
+err3:
+	nf_unregister_hook(&imq_ingress_ipv4);
+err2:
+	nf_unregister_queue_handler(PF_INET);
+err1:
+	return err;
+}
+
+static void __exit imq_unhook(void)
+{
+	nf_unregister_hook(&imq_ingress_ipv4);
+	nf_unregister_hook(&imq_egress_ipv4);
+	nf_unregister_queue_handler(PF_INET);
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	nf_unregister_hook(&imq_ingress_ipv6);
+	nf_unregister_hook(&imq_egress_ipv6);
+	nf_unregister_queue_handler(PF_INET6);
+#endif
+}
+
+static int __init imq_dev_init(struct net_device *dev)
+{
+	dev->hard_start_xmit = imq_dev_xmit;
+	dev->type = ARPHRD_VOID;
+	dev->mtu = 1500;
+	dev->tx_queue_len = 30;
+	dev->flags = IFF_NOARP;
+	dev->priv = kmalloc(sizeof(struct net_device_stats), GFP_KERNEL);
+	if (dev->priv == NULL)
+		return -ENOMEM;
+	memset(dev->priv, 0, sizeof(struct net_device_stats));
+	dev->get_stats = imq_get_stats;
+
+	return 0;
+}
+
+static void imq_dev_uninit(struct net_device *dev)
+{
+	kfree(dev->priv);
+}
+
+static int __init imq_init_devs(void)
+{
+	struct net_device *dev;
+	int i;
+
+	if (!numdevs || numdevs > IMQ_MAX_DEVS) {
+		printk(KERN_ERR "numdevs has to be between 1 and %u\n",
+		       IMQ_MAX_DEVS);
+		return -EINVAL;
+	}
+
+	imq_devs = kmalloc(sizeof(struct net_device) * numdevs, GFP_KERNEL);
+	if (!imq_devs)
+		return -ENOMEM;
+	memset(imq_devs, 0, sizeof(struct net_device) * numdevs);
+
+	/* we start counting at zero */
+	numdevs--;
+
+	for (i = 0, dev = imq_devs; i <= numdevs; i++, dev++) {
+		SET_MODULE_OWNER(dev);
+		strcpy(dev->name, "imq%d");
+		dev->init = imq_dev_init;
+		dev->uninit = imq_dev_uninit;
+
+		if (register_netdev(dev) < 0)
+			goto err_register;
+	}
+	return 0;
+
+err_register:
+	for (; i; i--)
+		unregister_netdev(--dev);
+	kfree(imq_devs);
+	return -EIO;
+}
+
+static void imq_cleanup_devs(void)
+{
+	int i;
+	struct net_device *dev = imq_devs;
+
+	for (i = 0; i <= numdevs; i++)
+		unregister_netdev(dev++);
+
+	kfree(imq_devs);
+}
+
+static int __init imq_init_module(void)
+{
+	int err;
+
+	if ((err = imq_init_devs()))
+		return err;
+	if ((err = imq_init_hooks())) {
+		imq_cleanup_devs();
+		return err;
+	}
+
+	printk(KERN_INFO "imq driver loaded.\n");
+
+	return 0;
+}
+
+static void __exit imq_cleanup_module(void)
+{
+	imq_unhook();
+	imq_cleanup_devs();
+}
+
+module_init(imq_init_module);
+module_exit(imq_cleanup_module);
+MODULE_LICENSE("GPL");
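
The subtle part of imq_nf_queue() above is the reference-count dance around q->enqueue(): the skb is enqueued with an extra reference taken via skb_get(), and the skb_shared() test afterwards reveals whether the qdisc kept the packet or silently dropped it inside enqueue. The following stand-alone toy model restates just that logic in plain user-space C (the toy_skb type and helpers are hypothetical stand-ins for sk_buff and its refcount API, not kernel code):

#include <stdio.h>

/* Toy stand-in for struct sk_buff's reference count. */
struct toy_skb { int users; };

static void toy_get(struct toy_skb *s)    { s->users++; }          /* like skb_get() */
static int  toy_shared(struct toy_skb *s) { return s->users > 1; } /* like skb_shared() */

/* 'kept' models whether the qdisc kept the packet or freed it internally. */
static void toy_enqueue(struct toy_skb *s, int kept)
{
	if (!kept)
		s->users--;	/* a dropping qdisc calls kfree_skb() itself */
}

int main(void)
{
	struct toy_skb skb = { .users = 1 };

	toy_get(&skb);		/* extra reference, as in q->enqueue(skb_get(skb), q) */
	toy_enqueue(&skb, 1);	/* assume the qdisc kept the packet */

	if (toy_shared(&skb)) {
		/* qdisc holds a reference: arm the destructor, drop ours */
		skb.users--;	/* like kfree_skb() */
		printf("packet queued; destructor armed, users=%d\n", skb.users);
	} else {
		printf("qdisc dropped the packet; caller still owns it\n");
	}
	return 0;
}
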
diff -uNr linux-2.6.4-rc1/drivers/net.orig/Kconfig linux-2.6.4-rc1/drivers/net/Kconfig
--- linux-2.6.4-rc1/drivers/net.orig/Kconfig	2004-03-03 03:30:33.000000000 +0100
+++ linux-2.6.4-rc1/drivers/net/Kconfig	2004-03-03 03:43:30.237461560 +0100
@@ -85,6 +85,20 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called eql.  If unsure, say N.
 
+config IMQ
+	tristate "IMQ (intermediate queueing device) support"
+	depends on NETDEVICES && NETFILTER
+	---help---
+	  The IMQ device(s) serve as placeholders for QoS queueing disciplines.
+	  Every packet entering/leaving the IP stack can be directed through
+	  the IMQ device, where it is enqueued/dequeued to the attached qdisc.
+	  This allows you to treat network devices as classes and to distribute
+	  bandwidth among them.  Iptables is used to specify through which IMQ
+	  device, if any, packets travel.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called imq.  If unsure, say N.
+
 config TUN
 	tristate "Universal TUN/TAP device driver support"
 	depends on NETDEVICES
diff -uNr linux-2.6.4-rc1/drivers/net.orig/Makefile linux-2.6.4-rc1/drivers/net/Makefile
--- linux-2.6.4-rc1/drivers/net.orig/Makefile	2004-03-03 03:30:33.000000000 +0100
+++ linux-2.6.4-rc1/drivers/net/Makefile	2004-03-03 03:43:30.240461104 +0100
@@ -112,6 +112,7 @@
 endif
 
 obj-$(CONFIG_DUMMY) += dummy.o
+obj-$(CONFIG_IMQ) += imq.o
 obj-$(CONFIG_DE600) += de600.o
 obj-$(CONFIG_DE620) += de620.o
 obj-$(CONFIG_LANCE) += lance.o
diff -uNr linux-2.6.4-rc1/include.orig/linux/imq.h linux-2.6.4-rc1/include/linux/imq.h
--- linux-2.6.4-rc1/include.orig/linux/imq.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.4-rc1/include/linux/imq.h	2004-03-03 03:43:30.264457456 +0100
@@ -0,0 +1,9 @@
+#ifndef _IMQ_H
+#define _IMQ_H
+
+#define IMQ_MAX_DEVS 16
+
+#define IMQ_F_IFMASK 0x7f
+#define IMQ_F_ENQUEUE 0x80
+
+#endif /* _IMQ_H */
diff -uNr linux-2.6.4-rc1/include.orig/linux/netfilter_ipv4/ipt_IMQ.h linux-2.6.4-rc1/include/linux/netfilter_ipv4/ipt_IMQ.h
--- linux-2.6.4-rc1/include.orig/linux/netfilter_ipv4/ipt_IMQ.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.4-rc1/include/linux/netfilter_ipv4/ipt_IMQ.h	2004-03-03 03:43:30.265457304 +0100
@@ -0,0 +1,8 @@
+#ifndef _IPT_IMQ_H
+#define _IPT_IMQ_H
+
+struct ipt_imq_info {
+	unsigned int todev;	/* target imq device */
+};
+
+#endif /* _IPT_IMQ_H */
diff -uNr linux-2.6.4-rc1/include.orig/linux/pkt_sched.h linux-2.6.4-rc1/include/linux/pkt_sched.h
--- linux-2.6.4-rc1/include.orig/linux/pkt_sched.h	2004-02-27 23:21:25.000000000 +0100
+++ linux-2.6.4-rc1/include/linux/pkt_sched.h	2004-03-03 03:43:30.297452440 +0100
@@ -157,6 +157,13 @@
 
 /* SFQ section */
 
+enum
+{
+	TCA_SFQ_HASH_CLASSIC,
+	TCA_SFQ_HASH_DST,
+	TCA_SFQ_HASH_SRC,
+};
+
 struct tc_sfq_qopt
 {
 	unsigned	quantum;	/* Bytes per round allocated to flow */
@@ -164,6 +171,7 @@
 	__u32		limit;		/* Maximal packets in queue */
 	unsigned	divisor;	/* Hash divisor */
 	unsigned	flows;		/* Maximal number of flows */
+	unsigned	hash_kind;	/* Hash function to use for flow identification */
 };
 
 /*
@@ -173,6 +181,8 @@
  *
 *	The only reason for this is efficiency, it is possible
 *	to change these parameters in compile time.
+ *
+ *	If you need to play with these values, use esfq.
  */
 
 /* RED section */
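
The new hash_kind field above rides along in the same netlink option blob that tc already sends for SFQ, so a patched tc can select the flow hash at qdisc setup time. A minimal user-space sketch of filling the option follows; the struct is mirrored locally for illustration only (real code would include the patched <linux/pkt_sched.h> and send this via rtnetlink), and all values are illustrative:

#include <stdio.h>

/* Local mirror of the patched struct tc_sfq_qopt, for illustration only. */
enum { TCA_SFQ_HASH_CLASSIC, TCA_SFQ_HASH_DST, TCA_SFQ_HASH_SRC };

struct tc_sfq_qopt {
	unsigned quantum;	/* bytes per round allocated to flow */
	int	 perturb_period;/* period of hash perturbation, seconds */
	unsigned limit;		/* maximal packets in queue */
	unsigned divisor;	/* hash divisor */
	unsigned flows;		/* maximal number of flows */
	unsigned hash_kind;	/* new: hash function for flow identification */
};

int main(void)
{
	/* Hypothetical request: hash flows by destination IP and cap the
	 * queue at 64 packets. */
	struct tc_sfq_qopt opt = {
		.quantum	= 1514,
		.perturb_period	= 0,	/* esfq disables perturbation for non-classic hashes */
		.limit		= 64,
		.divisor	= 1024,
		.flows		= 128,
		.hash_kind	= TCA_SFQ_HASH_DST,
	};

	printf("hash_kind=%u limit=%u\n", opt.hash_kind, opt.limit);
	return 0;
}
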
diff -uNr linux-2.6.4-rc1/include.orig/linux/skbuff.h linux-2.6.4-rc1/include/linux/skbuff.h
--- linux-2.6.4-rc1/include.orig/linux/skbuff.h	2004-02-27 23:21:03.000000000 +0100
+++ linux-2.6.4-rc1/include/linux/skbuff.h	2004-03-03 03:43:30.309450616 +0100
@@ -112,6 +112,9 @@
 #endif
 
 #endif
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+struct nf_info;
+#endif
 
 struct sk_buff_head {
 	/* These two members must be first. */
@@ -237,6 +240,7 @@
 				mac_len,
 				csum;
 	unsigned char		local_df,
+				imq_flags,
 				cloned,
 				pkt_type,
 				ip_summed;
@@ -264,6 +268,9 @@
 #ifdef CONFIG_NET_SCHED
 	__u32			tc_index;	/* traffic control index */
 #endif
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+	struct nf_info		*nf_info;
+#endif
 
 	/* These elements must be at the end, see alloc_skb() for details. */
 	unsigned int		truesize;
diff -uNr linux-2.6.4-rc1/net.orig/core/skbuff.c linux-2.6.4-rc1/net/core/skbuff.c
--- linux-2.6.4-rc1/net.orig/core/skbuff.c	2004-02-27 23:21:25.000000000 +0100
+++ linux-2.6.4-rc1/net/core/skbuff.c	2004-03-03 03:43:30.316449552 +0100
@@ -152,6 +152,13 @@
 	skb_shinfo(skb)->tso_size = 0;
 	skb_shinfo(skb)->tso_segs = 0;
 	skb_shinfo(skb)->frag_list = NULL;
+
+/* probably doomed to failure */
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+	skb->imq_flags = 0;
+	skb->nf_info = NULL;
+#endif
+
 out:
 	return skb;
 nodata:
@@ -313,6 +320,10 @@
 #ifdef CONFIG_NET_SCHED
 	C(tc_index);
 #endif
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+	C(imq_flags);
+	C(nf_info);
+#endif
 	C(truesize);
 	atomic_set(&n->users, 1);
 	C(head);
@@ -368,6 +379,10 @@
 #ifdef CONFIG_NET_SCHED
 	new->tc_index = old->tc_index;
 #endif
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+	new->imq_flags = old->imq_flags;
+	new->nf_info = old->nf_info;
+#endif
 	atomic_set(&new->users, 1);
 }
 
diff -uNr linux-2.6.4-rc1/net.orig/ipv4/netfilter/ipt_IMQ.c linux-2.6.4-rc1/net/ipv4/netfilter/ipt_IMQ.c
--- linux-2.6.4-rc1/net.orig/ipv4/netfilter/ipt_IMQ.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.4-rc1/net/ipv4/netfilter/ipt_IMQ.c	2004-03-03 03:43:30.322448640 +0100
@@ -0,0 +1,76 @@
+/* This target marks packets to be enqueued to an imq device */
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv4/ipt_IMQ.h>
+#include <linux/imq.h>
+
+static unsigned int imq_target(struct sk_buff **pskb,
+			       const struct net_device *in,
+			       const struct net_device *out,
+			       unsigned int hooknum,
+			       const void *targinfo,
+			       void *userinfo)
+{
+	struct ipt_imq_info *mr = (struct ipt_imq_info *)targinfo;
+
+	(*pskb)->imq_flags = mr->todev | IMQ_F_ENQUEUE;
+	(*pskb)->nfcache |= NFC_ALTERED;
+
+	return IPT_CONTINUE;
+}
+
+static int imq_checkentry(const char *tablename,
+			  const struct ipt_entry *e,
+			  void *targinfo,
+			  unsigned int targinfosize,
+			  unsigned int hook_mask)
+{
+	struct ipt_imq_info *mr;
+
+	if (targinfosize != IPT_ALIGN(sizeof(struct ipt_imq_info))) {
+		printk(KERN_WARNING "IMQ: invalid targinfosize\n");
+		return 0;
+	}
+	mr = (struct ipt_imq_info *)targinfo;
+
+	if (strcmp(tablename, "mangle") != 0) {
+		printk(KERN_WARNING
+		       "IMQ: IMQ can only be called from \"mangle\" table, not \"%s\"\n",
+		       tablename);
+		return 0;
+	}
+
+	if (mr->todev > IMQ_MAX_DEVS) {
+		printk(KERN_WARNING
+		       "IMQ: invalid device specified, highest is %u\n",
+		       IMQ_MAX_DEVS);
+		return 0;
+	}
+
+	return 1;
+}
+
+static struct ipt_target ipt_imq_reg = {
+	.name		= "IMQ",
+	.target		= imq_target,
+	.checkentry	= imq_checkentry,
+	.me		= THIS_MODULE
+};
+
+static int __init init(void)
+{
+	if (ipt_register_target(&ipt_imq_reg))
+		return -EINVAL;
+
+	return 0;
+}
+
+static void __exit fini(void)
+{
+	ipt_unregister_target(&ipt_imq_reg);
+}
+
+module_init(init);
+module_exit(fini);
+MODULE_LICENSE("GPL");
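
Taken together with drivers/net/imq.c above: imq_target() merely tags the skb, imq_nf_hook() turns the tag into a queue verdict, and imq_nf_queue() extracts the device index from the low bits again. A compressed user-space restatement of that round trip (toy verdict constants stand in for netfilter's NF_ACCEPT/NF_QUEUE, and the --todev value is hypothetical):

#include <stdio.h>

/* Constants from the patch's include/linux/imq.h */
#define IMQ_F_IFMASK	0x7f
#define IMQ_F_ENQUEUE	0x80

/* Toy verdicts standing in for netfilter's NF_ACCEPT / NF_QUEUE. */
enum verdict { VERDICT_ACCEPT, VERDICT_QUEUE };

/* What imq_target() does for a hypothetical "-j IMQ --todev 2" rule. */
static unsigned char mark_for_imq(unsigned todev)
{
	return (unsigned char)(todev | IMQ_F_ENQUEUE);
}

/* What imq_nf_hook() decides, and what imq_nf_queue() later extracts. */
static enum verdict hook_verdict(unsigned char imq_flags)
{
	return (imq_flags & IMQ_F_ENQUEUE) ? VERDICT_QUEUE : VERDICT_ACCEPT;
}

int main(void)
{
	unsigned char flags = mark_for_imq(2);

	printf("verdict=%s, device=imq%u\n",
	       hook_verdict(flags) == VERDICT_QUEUE ? "QUEUE" : "ACCEPT",
	       flags & IMQ_F_IFMASK);
	return 0;
}
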
diff -uNr linux-2.6.4-rc1/net.orig/ipv4/netfilter/Kconfig linux-2.6.4-rc1/net/ipv4/netfilter/Kconfig
--- linux-2.6.4-rc1/net.orig/ipv4/netfilter/Kconfig	2004-03-03 03:30:33.000000000 +0100
+++ linux-2.6.4-rc1/net/ipv4/netfilter/Kconfig	2004-03-03 03:43:30.318449248 +0100
@@ -501,6 +501,15 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config IP_NF_TARGET_IMQ
+	tristate "IMQ target support"
+	depends on IP_NF_IPTABLES
+	---help---
+	  This option adds an `IMQ' target, which is used to specify if and
+	  to which IMQ device packets should get enqueued/dequeued.
+
+	  To compile it as a module, choose M here.  If unsure, say N.
+
 config IP_NF_TARGET_TCPMSS
 	tristate "TCPMSS target support"
 	depends on IP_NF_IPTABLES
diff -uNr linux-2.6.4-rc1/net.orig/ipv4/netfilter/Makefile linux-2.6.4-rc1/net/ipv4/netfilter/Makefile
--- linux-2.6.4-rc1/net.orig/ipv4/netfilter/Makefile	2004-03-03 03:30:33.000000000 +0100
+++ linux-2.6.4-rc1/net/ipv4/netfilter/Makefile	2004-03-03 03:43:30.321448792 +0100
@@ -88,6 +88,7 @@
 obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o
 obj-$(CONFIG_IP_NF_TARGET_DSCP) += ipt_DSCP.o
 obj-$(CONFIG_IP_NF_TARGET_MARK) += ipt_MARK.o
+obj-$(CONFIG_IP_NF_TARGET_IMQ) += ipt_IMQ.o
 obj-$(CONFIG_IP_NF_TARGET_IPMARK) += ipt_IPMARK.o
 obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
 obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
diff -uNr linux-2.6.4-rc1/net.orig/sched/Kconfig linux-2.6.4-rc1/net/sched/Kconfig
--- linux-2.6.4-rc1/net.orig/sched/Kconfig	2004-02-27 23:21:28.000000000 +0100
+++ linux-2.6.4-rc1/net/sched/Kconfig	2004-03-03 03:43:30.326448032 +0100
@@ -114,6 +114,24 @@
 	  To compile this code as a module, choose M here: the
 	  module will be called sch_sfq.
 
+config NET_SCH_ESFQ
+	tristate "ESFQ queue"
+	depends on NET_SCHED
+	---help---
+	  Say Y here if you want to use the Enhanced Stochastic Fairness
+	  Queueing (ESFQ) packet scheduling algorithm for some of your network
+	  devices or as a leaf discipline for the CBQ scheduling algorithm (see
+	  the top of <file:net/sched/sch_esfq.c> for details and references
+	  about the SFQ algorithm).
+
+	  This is an enhanced SFQ version which allows you to control the
+	  hardcoded values in the SFQ scheduler: queue depth, hash table size,
+	  and queue limit.  It also adds control over the hash function used to
+	  identify packet flows: hash by src or dst IP, or the original SFQ hash.
+
+	  To compile this code as a module, choose M here: the
+	  module will be called sch_esfq.
+
 config NET_SCH_TEQL
 	tristate "TEQL queue"
 	depends on NET_SCHED
diff -uNr linux-2.6.4-rc1/net.orig/sched/Makefile linux-2.6.4-rc1/net/sched/Makefile
--- linux-2.6.4-rc1/net.orig/sched/Makefile	2004-02-27 23:21:02.000000000 +0100
+++ linux-2.6.4-rc1/net/sched/Makefile	2004-03-03 03:44:30.385317696 +0100
@@ -18,6 +18,7 @@
 obj-$(CONFIG_NET_SCH_INGRESS) += sch_ingress.o
 obj-$(CONFIG_NET_SCH_DSMARK) += sch_dsmark.o
 obj-$(CONFIG_NET_SCH_SFQ) += sch_sfq.o
+obj-$(CONFIG_NET_SCH_ESFQ) += sch_esfq.o
 obj-$(CONFIG_NET_SCH_TBF) += sch_tbf.o
 obj-$(CONFIG_NET_SCH_TEQL) += sch_teql.o
 obj-$(CONFIG_NET_SCH_PRIO) += sch_prio.o
diff -uNr linux-2.6.4-rc1/net.orig/sched/sch_esfq.c linux-2.6.4-rc1/net/sched/sch_esfq.c
--- linux-2.6.4-rc1/net.orig/sched/sch_esfq.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-2.6.4-rc1/net/sched/sch_esfq.c	2004-03-03 03:43:30.332447120 +0100
@@ -0,0 +1,586 @@
+/*
+ * net/sched/sch_esfq.c	Extended Stochastic Fairness Queueing discipline.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+ *
+ * Changes:	Alexander Atanasov, <alex@ssi.bg>
+ *		Added dynamic depth, limit, divisor, hash_kind options.
+ *		Added dst and src hashes.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <net/route.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/pkt_sched.h>
+
+
+/* Stochastic Fairness Queuing algorithm.
+   For more comments look at sch_sfq.c.
+   The difference is that you can change limit, depth,
+   hash table size and choose one of 3 hash types.
+
+   classic:	same as in sch_sfq.c
+   dst:		destination IP address
+   src:		source IP address
+
+   TODO:
+	make sfq_change work.
+*/
+
+
+/* This type should contain at least SFQ_DEPTH*2 values */
+typedef unsigned int esfq_index;
+
+struct esfq_head
+{
+	esfq_index next;
+	esfq_index prev;
+};
+
+struct esfq_sched_data
+{
+/* Parameters */
+	int		perturb_period;
+	unsigned	quantum;	/* Allotment per round: MUST BE >= MTU */
+	int		limit;
+	unsigned	depth;
+	unsigned	hash_divisor;
+	unsigned	hash_kind;
+/* Variables */
+	struct timer_list perturb_timer;
+	int		perturbation;
+	esfq_index	tail;		/* Index of current slot in round */
+	esfq_index	max_depth;	/* Maximal depth */
+
+	esfq_index	*ht;		/* Hash table */
+	esfq_index	*next;		/* Active slots link */
+	short		*allot;		/* Current allotment per slot */
+	unsigned short	*hash;		/* Hash value indexed by slots */
+	struct sk_buff_head *qs;	/* Slot queue */
+	struct esfq_head *dep;		/* Linked list of slots, indexed by depth */
+};
+
+static __inline__ unsigned esfq_hash_u32(struct esfq_sched_data *q, u32 h)
+{
+	int pert = q->perturbation;
+
+	if (pert)
+		h = (h<<pert) ^ (h>>(0x1F - pert));
+
+	h = ntohl(h) * 2654435761UL;
+	return h & (q->hash_divisor-1);
+}
+
+static __inline__ unsigned esfq_fold_hash_classic(struct esfq_sched_data *q, u32 h, u32 h1)
+{
+	int pert = q->perturbation;
+
+	/* Have we any rotation primitives? If not, WHY? */
+	h ^= (h1<<pert) ^ (h1>>(0x1F - pert));
+	h ^= h>>10;
+	return h & (q->hash_divisor-1);
+}
+
+#ifndef IPPROTO_ESP
+#define IPPROTO_ESP 50
+#endif
+
+static unsigned esfq_hash(struct esfq_sched_data *q, struct sk_buff *skb)
+{
+	u32 h, h2;
+	u32 hs;
+
+	switch (skb->protocol) {
+	case __constant_htons(ETH_P_IP):
+	{
+		struct iphdr *iph = skb->nh.iph;
+		h = iph->daddr;
+		hs = iph->saddr;
+		h2 = hs^iph->protocol;
+		if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
+		    (iph->protocol == IPPROTO_TCP ||
+		     iph->protocol == IPPROTO_UDP ||
+		     iph->protocol == IPPROTO_ESP))
+			h2 ^= *(((u32*)iph) + iph->ihl);
+		break;
+	}
+	case __constant_htons(ETH_P_IPV6):
+	{
+		struct ipv6hdr *iph = skb->nh.ipv6h;
+		h = iph->daddr.s6_addr32[3];
+		hs = iph->saddr.s6_addr32[3];
+		h2 = hs^iph->nexthdr;
+		if (iph->nexthdr == IPPROTO_TCP ||
+		    iph->nexthdr == IPPROTO_UDP ||
+		    iph->nexthdr == IPPROTO_ESP)
+			h2 ^= *(u32*)&iph[1];
+		break;
+	}
+	default:
+		h = (u32)(unsigned long)skb->dst;
+		hs = (u32)(unsigned long)skb->sk;
+		h2 = hs^skb->protocol;
+	}
+	switch (q->hash_kind)
+	{
+	case TCA_SFQ_HASH_CLASSIC:
+		return esfq_fold_hash_classic(q, h, h2);
+	case TCA_SFQ_HASH_DST:
+		return esfq_hash_u32(q, h);
+	case TCA_SFQ_HASH_SRC:
+		return esfq_hash_u32(q, hs);
+	default:
+		if (net_ratelimit())
+			printk(KERN_DEBUG "esfq unknown hash method, fallback to classic\n");
+	}
+	return esfq_fold_hash_classic(q, h, h2);
+}
+
+extern __inline__ void esfq_link(struct esfq_sched_data *q, esfq_index x)
+{
+	esfq_index p, n;
+	int d = q->qs[x].qlen + q->depth;
+
+	p = d;
+	n = q->dep[d].next;
+	q->dep[x].next = n;
+	q->dep[x].prev = p;
+	q->dep[p].next = q->dep[n].prev = x;
+}
+
+extern __inline__ void esfq_dec(struct esfq_sched_data *q, esfq_index x)
+{
+	esfq_index p, n;
+
+	n = q->dep[x].next;
+	p = q->dep[x].prev;
+	q->dep[p].next = n;
+	q->dep[n].prev = p;
+
+	if (n == p && q->max_depth == q->qs[x].qlen + 1)
+		q->max_depth--;
+
+	esfq_link(q, x);
+}
+
+extern __inline__ void esfq_inc(struct esfq_sched_data *q, esfq_index x)
+{
+	esfq_index p, n;
+	int d;
+
+	n = q->dep[x].next;
+	p = q->dep[x].prev;
+	q->dep[p].next = n;
+	q->dep[n].prev = p;
+	d = q->qs[x].qlen;
+	if (q->max_depth < d)
+		q->max_depth = d;
+
+	esfq_link(q, x);
+}
+
+static unsigned int esfq_drop(struct Qdisc *sch)
+{
+	struct esfq_sched_data *q = qdisc_priv(sch);
+	esfq_index d = q->max_depth;
+	struct sk_buff *skb;
+
+	/* Queue is full! Find the longest slot and
+	   drop a packet from it */
+
+	if (d > 1) {
+		esfq_index x = q->dep[d+q->depth].next;
+		skb = q->qs[x].prev;
+		__skb_unlink(skb, &q->qs[x]);
+		kfree_skb(skb);
+		esfq_dec(q, x);
+		sch->q.qlen--;
+		sch->stats.drops++;
+		return 1;
+	}
+
+	if (d == 1) {
+		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
+		d = q->next[q->tail];
+		q->next[q->tail] = q->next[d];
+		q->allot[q->next[d]] += q->quantum;
+		skb = q->qs[d].prev;
+		__skb_unlink(skb, &q->qs[d]);
+		kfree_skb(skb);
+		esfq_dec(q, d);
+		sch->q.qlen--;
+		q->ht[q->hash[d]] = q->depth;
+		sch->stats.drops++;
+		return 1;
+	}
+
+	return 0;
+}
+
+static int
+esfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+	struct esfq_sched_data *q = qdisc_priv(sch);
+	unsigned hash = esfq_hash(q, skb);
+	unsigned depth = q->depth;
+	esfq_index x;
+
+	x = q->ht[hash];
+	if (x == depth) {
+		q->ht[hash] = x = q->dep[depth].next;
+		q->hash[x] = hash;
+	}
+	__skb_queue_tail(&q->qs[x], skb);
+	esfq_inc(q, x);
+	if (q->qs[x].qlen == 1) {	/* The flow is new */
+		if (q->tail == depth) {	/* It is the first flow */
+			q->tail = x;
+			q->next[x] = x;
+			q->allot[x] = q->quantum;
+		} else {
+			q->next[x] = q->next[q->tail];
+			q->next[q->tail] = x;
+			q->tail = x;
+		}
+	}
+	if (++sch->q.qlen < q->limit-1) {
+		sch->stats.bytes += skb->len;
+		sch->stats.packets++;
+		return 0;
+	}
+
+	esfq_drop(sch);
+	return NET_XMIT_CN;
+}
+
+static int
+esfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
+{
+	struct esfq_sched_data *q = qdisc_priv(sch);
+	unsigned hash = esfq_hash(q, skb);
+	unsigned depth = q->depth;
+	esfq_index x;
+
+	x = q->ht[hash];
+	if (x == depth) {
+		q->ht[hash] = x = q->dep[depth].next;
+		q->hash[x] = hash;
+	}
+	__skb_queue_head(&q->qs[x], skb);
+	esfq_inc(q, x);
+	if (q->qs[x].qlen == 1) {	/* The flow is new */
+		if (q->tail == depth) {	/* It is the first flow */
+			q->tail = x;
+			q->next[x] = x;
+			q->allot[x] = q->quantum;
+		} else {
+			q->next[x] = q->next[q->tail];
+			q->next[q->tail] = x;
+			q->tail = x;
+		}
+	}
+	if (++sch->q.qlen < q->limit - 1)
+		return 0;
+
+	sch->stats.drops++;
+	esfq_drop(sch);
+	return NET_XMIT_CN;
+}
+
+
+
+
+static struct sk_buff *
+esfq_dequeue(struct Qdisc *sch)
+{
+	struct esfq_sched_data *q = qdisc_priv(sch);
+	struct sk_buff *skb;
+	unsigned depth = q->depth;
+	esfq_index a, old_a;
+
+	/* No active slots */
+	if (q->tail == depth)
+		return NULL;
+
+	a = old_a = q->next[q->tail];
+
+	/* Grab packet */
+	skb = __skb_dequeue(&q->qs[a]);
+	esfq_dec(q, a);
+	sch->q.qlen--;
+
+	/* Is the slot empty? */
+	if (q->qs[a].qlen == 0) {
+		a = q->next[a];
+		if (a == old_a) {
+			q->tail = depth;
+			return skb;
+		}
+		q->next[q->tail] = a;
+		q->allot[a] += q->quantum;
+	} else if ((q->allot[a] -= skb->len) <= 0) {
+		q->tail = a;
+		a = q->next[a];
+		q->allot[a] += q->quantum;
+	}
+
+	return skb;
+}
+
+static void
+esfq_reset(struct Qdisc *sch)
+{
+	struct sk_buff *skb;
+
+	while ((skb = esfq_dequeue(sch)) != NULL)
+		kfree_skb(skb);
+}
+
+static void esfq_perturbation(unsigned long arg)
+{
+	struct Qdisc *sch = (struct Qdisc *)arg;
+	struct esfq_sched_data *q = qdisc_priv(sch);
+
+	q->perturbation = net_random()&0x1F;
+	q->perturb_timer.expires = jiffies + q->perturb_period;
+
+	if (q->perturb_period) {
+		q->perturb_timer.expires = jiffies + q->perturb_period;
+		add_timer(&q->perturb_timer);
+	}
+}
+
+static int esfq_change(struct Qdisc *sch, struct rtattr *opt)
+{
+	struct esfq_sched_data *q = qdisc_priv(sch);
+	struct tc_sfq_qopt *ctl = RTA_DATA(opt);
+	int old_perturb = q->perturb_period;
+
+	if (opt->rta_len < RTA_LENGTH(sizeof(*ctl)))
+		return -EINVAL;
+
+	sch_tree_lock(sch);
+	q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
+	q->perturb_period = ctl->perturb_period*HZ;
+/*	q->hash_divisor = ctl->divisor; */
+/*	q->tail = q->limit = q->depth = ctl->flows; */
+
+	if (ctl->limit)
+		q->limit = min_t(u32, ctl->limit, q->depth);
+
+	if (ctl->hash_kind) {
+		q->hash_kind = ctl->hash_kind;
+		if (q->hash_kind != TCA_SFQ_HASH_CLASSIC)
+			q->perturb_period = 0;
+	}
+
+	/* is sch_tree_lock enough to do this? */
+	while (sch->q.qlen >= q->limit-1)
+		esfq_drop(sch);
+
+	if (old_perturb)
+		del_timer(&q->perturb_timer);
+	if (q->perturb_period) {
+		q->perturb_timer.expires = jiffies + q->perturb_period;
+		add_timer(&q->perturb_timer);
+	} else {
+		q->perturbation = 0;
+	}
+	sch_tree_unlock(sch);
+	return 0;
+}
+
+static int esfq_init(struct Qdisc *sch, struct rtattr *opt)
+{
+	struct esfq_sched_data *q = qdisc_priv(sch);
+	struct tc_sfq_qopt *ctl;
+	esfq_index p = ~0UL/2;
+	int i;
+
+	if (opt && opt->rta_len < RTA_LENGTH(sizeof(*ctl)))
+		return -EINVAL;
+
+	q->perturb_timer.data = (unsigned long)sch;
+	q->perturb_timer.function = esfq_perturbation;
+	init_timer(&q->perturb_timer);
+	q->perturbation = 0;
+	q->hash_kind = TCA_SFQ_HASH_CLASSIC;
+	q->max_depth = 0;
+	if (opt == NULL) {
+		q->quantum = psched_mtu(sch->dev);
+		q->perturb_period = 0;
+		q->hash_divisor = 1024;
+		q->tail = q->limit = q->depth = 128;
+
+	} else {
+		ctl = RTA_DATA(opt);
+		q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
+		q->perturb_period = ctl->perturb_period*HZ;
+		q->hash_divisor = ctl->divisor ? : 1024;
+		q->tail = q->limit = q->depth = ctl->flows ? : 128;
+
+		if (q->depth > p - 1)
+			return -EINVAL;
+
+		if (ctl->limit)
+			q->limit = min_t(u32, ctl->limit, q->depth);
+
+		if (ctl->hash_kind) {
+			q->hash_kind = ctl->hash_kind;
+		}
+
+		if (q->perturb_period) {
+			q->perturb_timer.expires = jiffies + q->perturb_period;
+			add_timer(&q->perturb_timer);
+		}
+	}
+
+	q->ht = kmalloc(q->hash_divisor*sizeof(esfq_index), GFP_KERNEL);
+	if (!q->ht)
+		goto err_case;
+
+	q->dep = kmalloc((1+q->depth*2)*sizeof(struct esfq_head), GFP_KERNEL);
+	if (!q->dep)
+		goto err_case;
+	q->next = kmalloc(q->depth*sizeof(esfq_index), GFP_KERNEL);
+	if (!q->next)
+		goto err_case;
+
+	q->allot = kmalloc(q->depth*sizeof(short), GFP_KERNEL);
+	if (!q->allot)
+		goto err_case;
+	q->hash = kmalloc(q->depth*sizeof(unsigned short), GFP_KERNEL);
+	if (!q->hash)
+		goto err_case;
+	q->qs = kmalloc(q->depth*sizeof(struct sk_buff_head), GFP_KERNEL);
+	if (!q->qs)
+		goto err_case;
+
+	for (i = 0; i < q->hash_divisor; i++)
+		q->ht[i] = q->depth;
+	for (i = 0; i < q->depth; i++) {
+		skb_queue_head_init(&q->qs[i]);
+		q->dep[i+q->depth].next = i+q->depth;
+		q->dep[i+q->depth].prev = i+q->depth;
+	}
+
+	for (i = 0; i < q->depth; i++)
+		esfq_link(q, i);
+	return 0;
+err_case:
+	if (q->ht)
+		kfree(q->ht);
+	if (q->dep)
+		kfree(q->dep);
+	if (q->next)
+		kfree(q->next);
+	if (q->allot)
+		kfree(q->allot);
+	if (q->hash)
+		kfree(q->hash);
+	if (q->qs)
+		kfree(q->qs);
+	return -ENOBUFS;
+}
+
+static void esfq_destroy(struct Qdisc *sch)
+{
+	struct esfq_sched_data *q = qdisc_priv(sch);
+	del_timer(&q->perturb_timer);
+	if (q->ht)
+		kfree(q->ht);
+	if (q->dep)
+		kfree(q->dep);
+	if (q->next)
+		kfree(q->next);
+	if (q->allot)
+		kfree(q->allot);
+	if (q->hash)
+		kfree(q->hash);
+	if (q->qs)
+		kfree(q->qs);
+}
+
+static int esfq_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+	struct esfq_sched_data *q = qdisc_priv(sch);
+	unsigned char *b = skb->tail;
+	struct tc_sfq_qopt opt;
+
+	opt.quantum = q->quantum;
+	opt.perturb_period = q->perturb_period/HZ;
+
+	opt.limit = q->limit;
+	opt.divisor = q->hash_divisor;
+	opt.flows = q->depth;
+	opt.hash_kind = q->hash_kind;
+
+	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+
+	return skb->len;
+
+rtattr_failure:
+	skb_trim(skb, b - skb->data);
+	return -1;
+}
+
+struct Qdisc_ops esfq_qdisc_ops =
+{
+	NULL,
+	NULL,
+	"esfq",
+	sizeof(struct esfq_sched_data),
+
+	esfq_enqueue,
+	esfq_dequeue,
+	esfq_requeue,
+	esfq_drop,
+
+	esfq_init,
+	esfq_reset,
+	esfq_destroy,
+	NULL,	/* esfq_change - needs more work */
+
+	esfq_dump,
+};
+
+#ifdef MODULE
+int init_module(void)
+{
+	return register_qdisc(&esfq_qdisc_ops);
+}
+
+void cleanup_module(void)
+{
+	unregister_qdisc(&esfq_qdisc_ops);
+}
+#endif
+MODULE_LICENSE("GPL");
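
For the dst and src hash kinds, esfq_hash_u32() above reduces a 32-bit address to a bucket index with a multiplicative (golden-ratio) hash. A stand-alone sketch of that arithmetic in user-space C, assuming zero perturbation and a power-of-two divisor (the default divisor in the patch is 1024, and the mask only works for powers of two):

#include <stdio.h>
#include <arpa/inet.h>

/* Re-statement of esfq_hash_u32() from the patch with perturbation
 * fixed at zero, so only the multiplicative hash remains. */
static unsigned esfq_bucket(unsigned h, unsigned divisor)
{
	h = ntohl(h) * 2654435761UL;	/* golden-ratio multiplier, as in the patch */
	return h & (divisor - 1);	/* divisor assumed to be a power of two */
}

int main(void)
{
	unsigned addr;

	inet_pton(AF_INET, "192.168.1.7", &addr);	/* network byte order */
	printf("example flow hashes to bucket %u of 1024\n",
	       esfq_bucket(addr, 1024));
	return 0;
}
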
diff -uNr linux-2.6.4-rc1/net.orig/sched/sch_generic.c linux-2.6.4-rc1/net/sched/sch_generic.c
--- linux-2.6.4-rc1/net.orig/sched/sch_generic.c	2004-02-27 23:20:56.000000000 +0100
+++ linux-2.6.4-rc1/net/sched/sch_generic.c	2004-03-03 03:43:30.336446512 +0100
@@ -30,6 +30,9 @@
 #include <linux/skbuff.h>
 #include <linux/rtnetlink.h>
 #include <linux/init.h>
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+#include <linux/imq.h>
+#endif
 #include <linux/rcupdate.h>
 #include <linux/list.h>
 #include <net/sock.h>
@@ -135,11 +138,14 @@
 		spin_unlock(&dev->queue_lock);
 
 		if (!netif_queue_stopped(dev)) {
-			int ret;
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+			if (netdev_nit && !(skb->imq_flags & IMQ_F_ENQUEUE))
+#else
 			if (netdev_nit)
+#endif
 				dev_queue_xmit_nit(skb, dev);
 
-			ret = dev->hard_start_xmit(skb, dev);
+			int ret = dev->hard_start_xmit(skb, dev);
 			if (ret == NETDEV_TX_OK) {
 				if (!nolock) {
 					dev->xmit_lock_owner = -1;