]> git.pld-linux.org Git - packages/kernel.git/blame - linux-2.4.18-esfq.diff
- export memchr on sparc,sparc64 (used by rtsp netfilter modules)
[packages/kernel.git] / linux-2.4.18-esfq.diff
CommitLineData
b6d93411
AM
1--- linux-2.4.18/include/linux/pkt_sched.h.orig Tue May 14 23:25:13 2002
2+++ linux-2.4.18/include/linux/pkt_sched.h Tue May 14 23:34:57 2002
3@@ -157,6 +157,13 @@
4
5 /* SFQ section */
6
7+enum
8+{
9+ TCA_SFQ_HASH_CLASSIC,
10+ TCA_SFQ_HASH_DST,
11+ TCA_SFQ_HASH_SRC,
12+};
13+
14 struct tc_sfq_qopt
15 {
16 unsigned quantum; /* Bytes per round allocated to flow */
17@@ -164,6 +171,7 @@
18 __u32 limit; /* Maximal packets in queue */
19 unsigned divisor; /* Hash divisor */
20 unsigned flows; /* Maximal number of flows */
21+ unsigned hash_kind; /* Hash function to use for flow identification */
22 };
23
24 /*
25@@ -173,6 +181,8 @@
26 *
27 * The only reason for this is efficiency, it is possible
28 * to change these parameters in compile time.
29+ *
30+ * If you need to play with these values, use esfq.
31 */
32
33 /* RED section */
34--- linux-2.4.18/net/sched/Makefile.orig Tue May 14 23:06:55 2002
35+++ linux-2.4.18/net/sched/Makefile Tue May 14 23:07:08 2002
36@@ -17,6 +17,7 @@
37 obj-$(CONFIG_NET_SCH_HPFQ) += sch_hpfq.o
38 obj-$(CONFIG_NET_SCH_HFSC) += sch_hfsc.o
39 obj-$(CONFIG_NET_SCH_SFQ) += sch_sfq.o
40+obj-$(CONFIG_NET_SCH_ESFQ) += sch_esfq.o
41 obj-$(CONFIG_NET_SCH_RED) += sch_red.o
42 obj-$(CONFIG_NET_SCH_TBF) += sch_tbf.o
43 obj-$(CONFIG_NET_SCH_PRIO) += sch_prio.o
44--- linux-2.4.18/net/sched/Config.in.orig Tue May 14 23:07:15 2002
45+++ linux-2.4.18/net/sched/Config.in Tue May 14 23:09:03 2002
46@@ -11,6 +11,7 @@
47 tristate ' The simplest PRIO pseudoscheduler' CONFIG_NET_SCH_PRIO
48 tristate ' RED queue' CONFIG_NET_SCH_RED
49 tristate ' SFQ queue' CONFIG_NET_SCH_SFQ
50+tristate ' ESFQ queue' CONFIG_NET_SCH_ESFQ
51 tristate ' TEQL queue' CONFIG_NET_SCH_TEQL
52 tristate ' TBF queue' CONFIG_NET_SCH_TBF
53 tristate ' GRED queue' CONFIG_NET_SCH_GRED
54--- linux-2.4.18/net/sched/sch_api.c.orig Wed Jun 5 23:51:28 2002
55+++ linux-2.4.18/net/sched/sch_api.c Wed Jun 5 23:50:55 2002
56@@ -1229,6 +1229,9 @@
57 #ifdef CONFIG_NET_SCH_SFQ
58 INIT_QDISC(sfq);
59 #endif
60+#ifdef CONFIG_NET_SCH_ESFQ
61+ INIT_QDISC(esfq);
62+#endif
63 #ifdef CONFIG_NET_SCH_TBF
64 INIT_QDISC(tbf);
65 #endif
66--- linux-2.4.18/Documentation/Configure.help.orig Thu May 16 01:37:22 2002
67+++ linux-2.4.18/Documentation/Configure.help Mon May 27 01:09:03 2002
68@@ -9433,6 +9433,24 @@
69 whenever you want). If you want to compile it as a module, say M
70 here and read <file:Documentation/modules.txt>.
71
72+ESFQ queue
73+CONFIG_NET_SCH_ESFQ
74+ Say Y here if you want to use the Stochastic Fairness Queueing (SFQ)
75+ packet scheduling algorithm for some of your network devices or as a
76+ leaf discipline for the CBQ scheduling algorithm (see the top of
77+ <file:net/sched/sch_esfq.c> for details and references about the SFQ
78+ algorithm).
79+
80+ This is an enhanced SFQ version which allows you to control the
81+ hardcoded values in the SFQ scheduler: queue depth, hash table size,
82+ queue limits. It also adds control over the hash function used to
83+ identify packet flows: hash by src IP, dst IP, or the original SFQ hash.
84+
85+ This code is also available as a module called sch_esfq.o ( = code
86+ which can be inserted in and removed from the running kernel
87+ whenever you want). If you want to compile it as a module, say M
88+ here and read <file:Documentation/modules.txt>.
89+
90 TEQL queue
91 CONFIG_NET_SCH_TEQL
92 Say Y here if you want to use the True Link Equalizer (TLE) packet
93--- /dev/null Mon Jul 18 02:46:18 1994
94+++ linux-2.4.18/net/sched/sch_esfq.c Thu Jun 6 05:16:02 2002
95@@ -0,0 +1,588 @@
96+/*
97+ * net/sched/sch_esfq.c Extended Stochastic Fairness Queueing discipline.
98+ *
99+ * This program is free software; you can redistribute it and/or
100+ * modify it under the terms of the GNU General Public License
101+ * as published by the Free Software Foundation; either version
102+ * 2 of the License, or (at your option) any later version.
103+ *
104+ * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
105+ *
106+ * Changes: Alexander Atanasov, <alex@ssi.bg>
107+ * Added dynamic depth,limit,divisor,hash_kind options.
108+ * Added dst and src hashes.
109+ */
110+
111+#include <linux/config.h>
112+#include <linux/module.h>
113+#include <asm/uaccess.h>
114+#include <asm/system.h>
115+#include <asm/bitops.h>
116+#include <linux/types.h>
117+#include <linux/kernel.h>
118+#include <linux/sched.h>
119+#include <linux/string.h>
120+#include <linux/mm.h>
121+#include <linux/socket.h>
122+#include <linux/sockios.h>
123+#include <linux/in.h>
124+#include <linux/errno.h>
125+#include <linux/interrupt.h>
126+#include <linux/if_ether.h>
127+#include <linux/inet.h>
128+#include <linux/netdevice.h>
129+#include <linux/etherdevice.h>
130+#include <linux/notifier.h>
131+#include <linux/init.h>
132+#include <net/ip.h>
133+#include <linux/ipv6.h>
134+#include <net/route.h>
135+#include <linux/skbuff.h>
136+#include <net/sock.h>
137+#include <net/pkt_sched.h>
138+
139+
140+/* Stochastic Fairness Queueing algorithm.
141+ For more comments look at sch_sfq.c.
142+ The difference is that you can change limit, depth,
143+ hash table size and choose 3 hash types.
144+
145+ classic: same as in sch_sfq.c
146+ dst: destination IP address
147+ src: source IP address
148+
149+ TODO:
150+ make sfq_change work.
151+*/
152+
153+
154+/* This type should contain at least SFQ_DEPTH*2 values */
155+typedef unsigned int esfq_index;
156+
157+struct esfq_head
158+{
159+ esfq_index next;
160+ esfq_index prev;
161+};
162+
163+struct esfq_sched_data
164+{
165+/* Parameters */
166+ int perturb_period;
167+ unsigned quantum; /* Allotment per round: MUST BE >= MTU */
168+ int limit;
169+ unsigned depth;
170+ unsigned hash_divisor;
171+ unsigned hash_kind;
172+/* Variables */
173+ struct timer_list perturb_timer;
174+ int perturbation;
175+ esfq_index tail; /* Index of current slot in round */
176+ esfq_index max_depth; /* Maximal depth */
177+
178+ esfq_index *ht; /* Hash table */
179+ esfq_index *next; /* Active slots link */
180+ short *allot; /* Current allotment per slot */
181+ unsigned short *hash; /* Hash value indexed by slots */
182+ struct sk_buff_head *qs; /* Slot queue */
183+ struct esfq_head *dep; /* Linked list of slots, indexed by depth */
184+};
185+
186+static __inline__ unsigned esfq_hash_u32(struct esfq_sched_data *q,u32 h)
187+{
188+ int pert = q->perturbation;
189+
190+ if (pert)
191+ h = (h<<pert) ^ (h>>(0x1F - pert));
192+
193+ h = ntohl(h) * 2654435761UL;
194+ return h & (q->hash_divisor-1);
195+}
196+
197+static __inline__ unsigned esfq_fold_hash_classic(struct esfq_sched_data *q, u32 h, u32 h1)
198+{
199+ int pert = q->perturbation;
200+
201+ /* Have we any rotation primitives? If not, WHY? */
202+ h ^= (h1<<pert) ^ (h1>>(0x1F - pert));
203+ h ^= h>>10;
204+ return h & (q->hash_divisor-1);
205+}
206+
207+#ifndef IPPROTO_ESP
208+#define IPPROTO_ESP 50
209+#endif
210+
211+static unsigned esfq_hash(struct esfq_sched_data *q, struct sk_buff *skb)
212+{
213+ u32 h, h2;
214+ u32 hs;
215+
216+ switch (skb->protocol) {
217+ case __constant_htons(ETH_P_IP):
218+ {
219+ struct iphdr *iph = skb->nh.iph;
220+ h = iph->daddr;
221+ hs = iph->saddr;
222+ h2 = hs^iph->protocol;
223+ if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
224+ (iph->protocol == IPPROTO_TCP ||
225+ iph->protocol == IPPROTO_UDP ||
226+ iph->protocol == IPPROTO_ESP))
227+ h2 ^= *(((u32*)iph) + iph->ihl);
228+ break;
229+ }
230+ case __constant_htons(ETH_P_IPV6):
231+ {
232+ struct ipv6hdr *iph = skb->nh.ipv6h;
233+ h = iph->daddr.s6_addr32[3];
234+ hs = iph->saddr.s6_addr32[3];
235+ h2 = hs^iph->nexthdr;
236+ if (iph->nexthdr == IPPROTO_TCP ||
237+ iph->nexthdr == IPPROTO_UDP ||
238+ iph->nexthdr == IPPROTO_ESP)
239+ h2 ^= *(u32*)&iph[1];
240+ break;
241+ }
242+ default:
243+ h = (u32)(unsigned long)skb->dst;
244+ hs = (u32)(unsigned long)skb->sk;
245+ h2 = hs^skb->protocol;
246+ }
247+ switch(q->hash_kind)
248+ {
249+ case TCA_SFQ_HASH_CLASSIC:
250+ return esfq_fold_hash_classic(q, h, h2);
251+ case TCA_SFQ_HASH_DST:
252+ return esfq_hash_u32(q,h);
253+ case TCA_SFQ_HASH_SRC:
254+ return esfq_hash_u32(q,hs);
255+ default:
256+ if (net_ratelimit())
257+ printk(KERN_DEBUG "esfq unknown hash method, fallback to classic\n");
258+ }
259+ return esfq_fold_hash_classic(q, h, h2);
260+}
261+
262+extern __inline__ void esfq_link(struct esfq_sched_data *q, esfq_index x)
263+{
264+ esfq_index p, n;
265+ int d = q->qs[x].qlen + q->depth;
266+
267+ p = d;
268+ n = q->dep[d].next;
269+ q->dep[x].next = n;
270+ q->dep[x].prev = p;
271+ q->dep[p].next = q->dep[n].prev = x;
272+}
273+
274+extern __inline__ void esfq_dec(struct esfq_sched_data *q, esfq_index x)
275+{
276+ esfq_index p, n;
277+
278+ n = q->dep[x].next;
279+ p = q->dep[x].prev;
280+ q->dep[p].next = n;
281+ q->dep[n].prev = p;
282+
283+ if (n == p && q->max_depth == q->qs[x].qlen + 1)
284+ q->max_depth--;
285+
286+ esfq_link(q, x);
287+}
288+
289+extern __inline__ void esfq_inc(struct esfq_sched_data *q, esfq_index x)
290+{
291+ esfq_index p, n;
292+ int d;
293+
294+ n = q->dep[x].next;
295+ p = q->dep[x].prev;
296+ q->dep[p].next = n;
297+ q->dep[n].prev = p;
298+ d = q->qs[x].qlen;
299+ if (q->max_depth < d)
300+ q->max_depth = d;
301+
302+ esfq_link(q, x);
303+}
304+
305+static int esfq_drop(struct Qdisc *sch)
306+{
307+ struct esfq_sched_data *q = (struct esfq_sched_data *)sch->data;
308+ esfq_index d = q->max_depth;
309+ struct sk_buff *skb;
310+
311+ /* Queue is full! Find the longest slot and
312+ drop a packet from it */
313+
314+ if (d > 1) {
315+ esfq_index x = q->dep[d+q->depth].next;
316+ skb = q->qs[x].prev;
317+ __skb_unlink(skb, &q->qs[x]);
318+ kfree_skb(skb);
319+ esfq_dec(q, x);
320+ sch->q.qlen--;
321+ sch->stats.drops++;
322+ return 1;
323+ }
324+
325+ if (d == 1) {
326+ /* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
327+ d = q->next[q->tail];
328+ q->next[q->tail] = q->next[d];
329+ q->allot[q->next[d]] += q->quantum;
330+ skb = q->qs[d].prev;
331+ __skb_unlink(skb, &q->qs[d]);
332+ kfree_skb(skb);
333+ esfq_dec(q, d);
334+ sch->q.qlen--;
335+ q->ht[q->hash[d]] = q->depth;
336+ sch->stats.drops++;
337+ return 1;
338+ }
339+
340+ return 0;
341+}
342+
343+static int
344+esfq_enqueue(struct sk_buff *skb, struct Qdisc* sch)
345+{
346+ struct esfq_sched_data *q = (struct esfq_sched_data *)sch->data;
347+ unsigned hash = esfq_hash(q, skb);
348+ unsigned depth = q->depth;
349+ esfq_index x;
350+
351+ x = q->ht[hash];
352+ if (x == depth) {
353+ q->ht[hash] = x = q->dep[depth].next;
354+ q->hash[x] = hash;
355+ }
356+ __skb_queue_tail(&q->qs[x], skb);
357+ esfq_inc(q, x);
358+ if (q->qs[x].qlen == 1) { /* The flow is new */
359+ if (q->tail == depth) { /* It is the first flow */
360+ q->tail = x;
361+ q->next[x] = x;
362+ q->allot[x] = q->quantum;
363+ } else {
364+ q->next[x] = q->next[q->tail];
365+ q->next[q->tail] = x;
366+ q->tail = x;
367+ }
368+ }
369+ if (++sch->q.qlen < q->limit-1) {
370+ sch->stats.bytes += skb->len;
371+ sch->stats.packets++;
372+ return 0;
373+ }
374+
375+ esfq_drop(sch);
376+ return NET_XMIT_CN;
377+}
378+
379+static int
380+esfq_requeue(struct sk_buff *skb, struct Qdisc* sch)
381+{
382+ struct esfq_sched_data *q = (struct esfq_sched_data *)sch->data;
383+ unsigned hash = esfq_hash(q, skb);
384+ unsigned depth = q->depth;
385+ esfq_index x;
386+
387+ x = q->ht[hash];
388+ if (x == depth) {
389+ q->ht[hash] = x = q->dep[depth].next;
390+ q->hash[x] = hash;
391+ }
392+ __skb_queue_head(&q->qs[x], skb);
393+ esfq_inc(q, x);
394+ if (q->qs[x].qlen == 1) { /* The flow is new */
395+ if (q->tail == depth) { /* It is the first flow */
396+ q->tail = x;
397+ q->next[x] = x;
398+ q->allot[x] = q->quantum;
399+ } else {
400+ q->next[x] = q->next[q->tail];
401+ q->next[q->tail] = x;
402+ q->tail = x;
403+ }
404+ }
405+ if (++sch->q.qlen < q->limit - 1)
406+ return 0;
407+
408+ sch->stats.drops++;
409+ esfq_drop(sch);
410+ return NET_XMIT_CN;
411+}
412+
413+
414+
415+
416+static struct sk_buff *
417+esfq_dequeue(struct Qdisc* sch)
418+{
419+ struct esfq_sched_data *q = (struct esfq_sched_data *)sch->data;
420+ struct sk_buff *skb;
421+ unsigned depth = q->depth;
422+ esfq_index a, old_a;
423+
424+ /* No active slots */
425+ if (q->tail == depth)
426+ return NULL;
427+
428+ a = old_a = q->next[q->tail];
429+
430+ /* Grab packet */
431+ skb = __skb_dequeue(&q->qs[a]);
432+ esfq_dec(q, a);
433+ sch->q.qlen--;
434+
435+ /* Is the slot empty? */
436+ if (q->qs[a].qlen == 0) {
437+ a = q->next[a];
438+ if (a == old_a) {
439+ q->tail = depth;
440+ return skb;
441+ }
442+ q->next[q->tail] = a;
443+ q->allot[a] += q->quantum;
444+ } else if ((q->allot[a] -= skb->len) <= 0) {
445+ q->tail = a;
446+ a = q->next[a];
447+ q->allot[a] += q->quantum;
448+ }
449+
450+ return skb;
451+}
452+
453+static void
454+esfq_reset(struct Qdisc* sch)
455+{
456+ struct sk_buff *skb;
457+
458+ while ((skb = esfq_dequeue(sch)) != NULL)
459+ kfree_skb(skb);
460+}
461+
462+static void esfq_perturbation(unsigned long arg)
463+{
464+ struct Qdisc *sch = (struct Qdisc*)arg;
465+ struct esfq_sched_data *q = (struct esfq_sched_data *)sch->data;
466+
467+ q->perturbation = net_random()&0x1F;
468+ q->perturb_timer.expires = jiffies + q->perturb_period;
469+
470+ if (q->perturb_period) {
471+ q->perturb_timer.expires = jiffies + q->perturb_period;
472+ add_timer(&q->perturb_timer);
473+ }
474+}
475+
476+static int esfq_change(struct Qdisc *sch, struct rtattr *opt)
477+{
478+ struct esfq_sched_data *q = (struct esfq_sched_data *)sch->data;
479+ struct tc_sfq_qopt *ctl = RTA_DATA(opt);
480+ int old_perturb = q->perturb_period;
481+
482+ if (opt->rta_len < RTA_LENGTH(sizeof(*ctl)))
483+ return -EINVAL;
484+
485+ sch_tree_lock(sch);
486+ q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
487+ q->perturb_period = ctl->perturb_period*HZ;
488+// q->hash_divisor = ctl->divisor;
489+// q->tail = q->limit = q->depth = ctl->flows;
490+
491+ if (ctl->limit)
492+ q->limit = min_t(u32, ctl->limit, q->depth);
493+
494+ if (ctl->hash_kind) {
495+ q->hash_kind = ctl->hash_kind;
496+ if (q->hash_kind != TCA_SFQ_HASH_CLASSIC)
497+ q->perturb_period = 0;
498+ }
499+
500+ // is sch_tree_lock enough to do this ?
501+ while (sch->q.qlen >= q->limit-1)
502+ esfq_drop(sch);
503+
504+ if (old_perturb)
505+ del_timer(&q->perturb_timer);
506+ if (q->perturb_period) {
507+ q->perturb_timer.expires = jiffies + q->perturb_period;
508+ add_timer(&q->perturb_timer);
509+ } else {
510+ q->perturbation = 0;
511+ }
512+ sch_tree_unlock(sch);
513+ return 0;
514+}
515+
516+static int esfq_init(struct Qdisc *sch, struct rtattr *opt)
517+{
518+ struct esfq_sched_data *q = (struct esfq_sched_data *)sch->data;
519+ struct tc_sfq_qopt *ctl;
520+ esfq_index p = ~0UL/2;
521+ int i;
522+
523+ if (opt && opt->rta_len < RTA_LENGTH(sizeof(*ctl)))
524+ return -EINVAL;
525+
526+ q->perturb_timer.data = (unsigned long)sch;
527+ q->perturb_timer.function = esfq_perturbation;
528+ init_timer(&q->perturb_timer);
529+ q->perturbation = 0;
530+ q->hash_kind = TCA_SFQ_HASH_CLASSIC;
531+ q->max_depth = 0;
532+ if (opt == NULL) {
533+ q->quantum = psched_mtu(sch->dev);
534+ q->perturb_period = 0;
535+ q->hash_divisor = 1024;
536+ q->tail = q->limit = q->depth = 128;
537+
538+ } else {
539+ ctl = RTA_DATA(opt);
540+ q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
541+ q->perturb_period = ctl->perturb_period*HZ;
542+ q->hash_divisor = ctl->divisor ? : 1024;
543+ q->tail = q->limit = q->depth = ctl->flows ? : 128;
544+
545+ if ( q->depth > p - 1 )
546+ return -EINVAL;
547+
548+ if (ctl->limit)
549+ q->limit = min_t(u32, ctl->limit, q->depth);
550+
551+ if (ctl->hash_kind) {
552+ q->hash_kind = ctl->hash_kind;
553+ }
554+
555+ if (q->perturb_period) {
556+ q->perturb_timer.expires = jiffies + q->perturb_period;
557+ add_timer(&q->perturb_timer);
558+ }
559+ }
560+
561+ q->ht = kmalloc(q->hash_divisor*sizeof(esfq_index), GFP_KERNEL);
562+ if (!q->ht)
563+ goto err_case;
564+
565+ q->dep = kmalloc((1+q->depth*2)*sizeof(struct esfq_head), GFP_KERNEL);
566+ if (!q->dep)
567+ goto err_case;
568+ q->next = kmalloc(q->depth*sizeof(esfq_index), GFP_KERNEL);
569+ if (!q->next)
570+ goto err_case;
571+
572+ q->allot = kmalloc(q->depth*sizeof(short), GFP_KERNEL);
573+ if (!q->allot)
574+ goto err_case;
575+ q->hash = kmalloc(q->depth*sizeof(unsigned short), GFP_KERNEL);
576+ if (!q->hash)
577+ goto err_case;
578+ q->qs = kmalloc(q->depth*sizeof(struct sk_buff_head), GFP_KERNEL);
579+ if (!q->qs)
580+ goto err_case;
581+
582+ for (i=0; i< q->hash_divisor; i++)
583+ q->ht[i] = q->depth;
584+ for (i=0; i<q->depth; i++) {
585+ skb_queue_head_init(&q->qs[i]);
586+ q->dep[i+q->depth].next = i+q->depth;
587+ q->dep[i+q->depth].prev = i+q->depth;
588+ }
589+
590+ for (i=0; i<q->depth; i++)
591+ esfq_link(q, i);
592+ MOD_INC_USE_COUNT;
593+ return 0;
594+err_case:
595+ if (q->ht)
596+ kfree(q->ht);
597+ if (q->dep)
598+ kfree(q->dep);
599+ if (q->next)
600+ kfree(q->next);
601+ if (q->allot)
602+ kfree(q->allot);
603+ if (q->hash)
604+ kfree(q->hash);
605+ if (q->qs)
606+ kfree(q->qs);
607+ return -ENOBUFS;
608+}
609+
610+static void esfq_destroy(struct Qdisc *sch)
611+{
612+ struct esfq_sched_data *q = (struct esfq_sched_data *)sch->data;
613+ del_timer(&q->perturb_timer);
614+ if(q->ht)
615+ kfree(q->ht);
616+ if(q->dep)
617+ kfree(q->dep);
618+ if(q->next)
619+ kfree(q->next);
620+ if(q->allot)
621+ kfree(q->allot);
622+ if(q->hash)
623+ kfree(q->hash);
624+ if(q->qs)
625+ kfree(q->qs);
626+ MOD_DEC_USE_COUNT;
627+}
628+
629+static int esfq_dump(struct Qdisc *sch, struct sk_buff *skb)
630+{
631+ struct esfq_sched_data *q = (struct esfq_sched_data *)sch->data;
632+ unsigned char *b = skb->tail;
633+ struct tc_sfq_qopt opt;
634+
635+ opt.quantum = q->quantum;
636+ opt.perturb_period = q->perturb_period/HZ;
637+
638+ opt.limit = q->limit;
639+ opt.divisor = q->hash_divisor;
640+ opt.flows = q->depth;
641+ opt.hash_kind = q->hash_kind;
642+
643+ RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
644+
645+ return skb->len;
646+
647+rtattr_failure:
648+ skb_trim(skb, b - skb->data);
649+ return -1;
650+}
651+
652+struct Qdisc_ops esfq_qdisc_ops =
653+{
654+ NULL,
655+ NULL,
656+ "esfq",
657+ sizeof(struct esfq_sched_data),
658+
659+ esfq_enqueue,
660+ esfq_dequeue,
661+ esfq_requeue,
662+ esfq_drop,
663+
664+ esfq_init,
665+ esfq_reset,
666+ esfq_destroy,
667+ NULL, /* esfq_change - needs more work */
668+
669+ esfq_dump,
670+};
671+
672+#ifdef MODULE
673+int init_module(void)
674+{
675+ return register_qdisc(&esfq_qdisc_ops);
676+}
677+
678+void cleanup_module(void)
679+{
680+ unregister_qdisc(&esfq_qdisc_ops);
681+}
682+#endif
683+MODULE_LICENSE("GPL");
This page took 0.187775 seconds and 4 git commands to generate.