[packages/kernel.git] / ds9-2.2.21-2.diff
1diff -urN ../v2.2.21/linux/include/linux/netdevice.h linux/include/linux/netdevice.h
2--- ../v2.2.21/linux/include/linux/netdevice.h Sun Nov 4 10:16:15 2001
3+++ linux/include/linux/netdevice.h Wed Jul 31 00:00:59 2002
4@@ -268,6 +268,7 @@
5 struct Qdisc *qdisc;
6 struct Qdisc *qdisc_sleeping;
7 struct Qdisc *qdisc_list;
8+ struct Qdisc *qdisc_ingress;
9 unsigned long tx_queue_len; /* Max frames per queue allowed */
10
11 /* Bridge stuff */
12diff -urN ../v2.2.21/linux/include/linux/pkt_cls.h linux/include/linux/pkt_cls.h
13--- ../v2.2.21/linux/include/linux/pkt_cls.h Sat Oct 21 12:10:47 2000
14+++ linux/include/linux/pkt_cls.h Wed Jul 31 00:00:59 2002
15@@ -143,4 +143,20 @@
16
17 #define TCA_FW_MAX TCA_FW_POLICE
18
19+/* TC index filter */
20+
21+enum
22+{
23+ TCA_TCINDEX_UNSPEC,
24+ TCA_TCINDEX_HASH,
25+ TCA_TCINDEX_MASK,
26+ TCA_TCINDEX_SHIFT,
27+ TCA_TCINDEX_FALL_THROUGH,
28+ TCA_TCINDEX_CLASSID,
29+ TCA_TCINDEX_POLICE,
30+};
31+
32+#define TCA_TCINDEX_MAX TCA_TCINDEX_POLICE
33+
34+
35 #endif
36diff -urN ../v2.2.21/linux/include/linux/pkt_sched.h linux/include/linux/pkt_sched.h
37--- ../v2.2.21/linux/include/linux/pkt_sched.h Tue Apr 28 18:10:10 1998
38+++ linux/include/linux/pkt_sched.h Sun Aug 4 14:54:40 2002
39@@ -71,6 +71,7 @@
40
41 #define TC_H_UNSPEC (0U)
42 #define TC_H_ROOT (0xFFFFFFFFU)
43+#define TC_H_INGRESS (0xFFFFFFF1U)
44
45 struct tc_ratespec
46 {
47@@ -188,6 +189,61 @@
48 unsigned char Wlog; /* log(W) */
49 unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
50 unsigned char Scell_log; /* cell size for idle damping */
51+ unsigned char flags;
52+#define TC_RED_ECN 1
53+};
54+
55+struct tc_red_xstats
56+{
57+ __u32 early; /* Early drops */
58+ __u32 pdrop; /* Drops due to queue limits */
59+ __u32 other; /* Drops due to drop() calls */
60+ __u32 marked; /* Marked packets */
61+};
62+
63+/* GRED section */
64+
65+#define MAX_DPs 16
66+
67+enum
68+{
69+ TCA_GRED_UNSPEC,
70+ TCA_GRED_PARMS,
71+ TCA_GRED_STAB,
72+ TCA_GRED_DPS,
73+};
74+
75+#define TCA_SET_OFF TCA_GRED_PARMS
76+struct tc_gred_qopt
77+{
78+ __u32 limit; /* HARD maximal queue length (bytes)
79+*/
80+ __u32 qth_min; /* Min average length threshold (bytes)
81+*/
82+ __u32 qth_max; /* Max average length threshold (bytes)
83+*/
84+ __u32 DP; /* upto 2^32 DPs */
85+ __u32 backlog;
86+ __u32 qave;
87+ __u32 forced;
88+ __u32 early;
89+ __u32 other;
90+ __u32 pdrop;
91+
92+ unsigned char Wlog; /* log(W) */
93+ unsigned char Plog; /* log(P_max/(qth_max-qth_min)) */
94+ unsigned char Scell_log; /* cell size for idle damping */
95+ __u8 prio; /* prio of this VQ */
96+ __u32 packets;
97+ __u32 bytesin;
98+};
99+/* gred setup */
100+struct tc_gred_sopt
101+{
102+ __u32 DPs;
103+ __u32 def_DP;
104+ __u8 grio;
105+
106 };
107
108 /* HTB section */
109@@ -323,6 +379,20 @@
110
111 #define TCA_CBQ_MAX TCA_CBQ_POLICE
112
113+
114+/* dsmark section */
115+
116+enum {
117+ TCA_DSMARK_UNSPEC,
118+ TCA_DSMARK_INDICES,
119+ TCA_DSMARK_DEFAULT_INDEX,
120+ TCA_DSMARK_SET_TC_INDEX,
121+ TCA_DSMARK_MASK,
122+ TCA_DSMARK_VALUE
123+};
124+
125+#define TCA_DSMARK_MAX TCA_DSMARK_VALUE
126+
127 /* ATM section */
128
129 enum {
130diff -urN ../v2.2.21/linux/include/linux/skbuff.h linux/include/linux/skbuff.h
131--- ../v2.2.21/linux/include/linux/skbuff.h Sat Oct 21 12:11:03 2000
132+++ linux/include/linux/skbuff.h Wed Jul 31 00:00:59 2002
133@@ -115,6 +115,11 @@
134 __u32 ifield;
135 } private;
136 #endif
137+
138+#ifdef CONFIG_NET_SCHED
139+ __u32 tc_index; /* traffic control index */
140+#endif
141+
142 #if defined(CONFIG_ATM) && !defined(CONFIG_ATM_SKB)
143
144 /* ----- For updated drivers ----------------------------------------------- */
145diff -urN ../v2.2.21/linux/include/net/dsfield.h linux/include/net/dsfield.h
146--- ../v2.2.21/linux/include/net/dsfield.h Thu Jan 1 00:00:00 1970
147+++ linux/include/net/dsfield.h Wed Jul 31 00:00:59 2002
148@@ -0,0 +1,79 @@
149+/* include/net/dsfield.h - Manipulation of the Differentiated Services field */
150+
151+/* Written 1998-2000 by Werner Almesberger, EPFL ICA */
152+
153+
154+#ifndef __NET_DSFIELD_H
155+#define __NET_DSFIELD_H
156+
157+#include <linux/types.h>
158+#include <linux/ip.h>
159+#include <linux/ipv6.h>
160+#include <asm/byteorder.h>
161+
162+
163+extern __inline__ __u8 ipv4_get_dsfield(struct iphdr *iph)
164+{
165+ return iph->tos;
166+}
167+
168+
169+extern __inline__ __u8 ipv6_get_dsfield(struct ipv6hdr *ipv6h)
170+{
171+ return ntohs(*(__u16 *) ipv6h) >> 4;
172+}
173+
174+
175+extern __inline__ void ipv4_change_dsfield(struct iphdr *iph,__u8 mask,
176+ __u8 value)
177+{
178+ __u32 check = ntohs(iph->check);
179+ __u8 dsfield;
180+
181+ dsfield = (iph->tos & mask) | value;
182+ check += iph->tos;
183+ if ((check+1) >> 16) check = (check+1) & 0xffff;
184+ check -= dsfield;
185+ check += check >> 16; /* adjust carry */
186+ iph->check = htons(check);
187+ iph->tos = dsfield;
188+}
189+
190+
191+extern __inline__ void ipv6_change_dsfield(struct ipv6hdr *ipv6h,__u8 mask,
192+ __u8 value)
193+{
194+ __u16 tmp;
195+
196+ tmp = ntohs(*(__u16 *) ipv6h);
197+ tmp = (tmp & ((mask << 4) | 0xf00f)) | (value << 4);
198+ *(__u16 *) ipv6h = htons(tmp);
199+}
200+
201+
202+#if 0 /* put this later into asm-i386 or such ... */
203+
204+extern __inline__ void ip_change_dsfield(struct iphdr *iph,__u16 dsfield)
205+{
206+ __u16 check;
207+
208+ __asm__ __volatile__("
209+ movw 10(%1),%0
210+ xchg %b0,%h0
211+ addb 1(%1),%b0
212+ adcb $0,%h0
213+ adcw $1,%0
214+ cmc
215+ sbbw %2,%0
216+ sbbw $0,%0
217+ movb %b2,1(%1)
218+ xchg %b0,%h0
219+ movw %0,10(%1)"
220+ : "=&r" (check)
221+ : "r" (iph), "r" (dsfield)
222+ : "cc");
223+}
224+
225+#endif
226+
227+#endif
228diff -urN ../v2.2.21/linux/include/net/pkt_cls.h linux/include/net/pkt_cls.h
229--- ../v2.2.21/linux/include/net/pkt_cls.h Sat Oct 21 12:11:28 2000
230+++ linux/include/net/pkt_cls.h Thu Aug 1 00:12:52 2002
231@@ -77,16 +77,6 @@
232 return -1;
233 }
234
235-extern __inline__ unsigned long cls_set_class(unsigned long *clp, unsigned long cl)
236-{
237- unsigned long old_cl;
238-
239- old_cl = *clp;
240- *clp = cl;
241- synchronize_bh();
242- return old_cl;
243-}
244-
245 extern int register_tcf_proto_ops(struct tcf_proto_ops *ops);
246 extern int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
247
248diff -urN ../v2.2.21/linux/include/net/pkt_sched.h linux/include/net/pkt_sched.h
249--- ../v2.2.21/linux/include/net/pkt_sched.h Sat Oct 21 12:10:57 2000
250+++ linux/include/net/pkt_sched.h Wed Aug 7 00:23:36 2002
251@@ -8,6 +8,7 @@
252 #define PSCHED_CLOCK_SOURCE PSCHED_JIFFIES
253
254 #include <linux/pkt_sched.h>
255+#include <linux/ip.h>
256 #include <net/pkt_cls.h>
257
258 struct rtattr;
259@@ -78,6 +79,7 @@
260 unsigned flags;
261 #define TCQ_F_BUILTIN 1
262 #define TCQ_F_THROTTLED 2
263+#define TCQ_F_INGRES 4
264 struct Qdisc_ops *ops;
265 struct Qdisc *next;
266 u32 handle;
267@@ -106,6 +108,111 @@
268 int refcnt;
269 };
270
271+#ifndef MODULE_LICENSE
272+#define MODULE_LICENSE(X)
273+#endif
274+
275+#ifndef NET_XMIT_SUCCESS
276+#define NET_XMIT_SUCCESS 0
277+#define NET_XMIT_DROP 1 /* skb dropped */
278+#define NET_XMIT_CN 2 /* congestion notification */
279+#define NET_XMIT_POLICED 3 /* skb is shot by police */
280+#define NET_XMIT_BYPASS 4 /* packet does not leave via dequeue;
281+ (TC use only - dev_queue_xmit
282+ returns this as NET_XMIT_SUCCESS) */
283+#endif
284+
285+#define likely(e) (e)
286+#define unlikely(e) (e)
287+
288+#ifndef min_t
289+#define min_t(type,x,y) \
290+ ({ type __x = (x); type __y = (y); __x < __y ? __x: __y; })
291+#define max_t(type,x,y) \
292+ ({ type __x = (x); type __y = (y); __x > __y ? __x: __y; })
293+#endif
294+
295+static inline void list_del_init(struct list_head *entry)
296+{
297+ __list_del(entry->prev, entry->next);
298+ INIT_LIST_HEAD(entry);
299+}
300+
301+static inline void __skb_queue_purge(struct sk_buff_head *list)
302+{
303+ struct sk_buff *skb;
304+ while ((skb=__skb_dequeue(list))!=NULL)
305+ kfree_skb(skb);
306+}
307+#define del_timer_sync(t) del_timer(t)
308+
309+#define netif_schedule qdisc_wakeup
310+#define netif_queue_stopped(D) (D->tbusy)
311+#ifndef BUG_TRAP
312+#define BUG_TRAP(x) if (!(x)) { printk("Assertion (" #x ") failed at " __FILE__ "(%d):" __FUNCTION__ "\n", __LINE__); }
313+#endif
314+
315+static inline void IP_ECN_set_ce(struct iphdr *iph)
316+{
317+ u32 check = iph->check;
318+ check += __constant_htons(0xFFFE);
319+ iph->check = check + (check>=0xFFFF);
320+ iph->tos |= 1;
321+}
322+
323+static inline void sch_tree_lock(struct Qdisc *q)
324+{
325+ start_bh_atomic();
326+}
327+
328+static inline void sch_tree_unlock(struct Qdisc *q)
329+{
330+ end_bh_atomic();
331+}
332+
333+static inline void tcf_tree_lock(struct tcf_proto *tp)
334+{
335+ wmb();
336+}
337+
338+static inline void tcf_tree_unlock(struct tcf_proto *tp)
339+{
340+ synchronize_bh();
341+}
342+
343+static inline void sch_dev_queue_lock(struct device *dev)
344+{
345+ start_bh_atomic();
346+}
347+
348+static inline void sch_dev_queue_unlock(struct device *dev)
349+{
350+ end_bh_atomic();
351+}
352+
353+
354+static inline unsigned long
355+cls_set_class(struct tcf_proto *tp, unsigned long *clp, unsigned long cl)
356+{
357+ unsigned long old_cl;
358+
359+ old_cl = *clp;
360+ wmb();
361+ *clp = cl;
362+ synchronize_bh();
363+ return old_cl;
364+}
365+
366+static inline unsigned long
367+__cls_set_class(unsigned long *clp, unsigned long cl)
368+{
369+ unsigned long old_cl;
370+
371+ old_cl = *clp;
372+ *clp = cl;
373+ return old_cl;
374+}
375+
376
377 /*
378 Timer resolution MUST BE < 10% of min_schedulable_packet_size/bandwidth
379@@ -176,7 +283,7 @@
380
381 #define PSCHED_EXPORTLIST_2
382
383-#if ~0UL == 0xFFFFFFFF
384+#if BITS_PER_LONG <= 32
385
386 #define PSCHED_WATCHER unsigned long
387
388@@ -207,7 +314,7 @@
389
390 #define PSCHED_US2JIFFIE(delay) (((delay)+psched_clock_per_hz-1)/psched_clock_per_hz)
391
392-#if CPU == 586 || CPU == 686
393+#ifdef CONFIG_X86_TSC
394
395 #define PSCHED_GET_TIME(stamp) \
396 ({ u64 __cur; \
397@@ -313,8 +420,8 @@
398 #define PSCHED_TDIFF(tv1, tv2) (long)((tv1) - (tv2))
399 #define PSCHED_TDIFF_SAFE(tv1, tv2, bound, guard) \
400 ({ \
401- long __delta = (tv1) - (tv2); \
402- if ( __delta > (bound)) { __delta = (bound); guard; } \
403+ long long __delta = (tv1) - (tv2); \
404+ if ( __delta > (long long)(bound)) { __delta = (bound); guard; } \
405 __delta; \
406 })
407
408@@ -349,6 +456,7 @@
409 struct tc_stats stats;
410 };
411
412+extern int qdisc_copy_stats(struct sk_buff *skb, struct tc_stats *st);
413 extern void tcf_police_destroy(struct tcf_police *p);
414 extern struct tcf_police * tcf_police_locate(struct rtattr *rta, struct rtattr *est);
415 extern int tcf_police_dump(struct sk_buff *skb, struct tcf_police *p);
416@@ -364,6 +472,8 @@
417 extern struct Qdisc_ops noop_qdisc_ops;
418 extern struct Qdisc_ops pfifo_qdisc_ops;
419 extern struct Qdisc_ops bfifo_qdisc_ops;
420+
421+extern int call_in_ingress(struct sk_buff *skb);
422
423 int register_qdisc(struct Qdisc_ops *qops);
424 int unregister_qdisc(struct Qdisc_ops *qops);
425diff -urN ../v2.2.21/linux/net/core/skbuff.c linux/net/core/skbuff.c
426--- ../v2.2.21/linux/net/core/skbuff.c Sat Oct 21 12:10:41 2000
427+++ linux/net/core/skbuff.c Sun Aug 4 13:18:42 2002
428@@ -195,6 +195,9 @@
429 #ifdef CONFIG_IP_FIREWALL
430 skb->fwmark = 0;
431 #endif
432+#ifdef CONFIG_NET_SCHED
433+ skb->tc_index = 0;
434+#endif
435 memset(skb->cb, 0, sizeof(skb->cb));
436 skb->priority = 0;
437 }
438@@ -307,6 +310,9 @@
439 #ifdef CONFIG_IP_FIREWALL
440 n->fwmark = skb->fwmark;
441 #endif
442+#ifdef CONFIG_NET_SCHED
443+ n->tc_index = skb->tc_index;
444+#endif
445 return n;
446 }
447
448@@ -355,6 +361,9 @@
449 n->security=skb->security;
450 #ifdef CONFIG_IP_FIREWALL
451 n->fwmark = skb->fwmark;
452+#endif
453+#ifdef CONFIG_NET_SCHED
454+ n->tc_index = skb->tc_index;
455 #endif
456
457 return n;
458diff -urN ../v2.2.21/linux/net/ipv4/ip_input.c linux/net/ipv4/ip_input.c
459--- ../v2.2.21/linux/net/ipv4/ip_input.c Sun Nov 4 10:16:16 2001
460+++ linux/net/ipv4/ip_input.c Tue Aug 6 11:02:54 2002
461@@ -98,6 +98,7 @@
462 * Jos Vos : Do accounting *before* call_in_firewall
463 * Willy Konynenberg : Transparent proxying support
464 * Stephan Uphoff : Check IP header length field
465+ * Jamal Hadi Salim : Ingress policer support
466 *
467 *
468 *
469@@ -149,6 +150,11 @@
470 #include <linux/mroute.h>
471 #include <linux/netlink.h>
472
473+#ifdef CONFIG_FIREWALL
474+#if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE)
475+#include <net/pkt_sched.h>
476+#endif
477+#endif
478 /*
479 * SNMP management statistics
480 */
481@@ -469,6 +475,11 @@
482 fwres = call_in_firewall(PF_INET, dev, iph, &rport, &skb);
483 if (fwres < FW_ACCEPT && fwres != FW_REJECT)
484 goto drop;
485+#if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE)
486+ if (FW_ACCEPT != call_in_ingress(skb))
487+ goto drop;
488+#endif
489+
490 iph = skb->nh.iph;
491 #endif /* CONFIG_FIREWALL */
492
493diff -urN ../v2.2.21/linux/net/netsyms.c linux/net/netsyms.c
494--- ../v2.2.21/linux/net/netsyms.c Sat Aug 4 12:52:33 2001
495+++ linux/net/netsyms.c Tue Aug 6 10:40:17 2002
496@@ -548,6 +548,7 @@
497 EXPORT_SYMBOL(unregister_qdisc);
498 EXPORT_SYMBOL(qdisc_get_rtab);
499 EXPORT_SYMBOL(qdisc_put_rtab);
500+EXPORT_SYMBOL(qdisc_copy_stats);
501 #ifdef CONFIG_NET_ESTIMATOR
502 EXPORT_SYMBOL(qdisc_new_estimator);
503 EXPORT_SYMBOL(qdisc_kill_estimator);
504@@ -560,6 +561,9 @@
505 EXPORT_SYMBOL(tcf_police_dump);
506 #endif
507 #endif
508+#endif
509+#ifdef CONFIG_NET_SCH_INGRESS
510+EXPORT_SYMBOL(call_in_ingress);
511 #endif
512 #ifdef CONFIG_NET_CLS
513 EXPORT_SYMBOL(register_tcf_proto_ops);
514diff -urN ../v2.2.21/linux/net/sched/Config.in linux/net/sched/Config.in
515--- ../v2.2.21/linux/net/sched/Config.in Sat Oct 21 12:10:47 2000
516+++ linux/net/sched/Config.in Wed Jul 31 00:00:59 2002
517@@ -12,12 +12,17 @@
518 tristate 'SFQ queue' CONFIG_NET_SCH_SFQ
519 tristate 'TEQL queue' CONFIG_NET_SCH_TEQL
520 tristate 'TBF queue' CONFIG_NET_SCH_TBF
521+tristate 'GRED queue' CONFIG_NET_SCH_GRED
522+tristate 'Diffserv field marker' CONFIG_NET_SCH_DSMARK
523+tristate 'Ingress Qdisc/policing' CONFIG_NET_SCH_INGRESS
524+
525 bool 'QoS support' CONFIG_NET_QOS
526 if [ "$CONFIG_NET_QOS" = "y" ]; then
527 bool 'Rate estimator' CONFIG_NET_ESTIMATOR
528 fi
529 bool 'Packet classifier API' CONFIG_NET_CLS
530 if [ "$CONFIG_NET_CLS" = "y" ]; then
531+ tristate 'TC index classifier' CONFIG_NET_CLS_TCINDEX
532 tristate 'Routing table based classifier' CONFIG_NET_CLS_ROUTE4
533 if [ "$CONFIG_NET_CLS_ROUTE4" != "n" ]; then
534 define_bool CONFIG_NET_CLS_ROUTE y
535@@ -27,7 +32,7 @@
536 if [ "$CONFIG_NET_QOS" = "y" ]; then
537 tristate 'Special RSVP classifier' CONFIG_NET_CLS_RSVP
538 tristate 'Special RSVP classifier for IPv6' CONFIG_NET_CLS_RSVP6
539- bool 'Ingres traffic policing' CONFIG_NET_CLS_POLICE
540+ bool 'Traffic policing (needed for in/egress)' CONFIG_NET_CLS_POLICE
541 fi
542 fi
543
544diff -urN ../v2.2.21/linux/net/sched/Makefile linux/net/sched/Makefile
545--- ../v2.2.21/linux/net/sched/Makefile Sat Oct 21 12:10:47 2000
546+++ linux/net/sched/Makefile Wed Jul 31 00:00:59 2002
547@@ -28,6 +28,14 @@
548
549 endif
550
551+ifeq ($(CONFIG_NET_SCH_INGRESS), y)
552+O_OBJS += sch_ingress.o
553+else
554+ ifeq ($(CONFIG_NET_SCH_INGRESS), m)
555+ M_OBJS += sch_ingress.o
556+ endif
557+endif
558+
559 ifeq ($(CONFIG_NET_SCH_CBQ), y)
560 O_OBJS += sch_cbq.o
561 else
562@@ -98,6 +106,30 @@
563 else
564 ifeq ($(CONFIG_NET_SCH_TEQL), m)
565 M_OBJS += sch_teql.o
566+ endif
567+endif
568+
569+ifeq ($(CONFIG_NET_SCH_GRED), y)
570+O_OBJS += sch_gred.o
571+else
572+ ifeq ($(CONFIG_NET_SCH_GRED), m)
573+ M_OBJS += sch_gred.o
574+ endif
575+endif
576+
577+ifeq ($(CONFIG_NET_SCH_DSMARK), y)
578+O_OBJS += sch_dsmark.o
579+else
580+ ifeq ($(CONFIG_NET_SCH_DSMARK), m)
581+ M_OBJS += sch_dsmark.o
582+ endif
583+endif
584+
585+ifeq ($(CONFIG_NET_CLS_TCINDEX), y)
586+O_OBJS += cls_tcindex.o
587+else
588+ ifeq ($(CONFIG_NET_CLS_TCINDEX), m)
589+ M_OBJS += cls_tcindex.o
590 endif
591 endif
592
593diff -urN ../v2.2.21/linux/net/sched/cls_api.c linux/net/sched/cls_api.c
594--- ../v2.2.21/linux/net/sched/cls_api.c Sat Oct 21 12:10:50 2000
595+++ linux/net/sched/cls_api.c Sun Aug 4 16:57:48 2002
596@@ -217,8 +217,10 @@
597 kfree(tp);
598 goto errout;
599 }
600+ sch_dev_queue_lock(dev);
601 tp->next = *back;
602 *back = tp;
603+ sch_dev_queue_unlock(dev);
604 } else if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], tp->ops->kind))
605 goto errout;
606
607@@ -438,6 +440,9 @@
608 #endif
609 #ifdef CONFIG_NET_CLS_RSVP
610 INIT_TC_FILTER(rsvp);
611+#endif
612+#ifdef CONFIG_NET_CLS_TCINDEX
613+ INIT_TC_FILTER(tcindex);
614 #endif
615 #ifdef CONFIG_NET_CLS_RSVP6
616 INIT_TC_FILTER(rsvp6);
617diff -urN ../v2.2.21/linux/net/sched/cls_fw.c linux/net/sched/cls_fw.c
618--- ../v2.2.21/linux/net/sched/cls_fw.c Sat Oct 21 12:10:57 2000
619+++ linux/net/sched/cls_fw.c Thu Aug 1 00:34:59 2002
620@@ -136,7 +136,7 @@
621 unsigned long cl;
622 head->ht[h] = f->next;
623
624- if ((cl = cls_set_class(&f->res.class, 0)) != 0)
625+ if ((cl = __cls_set_class(&f->res.class, 0)) != 0)
626 tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
627 #ifdef CONFIG_NET_CLS_POLICE
628 tcf_police_release(f->police);
629@@ -161,10 +161,11 @@
630 if (*fp == f) {
631 unsigned long cl;
632
633+ tcf_tree_lock(tp);
634 *fp = f->next;
635- synchronize_bh();
636+ tcf_tree_unlock(tp);
637
638- if ((cl = cls_set_class(&f->res.class, 0)) != 0)
639+ if ((cl = cls_set_class(tp, &f->res.class, 0)) != 0)
640 tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
641 #ifdef CONFIG_NET_CLS_POLICE
642 tcf_police_release(f->police);
643@@ -203,7 +204,7 @@
644
645 f->res.classid = *(u32*)RTA_DATA(tb[TCA_FW_CLASSID-1]);
646 cl = tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid);
647- cl = cls_set_class(&f->res.class, cl);
648+ cl = cls_set_class(tp, &f->res.class, cl);
649 if (cl)
650 tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
651 }
652@@ -211,8 +212,9 @@
653 if (tb[TCA_FW_POLICE-1]) {
654 struct tcf_police *police = tcf_police_locate(tb[TCA_FW_POLICE-1], tca[TCA_RATE-1]);
655
656+ tcf_tree_lock(tp);
657 police = xchg(&f->police, police);
658- synchronize_bh();
659+ tcf_tree_unlock(tp);
660
661 tcf_police_release(police);
662 }
663@@ -229,8 +231,9 @@
664 return -ENOBUFS;
665 memset(head, 0, sizeof(*head));
666
667+ tcf_tree_lock(tp);
668 tp->root = head;
669- synchronize_bh();
670+ tcf_tree_unlock(tp);
671 }
672
673 f = kmalloc(sizeof(struct fw_filter), GFP_KERNEL);
674@@ -245,7 +248,7 @@
675 if (RTA_PAYLOAD(tb[TCA_FW_CLASSID-1]) != 4)
676 goto errout;
677 f->res.classid = *(u32*)RTA_DATA(tb[TCA_FW_CLASSID-1]);
678- cls_set_class(&f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
679+ cls_set_class(tp, &f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
680 }
681
682 #ifdef CONFIG_NET_CLS_POLICE
683@@ -254,8 +257,9 @@
684 #endif
685
686 f->next = head->ht[fw_hash(handle)];
687- wmb();
688+ tcf_tree_lock(tp);
689 head->ht[fw_hash(handle)] = f;
690+ tcf_tree_unlock(tp);
691
692 *arg = (unsigned long)f;
693 return 0;
694@@ -294,7 +298,6 @@
695 }
696 }
697
698-#ifdef CONFIG_RTNETLINK
699 static int fw_dump(struct tcf_proto *tp, unsigned long fh,
700 struct sk_buff *skb, struct tcmsg *t)
701 {
702@@ -335,7 +338,8 @@
703 rta->rta_len = skb->tail - b;
704 #ifdef CONFIG_NET_CLS_POLICE
705 if (f->police) {
706- RTA_PUT(skb, TCA_STATS, sizeof(struct tc_stats), &f->police->stats);
707+ if (qdisc_copy_stats(skb, &f->police->stats))
708+ goto rtattr_failure;
709 }
710 #endif
711 return skb->len;
712@@ -344,8 +348,6 @@
713 skb_trim(skb, b - skb->data);
714 return -1;
715 }
716-#endif
717-
718
719 struct tcf_proto_ops cls_fw_ops = {
720 NULL,
721@@ -359,11 +361,7 @@
722 fw_change,
723 fw_delete,
724 fw_walk,
725-#ifdef CONFIG_RTNETLINK
726 fw_dump
727-#else
728- NULL
729-#endif
730 };
731
732 #ifdef MODULE
733diff -urN ../v2.2.21/linux/net/sched/cls_route.c linux/net/sched/cls_route.c
734--- ../v2.2.21/linux/net/sched/cls_route.c Sat Oct 21 12:10:50 2000
735+++ linux/net/sched/cls_route.c Sun Aug 4 17:08:47 2002
736@@ -83,11 +83,11 @@
737 return id&0xF;
738 }
739
740-static void route4_reset_fastmap(struct route4_head *head, u32 id)
741+static void route4_reset_fastmap(struct device *dev, struct route4_head *head, u32 id)
742 {
743- start_bh_atomic();
744+ sch_dev_queue_lock(dev);
745 memset(head->fastmap, 0, sizeof(head->fastmap));
746- end_bh_atomic();
747+ sch_dev_queue_unlock(dev);
748 }
749
750 static void __inline__
751@@ -297,7 +297,7 @@
752 unsigned long cl;
753
754 b->ht[h2] = f->next;
755- if ((cl = cls_set_class(&f->res.class, 0)) != 0)
756+ if ((cl = __cls_set_class(&f->res.class, 0)) != 0)
757 tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
758 #ifdef CONFIG_NET_CLS_POLICE
759 tcf_police_release(f->police);
760@@ -316,25 +316,27 @@
761 {
762 struct route4_head *head = (struct route4_head*)tp->root;
763 struct route4_filter **fp, *f = (struct route4_filter*)arg;
764- unsigned h = f->handle;
765+ unsigned h = 0;
766 struct route4_bucket *b;
767 int i;
768
769 if (!head || !f)
770 return -EINVAL;
771
772+ h = f->handle;
773 b = f->bkt;
774
775 for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
776 if (*fp == f) {
777 unsigned long cl;
778
779+ tcf_tree_lock(tp);
780 *fp = f->next;
781- synchronize_bh();
782+ tcf_tree_unlock(tp);
783
784- route4_reset_fastmap(head, f->id);
785+ route4_reset_fastmap(tp->q->dev, head, f->id);
786
787- if ((cl = cls_set_class(&f->res.class, 0)) != 0)
788+ if ((cl = cls_set_class(tp, &f->res.class, 0)) != 0)
789 tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
790
791 #ifdef CONFIG_NET_CLS_POLICE
792@@ -349,8 +351,9 @@
793 return 0;
794
795 /* OK, session has no flows */
796+ tcf_tree_lock(tp);
797 head->table[to_hash(h)] = NULL;
798- synchronize_bh();
799+ tcf_tree_unlock(tp);
800
801 kfree(b);
802 return 0;
803@@ -387,7 +390,7 @@
804 unsigned long cl;
805
806 f->res.classid = *(u32*)RTA_DATA(tb[TCA_ROUTE4_CLASSID-1]);
807- cl = cls_set_class(&f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
808+ cl = cls_set_class(tp, &f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
809 if (cl)
810 tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
811 }
812@@ -395,8 +398,9 @@
813 if (tb[TCA_ROUTE4_POLICE-1]) {
814 struct tcf_police *police = tcf_police_locate(tb[TCA_ROUTE4_POLICE-1], tca[TCA_RATE-1]);
815
816+ tcf_tree_lock(tp);
817 police = xchg(&f->police, police);
818- synchronize_bh();
819+ tcf_tree_unlock(tp);
820
821 tcf_police_release(police);
822 }
823@@ -412,8 +416,9 @@
824 return -ENOBUFS;
825 memset(head, 0, sizeof(struct route4_head));
826
827+ tcf_tree_lock(tp);
828 tp->root = head;
829- synchronize_bh();
830+ tcf_tree_unlock(tp);
831 }
832
833 f = kmalloc(sizeof(struct route4_filter), GFP_KERNEL);
834@@ -475,8 +480,9 @@
835 goto errout;
836 memset(b, 0, sizeof(*b));
837
838+ tcf_tree_lock(tp);
839 head->table[h1] = b;
840- synchronize_bh();
841+ tcf_tree_unlock(tp);
842 }
843 f->bkt = b;
844
845@@ -489,17 +495,18 @@
846 goto errout;
847 }
848
849- cls_set_class(&f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
850+ cls_set_class(tp, &f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
851 #ifdef CONFIG_NET_CLS_POLICE
852 if (tb[TCA_ROUTE4_POLICE-1])
853 f->police = tcf_police_locate(tb[TCA_ROUTE4_POLICE-1], tca[TCA_RATE-1]);
854 #endif
855
856 f->next = f1;
857- wmb();
858+ tcf_tree_lock(tp);
859 *ins_f = f;
860+ tcf_tree_unlock(tp);
861
862- route4_reset_fastmap(head, f->id);
863+ route4_reset_fastmap(tp->q->dev, head, f->id);
864 *arg = (unsigned long)f;
865 return 0;
866
867@@ -543,7 +550,6 @@
868 }
869 }
870
871-#ifdef CONFIG_RTNETLINK
872 static int route4_dump(struct tcf_proto *tp, unsigned long fh,
873 struct sk_buff *skb, struct tcmsg *t)
874 {
875@@ -589,7 +595,8 @@
876 rta->rta_len = skb->tail - b;
877 #ifdef CONFIG_NET_CLS_POLICE
878 if (f->police) {
879- RTA_PUT(skb, TCA_STATS, sizeof(struct tc_stats), &f->police->stats);
880+ if (qdisc_copy_stats(skb, &f->police->stats))
881+ goto rtattr_failure;
882 }
883 #endif
884 return skb->len;
885@@ -598,7 +605,6 @@
886 skb_trim(skb, b - skb->data);
887 return -1;
888 }
889-#endif
890
891 struct tcf_proto_ops cls_route4_ops = {
892 NULL,
893@@ -612,11 +618,7 @@
894 route4_change,
895 route4_delete,
896 route4_walk,
897-#ifdef CONFIG_RTNETLINK
898 route4_dump
899-#else
900- NULL
901-#endif
902 };
903
904 #ifdef MODULE
905@@ -630,3 +632,4 @@
906 unregister_tcf_proto_ops(&cls_route4_ops);
907 }
908 #endif
909+MODULE_LICENSE("GPL");
910diff -urN ../v2.2.21/linux/net/sched/cls_rsvp.c linux/net/sched/cls_rsvp.c
911--- ../v2.2.21/linux/net/sched/cls_rsvp.c Thu Apr 30 05:46:59 1998
912+++ linux/net/sched/cls_rsvp.c Tue Nov 13 01:29:33 2001
913@@ -39,3 +39,4 @@
914 #define RSVP_OPS cls_rsvp_ops
915
916 #include "cls_rsvp.h"
917+MODULE_LICENSE("GPL");
918diff -urN ../v2.2.21/linux/net/sched/cls_rsvp.h linux/net/sched/cls_rsvp.h
919--- ../v2.2.21/linux/net/sched/cls_rsvp.h Sat Oct 21 12:10:57 2000
920+++ linux/net/sched/cls_rsvp.h Sun Mar 31 03:18:28 2002
921@@ -282,7 +282,7 @@
922 unsigned long cl;
923
924 s->ht[h2] = f->next;
925- if ((cl = cls_set_class(&f->res.class, 0)) != 0)
926+ if ((cl = __cls_set_class(&f->res.class, 0)) != 0)
927 tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
928 #ifdef CONFIG_NET_CLS_POLICE
929 tcf_police_release(f->police);
930@@ -310,10 +310,11 @@
931 unsigned long cl;
932
933
934+ tcf_tree_lock(tp);
935 *fp = f->next;
936- synchronize_bh();
937+ tcf_tree_unlock(tp);
938
939- if ((cl = cls_set_class(&f->res.class, 0)) != 0)
940+ if ((cl = cls_set_class(tp, &f->res.class, 0)) != 0)
941 tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
942
943 #ifdef CONFIG_NET_CLS_POLICE
944@@ -332,8 +333,9 @@
945 for (sp = &((struct rsvp_head*)tp->root)->ht[h&0xFF];
946 *sp; sp = &(*sp)->next) {
947 if (*sp == s) {
948+ tcf_tree_lock(tp);
949 *sp = s->next;
950- synchronize_bh();
951+ tcf_tree_unlock(tp);
952
953 kfree(s);
954 return 0;
955@@ -446,7 +448,7 @@
956 unsigned long cl;
957
958 f->res.classid = *(u32*)RTA_DATA(tb[TCA_RSVP_CLASSID-1]);
959- cl = cls_set_class(&f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
960+ cl = cls_set_class(tp, &f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
961 if (cl)
962 tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
963 }
964@@ -454,8 +456,9 @@
965 if (tb[TCA_RSVP_POLICE-1]) {
966 struct tcf_police *police = tcf_police_locate(tb[TCA_RSVP_POLICE-1], tca[TCA_RATE-1]);
967
968+ tcf_tree_lock(tp);
969 police = xchg(&f->police, police);
970- synchronize_bh();
971+ tcf_tree_unlock(tp);
972
973 tcf_police_release(police);
974 }
975@@ -536,7 +539,7 @@
976
977 f->sess = s;
978 if (f->tunnelhdr == 0)
979- cls_set_class(&f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
980+ cls_set_class(tp, &f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
981 #ifdef CONFIG_NET_CLS_POLICE
982 if (tb[TCA_RSVP_POLICE-1])
983 f->police = tcf_police_locate(tb[TCA_RSVP_POLICE-1], tca[TCA_RATE-1]);
984@@ -612,7 +615,6 @@
985 }
986 }
987
988-#ifdef CONFIG_RTNETLINK
989 static int rsvp_dump(struct tcf_proto *tp, unsigned long fh,
990 struct sk_buff *skb, struct tcmsg *t)
991 {
992@@ -659,7 +661,8 @@
993 rta->rta_len = skb->tail - b;
994 #ifdef CONFIG_NET_CLS_POLICE
995 if (f->police) {
996- RTA_PUT(skb, TCA_STATS, sizeof(struct tc_stats), &f->police->stats);
997+ if (qdisc_copy_stats(skb, &f->police->stats))
998+ goto rtattr_failure;
999 }
1000 #endif
1001 return skb->len;
1002@@ -668,7 +671,6 @@
1003 skb_trim(skb, b - skb->data);
1004 return -1;
1005 }
1006-#endif
1007
1008 struct tcf_proto_ops RSVP_OPS = {
1009 NULL,
1010@@ -682,11 +684,7 @@
1011 rsvp_change,
1012 rsvp_delete,
1013 rsvp_walk,
1014-#ifdef CONFIG_RTNETLINK
1015 rsvp_dump
1016-#else
1017- NULL
1018-#endif
1019 };
1020
1021 #ifdef MODULE
1022diff -urN ../v2.2.21/linux/net/sched/cls_rsvp6.c linux/net/sched/cls_rsvp6.c
1023--- ../v2.2.21/linux/net/sched/cls_rsvp6.c Thu Apr 30 05:46:59 1998
1024+++ linux/net/sched/cls_rsvp6.c Tue Nov 13 01:29:33 2001
1025@@ -40,3 +40,4 @@
1026 #define RSVP_OPS cls_rsvp6_ops
1027
1028 #include "cls_rsvp.h"
1029+MODULE_LICENSE("GPL");
1030diff -urN ../v2.2.21/linux/net/sched/cls_tcindex.c linux/net/sched/cls_tcindex.c
1031--- ../v2.2.21/linux/net/sched/cls_tcindex.c Thu Jan 1 00:00:00 1970
1032+++ linux/net/sched/cls_tcindex.c Sun Mar 31 03:18:28 2002
1033@@ -0,0 +1,509 @@
1034+/*
1035+ * net/sched/cls_tcindex.c Packet classifier for skb->tc_index
1036+ *
1037+ * Written 1998,1999 by Werner Almesberger, EPFL ICA
1038+ */
1039+
1040+#include <linux/config.h>
1041+#include <linux/module.h>
1042+#include <linux/types.h>
1043+#include <linux/kernel.h>
1044+#include <linux/skbuff.h>
1045+#include <linux/errno.h>
1046+#include <linux/netdevice.h>
1047+#include <net/ip.h>
1048+#include <net/pkt_sched.h>
1049+#include <net/route.h>
1050+
1051+
1052+/*
1053+ * Not quite sure if we need all the xchgs Alexey uses when accessing things.
1054+ * Can always add them later ... :)
1055+ */
1056+
1057+/*
1058+ * Passing parameters to the root seems to be done more awkwardly than really
1059+ * necessary. At least, u32 doesn't seem to use such dirty hacks. To be
1060+ * verified. FIXME.
1061+ */
1062+
1063+#define PERFECT_HASH_THRESHOLD 64 /* use perfect hash if not bigger */
1064+#define DEFAULT_HASH_SIZE 64 /* optimized for diffserv */
1065+
1066+
1067+#if 1 /* control */
1068+#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
1069+#else
1070+#define DPRINTK(format,args...)
1071+#endif
1072+
1073+#if 0 /* data */
1074+#define D2PRINTK(format,args...) printk(KERN_DEBUG format,##args)
1075+#else
1076+#define D2PRINTK(format,args...)
1077+#endif
1078+
1079+
1080+#define PRIV(tp) ((struct tcindex_data *) (tp)->root)
1081+
1082+
1083+struct tcindex_filter_result {
1084+ struct tcf_police *police;
1085+ struct tcf_result res;
1086+};
1087+
1088+struct tcindex_filter {
1089+ __u16 key;
1090+ struct tcindex_filter_result result;
1091+ struct tcindex_filter *next;
1092+};
1093+
1094+
1095+struct tcindex_data {
1096+ struct tcindex_filter_result *perfect; /* perfect hash; NULL if none */
1097+ struct tcindex_filter **h; /* imperfect hash; only used if !perfect;
1098+ NULL if unused */
1099+ __u16 mask; /* AND key with mask */
1100+ int shift; /* shift ANDed key to the right */
1101+ int hash; /* hash table size; 0 if undefined */
1102+ int alloc_hash; /* allocated size */
1103+ int fall_through; /* 0: only classify if explicit match */
1104+};
1105+
1106+
1107+static struct tcindex_filter_result *lookup(struct tcindex_data *p,__u16 key)
1108+{
1109+ struct tcindex_filter *f;
1110+
1111+ if (p->perfect)
1112+ return p->perfect[key].res.class ? p->perfect+key : NULL;
1113+ if (!p->h)
1114+ return NULL;
1115+ for (f = p->h[key % p->hash]; f; f = f->next) {
1116+ if (f->key == key)
1117+ return &f->result;
1118+ }
1119+ return NULL;
1120+}
1121+
1122+
1123+static int tcindex_classify(struct sk_buff *skb, struct tcf_proto *tp,
1124+ struct tcf_result *res)
1125+{
1126+ struct tcindex_data *p = PRIV(tp);
1127+ struct tcindex_filter_result *f;
1128+
1129+ D2PRINTK("tcindex_classify(skb %p,tp %p,res %p),p %p\n",skb,tp,res,p);
1130+
1131+ f = lookup(p,(skb->tc_index & p->mask) >> p->shift);
1132+ if (!f) {
1133+ if (!p->fall_through)
1134+ return -1;
1135+ res->classid = TC_H_MAKE(TC_H_MAJ(tp->q->handle),
1136+ (skb->tc_index& p->mask) >> p->shift);
1137+ res->class = 0;
1138+ D2PRINTK("alg 0x%x\n",res->classid);
1139+ return 0;
1140+ }
1141+ *res = f->res;
1142+ D2PRINTK("map 0x%x\n",res->classid);
1143+#ifdef CONFIG_NET_CLS_POLICE
1144+ if (f->police) {
1145+ int result;
1146+
1147+ result = tcf_police(skb,f->police);
1148+ D2PRINTK("police %d\n",res);
1149+ return result;
1150+ }
1151+#endif
1152+ return 0;
1153+}
1154+
1155+
1156+static unsigned long tcindex_get(struct tcf_proto *tp, u32 handle)
1157+{
1158+ struct tcindex_data *p = PRIV(tp);
1159+ struct tcindex_filter_result *r;
1160+
1161+ DPRINTK("tcindex_get(tp %p,handle 0x%08x)\n",tp,handle);
1162+ if (p->perfect && handle >= p->alloc_hash)
1163+ return 0;
1164+ r = lookup(PRIV(tp),handle);
1165+ return r && r->res.class ? (unsigned long) r : 0;
1166+}
1167+
1168+
1169+static void tcindex_put(struct tcf_proto *tp, unsigned long f)
1170+{
1171+ DPRINTK("tcindex_put(tp %p,f 0x%lx)\n",tp,f);
1172+}
1173+
1174+
1175+static int tcindex_init(struct tcf_proto *tp)
1176+{
1177+ struct tcindex_data *p;
1178+
1179+ DPRINTK("tcindex_init(tp %p)\n",tp);
1180+ MOD_INC_USE_COUNT;
1181+ p = kmalloc(sizeof(struct tcindex_data),GFP_KERNEL);
1182+ if (!p) {
1183+ MOD_DEC_USE_COUNT;
1184+ return -ENOMEM;
1185+ }
1186+ tp->root = p;
1187+ p->perfect = NULL;
1188+ p->h = NULL;
1189+ p->hash = 0;
1190+ p->mask = 0xffff;
1191+ p->shift = 0;
1192+ p->fall_through = 1;
1193+ return 0;
1194+}
1195+
1196+
1197+static int tcindex_delete(struct tcf_proto *tp, unsigned long arg)
1198+{
1199+ struct tcindex_data *p = PRIV(tp);
1200+ struct tcindex_filter_result *r = (struct tcindex_filter_result *) arg;
1201+ struct tcindex_filter *f = NULL;
1202+ unsigned long cl;
1203+
1204+ DPRINTK("tcindex_delete(tp %p,arg 0x%lx),p %p,f %p\n",tp,arg,p,f);
1205+ if (p->perfect) {
1206+ if (!r->res.class)
1207+ return -ENOENT;
1208+ } else {
1209+ int i;
1210+ struct tcindex_filter **walk = NULL;
1211+
1212+ for (i = 0; i < p->hash; i++)
1213+ for (walk = p->h+i; *walk; walk = &(*walk)->next)
1214+ if (&(*walk)->result == r)
1215+ goto found;
1216+ return -ENOENT;
1217+
1218+found:
1219+ f = *walk;
1220+ tcf_tree_lock(tp);
1221+ *walk = f->next;
1222+ tcf_tree_unlock(tp);
1223+ }
1224+ cl = __cls_set_class(&r->res.class,0);
1225+ if (cl)
1226+ tp->q->ops->cl_ops->unbind_tcf(tp->q,cl);
1227+#ifdef CONFIG_NET_CLS_POLICE
1228+ tcf_police_release(r->police);
1229+#endif
1230+ if (f)
1231+ kfree(f);
1232+ return 0;
1233+}
1234+
1235+
1236+/*
1237+ * There are no parameters for tcindex_init, so we overload tcindex_change
1238+ */
1239+
1240+
1241+static int tcindex_change(struct tcf_proto *tp,unsigned long base,u32 handle,
1242+ struct rtattr **tca,unsigned long *arg)
1243+{
1244+ struct tcindex_filter_result new_filter_result = {
1245+ NULL, /* no policing */
1246+ { 0,0 }, /* no classification */
1247+ };
1248+ struct rtattr *opt = tca[TCA_OPTIONS-1];
1249+ struct rtattr *tb[TCA_TCINDEX_MAX];
1250+ struct tcindex_data *p = PRIV(tp);
1251+ struct tcindex_filter *f;
1252+ struct tcindex_filter_result *r = (struct tcindex_filter_result *) *arg;
1253+ struct tcindex_filter **walk;
1254+ int hash,shift;
1255+ __u16 mask;
1256+
1257+ DPRINTK("tcindex_change(tp %p,handle 0x%08x,tca %p,arg %p),opt %p,"
1258+ "p %p,r %p\n",tp,handle,tca,arg,opt,p,r);
1259+ if (arg)
1260+ DPRINTK("*arg = 0x%lx\n",*arg);
1261+ if (!opt)
1262+ return 0;
1263+ if (rtattr_parse(tb,TCA_TCINDEX_MAX,RTA_DATA(opt),RTA_PAYLOAD(opt)) < 0)
1264+ return -EINVAL;
1265+ if (!tb[TCA_TCINDEX_HASH-1]) {
1266+ hash = p->hash;
1267+ } else {
1268+ if (RTA_PAYLOAD(tb[TCA_TCINDEX_HASH-1]) < sizeof(int))
1269+ return -EINVAL;
1270+ hash = *(int *) RTA_DATA(tb[TCA_TCINDEX_HASH-1]);
1271+ }
1272+ if (!tb[TCA_TCINDEX_MASK-1]) {
1273+ mask = p->mask;
1274+ } else {
1275+ if (RTA_PAYLOAD(tb[TCA_TCINDEX_MASK-1]) < sizeof(__u16))
1276+ return -EINVAL;
1277+ mask = *(__u16 *) RTA_DATA(tb[TCA_TCINDEX_MASK-1]);
1278+ }
1279+ if (!tb[TCA_TCINDEX_SHIFT-1])
1280+ shift = p->shift;
1281+ else {
1282+ if (RTA_PAYLOAD(tb[TCA_TCINDEX_SHIFT-1]) < sizeof(__u16))
1283+ return -EINVAL;
1284+ shift = *(int *) RTA_DATA(tb[TCA_TCINDEX_SHIFT-1]);
1285+ }
1286+ if (p->perfect && hash <= (mask >> shift))
1287+ return -EBUSY;
1288+ if (p->perfect && hash > p->alloc_hash)
1289+ return -EBUSY;
1290+ if (p->h && hash != p->alloc_hash)
1291+ return -EBUSY;
1292+ p->hash = hash;
1293+ p->mask = mask;
1294+ p->shift = shift;
1295+ if (tb[TCA_TCINDEX_FALL_THROUGH-1]) {
1296+ if (RTA_PAYLOAD(tb[TCA_TCINDEX_FALL_THROUGH-1]) < sizeof(int))
1297+ return -EINVAL;
1298+ p->fall_through =
1299+ *(int *) RTA_DATA(tb[TCA_TCINDEX_FALL_THROUGH-1]);
1300+ }
1301+ DPRINTK("classid/police %p/%p\n",tb[TCA_TCINDEX_CLASSID-1],
1302+ tb[TCA_TCINDEX_POLICE-1]);
1303+ if (!tb[TCA_TCINDEX_CLASSID-1] && !tb[TCA_TCINDEX_POLICE-1])
1304+ return 0;
1305+ if (!hash) {
1306+ if ((mask >> shift) < PERFECT_HASH_THRESHOLD) {
1307+ p->hash = (mask >> shift)+1;
1308+ } else {
1309+ p->hash = DEFAULT_HASH_SIZE;
1310+ }
1311+ }
1312+ if (!p->perfect && !p->h) {
1313+ p->alloc_hash = p->hash;
1314+ DPRINTK("hash %d mask %d\n",p->hash,p->mask);
1315+ if (p->hash > (mask >> shift)) {
1316+ p->perfect = kmalloc(p->hash*
1317+ sizeof(struct tcindex_filter_result),GFP_KERNEL);
1318+ if (!p->perfect)
1319+ return -ENOMEM;
1320+ memset(p->perfect, 0,
1321+ p->hash * sizeof(struct tcindex_filter_result));
1322+ } else {
1323+ p->h = kmalloc(p->hash*sizeof(struct tcindex_filter *),
1324+ GFP_KERNEL);
1325+ if (!p->h)
1326+ return -ENOMEM;
1327+ memset(p->h, 0, p->hash*sizeof(struct tcindex_filter *));
1328+ }
1329+ }
1330+ /*
1331+ * Note: this could be as restrictive as
1332+ * if (handle & ~(mask >> shift))
1333+ * but then, we'd fail handles that may become valid after some
1334+ * future mask change. While this is extremely unlikely to ever
1335+ * matter, the check below is safer (and also more
1336+ * backwards-compatible).
1337+ */
1338+ if (p->perfect && handle >= p->alloc_hash)
1339+ return -EINVAL;
1340+ if (p->perfect) {
1341+ r = p->perfect+handle;
1342+ } else {
1343+ r = lookup(p,handle);
1344+ DPRINTK("r=%p\n",r);
1345+ if (!r)
1346+ r = &new_filter_result;
1347+ }
1348+ DPRINTK("r=%p\n",r);
1349+ if (tb[TCA_TCINDEX_CLASSID-1]) {
1350+ unsigned long cl = cls_set_class(tp,&r->res.class,0);
1351+
1352+ if (cl)
1353+ tp->q->ops->cl_ops->unbind_tcf(tp->q,cl);
1354+ r->res.classid = *(__u32 *) RTA_DATA(tb[TCA_TCINDEX_CLASSID-1]);
1355+ r->res.class = tp->q->ops->cl_ops->bind_tcf(tp->q,base,
1356+ r->res.classid);
1357+ if (!r->res.class) {
1358+ r->res.classid = 0;
1359+ return -ENOENT;
1360+ }
1361+ }
1362+#ifdef CONFIG_NET_CLS_POLICE
1363+ {
1364+ struct tcf_police *police;
1365+
1366+ police = tb[TCA_TCINDEX_POLICE-1] ?
1367+ tcf_police_locate(tb[TCA_TCINDEX_POLICE-1],NULL) : NULL;
1368+ tcf_tree_lock(tp);
1369+ police = xchg(&r->police,police);
1370+ tcf_tree_unlock(tp);
1371+ tcf_police_release(police);
1372+ }
1373+#endif
1374+ if (r != &new_filter_result)
1375+ return 0;
1376+ f = kmalloc(sizeof(struct tcindex_filter),GFP_KERNEL);
1377+ if (!f)
1378+ return -ENOMEM;
1379+ f->key = handle;
1380+ f->result = new_filter_result;
1381+ f->next = NULL;
1382+ for (walk = p->h+(handle % p->hash); *walk; walk = &(*walk)->next)
1383+ /* nothing */;
1384+ wmb();
1385+ *walk = f;
1386+ return 0;
1387+}
1388+
1389+
1390+static void tcindex_walk(struct tcf_proto *tp, struct tcf_walker *walker)
1391+{
1392+ struct tcindex_data *p = PRIV(tp);
1393+ struct tcindex_filter *f,*next;
1394+ int i;
1395+
1396+ DPRINTK("tcindex_walk(tp %p,walker %p),p %p\n",tp,walker,p);
1397+ if (p->perfect) {
1398+ for (i = 0; i < p->hash; i++) {
1399+ if (!p->perfect[i].res.class)
1400+ continue;
1401+ if (walker->count >= walker->skip) {
1402+ if (walker->fn(tp,
1403+ (unsigned long) (p->perfect+i), walker)
1404+ < 0) {
1405+ walker->stop = 1;
1406+ return;
1407+ }
1408+ }
1409+ walker->count++;
1410+ }
1411+ }
1412+ if (!p->h)
1413+ return;
1414+ for (i = 0; i < p->hash; i++) {
1415+ for (f = p->h[i]; f; f = next) {
1416+ next = f->next;
1417+ if (walker->count >= walker->skip) {
1418+ if (walker->fn(tp,(unsigned long) &f->result,
1419+ walker) < 0) {
1420+ walker->stop = 1;
1421+ return;
1422+ }
1423+ }
1424+ walker->count++;
1425+ }
1426+ }
1427+}
1428+
1429+
1430+static int tcindex_destroy_element(struct tcf_proto *tp,
1431+ unsigned long arg, struct tcf_walker *walker)
1432+{
1433+ return tcindex_delete(tp,arg);
1434+}
1435+
1436+
1437+static void tcindex_destroy(struct tcf_proto *tp)
1438+{
1439+ struct tcindex_data *p = PRIV(tp);
1440+ struct tcf_walker walker;
1441+
1442+ DPRINTK("tcindex_destroy(tp %p),p %p\n",tp,p);
1443+ walker.count = 0;
1444+ walker.skip = 0;
1445+ walker.fn = &tcindex_destroy_element;
1446+ tcindex_walk(tp,&walker);
1447+ if (p->perfect)
1448+ kfree(p->perfect);
1449+ if (p->h)
1450+ kfree(p->h);
1451+ kfree(p);
1452+ tp->root = NULL;
1453+ MOD_DEC_USE_COUNT;
1454+}
1455+
1456+
1457+static int tcindex_dump(struct tcf_proto *tp, unsigned long fh,
1458+ struct sk_buff *skb, struct tcmsg *t)
1459+{
1460+ struct tcindex_data *p = PRIV(tp);
1461+ struct tcindex_filter_result *r = (struct tcindex_filter_result *) fh;
1462+ unsigned char *b = skb->tail;
1463+ struct rtattr *rta;
1464+
1465+ DPRINTK("tcindex_dump(tp %p,fh 0x%lx,skb %p,t %p),p %p,r %p,b %p\n",
1466+ tp,fh,skb,t,p,r,b);
1467+ DPRINTK("p->perfect %p p->h %p\n",p->perfect,p->h);
1468+ rta = (struct rtattr *) b;
1469+ RTA_PUT(skb,TCA_OPTIONS,0,NULL);
1470+ if (!fh) {
1471+ t->tcm_handle = ~0; /* whatever ... */
1472+ RTA_PUT(skb,TCA_TCINDEX_HASH,sizeof(p->hash),&p->hash);
1473+ RTA_PUT(skb,TCA_TCINDEX_MASK,sizeof(p->mask),&p->mask);
1474+ RTA_PUT(skb,TCA_TCINDEX_SHIFT,sizeof(p->shift),&p->shift);
1475+ RTA_PUT(skb,TCA_TCINDEX_FALL_THROUGH,sizeof(p->fall_through),
1476+ &p->fall_through);
1477+ } else {
1478+ if (p->perfect) {
1479+ t->tcm_handle = r-p->perfect;
1480+ } else {
1481+ struct tcindex_filter *f;
1482+ int i;
1483+
1484+ t->tcm_handle = 0;
1485+ for (i = 0; !t->tcm_handle && i < p->hash; i++) {
1486+ for (f = p->h[i]; !t->tcm_handle && f;
1487+ f = f->next) {
1488+ if (&f->result == r)
1489+ t->tcm_handle = f->key;
1490+ }
1491+ }
1492+ }
1493+ DPRINTK("handle = %d\n",t->tcm_handle);
1494+ if (r->res.class)
1495+ RTA_PUT(skb, TCA_TCINDEX_CLASSID, 4, &r->res.classid);
1496+#ifdef CONFIG_NET_CLS_POLICE
1497+ if (r->police) {
1498+ struct rtattr *p_rta = (struct rtattr *) skb->tail;
1499+
1500+ RTA_PUT(skb,TCA_TCINDEX_POLICE,0,NULL);
1501+ if (tcf_police_dump(skb,r->police) < 0)
1502+ goto rtattr_failure;
1503+ p_rta->rta_len = skb->tail-(u8 *) p_rta;
1504+ }
1505+#endif
1506+ }
1507+ rta->rta_len = skb->tail-b;
1508+ return skb->len;
1509+
1510+rtattr_failure:
1511+ skb_trim(skb, b - skb->data);
1512+ return -1;
1513+}
1514+
1515+struct tcf_proto_ops cls_tcindex_ops = {
1516+ NULL,
1517+ "tcindex",
1518+ tcindex_classify,
1519+ tcindex_init,
1520+ tcindex_destroy,
1521+
1522+ tcindex_get,
1523+ tcindex_put,
1524+ tcindex_change,
1525+ tcindex_delete,
1526+ tcindex_walk,
1527+ tcindex_dump
1528+};
1529+
1530+
1531+#ifdef MODULE
1532+int init_module(void)
1533+{
1534+ return register_tcf_proto_ops(&cls_tcindex_ops);
1535+}
1536+
1537+void cleanup_module(void)
1538+{
1539+ unregister_tcf_proto_ops(&cls_tcindex_ops);
1540+}
1541+#endif
1542+MODULE_LICENSE("GPL");
1543diff -urN ../v2.2.21/linux/net/sched/cls_u32.c linux/net/sched/cls_u32.c
1544--- ../v2.2.21/linux/net/sched/cls_u32.c Sat Aug 4 12:52:33 2001
1545+++ linux/net/sched/cls_u32.c Sun Mar 31 03:18:28 2002
1546@@ -52,8 +52,6 @@
1547 #include <net/sock.h>
1548 #include <net/pkt_sched.h>
1549
1550-#define BUG_TRAP(x) if (!(x)) { printk("Assertion (" #x ") failed at " __FILE__ "(%d):" __FUNCTION__ "\n", __LINE__); }
1551-
1552
1553 struct tc_u_knode
1554 {
1555@@ -164,7 +162,7 @@
1556 if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT)))
1557 goto next_ht;
1558
1559- if (n->sel.flags&(TC_U32_EAT|TC_U32_VAROFFSET)) {
1560+ if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) {
1561 off2 = n->sel.off + 3;
1562 if (n->sel.flags&TC_U32_VAROFFSET)
1563 off2 += ntohs(n->sel.offmask & *(u16*)(ptr+n->sel.offoff)) >>n->sel.offshift;
1564@@ -307,7 +305,7 @@
1565 {
1566 unsigned long cl;
1567
1568- if ((cl = cls_set_class(&n->res.class, 0)) != 0)
1569+ if ((cl = __cls_set_class(&n->res.class, 0)) != 0)
1570 tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
1571 #ifdef CONFIG_NET_CLS_POLICE
1572 tcf_police_release(n->police);
1573@@ -326,8 +324,9 @@
1574 if (ht) {
1575 for (kp = &ht->ht[TC_U32_HASH(key->handle)]; *kp; kp = &(*kp)->next) {
1576 if (*kp == key) {
1577+ tcf_tree_lock(tp);
1578 *kp = key->next;
1579- synchronize_bh();
1580+ tcf_tree_unlock(tp);
1581
1582 u32_destroy_key(tp, key);
1583 return 0;
1584@@ -346,7 +345,6 @@
1585 for (h=0; h<=ht->divisor; h++) {
1586 while ((n = ht->ht[h]) != NULL) {
1587 ht->ht[h] = n->next;
1588- synchronize_bh();
1589
1590 u32_destroy_key(tp, n);
1591 }
1592@@ -465,8 +463,9 @@
1593 ht_down->refcnt++;
1594 }
1595
1596+ sch_tree_lock(q);
1597 ht_down = xchg(&n->ht_down, ht_down);
1598- synchronize_bh();
1599+ sch_tree_unlock(q);
1600
1601 if (ht_down)
1602 ht_down->refcnt--;
1603@@ -475,7 +474,9 @@
1604 unsigned long cl;
1605
1606 n->res.classid = *(u32*)RTA_DATA(tb[TCA_U32_CLASSID-1]);
1607- cl = cls_set_class(&n->res.class, q->ops->cl_ops->bind_tcf(q, base, n->res.classid));
1608+ sch_tree_lock(q);
1609+ cl = __cls_set_class(&n->res.class, q->ops->cl_ops->bind_tcf(q, base, n->res.classid));
1610+ sch_tree_unlock(q);
1611 if (cl)
1612 q->ops->cl_ops->unbind_tcf(q, cl);
1613 }
1614@@ -483,8 +484,9 @@
1615 if (tb[TCA_U32_POLICE-1]) {
1616 struct tcf_police *police = tcf_police_locate(tb[TCA_U32_POLICE-1], est);
1617
1618+ sch_tree_lock(q);
1619 police = xchg(&n->police, police);
1620- synchronize_bh();
1621+ sch_tree_unlock(q);
1622
1623 tcf_police_release(police);
1624 }
1625@@ -633,7 +635,6 @@
1626 }
1627 }
1628
1629-#ifdef CONFIG_RTNETLINK
1630 static int u32_dump(struct tcf_proto *tp, unsigned long fh,
1631 struct sk_buff *skb, struct tcmsg *t)
1632 {
1633@@ -682,7 +683,8 @@
1634 rta->rta_len = skb->tail - b;
1635 #ifdef CONFIG_NET_CLS_POLICE
1636 if (TC_U32_KEY(n->handle) && n->police) {
1637- RTA_PUT(skb, TCA_STATS, sizeof(struct tc_stats), &n->police->stats);
1638+ if (qdisc_copy_stats(skb, &n->police->stats))
1639+ goto rtattr_failure;
1640 }
1641 #endif
1642 return skb->len;
1643@@ -691,7 +693,6 @@
1644 skb_trim(skb, b - skb->data);
1645 return -1;
1646 }
1647-#endif
1648
1649 struct tcf_proto_ops cls_u32_ops = {
1650 NULL,
1651@@ -705,11 +706,7 @@
1652 u32_change,
1653 u32_delete,
1654 u32_walk,
1655-#ifdef CONFIG_RTNETLINK
1656 u32_dump
1657-#else
1658- NULL
1659-#endif
1660 };
1661
1662 #ifdef MODULE
1663@@ -723,3 +720,4 @@
1664 unregister_tcf_proto_ops(&cls_u32_ops);
1665 }
1666 #endif
1667+MODULE_LICENSE("GPL");
1668diff -urN ../v2.2.21/linux/net/sched/police.c linux/net/sched/police.c
1669--- ../v2.2.21/linux/net/sched/police.c Sat Oct 21 12:10:47 2000
1670+++ linux/net/sched/police.c Wed Aug 7 23:28:12 2002
1671@@ -31,8 +31,6 @@
1672 #include <net/sock.h>
1673 #include <net/pkt_sched.h>
1674
1675-#define BUG_TRAP(x) if (!(x)) { printk("Assertion (" #x ") failed at " __FILE__ "(%d):" __FUNCTION__ "\n", __LINE__); }
1676-
1677 #define L2T(p,L) ((p)->R_tab->data[(L)>>(p)->R_tab->rate.cell_log])
1678 #define L2T_P(p,L) ((p)->P_tab->data[(L)>>(p)->P_tab->rate.cell_log])
1679
1680@@ -74,6 +72,7 @@
1681 for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->next) {
1682 if (*p1p == p) {
1683 *p1p = p->next;
1684+ synchronize_bh();
1685 #ifdef CONFIG_NET_ESTIMATOR
1686 qdisc_kill_estimator(&p->stats);
1687 #endif
1688@@ -145,7 +144,9 @@
1689 #endif
1690 h = tcf_police_hash(p->index);
1691 p->next = tcf_police_ht[h];
1692+ wmb();
1693 tcf_police_ht[h] = p;
1694+ synchronize_bh();
1695 return p;
1696
1697 failure:
1698diff -urN ../v2.2.21/linux/net/sched/sch_api.c linux/net/sched/sch_api.c
1699--- ../v2.2.21/linux/net/sched/sch_api.c Sat Oct 21 12:10:47 2000
1700+++ linux/net/sched/sch_api.c Wed Aug 7 23:27:29 2002
1701@@ -11,7 +11,10 @@
1702 * Fixes:
1703 *
1704 * Rani Assaf <rani@magic.metawire.com> :980802: JIFFIES and CPU clock sources are repaired.
1705+ * J Hadi Salim (hadi@nortelnetworks.com):981128: "Append" message
1706+ *
1707 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
1708+ * Jamal Hadi Salim <hadi@nortelnetworks.com>: 990501: ingress support
1709 */
1710
1711 #include <linux/config.h>
1712@@ -31,6 +34,7 @@
1713 #include <linux/init.h>
1714 #include <linux/proc_fs.h>
1715 #include <linux/kmod.h>
1716+#include <linux/firewall.h>
1717
1718 #include <net/sock.h>
1719 #include <net/pkt_sched.h>
1720@@ -40,8 +44,6 @@
1721 #include <asm/system.h>
1722 #include <asm/bitops.h>
1723
1724-#define BUG_TRAP(x) if (!(x)) { printk("Assertion (" #x ") failed at " __FILE__ "(%d):" __FUNCTION__ "\n", __LINE__); }
1725-
1726 #ifdef CONFIG_RTNETLINK
1727 static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, u32 clid,
1728 struct Qdisc *old, struct Qdisc *new);
1729@@ -95,9 +97,15 @@
1730
1731 ---enqueue
1732
1733- enqueue returns number of enqueued packets i.e. this number is 1,
1734- if packet was enqueued successfully and <1 if something (not
1735- necessary THIS packet) was dropped.
1736+ enqueue returns 0, if packet was enqueued successfully.
1737+ If packet (this one or another one) was dropped, it returns
1738+ not zero error code.
1739+ NET_XMIT_DROP - this packet dropped
1740+ Expected action: do not backoff, but wait until queue will clear.
1741+ NET_XMIT_CN - probably this packet enqueued, but another one dropped.
1742+ Expected action: backoff or ignore
1743+ NET_XMIT_POLICED - dropped by police.
1744+ Expected action: backoff or error to real-time apps.
1745
1746 Auxiliary routines:
1747
1748@@ -139,9 +147,11 @@
1749 {
1750 struct Qdisc_ops *q, **qp;
1751
1752- for (qp = &qdisc_base; (q=*qp)!=NULL; qp = &q->next)
1753- if (strcmp(qops->id, q->id) == 0)
1754+ for (qp = &qdisc_base; (q=*qp)!=NULL; qp = &q->next) {
1755+ if (strcmp(qops->id, q->id) == 0) {
1756 return -EEXIST;
1757+ }
1758+ }
1759
1760 if (qops->enqueue == NULL)
1761 qops->enqueue = noop_qdisc_ops.enqueue;
1762@@ -158,14 +168,17 @@
1763 int unregister_qdisc(struct Qdisc_ops *qops)
1764 {
1765 struct Qdisc_ops *q, **qp;
1766+ int err = -ENOENT;
1767+
1768 for (qp = &qdisc_base; (q=*qp)!=NULL; qp = &q->next)
1769 if (q == qops)
1770 break;
1771- if (!q)
1772- return -ENOENT;
1773- *qp = q->next;
1774- q->next = NULL;
1775- return 0;
1776+ if (q) {
1777+ *qp = q->next;
1778+ q->next = NULL;
1779+ err = 0;
1780+ }
1781+ return err;
1782 }
1783
1784 /* We know handle. Find qdisc among all qdisc's attached to device
1785@@ -192,6 +205,7 @@
1786 if (cops == NULL)
1787 return NULL;
1788 cl = cops->get(p, classid);
1789+
1790 if (cl == 0)
1791 return NULL;
1792 leaf = cops->leaf(p, cl);
1793@@ -203,15 +217,15 @@
1794
1795 struct Qdisc_ops *qdisc_lookup_ops(struct rtattr *kind)
1796 {
1797- struct Qdisc_ops *q;
1798+ struct Qdisc_ops *q = NULL;
1799
1800 if (kind) {
1801 for (q = qdisc_base; q; q = q->next) {
1802 if (rtattr_strcmp(kind, q->id) == 0)
1803- return q;
1804+ break;
1805 }
1806 }
1807- return NULL;
1808+ return q;
1809 }
1810
1811 static struct qdisc_rate_table *qdisc_rtab_list;
1812@@ -285,17 +299,32 @@
1813 dev_deactivate(dev);
1814
1815 start_bh_atomic();
1816- oqdisc = dev->qdisc_sleeping;
1817+ if (qdisc && qdisc->flags&TCQ_F_INGRES) {
1818+ oqdisc = dev->qdisc_ingress;
1819+ /* Prune old scheduler */
1820+ if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1) {
1821+ /* delete */
1822+ qdisc_reset(oqdisc);
1823+ dev->qdisc_ingress = NULL;
1824+ } else { /* new */
1825+ dev->qdisc_ingress = qdisc;
1826+ }
1827+
1828+ } else {
1829+
1830+ oqdisc = dev->qdisc_sleeping;
1831+
1832+ /* Prune old scheduler */
1833+ if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
1834+ qdisc_reset(oqdisc);
1835+
1836+ /* ... and graft new one */
1837+ if (qdisc == NULL)
1838+ qdisc = &noop_qdisc;
1839+ dev->qdisc_sleeping = qdisc;
1840+ dev->qdisc = &noop_qdisc;
1841+ }
1842
1843- /* Prune old scheduler */
1844- if (oqdisc && atomic_read(&oqdisc->refcnt) <= 1)
1845- qdisc_reset(oqdisc);
1846-
1847- /* ... and graft new one */
1848- if (qdisc == NULL)
1849- qdisc = &noop_qdisc;
1850- dev->qdisc_sleeping = qdisc;
1851- dev->qdisc = &noop_qdisc;
1852 end_bh_atomic();
1853
1854 if (dev->flags & IFF_UP)
1855@@ -315,9 +344,15 @@
1856 struct Qdisc *new, struct Qdisc **old)
1857 {
1858 int err = 0;
1859+ struct Qdisc *q = *old;
1860+
1861
1862- if (parent == NULL) {
1863- *old = dev_graft_qdisc(dev, new);
1864+ if (parent == NULL) {
1865+ if (q && q->flags&TCQ_F_INGRES) {
1866+ *old = dev_graft_qdisc(dev, q);
1867+ } else {
1868+ *old = dev_graft_qdisc(dev, new);
1869+ }
1870 } else {
1871 struct Qdisc_class_ops *cops = parent->ops->cl_ops;
1872
1873@@ -334,8 +369,6 @@
1874 return err;
1875 }
1876
1877-#ifdef CONFIG_RTNETLINK
1878-
1879 /*
1880 Allocate and initialize new qdisc.
1881
1882@@ -376,7 +409,7 @@
1883 goto err_out;
1884
1885 /* Grrr... Resolve race condition with module unload */
1886-
1887+
1888 err = -EINVAL;
1889 if (ops != qdisc_lookup_ops(kind))
1890 goto err_out;
1891@@ -384,6 +417,10 @@
1892 memset(sch, 0, size);
1893
1894 skb_queue_head_init(&sch->q);
1895+
1896+ if (handle == TC_H_INGRESS)
1897+ sch->flags |= TCQ_F_INGRES;
1898+
1899 sch->ops = ops;
1900 sch->enqueue = ops->enqueue;
1901 sch->dequeue = ops->dequeue;
1902@@ -395,7 +432,11 @@
1903 if (handle == 0)
1904 goto err_out;
1905 }
1906- sch->handle = handle;
1907+
1908+ if (handle == TC_H_INGRESS)
1909+ sch->handle =TC_H_MAKE(TC_H_INGRESS, 0);
1910+ else
1911+ sch->handle = handle;
1912
1913 if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS-1])) == 0) {
1914 sch->next = dev->qdisc_list;
1915@@ -493,12 +534,16 @@
1916
1917 if (clid) {
1918 if (clid != TC_H_ROOT) {
1919- if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
1920- return -ENOENT;
1921- q = qdisc_leaf(p, clid);
1922- } else
1923+ if (TC_H_MAJ(clid) != TC_H_MAJ(TC_H_INGRESS)) {
1924+ if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
1925+ return -ENOENT;
1926+ q = qdisc_leaf(p, clid);
1927+ } else { /* ingress */
1928+ q = dev->qdisc_ingress;
1929+ }
1930+ } else {
1931 q = dev->qdisc_sleeping;
1932-
1933+ }
1934 if (!q)
1935 return -ENOENT;
1936
1937@@ -521,7 +566,9 @@
1938 return err;
1939 if (q) {
1940 qdisc_notify(skb, n, clid, q, NULL);
1941+ sch_dev_queue_lock(dev);
1942 qdisc_destroy(q);
1943+ sch_dev_queue_unlock(dev);
1944 }
1945 } else {
1946 qdisc_notify(skb, n, clid, NULL, q);
1947@@ -548,9 +595,13 @@
1948
1949 if (clid) {
1950 if (clid != TC_H_ROOT) {
1951- if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
1952- return -ENOENT;
1953- q = qdisc_leaf(p, clid);
1954+ if (clid != TC_H_INGRESS) {
1955+ if ((p = qdisc_lookup(dev, TC_H_MAJ(clid))) == NULL)
1956+ return -ENOENT;
1957+ q = qdisc_leaf(p, clid);
1958+ } else { /*ingress */
1959+ q = dev->qdisc_ingress;
1960+ }
1961 } else {
1962 q = dev->qdisc_sleeping;
1963 }
1964@@ -628,7 +679,10 @@
1965 create_n_graft:
1966 if (!(n->nlmsg_flags&NLM_F_CREATE))
1967 return -ENOENT;
1968- q = qdisc_create(dev, tcm->tcm_handle, tca, &err);
1969+ if (clid == TC_H_INGRESS)
1970+ q = qdisc_create(dev, tcm->tcm_parent, tca, &err);
1971+ else
1972+ q = qdisc_create(dev, tcm->tcm_handle, tca, &err);
1973 if (q == NULL)
1974 return err;
1975
1976@@ -637,17 +691,36 @@
1977 struct Qdisc *old_q = NULL;
1978 err = qdisc_graft(dev, p, clid, q, &old_q);
1979 if (err) {
1980- if (q)
1981+ if (q) {
1982+ sch_dev_queue_lock(dev);
1983 qdisc_destroy(q);
1984+ sch_dev_queue_unlock(dev);
1985+ }
1986 return err;
1987 }
1988 qdisc_notify(skb, n, clid, old_q, q);
1989- if (old_q)
1990+ if (old_q) {
1991+ sch_dev_queue_lock(dev);
1992 qdisc_destroy(old_q);
1993+ sch_dev_queue_unlock(dev);
1994+ }
1995 }
1996 return 0;
1997 }
1998
1999+int qdisc_copy_stats(struct sk_buff *skb, struct tc_stats *st)
2000+{
2001+ start_bh_atomic();
2002+ RTA_PUT(skb, TCA_STATS, sizeof(*st), st);
2003+ end_bh_atomic();
2004+ return 0;
2005+
2006+rtattr_failure:
2007+ end_bh_atomic();
2008+ return -1;
2009+}
2010+
2011+
2012 static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
2013 u32 pid, u32 seq, unsigned flags, int event)
2014 {
2015@@ -667,7 +740,8 @@
2016 if (q->ops->dump && q->ops->dump(q, skb) < 0)
2017 goto rtattr_failure;
2018 q->stats.qlen = q->q.qlen;
2019- RTA_PUT(skb, TCA_STATS, sizeof(q->stats), &q->stats);
2020+ if (qdisc_copy_stats(skb, &q->stats))
2021+ goto rtattr_failure;
2022 nlh->nlmsg_len = skb->tail - b;
2023 return skb->len;
2024
2025@@ -723,8 +797,9 @@
2026 if (q_idx < s_q_idx)
2027 continue;
2028 if (tc_fill_qdisc(skb, q, 0, NETLINK_CB(cb->skb).pid,
2029- cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0)
2030+ cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWQDISC) <= 0) {
2031 goto done;
2032+ }
2033 }
2034 }
2035
2036@@ -956,6 +1031,13 @@
2037
2038 return skb->len;
2039 }
2040+
2041+#if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE)
2042+int call_in_ingress(struct sk_buff *skb)
2043+{
2044+ if (!skb->dev->qdisc_ingress) return FW_ACCEPT;
2045+ return skb->dev->qdisc_ingress->enqueue(skb,skb->dev->qdisc_ingress);
2046+}
2047 #endif
2048
2049 int psched_us_per_tick = 1;
2050@@ -967,8 +1049,9 @@
2051 {
2052 int len;
2053
2054- len = sprintf(buffer, "%08x %08x\n",
2055- psched_tick_per_us, psched_us_per_tick);
2056+ len = sprintf(buffer, "%08x %08x %08x %08x\n",
2057+ psched_tick_per_us, psched_us_per_tick,
2058+ 1000000, HZ);
2059
2060 len -= offset;
2061
2062@@ -1011,7 +1094,7 @@
2063 static void psched_tick(unsigned long);
2064
2065 static struct timer_list psched_timer =
2066- { NULL, NULL, 0, 0L, psched_tick };
2067+ { function: psched_tick };
2068
2069 static void psched_tick(unsigned long dummy)
2070 {
2071@@ -1022,7 +1105,7 @@
2072 psched_timer.expires = jiffies + 1*HZ;
2073 #else
2074 unsigned long now = jiffies;
2075- psched_time_base = ((u64)now)<<PSCHED_JSCALE;
2076+ psched_time_base += ((u64)(now-psched_time_mark))<<PSCHED_JSCALE;
2077 psched_time_mark = now;
2078 psched_timer.expires = now + 60*60*HZ;
2079 #endif
2080@@ -1072,9 +1155,7 @@
2081
2082 __initfunc(int pktsched_init(void))
2083 {
2084-#ifdef CONFIG_RTNETLINK
2085 struct rtnetlink_link *link_p;
2086-#endif
2087 #ifdef CONFIG_PROC_FS
2088 struct proc_dir_entry *ent;
2089 #endif
2090@@ -1090,7 +1171,6 @@
2091 #endif
2092 #endif
2093
2094-#ifdef CONFIG_RTNETLINK
2095 link_p = rtnetlink_links[PF_UNSPEC];
2096
2097 /* Setup rtnetlink links. It is made here to avoid
2098@@ -1107,11 +1187,10 @@
2099 link_p[RTM_GETTCLASS-RTM_BASE].doit = tc_ctl_tclass;
2100 link_p[RTM_GETTCLASS-RTM_BASE].dumpit = tc_dump_tclass;
2101 }
2102-#endif
2103
2104 #define INIT_QDISC(name) { \
2105 extern struct Qdisc_ops name##_qdisc_ops; \
2106- register_qdisc(&##name##_qdisc_ops); \
2107+ register_qdisc(& name##_qdisc_ops); \
2108 }
2109
2110 INIT_QDISC(pfifo);
2111@@ -1134,6 +1213,9 @@
2112 #endif
2113 #ifdef CONFIG_NET_SCH_GRED
2114 INIT_QDISC(gred);
2115+#endif
2116+#ifdef CONFIG_NET_SCH_INGRESS
2117+ INIT_QDISC(ingress);
2118 #endif
2119 #ifdef CONFIG_NET_SCH_DSMARK
2120 INIT_QDISC(dsmark);
2121diff -urN ../v2.2.21/linux/net/sched/sch_cbq.c linux/net/sched/sch_cbq.c
2122--- ../v2.2.21/linux/net/sched/sch_cbq.c Sat Oct 21 12:11:29 2000
2123+++ linux/net/sched/sch_cbq.c Thu Aug 1 23:43:46 2002
2124@@ -192,8 +192,6 @@
2125
2126 #define L2T(cl,len) ((cl)->R_tab->data[(len)>>(cl)->R_tab->rate.cell_log])
2127
2128-#define BUG_TRAP(x) if (!(x)) { printk("Assertion (" #x ") failed at " __FILE__ "(%d):" __FUNCTION__ "\n", __LINE__); }
2129-
2130
2131 static __inline__ unsigned cbq_hash(u32 h)
2132 {
2133@@ -284,6 +282,7 @@
2134 case TC_POLICE_SHOT:
2135 return NULL;
2136 default:
2137+ break;
2138 }
2139 #endif
2140 if (cl->level == 0)
2141@@ -397,6 +396,7 @@
2142 struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
2143 struct cbq_class *cl = cbq_classify(skb, sch);
2144 int len = skb->len;
2145+ int ret = NET_XMIT_POLICED;
2146
2147 #ifdef CONFIG_NET_CLS_POLICE
2148 q->rx_class = cl;
2149@@ -405,14 +405,14 @@
2150 #ifdef CONFIG_NET_CLS_POLICE
2151 cl->q->__parent = sch;
2152 #endif
2153- if (cl->q->enqueue(skb, cl->q) == 1) {
2154+ if ((ret = cl->q->enqueue(skb, cl->q)) == 0) {
2155 sch->q.qlen++;
2156 sch->stats.packets++;
2157 sch->stats.bytes+=len;
2158 cbq_mark_toplevel(q, cl);
2159 if (!cl->next_alive)
2160 cbq_activate_class(cl);
2161- return 1;
2162+ return 0;
2163 }
2164 }
2165
2166@@ -423,7 +423,7 @@
2167 cbq_mark_toplevel(q, cl);
2168 cl->stats.drops++;
2169 }
2170- return 0;
2171+ return ret;
2172 }
2173
2174 static int
2175@@ -431,11 +431,12 @@
2176 {
2177 struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
2178 struct cbq_class *cl;
2179+ int ret;
2180
2181 if ((cl = q->tx_class) == NULL) {
2182 kfree_skb(skb);
2183 sch->stats.drops++;
2184- return 0;
2185+ return NET_XMIT_CN;
2186 }
2187 q->tx_class = NULL;
2188
2189@@ -445,15 +446,15 @@
2190 q->rx_class = cl;
2191 cl->q->__parent = sch;
2192 #endif
2193- if (cl->q->ops->requeue(skb, cl->q) == 1) {
2194+ if ((ret = cl->q->ops->requeue(skb, cl->q)) == 0) {
2195 sch->q.qlen++;
2196 if (!cl->next_alive)
2197 cbq_activate_class(cl);
2198- return 1;
2199+ return 0;
2200 }
2201 sch->stats.drops++;
2202 cl->stats.drops++;
2203- return 0;
2204+ return ret;
2205 }
2206
2207 /* Overlimit actions */
2208@@ -597,8 +598,9 @@
2209 static void cbq_watchdog(unsigned long arg)
2210 {
2211 struct Qdisc *sch = (struct Qdisc*)arg;
2212+
2213 sch->flags &= ~TCQ_F_THROTTLED;
2214- qdisc_wakeup(sch->dev);
2215+ netif_schedule(sch->dev);
2216 }
2217
2218 static unsigned long cbq_undelay_prio(struct cbq_sched_data *q, int prio)
2219@@ -666,7 +668,7 @@
2220 }
2221
2222 sch->flags &= ~TCQ_F_THROTTLED;
2223- qdisc_wakeup(sch->dev);
2224+ netif_schedule(sch->dev);
2225 }
2226
2227
2228@@ -688,7 +690,7 @@
2229 q->rx_class = cl;
2230 cl->q->__parent = sch;
2231
2232- if (cl->q->enqueue(skb, cl->q) == 1) {
2233+ if (cl->q->enqueue(skb, cl->q) == 0) {
2234 sch->q.qlen++;
2235 sch->stats.packets++;
2236 sch->stats.bytes+=len;
2237@@ -877,7 +879,7 @@
2238
2239 /* Start round */
2240 do {
2241- struct cbq_class *borrow = NULL;
2242+ struct cbq_class *borrow = cl;
2243
2244 if (cl->q->q.qlen &&
2245 (borrow = cbq_under_limit(cl)) == NULL)
2246@@ -1052,16 +1054,11 @@
2247
2248 if (sch->q.qlen) {
2249 sch->stats.overlimits++;
2250- if (q->wd_expires && !sch->dev->tbusy) {
2251+ if (q->wd_expires && !netif_queue_stopped(sch->dev)) {
2252 long delay = PSCHED_US2JIFFIE(q->wd_expires);
2253 del_timer(&q->wd_timer);
2254 if (delay <= 0)
2255 delay = 1;
2256- if (delay > 10*HZ) {
2257- if (net_ratelimit())
2258- printk(KERN_DEBUG "CBQ delay %ld > 10sec\n", delay);
2259- delay = 10*HZ;
2260- }
2261 q->wd_timer.expires = jiffies + delay;
2262 add_timer(&q->wd_timer);
2263 sch->flags |= TCQ_F_THROTTLED;
2264@@ -1248,8 +1245,10 @@
2265
2266 cl = cl_head;
2267 do {
2268- if (cl->q->ops->drop && cl->q->ops->drop(cl->q))
2269+ if (cl->q->ops->drop && cl->q->ops->drop(cl->q)) {
2270+ sch->q.qlen--;
2271 return 1;
2272+ }
2273 } while ((cl = cl->next_alive) != cl_head);
2274 }
2275 return 0;
2276@@ -1457,8 +1456,6 @@
2277 return 0;
2278 }
2279
2280-#ifdef CONFIG_RTNETLINK
2281-
2282 static __inline__ int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
2283 {
2284 unsigned char *b = skb->tail;
2285@@ -1580,6 +1577,16 @@
2286 return 0;
2287 }
2288
2289+int cbq_copy_xstats(struct sk_buff *skb, struct tc_cbq_xstats *st)
2290+{
2291+ RTA_PUT(skb, TCA_XSTATS, sizeof(*st), st);
2292+ return 0;
2293+
2294+rtattr_failure:
2295+ return -1;
2296+}
2297+
2298+
2299 static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
2300 {
2301 struct cbq_sched_data *q = (struct cbq_sched_data*)sch->data;
2302@@ -1591,8 +1598,13 @@
2303 if (cbq_dump_attr(skb, &q->link) < 0)
2304 goto rtattr_failure;
2305 rta->rta_len = skb->tail - b;
2306+ sch_dev_queue_lock(sch->dev);
2307 q->link.xstats.avgidle = q->link.avgidle;
2308- RTA_PUT(skb, TCA_XSTATS, sizeof(q->link.xstats), &q->link.xstats);
2309+ if (cbq_copy_xstats(skb, &q->link.xstats)) {
2310+ sch_dev_queue_unlock(sch->dev);
2311+ goto rtattr_failure;
2312+ }
2313+ sch_dev_queue_unlock(sch->dev);
2314 return skb->len;
2315
2316 rtattr_failure:
2317@@ -1622,12 +1634,19 @@
2318 goto rtattr_failure;
2319 rta->rta_len = skb->tail - b;
2320 cl->stats.qlen = cl->q->q.qlen;
2321- RTA_PUT(skb, TCA_STATS, sizeof(cl->stats), &cl->stats);
2322+ if (qdisc_copy_stats(skb, &cl->stats))
2323+ goto rtattr_failure;
2324+ sch_dev_queue_lock(sch->dev);
2325 cl->xstats.avgidle = cl->avgidle;
2326 cl->xstats.undertime = 0;
2327 if (!PSCHED_IS_PASTPERFECT(cl->undertime))
2328 cl->xstats.undertime = PSCHED_TDIFF(cl->undertime, q->now);
2329- RTA_PUT(skb, TCA_XSTATS, sizeof(cl->xstats), &cl->xstats);
2330+ q->link.xstats.avgidle = q->link.avgidle;
2331+ if (cbq_copy_xstats(skb, &cl->xstats)) {
2332+ sch_dev_queue_unlock(sch->dev);
2333+ goto rtattr_failure;
2334+ }
2335+ sch_dev_queue_unlock(sch->dev);
2336
2337 return skb->len;
2338
2339@@ -1636,8 +1655,6 @@
2340 return -1;
2341 }
2342
2343-#endif
2344-
2345 static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
2346 struct Qdisc **old)
2347 {
2348@@ -1653,8 +1670,11 @@
2349 new->reshape_fail = cbq_reshape_fail;
2350 #endif
2351 }
2352- if ((*old = xchg(&cl->q, new)) != NULL)
2353- qdisc_reset(*old);
2354+ sch_tree_lock(sch);
2355+ *old = cl->q;
2356+ cl->q = new;
2357+ qdisc_reset(*old);
2358+ sch_tree_unlock(sch);
2359
2360 return 0;
2361 }
2362@@ -1718,9 +1738,13 @@
2363 }
2364
2365 for (h = 0; h < 16; h++) {
2366- for (cl = q->classes[h]; cl; cl = cl->next)
2367+ struct cbq_class *next;
2368+
2369+ for (cl = q->classes[h]; cl; cl = next) {
2370+ next = cl->next;
2371 if (cl != &q->link)
2372 cbq_destroy_class(cl);
2373+ }
2374 }
2375
2376 qdisc_put_rtab(q->link.R_tab);
2377@@ -1729,19 +1753,20 @@
2378
2379 static void cbq_put(struct Qdisc *sch, unsigned long arg)
2380 {
2381- struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
2382 struct cbq_class *cl = (struct cbq_class*)arg;
2383
2384- start_bh_atomic();
2385 if (--cl->refcnt == 0) {
2386 #ifdef CONFIG_NET_CLS_POLICE
2387+ struct cbq_sched_data *q = (struct cbq_sched_data *)sch->data;
2388+
2389+ sch_dev_queue_lock(sch->dev);
2390 if (q->rx_class == cl)
2391 q->rx_class = NULL;
2392+ sch_dev_queue_unlock(sch->dev);
2393 #endif
2394+
2395 cbq_destroy_class(cl);
2396 }
2397- end_bh_atomic();
2398- return;
2399 }
2400
2401 static int
2402@@ -1802,7 +1827,7 @@
2403 }
2404
2405 /* Change class parameters */
2406- start_bh_atomic();
2407+ sch_tree_lock(sch);
2408
2409 if (cl->next_alive != NULL)
2410 cbq_deactivate_class(cl);
2411@@ -1834,7 +1859,7 @@
2412 if (cl->q->q.qlen)
2413 cbq_activate_class(cl);
2414
2415- end_bh_atomic();
2416+ sch_tree_unlock(sch);
2417
2418 #ifdef CONFIG_NET_ESTIMATOR
2419 if (tca[TCA_RATE-1]) {
2420@@ -1901,7 +1926,7 @@
2421 cl->quantum = cl->allot;
2422 cl->weight = cl->R_tab->rate.rate;
2423
2424- start_bh_atomic();
2425+ sch_tree_lock(sch);
2426 cbq_link_class(cl);
2427 cl->borrow = cl->tparent;
2428 if (cl->tparent != &q->link)
2429@@ -1925,7 +1950,7 @@
2430 #endif
2431 if (tb[TCA_CBQ_FOPT-1])
2432 cbq_set_fopt(cl, RTA_DATA(tb[TCA_CBQ_FOPT-1]));
2433- end_bh_atomic();
2434+ sch_tree_unlock(sch);
2435
2436 #ifdef CONFIG_NET_ESTIMATOR
2437 if (tca[TCA_RATE-1])
2438@@ -1948,7 +1973,7 @@
2439 if (cl->filters || cl->children || cl == &q->link)
2440 return -EBUSY;
2441
2442- start_bh_atomic();
2443+ sch_tree_lock(sch);
2444
2445 if (cl->next_alive)
2446 cbq_deactivate_class(cl);
2447@@ -1970,12 +1995,11 @@
2448 cbq_sync_defmap(cl);
2449
2450 cbq_rmprio(q, cl);
2451+ sch_tree_unlock(sch);
2452
2453 if (--cl->refcnt == 0)
2454 cbq_destroy_class(cl);
2455
2456- end_bh_atomic();
2457-
2458 return 0;
2459 }
2460
2461@@ -2052,9 +2076,7 @@
2462 cbq_bind_filter,
2463 cbq_unbind_filter,
2464
2465-#ifdef CONFIG_RTNETLINK
2466 cbq_dump_class,
2467-#endif
2468 };
2469
2470 struct Qdisc_ops cbq_qdisc_ops =
2471@@ -2074,9 +2096,7 @@
2472 cbq_destroy,
2473 NULL /* cbq_change */,
2474
2475-#ifdef CONFIG_RTNETLINK
2476 cbq_dump,
2477-#endif
2478 };
2479
2480 #ifdef MODULE
2481@@ -2090,3 +2110,4 @@
2482 unregister_qdisc(&cbq_qdisc_ops);
2483 }
2484 #endif
2485+MODULE_LICENSE("GPL");
2486diff -urN ../v2.2.21/linux/net/sched/sch_csz.c linux/net/sched/sch_csz.c
2487--- ../v2.2.21/linux/net/sched/sch_csz.c Sat Oct 21 12:10:47 2000
2488+++ linux/net/sched/sch_csz.c Thu Aug 1 23:53:01 2002
2489@@ -477,7 +477,7 @@
2490 if (this->q.qlen >= this->limit || this->L_tab == NULL) {
2491 sch->stats.drops++;
2492 kfree_skb(skb);
2493- return 0;
2494+ return NET_XMIT_DROP;
2495 }
2496
2497 R = csz_update(sch);
2498@@ -505,7 +505,7 @@
2499 sch->q.qlen++;
2500 sch->stats.bytes += skb->len;
2501 sch->stats.packets++;
2502- return 1;
2503+ return 0;
2504 }
2505
2506 static __inline__ struct sk_buff *
2507@@ -795,7 +795,6 @@
2508 return 0;
2509 }
2510
2511-#ifdef CONFIG_RTNETLINK
2512 static int csz_dump(struct Qdisc *sch, struct sk_buff *skb)
2513 {
2514 struct csz_sched_data *q = (struct csz_sched_data *)sch->data;
2515@@ -817,8 +816,6 @@
2516 skb_trim(skb, b - skb->data);
2517 return -1;
2518 }
2519-#endif
2520-
2521
2522 static int csz_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,
2523 struct Qdisc **old)
2524@@ -885,7 +882,7 @@
2525
2526 a = &q->flow[cl];
2527
2528- start_bh_atomic();
2529+ sch_dev_queue_lock(sch->dev);
2530 #if 0
2531 a->rate_log = copt->rate_log;
2532 #endif
2533@@ -899,7 +896,7 @@
2534 if (tb[TCA_CSZ_RTAB-1])
2535 memcpy(a->L_tab, RTA_DATA(tb[TCA_CSZ_RTAB-1]), 1024);
2536
2537- end_bh_atomic();
2538+ sch_dev_queue_unlock(sch->dev);
2539 return 0;
2540 }
2541 /* NI */
2542@@ -920,19 +917,18 @@
2543
2544 a = &q->flow[cl];
2545
2546- start_bh_atomic();
2547+ sch_dev_queue_lock(sch->dev);
2548 a->fprev->fnext = a->fnext;
2549 a->fnext->fprev = a->fprev;
2550 a->sprev->snext = a->snext;
2551 a->snext->sprev = a->sprev;
2552 a->start = a->finish = 0;
2553 kfree(xchg(&q->flow[cl].L_tab, NULL));
2554- end_bh_atomic();
2555+ sch_dev_queue_unlock(sch->dev);
2556
2557 return 0;
2558 }
2559
2560-#ifdef CONFIG_RTNETLINK
2561 static int csz_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, struct tcmsg *tcm)
2562 {
2563 struct csz_sched_data *q = (struct csz_sched_data *)sch->data;
2564@@ -978,7 +974,6 @@
2565 skb_trim(skb, b - skb->data);
2566 return -1;
2567 }
2568-#endif
2569
2570 static void csz_walk(struct Qdisc *sch, struct qdisc_walker *arg)
2571 {
2572@@ -1030,9 +1025,7 @@
2573 csz_bind,
2574 csz_put,
2575
2576-#ifdef CONFIG_RTNETLINK
2577 csz_dump_class,
2578-#endif
2579 };
2580
2581 struct Qdisc_ops csz_qdisc_ops =
2582@@ -1052,9 +1045,7 @@
2583 csz_destroy,
2584 NULL /* csz_change */,
2585
2586-#ifdef CONFIG_RTNETLINK
2587 csz_dump,
2588-#endif
2589 };
2590
2591
2592@@ -1069,3 +1060,4 @@
2593 unregister_qdisc(&csz_qdisc_ops);
2594 }
2595 #endif
2596+MODULE_LICENSE("GPL");
2597diff -urN ../v2.2.21/linux/net/sched/sch_dsmark.c linux/net/sched/sch_dsmark.c
2598--- ../v2.2.21/linux/net/sched/sch_dsmark.c Thu Jan 1 00:00:00 1970
2599+++ linux/net/sched/sch_dsmark.c Sun Mar 31 03:18:30 2002
2600@@ -0,0 +1,484 @@
2601+/* net/sched/sch_dsmark.c - Differentiated Services field marker */
2602+
2603+/* Written 1998-2000 by Werner Almesberger, EPFL ICA */
2604+
2605+
2606+#include <linux/config.h>
2607+#include <linux/module.h>
2608+#include <linux/types.h>
2609+#include <linux/string.h>
2610+#include <linux/errno.h>
2611+#include <linux/skbuff.h>
2612+#include <linux/netdevice.h> /* for pkt_sched */
2613+#include <linux/rtnetlink.h>
2614+#include <net/pkt_sched.h>
2615+#include <net/dsfield.h>
2616+#include <asm/byteorder.h>
2617+
2618+
2619+#if 1 /* control */
2620+#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
2621+#else
2622+#define DPRINTK(format,args...)
2623+#endif
2624+
2625+#if 0 /* data */
2626+#define D2PRINTK(format,args...) printk(KERN_DEBUG format,##args)
2627+#else
2628+#define D2PRINTK(format,args...)
2629+#endif
2630+
2631+
2632+#define PRIV(sch) ((struct dsmark_qdisc_data *) (sch)->data)
2633+
2634+
2635+/*
2636+ * classid class marking
2637+ * ------- ----- -------
2638+ * n/a 0 n/a
2639+ * x:0 1 use entry [0]
2640+ * ... ... ...
2641+ * x:y y>0 y+1 use entry [y]
2642+ * ... ... ...
2643+ * x:indices-1 indices use entry [indices-1]
2644+ * ... ... ...
2645+ * x:y y+1 use entry [y & (indices-1)]
2646+ * ... ... ...
2647+ * 0xffff 0x10000 use entry [indices-1]
2648+ */
2649+
2650+
2651+#define NO_DEFAULT_INDEX (1 << 16)
2652+
2653+struct dsmark_qdisc_data {
2654+ struct Qdisc *q;
2655+ struct tcf_proto *filter_list;
2656+ __u8 *mask; /* "owns" the array */
2657+ __u8 *value;
2658+ __u16 indices;
2659+ __u32 default_index; /* index range is 0...0xffff */
2660+ int set_tc_index;
2661+};
2662+
2663+
2664+/* ------------------------- Class/flow operations ------------------------- */
2665+
2666+
2667+static int dsmark_graft(struct Qdisc *sch,unsigned long arg,
2668+ struct Qdisc *new,struct Qdisc **old)
2669+{
2670+ struct dsmark_qdisc_data *p = PRIV(sch);
2671+
2672+ DPRINTK("dsmark_graft(sch %p,[qdisc %p],new %p,old %p)\n",sch,p,new,
2673+ old);
2674+ if (!new)
2675+ new = &noop_qdisc;
2676+ sch_tree_lock(sch);
2677+ *old = xchg(&p->q,new);
2678+ if (*old)
2679+ qdisc_reset(*old);
2680+ sch_tree_unlock(sch); /* @@@ move up ? */
2681+ return 0;
2682+}
2683+
2684+
2685+static struct Qdisc *dsmark_leaf(struct Qdisc *sch, unsigned long arg)
2686+{
2687+ struct dsmark_qdisc_data *p = PRIV(sch);
2688+
2689+ return p->q;
2690+}
2691+
2692+
2693+static unsigned long dsmark_get(struct Qdisc *sch,u32 classid)
2694+{
2695+ struct dsmark_qdisc_data *p __attribute__((unused)) = PRIV(sch);
2696+
2697+ DPRINTK("dsmark_get(sch %p,[qdisc %p],classid %x)\n",sch,p,classid);
2698+ return TC_H_MIN(classid)+1;
2699+}
2700+
2701+
2702+static unsigned long dsmark_bind_filter(struct Qdisc *sch,
2703+ unsigned long parent, u32 classid)
2704+{
2705+ return dsmark_get(sch,classid);
2706+}
2707+
2708+
2709+static void dsmark_put(struct Qdisc *sch, unsigned long cl)
2710+{
2711+}
2712+
2713+
2714+static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
2715+ struct rtattr **tca, unsigned long *arg)
2716+{
2717+ struct dsmark_qdisc_data *p = PRIV(sch);
2718+ struct rtattr *opt = tca[TCA_OPTIONS-1];
2719+ struct rtattr *tb[TCA_DSMARK_MAX];
2720+
2721+ DPRINTK("dsmark_change(sch %p,[qdisc %p],classid %x,parent %x),"
2722+ "arg 0x%lx\n",sch,p,classid,parent,*arg);
2723+ if (*arg > p->indices)
2724+ return -ENOENT;
2725+ if (!opt || rtattr_parse(tb, TCA_DSMARK_MAX, RTA_DATA(opt),
2726+ RTA_PAYLOAD(opt)))
2727+ return -EINVAL;
2728+ if (tb[TCA_DSMARK_MASK-1]) {
2729+ if (!RTA_PAYLOAD(tb[TCA_DSMARK_MASK-1]))
2730+ return -EINVAL;
2731+ p->mask[*arg-1] = *(__u8 *) RTA_DATA(tb[TCA_DSMARK_MASK-1]);
2732+ }
2733+ if (tb[TCA_DSMARK_VALUE-1]) {
2734+ if (!RTA_PAYLOAD(tb[TCA_DSMARK_VALUE-1]))
2735+ return -EINVAL;
2736+ p->value[*arg-1] = *(__u8 *) RTA_DATA(tb[TCA_DSMARK_VALUE-1]);
2737+ }
2738+ return 0;
2739+}
2740+
2741+
2742+static int dsmark_delete(struct Qdisc *sch,unsigned long arg)
2743+{
2744+ struct dsmark_qdisc_data *p = PRIV(sch);
2745+
2746+ if (!arg || arg > p->indices)
2747+ return -EINVAL;
2748+ p->mask[arg-1] = 0xff;
2749+ p->value[arg-1] = 0;
2750+ return 0;
2751+}
2752+
2753+
2754+static void dsmark_walk(struct Qdisc *sch,struct qdisc_walker *walker)
2755+{
2756+ struct dsmark_qdisc_data *p = PRIV(sch);
2757+ int i;
2758+
2759+ DPRINTK("dsmark_walk(sch %p,[qdisc %p],walker %p)\n",sch,p,walker);
2760+ if (walker->stop)
2761+ return;
2762+ for (i = 0; i < p->indices; i++) {
2763+ if (p->mask[i] == 0xff && !p->value[i])
2764+ continue;
2765+ if (walker->count >= walker->skip) {
2766+ if (walker->fn(sch, i+1, walker) < 0) {
2767+ walker->stop = 1;
2768+ break;
2769+ }
2770+ }
2771+ walker->count++;
2772+ }
2773+}
2774+
2775+
2776+static struct tcf_proto **dsmark_find_tcf(struct Qdisc *sch,unsigned long cl)
2777+{
2778+ struct dsmark_qdisc_data *p = PRIV(sch);
2779+
2780+ return &p->filter_list;
2781+}
2782+
2783+
2784+/* --------------------------- Qdisc operations ---------------------------- */
2785+
2786+
2787+static int dsmark_enqueue(struct sk_buff *skb,struct Qdisc *sch)
2788+{
2789+ struct dsmark_qdisc_data *p = PRIV(sch);
2790+ struct tcf_result res;
2791+ int result;
2792+ int ret = NET_XMIT_POLICED;
2793+
2794+ D2PRINTK("dsmark_enqueue(skb %p,sch %p,[qdisc %p])\n",skb,sch,p);
2795+ if (p->set_tc_index) {
2796+ switch (skb->protocol) {
2797+ case __constant_htons(ETH_P_IP):
2798+ skb->tc_index = ipv4_get_dsfield(skb->nh.iph);
2799+ break;
2800+ case __constant_htons(ETH_P_IPV6):
2801+ skb->tc_index = ipv6_get_dsfield(skb->nh.ipv6h);
2802+ break;
2803+ default:
2804+ skb->tc_index = 0;
2805+ break;
2806+ };
2807+ }
2808+ result = TC_POLICE_OK; /* be nice to gcc */
2809+ if (TC_H_MAJ(skb->priority) == sch->handle) {
2810+ skb->tc_index = TC_H_MIN(skb->priority);
2811+ } else {
2812+ result = tc_classify(skb,p->filter_list,&res);
2813+ D2PRINTK("result %d class 0x%04x\n",result,res.classid);
2814+ switch (result) {
2815+#ifdef CONFIG_NET_CLS_POLICE
2816+ case TC_POLICE_SHOT:
2817+ kfree_skb(skb);
2818+ break;
2819+#if 0
2820+ case TC_POLICE_RECLASSIFY:
2821+ /* FIXME: what to do here ??? */
2822+#endif
2823+#endif
2824+ case TC_POLICE_OK:
2825+ skb->tc_index = TC_H_MIN(res.classid);
2826+ break;
2827+ case TC_POLICE_UNSPEC:
2828+ /* fall through */
2829+ default:
2830+ if (p->default_index != NO_DEFAULT_INDEX)
2831+ skb->tc_index = p->default_index;
2832+ break;
2833+ };
2834+ }
2835+ if (
2836+#ifdef CONFIG_NET_CLS_POLICE
2837+ result == TC_POLICE_SHOT ||
2838+#endif
2839+
2840+ ((ret = p->q->enqueue(skb,p->q)) != 0)) {
2841+ sch->stats.drops++;
2842+ return ret;
2843+ }
2844+ sch->stats.bytes += skb->len;
2845+ sch->stats.packets++;
2846+ sch->q.qlen++;
2847+ return ret;
2848+}
2849+
2850+
2851+static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
2852+{
2853+ struct dsmark_qdisc_data *p = PRIV(sch);
2854+ struct sk_buff *skb;
2855+ int index;
2856+
2857+ D2PRINTK("dsmark_dequeue(sch %p,[qdisc %p])\n",sch,p);
2858+ skb = p->q->ops->dequeue(p->q);
2859+ if (!skb)
2860+ return NULL;
2861+ sch->q.qlen--;
2862+ index = skb->tc_index & (p->indices-1);
2863+ D2PRINTK("index %d->%d\n",skb->tc_index,index);
2864+ switch (skb->protocol) {
2865+ case __constant_htons(ETH_P_IP):
2866+ ipv4_change_dsfield(skb->nh.iph,
2867+ p->mask[index],p->value[index]);
2868+ break;
2869+ case __constant_htons(ETH_P_IPV6):
2870+ ipv6_change_dsfield(skb->nh.ipv6h,
2871+ p->mask[index],p->value[index]);
2872+ break;
2873+ default:
2874+ /*
2875+ * Only complain if a change was actually attempted.
2876+ * This way, we can send non-IP traffic through dsmark
2877+ * and don't need yet another qdisc as a bypass.
2878+ */
2879+ if (p->mask[index] != 0xff || p->value[index])
2880+ printk(KERN_WARNING "dsmark_dequeue: "
2881+ "unsupported protocol %d\n",
2882+ htons(skb->protocol));
2883+ break;
2884+ };
2885+ return skb;
2886+}
2887+
2888+
2889+static int dsmark_requeue(struct sk_buff *skb,struct Qdisc *sch)
2890+{
2891+ int ret;
2892+ struct dsmark_qdisc_data *p = PRIV(sch);
2893+
2894+ D2PRINTK("dsmark_requeue(skb %p,sch %p,[qdisc %p])\n",skb,sch,p);
2895+ if ((ret = p->q->ops->requeue(skb, p->q)) == 0) {
2896+ sch->q.qlen++;
2897+ return 0;
2898+ }
2899+ sch->stats.drops++;
2900+ return ret;
2901+}
2902+
2903+
2904+static int dsmark_drop(struct Qdisc *sch)
2905+{
2906+ struct dsmark_qdisc_data *p = PRIV(sch);
2907+
2908+ DPRINTK("dsmark_reset(sch %p,[qdisc %p])\n",sch,p);
2909+ if (!p->q->ops->drop)
2910+ return 0;
2911+ if (!p->q->ops->drop(p->q))
2912+ return 0;
2913+ sch->q.qlen--;
2914+ return 1;
2915+}
2916+
2917+
2918+int dsmark_init(struct Qdisc *sch,struct rtattr *opt)
2919+{
2920+ struct dsmark_qdisc_data *p = PRIV(sch);
2921+ struct rtattr *tb[TCA_DSMARK_MAX];
2922+ __u16 tmp;
2923+
2924+ DPRINTK("dsmark_init(sch %p,[qdisc %p],opt %p)\n",sch,p,opt);
2925+ if (rtattr_parse(tb,TCA_DSMARK_MAX,RTA_DATA(opt),RTA_PAYLOAD(opt)) < 0 ||
2926+ !tb[TCA_DSMARK_INDICES-1] ||
2927+ RTA_PAYLOAD(tb[TCA_DSMARK_INDICES-1]) < sizeof(__u16))
2928+ return -EINVAL;
2929+ memset(p,0,sizeof(*p));
2930+ p->filter_list = NULL;
2931+ p->indices = *(__u16 *) RTA_DATA(tb[TCA_DSMARK_INDICES-1]);
2932+ if (!p->indices)
2933+ return -EINVAL;
2934+ for (tmp = p->indices; tmp != 1; tmp >>= 1) {
2935+ if (tmp & 1)
2936+ return -EINVAL;
2937+ }
2938+ p->default_index = NO_DEFAULT_INDEX;
2939+ if (tb[TCA_DSMARK_DEFAULT_INDEX-1]) {
2940+ if (RTA_PAYLOAD(tb[TCA_DSMARK_DEFAULT_INDEX-1]) < sizeof(__u16))
2941+ return -EINVAL;
2942+ p->default_index =
2943+ *(__u16 *) RTA_DATA(tb[TCA_DSMARK_DEFAULT_INDEX-1]);
2944+ }
2945+ p->set_tc_index = !!tb[TCA_DSMARK_SET_TC_INDEX-1];
2946+ p->mask = kmalloc(p->indices*2,GFP_KERNEL);
2947+ if (!p->mask)
2948+ return -ENOMEM;
2949+ p->value = p->mask+p->indices;
2950+ memset(p->mask,0xff,p->indices);
2951+ memset(p->value,0,p->indices);
2952+ if (!(p->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops)))
2953+ p->q = &noop_qdisc;
2954+ DPRINTK("dsmark_init: qdisc %p\n",&p->q);
2955+ MOD_INC_USE_COUNT;
2956+ return 0;
2957+}
2958+
2959+
2960+static void dsmark_reset(struct Qdisc *sch)
2961+{
2962+ struct dsmark_qdisc_data *p = PRIV(sch);
2963+
2964+ DPRINTK("dsmark_reset(sch %p,[qdisc %p])\n",sch,p);
2965+ qdisc_reset(p->q);
2966+ sch->q.qlen = 0;
2967+}
2968+
2969+
2970+static void dsmark_destroy(struct Qdisc *sch)
2971+{
2972+ struct dsmark_qdisc_data *p = PRIV(sch);
2973+ struct tcf_proto *tp;
2974+
2975+ DPRINTK("dsmark_destroy(sch %p,[qdisc %p])\n",sch,p);
2976+ while (p->filter_list) {
2977+ tp = p->filter_list;
2978+ p->filter_list = tp->next;
2979+ tp->ops->destroy(tp);
2980+ }
2981+ qdisc_destroy(p->q);
2982+ p->q = &noop_qdisc;
2983+ kfree(p->mask);
2984+ MOD_DEC_USE_COUNT;
2985+}
2986+
2987+
2988+static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
2989+ struct sk_buff *skb, struct tcmsg *tcm)
2990+{
2991+ struct dsmark_qdisc_data *p = PRIV(sch);
2992+ unsigned char *b = skb->tail;
2993+ struct rtattr *rta;
2994+
2995+ DPRINTK("dsmark_dump_class(sch %p,[qdisc %p],class %ld\n",sch,p,cl);
2996+ if (!cl || cl > p->indices)
2997+ return -EINVAL;
2998+ tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle),cl-1);
2999+ rta = (struct rtattr *) b;
3000+ RTA_PUT(skb,TCA_OPTIONS,0,NULL);
3001+ RTA_PUT(skb,TCA_DSMARK_MASK,1,&p->mask[cl-1]);
3002+ RTA_PUT(skb,TCA_DSMARK_VALUE,1,&p->value[cl-1]);
3003+ rta->rta_len = skb->tail-b;
3004+ return skb->len;
3005+
3006+rtattr_failure:
3007+ skb_trim(skb,b-skb->data);
3008+ return -1;
3009+}
3010+
3011+static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
3012+{
3013+ struct dsmark_qdisc_data *p = PRIV(sch);
3014+ unsigned char *b = skb->tail;
3015+ struct rtattr *rta;
3016+
3017+ rta = (struct rtattr *) b;
3018+ RTA_PUT(skb,TCA_OPTIONS,0,NULL);
3019+ RTA_PUT(skb,TCA_DSMARK_INDICES,sizeof(__u16),&p->indices);
3020+ if (p->default_index != NO_DEFAULT_INDEX) {
3021+ __u16 tmp = p->default_index;
3022+
3023+ RTA_PUT(skb,TCA_DSMARK_DEFAULT_INDEX, sizeof(__u16), &tmp);
3024+ }
3025+ if (p->set_tc_index)
3026+ RTA_PUT(skb, TCA_DSMARK_SET_TC_INDEX, 0, NULL);
3027+ rta->rta_len = skb->tail-b;
3028+ return skb->len;
3029+
3030+rtattr_failure:
3031+ skb_trim(skb,b-skb->data);
3032+ return -1;
3033+}
3034+
3035+static struct Qdisc_class_ops dsmark_class_ops =
3036+{
3037+ dsmark_graft, /* graft */
3038+ dsmark_leaf, /* leaf */
3039+ dsmark_get, /* get */
3040+ dsmark_put, /* put */
3041+ dsmark_change, /* change */
3042+ dsmark_delete, /* delete */
3043+ dsmark_walk, /* walk */
3044+
3045+ dsmark_find_tcf, /* tcf_chain */
3046+ dsmark_bind_filter, /* bind_tcf */
3047+ dsmark_put, /* unbind_tcf */
3048+
3049+ dsmark_dump_class, /* dump */
3050+};
3051+
3052+struct Qdisc_ops dsmark_qdisc_ops =
3053+{
3054+ NULL, /* next */
3055+ &dsmark_class_ops, /* cl_ops */
3056+ "dsmark",
3057+ sizeof(struct dsmark_qdisc_data),
3058+
3059+ dsmark_enqueue, /* enqueue */
3060+ dsmark_dequeue, /* dequeue */
3061+ dsmark_requeue, /* requeue */
3062+ dsmark_drop, /* drop */
3063+
3064+ dsmark_init, /* init */
3065+ dsmark_reset, /* reset */
3066+ dsmark_destroy, /* destroy */
3067+ NULL, /* change */
3068+
3069+ dsmark_dump /* dump */
3070+};
3071+
3072+#ifdef MODULE
3073+int init_module(void)
3074+{
3075+ return register_qdisc(&dsmark_qdisc_ops);
3076+}
3077+
3078+
3079+void cleanup_module(void)
3080+{
3081+ unregister_qdisc(&dsmark_qdisc_ops);
3082+}
3083+#endif
3084+MODULE_LICENSE("GPL");
3085diff -urN ../v2.2.21/linux/net/sched/sch_fifo.c linux/net/sched/sch_fifo.c
3086--- ../v2.2.21/linux/net/sched/sch_fifo.c Sat Oct 21 12:10:47 2000
3087+++ linux/net/sched/sch_fifo.c Sun Mar 31 03:18:30 2002
3088@@ -51,14 +51,14 @@
3089 sch->stats.backlog += skb->len;
3090 sch->stats.bytes += skb->len;
3091 sch->stats.packets++;
3092- return 1;
3093+ return 0;
3094 }
3095 sch->stats.drops++;
3096 #ifdef CONFIG_NET_CLS_POLICE
3097 if (sch->reshape_fail==NULL || sch->reshape_fail(skb, sch))
3098 #endif
3099 kfree_skb(skb);
3100- return 0;
3101+ return NET_XMIT_DROP;
3102 }
3103
3104 static int
3105@@ -66,7 +66,7 @@
3106 {
3107 __skb_queue_head(&sch->q, skb);
3108 sch->stats.backlog += skb->len;
3109- return 1;
3110+ return 0;
3111 }
3112
3113 static struct sk_buff *
3114@@ -110,21 +110,21 @@
3115 __skb_queue_tail(&sch->q, skb);
3116 sch->stats.bytes += skb->len;
3117 sch->stats.packets++;
3118- return 1;
3119+ return 0;
3120 }
3121 sch->stats.drops++;
3122 #ifdef CONFIG_NET_CLS_POLICE
3123 if (sch->reshape_fail==NULL || sch->reshape_fail(skb, sch))
3124 #endif
3125 kfree_skb(skb);
3126- return 0;
3127+ return NET_XMIT_DROP;
3128 }
3129
3130 static int
3131 pfifo_requeue(struct sk_buff *skb, struct Qdisc* sch)
3132 {
3133 __skb_queue_head(&sch->q, skb);
3134- return 1;
3135+ return 0;
3136 }
3137
3138
3139@@ -152,7 +152,6 @@
3140 return 0;
3141 }
3142
3143-#ifdef CONFIG_RTNETLINK
3144 static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
3145 {
3146 struct fifo_sched_data *q = (void*)sch->data;
3147@@ -168,7 +167,6 @@
3148 skb_trim(skb, b - skb->data);
3149 return -1;
3150 }
3151-#endif
3152
3153 struct Qdisc_ops pfifo_qdisc_ops =
3154 {
3155@@ -187,9 +185,7 @@
3156 NULL,
3157 fifo_init,
3158
3159-#ifdef CONFIG_RTNETLINK
3160 fifo_dump,
3161-#endif
3162 };
3163
3164 struct Qdisc_ops bfifo_qdisc_ops =
3165@@ -208,7 +204,5 @@
3166 fifo_reset,
3167 NULL,
3168 fifo_init,
3169-#ifdef CONFIG_RTNETLINK
3170 fifo_dump,
3171-#endif
3172 };
3173diff -urN ../v2.2.21/linux/net/sched/sch_generic.c linux/net/sched/sch_generic.c
3174--- ../v2.2.21/linux/net/sched/sch_generic.c Sat Oct 21 12:11:45 2000
3175+++ linux/net/sched/sch_generic.c Wed Aug 7 23:26:44 2002
3176@@ -7,6 +7,8 @@
3177 * 2 of the License, or (at your option) any later version.
3178 *
3179 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
3180+ * Jamal Hadi Salim, <hadi@nortelnetworks.com>
3181+ * - Ingress support
3182 */
3183
3184 #include <asm/uaccess.h>
3185@@ -30,8 +32,6 @@
3186 #include <net/sock.h>
3187 #include <net/pkt_sched.h>
3188
3189-#define BUG_TRAP(x) if (!(x)) { printk("Assertion (" #x ") failed at " __FILE__ "(%d):" __FUNCTION__ "\n", __LINE__); }
3190-
3191 /* Main transmission queue. */
3192
3193 struct Qdisc_head qdisc_head = { &qdisc_head };
3194@@ -149,7 +149,7 @@
3195 noop_enqueue(struct sk_buff *skb, struct Qdisc * qdisc)
3196 {
3197 kfree_skb(skb);
3198- return 0;
3199+ return NET_XMIT_CN;
3200 }
3201
3202 static struct sk_buff *
3203@@ -164,7 +164,7 @@
3204 if (net_ratelimit())
3205 printk(KERN_DEBUG "%s deferred output. It is buggy.\n", skb->dev->name);
3206 kfree_skb(skb);
3207- return 0;
3208+ return NET_XMIT_CN;
3209 }
3210
3211 struct Qdisc_ops noop_qdisc_ops =
3212@@ -206,7 +206,7 @@
3213 {
3214 { NULL },
3215 NULL,
3216- NULL,
3217+ noop_dequeue,
3218 TCQ_F_BUILTIN,
3219 &noqueue_qdisc_ops,
3220 };
3221@@ -227,14 +227,14 @@
3222 list = ((struct sk_buff_head*)qdisc->data) +
3223 prio2band[skb->priority&TC_PRIO_MAX];
3224
3225- if (list->qlen <= skb->dev->tx_queue_len) {
3226+ if (list->qlen <= qdisc->dev->tx_queue_len) {
3227 __skb_queue_tail(list, skb);
3228 qdisc->q.qlen++;
3229- return 1;
3230+ return 0;
3231 }
3232 qdisc->stats.drops++;
3233 kfree_skb(skb);
3234- return 0;
3235+ return NET_XMIT_DROP;
3236 }
3237
3238 static struct sk_buff *
3239@@ -264,7 +264,7 @@
3240
3241 __skb_queue_head(list, skb);
3242 qdisc->q.qlen++;
3243- return 1;
3244+ return 0;
3245 }
3246
3247 static void
3248@@ -333,39 +333,39 @@
3249 void qdisc_reset(struct Qdisc *qdisc)
3250 {
3251 struct Qdisc_ops *ops = qdisc->ops;
3252- start_bh_atomic();
3253+
3254 if (ops->reset)
3255 ops->reset(qdisc);
3256- end_bh_atomic();
3257 }
3258
3259 void qdisc_destroy(struct Qdisc *qdisc)
3260 {
3261 struct Qdisc_ops *ops = qdisc->ops;
3262+ struct device *dev;
3263
3264 if (!atomic_dec_and_test(&qdisc->refcnt))
3265 return;
3266
3267+ dev = qdisc->dev;
3268+
3269 #ifdef CONFIG_NET_SCHED
3270- if (qdisc->dev) {
3271+ if (dev) {
3272 struct Qdisc *q, **qp;
3273- for (qp = &qdisc->dev->qdisc_list; (q=*qp) != NULL; qp = &q->next)
3274+ for (qp = &qdisc->dev->qdisc_list; (q=*qp) != NULL; qp = &q->next) {
3275 if (q == qdisc) {
3276 *qp = q->next;
3277- q->next = NULL;
3278 break;
3279 }
3280+ }
3281 }
3282 #ifdef CONFIG_NET_ESTIMATOR
3283 qdisc_kill_estimator(&qdisc->stats);
3284 #endif
3285 #endif
3286- start_bh_atomic();
3287 if (ops->reset)
3288 ops->reset(qdisc);
3289 if (ops->destroy)
3290 ops->destroy(qdisc);
3291- end_bh_atomic();
3292 if (!(qdisc->flags&TCQ_F_BUILTIN))
3293 kfree(qdisc);
3294 }
3295@@ -380,19 +380,20 @@
3296 */
3297
3298 if (dev->qdisc_sleeping == &noop_qdisc) {
3299+ struct Qdisc *qdisc;
3300 if (dev->tx_queue_len) {
3301- struct Qdisc *qdisc;
3302 qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops);
3303 if (qdisc == NULL) {
3304 printk(KERN_INFO "%s: activation failed\n", dev->name);
3305 return;
3306 }
3307- dev->qdisc_sleeping = qdisc;
3308- } else
3309- dev->qdisc_sleeping = &noqueue_qdisc;
3310+ } else {
3311+ qdisc = &noqueue_qdisc;
3312+ }
3313+ dev->qdisc_sleeping = qdisc;
3314 }
3315
3316- start_bh_atomic();
3317+ sch_dev_queue_lock(dev);
3318 if ((dev->qdisc = dev->qdisc_sleeping) != &noqueue_qdisc) {
3319 dev->qdisc->tx_timeo = 5*HZ;
3320 dev->qdisc->tx_last = jiffies - dev->qdisc->tx_timeo;
3321@@ -400,16 +401,17 @@
3322 dev_watchdog.expires = jiffies + 5*HZ;
3323 add_timer(&dev_watchdog);
3324 }
3325- end_bh_atomic();
3326+ sch_dev_queue_unlock(dev);
3327 }
3328
3329 void dev_deactivate(struct device *dev)
3330 {
3331 struct Qdisc *qdisc;
3332
3333- start_bh_atomic();
3334+ sch_dev_queue_lock(dev);
3335
3336- qdisc = xchg(&dev->qdisc, &noop_qdisc);
3337+ qdisc = dev->qdisc;
3338+ dev->qdisc = &noop_qdisc;
3339
3340 qdisc_reset(qdisc);
3341
3342@@ -425,7 +427,7 @@
3343 }
3344 }
3345
3346- end_bh_atomic();
3347+ sch_dev_queue_unlock(dev);
3348 }
3349
3350 void dev_init_scheduler(struct device *dev)
3351@@ -439,13 +441,16 @@
3352 {
3353 struct Qdisc *qdisc;
3354
3355- start_bh_atomic();
3356+ sch_dev_queue_lock(dev);
3357 qdisc = dev->qdisc_sleeping;
3358 dev->qdisc = &noop_qdisc;
3359 dev->qdisc_sleeping = &noop_qdisc;
3360 qdisc_destroy(qdisc);
3361+ if ((qdisc = dev->qdisc_ingress) != NULL) {
3362+ dev->qdisc_ingress = NULL;
3363+ qdisc_destroy(qdisc);
3364+ }
3365 BUG_TRAP(dev->qdisc_list == NULL);
3366 dev->qdisc_list = NULL;
3367- end_bh_atomic();
3368+ sch_dev_queue_unlock(dev);
3369 }
3370-
3371diff -urN ../v2.2.21/linux/net/sched/sch_gred.c linux/net/sched/sch_gred.c
3372--- ../v2.2.21/linux/net/sched/sch_gred.c Thu Jan 1 00:00:00 1970
3373+++ linux/net/sched/sch_gred.c Fri Jul 5 22:06:27 2002
3374@@ -0,0 +1,633 @@
3375+/*
3376+ * net/sched/sch_gred.c Generic Random Early Detection queue.
3377+ *
3378+ *
3379+ * This program is free software; you can redistribute it and/or
3380+ * modify it under the terms of the GNU General Public License
3381+ * as published by the Free Software Foundation; either version
3382+ * 2 of the License, or (at your option) any later version.
3383+ *
3384+ * Authors: J Hadi Salim (hadi@cyberus.ca) 1998-2002
3385+ *
3386+ * 991129: - Bug fix with grio mode
3387+ * - a better sing. AvgQ mode with Grio(WRED)
3389+ * - A finer grained VQ dequeue based on suggestion
3389+ * from Ren Liu
3390+ * - More error checks
3391+ *
3392+ *
3393+ *
3394+ * For all the glorious comments look at Alexey's sch_red.c
3395+ */
3396+
3397+#include <linux/config.h>
3398+#include <linux/module.h>
3399+#include <asm/uaccess.h>
3400+#include <asm/system.h>
3401+#include <asm/bitops.h>
3402+#include <linux/types.h>
3403+#include <linux/kernel.h>
3404+#include <linux/sched.h>
3405+#include <linux/string.h>
3406+#include <linux/mm.h>
3407+#include <linux/socket.h>
3408+#include <linux/sockios.h>
3409+#include <linux/in.h>
3410+#include <linux/errno.h>
3411+#include <linux/interrupt.h>
3412+#include <linux/if_ether.h>
3413+#include <linux/inet.h>
3414+#include <linux/netdevice.h>
3415+#include <linux/etherdevice.h>
3416+#include <linux/notifier.h>
3417+#include <net/ip.h>
3418+#include <net/route.h>
3419+#include <linux/skbuff.h>
3420+#include <net/sock.h>
3421+#include <net/pkt_sched.h>
3422+
3423+#if 1 /* control */
3424+#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
3425+#else
3426+#define DPRINTK(format,args...)
3427+#endif
3428+
3429+#if 0 /* data */
3430+#define D2PRINTK(format,args...) printk(KERN_DEBUG format,##args)
3431+#else
3432+#define D2PRINTK(format,args...)
3433+#endif
3434+
3435+struct gred_sched_data;
3436+struct gred_sched;
3437+
3438+struct gred_sched_data
3439+{
3440+/* Parameters */
3441+ u32 limit; /* HARD maximal queue length */
3442+ u32 qth_min; /* Min average length threshold: A scaled */
3443+ u32 qth_max; /* Max average length threshold: A scaled */
3444+ u32 DP; /* the drop parameters */
3445+ char Wlog; /* log(W) */
3446+ char Plog; /* random number bits */
3447+ u32 Scell_max;
3448+ u32 Rmask;
3449+ u32 bytesin; /* bytes seen on virtualQ so far*/
3450+ u32 packetsin; /* packets seen on virtualQ so far*/
3451+ u32 backlog; /* bytes on the virtualQ */
3452+ u32 forced; /* packets dropped for exceeding limits */
3453+ u32 early; /* packets dropped as a warning */
3454+ u32 other; /* packets dropped by invoking drop() */
3455+ u32 pdrop; /* packets dropped because we exceeded physical queue limits */
3456+ char Scell_log;
3457+ u8 Stab[256];
3458+ u8 prio; /* the prio of this vq */
3459+
3460+/* Variables */
3461+ unsigned long qave; /* Average queue length: A scaled */
3462+ int qcount; /* Packets since last random number generation */
3463+ u32 qR; /* Cached random number */
3464+
3465+ psched_time_t qidlestart; /* Start of idle period */
3466+};
3467+
3468+struct gred_sched
3469+{
3470+ struct gred_sched_data *tab[MAX_DPs];
3471+ u32 DPs;
3472+ u32 def;
3473+ u8 initd;
3474+ u8 grio;
3475+ u8 eqp;
3476+};
3477+
3478+static int
3479+gred_enqueue(struct sk_buff *skb, struct Qdisc* sch)
3480+{
3481+ psched_time_t now;
3482+ struct gred_sched_data *q=NULL;
3483+ struct gred_sched *t= (struct gred_sched *)sch->data;
3484+ unsigned long qave=0;
3485+ int i=0;
3486+
3487+ if (!t->initd && skb_queue_len(&sch->q) <= sch->dev->tx_queue_len) {
3488+ D2PRINTK("NO GRED Queues setup yet! Enqueued anyway\n");
3489+ goto do_enqueue;
3490+ }
3491+
3492+
3493+ if ( ((skb->tc_index&0xf) > t->DPs) || !(q=t->tab[skb->tc_index&0xf])) {
3494+ printk("GRED: setting to default (%d)\n ",t->def);
3495+ if (!(q=t->tab[t->def])) {
3496+ DPRINTK("GRED: setting to default FAILED! dropping!! "
3497+ "(%d)\n ", t->def);
3498+ goto drop;
3499+ }
3500+ /* fix tc_index? --could be controversial but needed for
3501+ requeueing */
3502+ skb->tc_index=(skb->tc_index&0xfffffff0) | t->def;
3503+ }
3504+
3505+ D2PRINTK("gred_enqueue virtualQ 0x%x classid %x backlog %d "
3506+ "general backlog %d\n",skb->tc_index&0xf,sch->handle,q->backlog,
3507+ sch->stats.backlog);
3508+ /* sum up all the qaves of prios <= to ours to get the new qave*/
3509+ if (!t->eqp && t->grio) {
3510+ for (i=0;i<t->DPs;i++) {
3511+ if ((!t->tab[i]) || (i==q->DP))
3512+ continue;
3513+
3514+ if ((t->tab[i]->prio < q->prio) && (PSCHED_IS_PASTPERFECT(t->tab[i]->qidlestart)))
3515+ qave +=t->tab[i]->qave;
3516+ }
3517+
3518+ }
3519+
3520+ q->packetsin++;
3521+ q->bytesin+=skb->len;
3522+
3523+ if (t->eqp && t->grio) {
3524+ qave=0;
3525+ q->qave=t->tab[t->def]->qave;
3526+ q->qidlestart=t->tab[t->def]->qidlestart;
3527+ }
3528+
3529+ if (!PSCHED_IS_PASTPERFECT(q->qidlestart)) {
3530+ long us_idle;
3531+ PSCHED_GET_TIME(now);
3532+ us_idle = PSCHED_TDIFF_SAFE(now, q->qidlestart, q->Scell_max, 0);
3533+ PSCHED_SET_PASTPERFECT(q->qidlestart);
3534+
3535+ q->qave >>= q->Stab[(us_idle>>q->Scell_log)&0xFF];
3536+ } else {
3537+ if (t->eqp) {
3538+ q->qave += sch->stats.backlog - (q->qave >> q->Wlog);
3539+ } else {
3540+ q->qave += q->backlog - (q->qave >> q->Wlog);
3541+ }
3542+
3543+ }
3544+
3545+
3546+ if (t->eqp && t->grio)
3547+ t->tab[t->def]->qave=q->qave;
3548+
3549+ if ((q->qave+qave) < q->qth_min) {
3550+ q->qcount = -1;
3551+enqueue:
3552+ if (q->backlog <= q->limit) {
3553+ q->backlog += skb->len;
3554+do_enqueue:
3555+ __skb_queue_tail(&sch->q, skb);
3556+ sch->stats.backlog += skb->len;
3557+ sch->stats.bytes += skb->len;
3558+ sch->stats.packets++;
3559+ return 0;
3560+ } else {
3561+ q->pdrop++;
3562+ }
3563+
3564+drop:
3565+ kfree_skb(skb);
3566+ sch->stats.drops++;
3567+ return NET_XMIT_DROP;
3568+ }
3569+ if ((q->qave+qave) >= q->qth_max) {
3570+ q->qcount = -1;
3571+ sch->stats.overlimits++;
3572+ q->forced++;
3573+ goto drop;
3574+ }
3575+ if (++q->qcount) {
3576+ if ((((qave+q->qave) - q->qth_min)>>q->Wlog)*q->qcount < q->qR)
3577+ goto enqueue;
3578+ q->qcount = 0;
3579+ q->qR = net_random()&q->Rmask;
3580+ sch->stats.overlimits++;
3581+ q->early++;
3582+ goto drop;
3583+ }
3584+ q->qR = net_random()&q->Rmask;
3585+ goto enqueue;
3586+}
3587+
3588+static int
3589+gred_requeue(struct sk_buff *skb, struct Qdisc* sch)
3590+{
3591+ struct gred_sched_data *q;
3592+ struct gred_sched *t= (struct gred_sched *)sch->data;
3593+ q= t->tab[(skb->tc_index&0xf)];
3594+/* error checking here -- probably unnecessary */
3595+ PSCHED_SET_PASTPERFECT(q->qidlestart);
3596+
3597+ __skb_queue_head(&sch->q, skb);
3598+ sch->stats.backlog += skb->len;
3599+ q->backlog += skb->len;
3600+ return 0;
3601+}
3602+
3603+static struct sk_buff *
3604+gred_dequeue(struct Qdisc* sch)
3605+{
3606+ struct sk_buff *skb;
3607+ struct gred_sched_data *q;
3608+ struct gred_sched *t= (struct gred_sched *)sch->data;
3609+
3610+ skb = __skb_dequeue(&sch->q);
3611+ if (skb) {
3612+ sch->stats.backlog -= skb->len;
3613+ q= t->tab[(skb->tc_index&0xf)];
3614+ if (q) {
3615+ q->backlog -= skb->len;
3616+ if (!q->backlog && !t->eqp)
3617+ PSCHED_GET_TIME(q->qidlestart);
3618+ } else {
3619+ D2PRINTK("gred_dequeue: skb has bad tcindex %x\n",skb->tc_index&0xf);
3620+ }
3621+ return skb;
3622+ }
3623+
3624+ if (t->eqp) {
3625+ q= t->tab[t->def];
3626+ if (!q)
3627+ D2PRINTK("no default VQ set: Results will be "
3628+ "screwed up\n");
3629+ else
3630+ PSCHED_GET_TIME(q->qidlestart);
3631+ }
3632+
3633+ return NULL;
3634+}
3635+
3636+static int
3637+gred_drop(struct Qdisc* sch)
3638+{
3639+ struct sk_buff *skb;
3640+
3641+ struct gred_sched_data *q;
3642+ struct gred_sched *t= (struct gred_sched *)sch->data;
3643+
3644+ skb = __skb_dequeue_tail(&sch->q);
3645+ if (skb) {
3646+ sch->stats.backlog -= skb->len;
3647+ sch->stats.drops++;
3648+ q= t->tab[(skb->tc_index&0xf)];
3649+ if (q) {
3650+ q->backlog -= skb->len;
3651+ q->other++;
3652+ if (!q->backlog && !t->eqp)
3653+ PSCHED_GET_TIME(q->qidlestart);
3654+ } else {
3655+ D2PRINTK("gred_dequeue: skb has bad tcindex %x\n",skb->tc_index&0xf);
3656+ }
3657+
3658+ kfree_skb(skb);
3659+ return 1;
3660+ }
3661+
3662+ q=t->tab[t->def];
3663+ if (!q) {
3664+ D2PRINTK("no default VQ set: Results might be screwed up\n");
3665+ return 0;
3666+ }
3667+
3668+ PSCHED_GET_TIME(q->qidlestart);
3669+ return 0;
3670+
3671+}
3672+
3673+static void gred_reset(struct Qdisc* sch)
3674+{
3675+ int i;
3676+ struct gred_sched_data *q;
3677+ struct gred_sched *t= (struct gred_sched *)sch->data;
3678+
3679+ __skb_queue_purge(&sch->q);
3680+
3681+ sch->stats.backlog = 0;
3682+
3683+ for (i=0;i<t->DPs;i++) {
3684+ q= t->tab[i];
3685+ if (!q)
3686+ continue;
3687+ PSCHED_SET_PASTPERFECT(q->qidlestart);
3688+ q->qave = 0;
3689+ q->qcount = -1;
3690+ q->backlog = 0;
3691+ q->other=0;
3692+ q->forced=0;
3693+ q->pdrop=0;
3694+ q->early=0;
3695+ }
3696+}
3697+
3698+static int gred_change(struct Qdisc *sch, struct rtattr *opt)
3699+{
3700+ struct gred_sched *table = (struct gred_sched *)sch->data;
3701+ struct gred_sched_data *q;
3702+ struct tc_gred_qopt *ctl;
3703+ struct tc_gred_sopt *sopt;
3704+ struct rtattr *tb[TCA_GRED_STAB];
3705+ struct rtattr *tb2[TCA_GRED_STAB];
3706+ int i;
3707+
3708+ if (opt == NULL ||
3709+ rtattr_parse(tb, TCA_GRED_STAB, RTA_DATA(opt), RTA_PAYLOAD(opt)) )
3710+ return -EINVAL;
3711+
3712+ if (tb[TCA_GRED_PARMS-1] == 0 && tb[TCA_GRED_STAB-1] == 0 &&
3713+ tb[TCA_GRED_DPS-1] != 0) {
3714+ rtattr_parse(tb2, TCA_GRED_DPS, RTA_DATA(opt),
3715+ RTA_PAYLOAD(opt));
3716+
3717+ sopt = RTA_DATA(tb2[TCA_GRED_DPS-1]);
3718+ table->DPs=sopt->DPs;
3719+ table->def=sopt->def_DP;
3720+ table->grio=sopt->grio;
3721+ table->initd=0;
3722+ /* probably need to clear all the table DP entries as well */
3723+ MOD_INC_USE_COUNT;
3724+ return 0;
3725+ }
3726+
3727+
3728+ if (!table->DPs || tb[TCA_GRED_PARMS-1] == 0 || tb[TCA_GRED_STAB-1] == 0 ||
3729+ RTA_PAYLOAD(tb[TCA_GRED_PARMS-1]) < sizeof(*ctl) ||
3730+ RTA_PAYLOAD(tb[TCA_GRED_STAB-1]) < 256)
3731+ return -EINVAL;
3732+
3733+ ctl = RTA_DATA(tb[TCA_GRED_PARMS-1]);
3734+ if (ctl->DP > MAX_DPs-1 ) {
3735+ /* misbehaving is punished! Put in the default drop probability */
3736+ DPRINTK("\nGRED: DP %u not in the proper range fixed. New DP "
3737+ "set to default at %d\n",ctl->DP,table->def);
3738+ ctl->DP=table->def;
3739+ }
3740+
3741+ if (table->tab[ctl->DP] == NULL) {
3742+ table->tab[ctl->DP]=kmalloc(sizeof(struct gred_sched_data),
3743+ GFP_KERNEL);
3744+ if (NULL == table->tab[ctl->DP])
3745+ return -ENOMEM;
3746+ memset(table->tab[ctl->DP], 0, (sizeof(struct gred_sched_data)));
3747+ }
3748+ q= table->tab[ctl->DP];
3749+
3750+ if (table->grio) {
3751+ if (ctl->prio <=0) {
3752+ if (table->def && table->tab[table->def]) {
3753+ DPRINTK("\nGRED: DP %u does not have a prio"
3754+ "setting default to %d\n",ctl->DP,
3755+ table->tab[table->def]->prio);
3756+ q->prio=table->tab[table->def]->prio;
3757+ } else {
3758+ DPRINTK("\nGRED: DP %u does not have a prio"
3759+ " setting default to 8\n",ctl->DP);
3760+ q->prio=8;
3761+ }
3762+ } else {
3763+ q->prio=ctl->prio;
3764+ }
3765+ } else {
3766+ q->prio=8;
3767+ }
3768+
3769+
3770+ q->DP=ctl->DP;
3771+ q->Wlog = ctl->Wlog;
3772+ q->Plog = ctl->Plog;
3773+ q->limit = ctl->limit;
3774+ q->Scell_log = ctl->Scell_log;
3775+ q->Rmask = ctl->Plog < 32 ? ((1<<ctl->Plog) - 1) : ~0UL;
3776+ q->Scell_max = (255<<q->Scell_log);
3777+ q->qth_min = ctl->qth_min<<ctl->Wlog;
3778+ q->qth_max = ctl->qth_max<<ctl->Wlog;
3779+ q->qave=0;
3780+ q->backlog=0;
3781+ q->qcount = -1;
3782+ q->other=0;
3783+ q->forced=0;
3784+ q->pdrop=0;
3785+ q->early=0;
3786+
3787+ PSCHED_SET_PASTPERFECT(q->qidlestart);
3788+ memcpy(q->Stab, RTA_DATA(tb[TCA_GRED_STAB-1]), 256);
3789+
3790+ if ( table->initd && table->grio) {
3791+ /* this looks ugly but its not in the fast path */
3792+ for (i=0;i<table->DPs;i++) {
3793+ if ((!table->tab[i]) || (i==q->DP) )
3794+ continue;
3795+ if (table->tab[i]->prio == q->prio ){
3796+ /* WRED mode detected */
3797+ table->eqp=1;
3798+ break;
3799+ }
3800+ }
3801+ }
3802+
3803+ if (!table->initd) {
3804+ table->initd=1;
3805+ /*
3806+ the first entry also goes into the default until
3807+ over-written
3808+ */
3809+
3810+ if (table->tab[table->def] == NULL) {
3811+ table->tab[table->def]=
3812+ kmalloc(sizeof(struct gred_sched_data), GFP_KERNEL);
3813+ if (NULL == table->tab[table->def])
3814+ return -ENOMEM;
3815+
3816+ memset(table->tab[table->def], 0,
3817+ (sizeof(struct gred_sched_data)));
3818+ }
3819+ q= table->tab[table->def];
3820+ q->DP=table->def;
3821+ q->Wlog = ctl->Wlog;
3822+ q->Plog = ctl->Plog;
3823+ q->limit = ctl->limit;
3824+ q->Scell_log = ctl->Scell_log;
3825+ q->Rmask = ctl->Plog < 32 ? ((1<<ctl->Plog) - 1) : ~0UL;
3826+ q->Scell_max = (255<<q->Scell_log);
3827+ q->qth_min = ctl->qth_min<<ctl->Wlog;
3828+ q->qth_max = ctl->qth_max<<ctl->Wlog;
3829+
3830+ if (table->grio)
3831+ q->prio=table->tab[ctl->DP]->prio;
3832+ else
3833+ q->prio=8;
3834+
3835+ q->qcount = -1;
3836+ PSCHED_SET_PASTPERFECT(q->qidlestart);
3837+ memcpy(q->Stab, RTA_DATA(tb[TCA_GRED_STAB-1]), 256);
3838+ }
3839+ return 0;
3840+
3841+}
3842+
3843+static int gred_init(struct Qdisc *sch, struct rtattr *opt)
3844+{
3845+ struct gred_sched *table = (struct gred_sched *)sch->data;
3846+ struct tc_gred_sopt *sopt;
3847+ struct rtattr *tb[TCA_GRED_STAB];
3848+ struct rtattr *tb2[TCA_GRED_STAB];
3849+
3850+ if (opt == NULL ||
3851+ rtattr_parse(tb, TCA_GRED_STAB, RTA_DATA(opt), RTA_PAYLOAD(opt)) )
3852+ return -EINVAL;
3853+
3854+ if (tb[TCA_GRED_PARMS-1] == 0 && tb[TCA_GRED_STAB-1] == 0 &&
3855+ tb[TCA_GRED_DPS-1] != 0) {
3856+ rtattr_parse(tb2, TCA_GRED_DPS, RTA_DATA(opt),RTA_PAYLOAD(opt));
3857+
3858+ sopt = RTA_DATA(tb2[TCA_GRED_DPS-1]);
3859+ table->DPs=sopt->DPs;
3860+ table->def=sopt->def_DP;
3861+ table->grio=sopt->grio;
3862+ table->initd=0;
3863+ MOD_INC_USE_COUNT;
3864+ return 0;
3865+ }
3866+
3867+ DPRINTK("\n GRED_INIT error!\n");
3868+ return -EINVAL;
3869+}
3870+
3871+static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
3872+{
3873+ unsigned long qave;
3874+ struct rtattr *rta;
3875+ struct tc_gred_qopt *opt = NULL ;
3876+ struct tc_gred_qopt *dst;
3877+ struct gred_sched *table = (struct gred_sched *)sch->data;
3878+ struct gred_sched_data *q;
3879+ int i;
3880+ unsigned char *b = skb->tail;
3881+
3882+ rta = (struct rtattr*)b;
3883+ RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
3884+
3885+ opt=kmalloc(sizeof(struct tc_gred_qopt)*MAX_DPs, GFP_KERNEL);
3886+
3887+ if (opt == NULL) {
3888+ DPRINTK("gred_dump:failed to malloc for %Zd\n",
3889+ sizeof(struct tc_gred_qopt)*MAX_DPs);
3890+ goto rtattr_failure;
3891+ }
3892+
3893+ memset(opt, 0, (sizeof(struct tc_gred_qopt))*table->DPs);
3894+
3895+ if (!table->initd) {
3896+ DPRINTK("NO GRED Queues setup!\n");
3897+ }
3898+
3899+ for (i=0;i<MAX_DPs;i++) {
3900+ dst= &opt[i];
3901+ q= table->tab[i];
3902+
3903+ if (!q) {
3904+ /* hack -- fix at some point with proper message
3905+ This is how we indicate to tc that there is no VQ
3906+ at this DP */
3907+
3908+ dst->DP=MAX_DPs+i;
3909+ continue;
3910+ }
3911+
3912+ dst->limit=q->limit;
3913+ dst->qth_min=q->qth_min>>q->Wlog;
3914+ dst->qth_max=q->qth_max>>q->Wlog;
3915+ dst->DP=q->DP;
3916+ dst->backlog=q->backlog;
3917+ if (q->qave) {
3918+ if (table->eqp && table->grio) {
3919+ q->qidlestart=table->tab[table->def]->qidlestart;
3920+ q->qave=table->tab[table->def]->qave;
3921+ }
3922+ if (!PSCHED_IS_PASTPERFECT(q->qidlestart)) {
3923+ long idle;
3924+ psched_time_t now;
3925+ PSCHED_GET_TIME(now);
3926+ idle = PSCHED_TDIFF_SAFE(now, q->qidlestart, q->Scell_max, 0);
3927+ qave = q->qave >> q->Stab[(idle>>q->Scell_log)&0xFF];
3928+ dst->qave = qave >> q->Wlog;
3929+
3930+ } else {
3931+ dst->qave = q->qave >> q->Wlog;
3932+ }
3933+ } else {
3934+ dst->qave = 0;
3935+ }
3936+
3937+
3938+ dst->Wlog = q->Wlog;
3939+ dst->Plog = q->Plog;
3940+ dst->Scell_log = q->Scell_log;
3941+ dst->other = q->other;
3942+ dst->forced = q->forced;
3943+ dst->early = q->early;
3944+ dst->pdrop = q->pdrop;
3945+ dst->prio = q->prio;
3946+ dst->packets=q->packetsin;
3947+ dst->bytesin=q->bytesin;
3948+ }
3949+
3950+ RTA_PUT(skb, TCA_GRED_PARMS, sizeof(struct tc_gred_qopt)*MAX_DPs, opt);
3951+ rta->rta_len = skb->tail - b;
3952+
3953+ kfree(opt);
3954+ return skb->len;
3955+
3956+rtattr_failure:
3957+ if (opt)
3958+ kfree(opt);
3959+ DPRINTK("gred_dump: FAILURE!!!!\n");
3960+
3961+/* also free the opt struct here */
3962+ skb_trim(skb, b - skb->data);
3963+ return -1;
3964+}
3965+
3966+static void gred_destroy(struct Qdisc *sch)
3967+{
3968+ struct gred_sched *table = (struct gred_sched *)sch->data;
3969+ int i;
3970+
3971+ for (i = 0;i < table->DPs; i++) {
3972+ if (table->tab[i])
3973+ kfree(table->tab[i]);
3974+ }
3975+ MOD_DEC_USE_COUNT;
3976+}
3977+
3978+struct Qdisc_ops gred_qdisc_ops =
3979+{
3980+ NULL,
3981+ NULL,
3982+ "gred",
3983+ sizeof(struct gred_sched),
3984+ gred_enqueue,
3985+ gred_dequeue,
3986+ gred_requeue,
3987+ gred_drop,
3988+ gred_init,
3989+ gred_reset,
3990+ gred_destroy,
3991+ gred_change, /* change */
3992+ gred_dump,
3993+};
3994+
3995+
3996+#ifdef MODULE
3997+int init_module(void)
3998+{
3999+ return register_qdisc(&gred_qdisc_ops);
4000+}
4001+
4002+void cleanup_module(void)
4003+{
4004+ unregister_qdisc(&gred_qdisc_ops);
4005+}
4006+#endif
4007+MODULE_LICENSE("GPL");
4008diff -urN ../v2.2.21/linux/net/sched/sch_ingress.c linux/net/sched/sch_ingress.c
4009--- ../v2.2.21/linux/net/sched/sch_ingress.c Thu Jan 1 00:00:00 1970
4010+++ linux/net/sched/sch_ingress.c Sun Aug 4 18:02:57 2002
4011@@ -0,0 +1,313 @@
4012+/* net/sched/sch_ingress.c - Ingress qdisc */
4013+
4014+/* Written 1999 by Jamal Hadi Salim */
4015+
4016+
4017+#include <linux/config.h>
4018+#include <linux/module.h>
4019+#include <linux/types.h>
4020+#include <linux/skbuff.h>
4021+#include <linux/netdevice.h> /* for pkt_sched */
4022+#include <linux/rtnetlink.h>
4023+#include <linux/firewall.h>
4024+#include <net/pkt_sched.h>
4025+#include <asm/byteorder.h>
4026+
4027+#undef DEBUG_INGRESS
4028+
4029+#ifdef DEBUG_INGRESS /* control */
4030+#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
4031+#else
4032+#define DPRINTK(format,args...)
4033+#endif
4034+
4035+#if 0 /* data */
4036+#define D2PRINTK(format,args...) printk(KERN_DEBUG format,##args)
4037+#else
4038+#define D2PRINTK(format,args...)
4039+#endif
4040+
4041+
4042+#define PRIV(sch) ((struct ingress_qdisc_data *) (sch)->data)
4043+
4044+
4045+
4046+struct ingress_qdisc_data {
4047+ struct Qdisc *q;
4048+ struct tcf_proto *filter_list;
4049+};
4050+
4051+
4052+/* ------------------------- Class/flow operations ------------------------- */
4053+
4054+
4055+static int ingress_graft(struct Qdisc *sch,unsigned long arg,
4056+ struct Qdisc *new,struct Qdisc **old)
4057+{
4058+#ifdef DEBUG_INGRESS
4059+ struct ingress_qdisc_data *p = PRIV(sch);
4060+#endif
4061+
4062+ DPRINTK("ingress_graft(sch %p,[qdisc %p],new %p,old %p)\n",
4063+ sch, p, new, old);
4064+ DPRINTK("\n ingress_graft: You cannot add qdiscs to classes");
4065+ return 1;
4066+}
4067+
4068+
4069+static struct Qdisc *ingress_leaf(struct Qdisc *sch, unsigned long arg)
4070+{
4071+ return NULL;
4072+}
4073+
4074+
4075+static unsigned long ingress_get(struct Qdisc *sch,u32 classid)
4076+{
4077+#ifdef DEBUG_INGRESS
4078+ struct ingress_qdisc_data *p = PRIV(sch);
4079+#endif
4080+ DPRINTK("ingress_get(sch %p,[qdisc %p],classid %x)\n", sch, p, classid);
4081+ return TC_H_MIN(classid) + 1;
4082+}
4083+
4084+
4085+static unsigned long ingress_bind_filter(struct Qdisc *sch,
4086+ unsigned long parent, u32 classid)
4087+{
4088+ return ingress_get(sch, classid);
4089+}
4090+
4091+
4092+static void ingress_put(struct Qdisc *sch, unsigned long cl)
4093+{
4094+}
4095+
4096+
4097+static int ingress_change(struct Qdisc *sch, u32 classid, u32 parent,
4098+ struct rtattr **tca, unsigned long *arg)
4099+{
4100+#ifdef DEBUG_INGRESS
4101+ struct ingress_qdisc_data *p = PRIV(sch);
4102+#endif
4103+ DPRINTK("ingress_change(sch %p,[qdisc %p],classid %x,parent %x),"
4104+ "arg 0x%lx\n", sch, p, classid, parent, *arg);
4105+ DPRINTK("No effect. sch_ingress doesnt maintain classes at the moment");
4106+ return 0;
4107+}
4108+
4109+
4110+
4111+static void ingress_walk(struct Qdisc *sch,struct qdisc_walker *walker)
4112+{
4113+#ifdef DEBUG_INGRESS
4114+ struct ingress_qdisc_data *p = PRIV(sch);
4115+#endif
4116+ DPRINTK("ingress_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker);
4117+ DPRINTK("No effect. sch_ingress doesnt maintain classes at the moment");
4118+}
4119+
4120+
4121+static struct tcf_proto **ingress_find_tcf(struct Qdisc *sch,unsigned long cl)
4122+{
4123+ struct ingress_qdisc_data *p = PRIV(sch);
4124+
4125+ return &p->filter_list;
4126+}
4127+
4128+
4129+/* --------------------------- Qdisc operations ---------------------------- */
4130+
4131+
4132+static int ingress_enqueue(struct sk_buff *skb,struct Qdisc *sch)
4133+{
4134+ struct ingress_qdisc_data *p = PRIV(sch);
4135+ struct tcf_result res;
4136+ int result;
4137+
4138+ D2PRINTK("ingress_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
4139+ result = tc_classify(skb, p->filter_list, &res);
4140+ D2PRINTK("result %d class 0x%04x\n", result, res.classid);
4141+ /*
4142+ * Unlike normal "enqueue" functions, ingress_enqueue returns a
4143+ * firewall FW_* code.
4144+ */
4145+#ifdef CONFIG_NET_CLS_POLICE
4146+ switch (result) {
4147+ case TC_POLICE_SHOT:
4148+ result = FW_BLOCK;
4149+ sch->stats.drops++;
4150+ break;
4151+ case TC_POLICE_RECLASSIFY: /* DSCP remarking here ? */
4152+ case TC_POLICE_OK:
4153+ case TC_POLICE_UNSPEC:
4154+ default:
4155+ sch->stats.packets++;
4156+ sch->stats.bytes += skb->len;
4157+ result = FW_ACCEPT;
4158+ break;
4159+ };
4160+#else
4161+ sch->stats.packets++;
4162+ sch->stats.bytes += skb->len;
4163+#endif
4164+
4165+ skb->tc_index = TC_H_MIN(res.classid);
4166+ return result;
4167+}
4168+
4169+
4170+static struct sk_buff *ingress_dequeue(struct Qdisc *sch)
4171+{
4172+/*
4173+ struct ingress_qdisc_data *p = PRIV(sch);
4174+ D2PRINTK("ingress_dequeue(sch %p,[qdisc %p])\n",sch,PRIV(p));
4175+*/
4176+ return NULL;
4177+}
4178+
4179+
4180+static int ingress_requeue(struct sk_buff *skb,struct Qdisc *sch)
4181+{
4182+/*
4183+ struct ingress_qdisc_data *p = PRIV(sch);
4184+ D2PRINTK("ingress_requeue(skb %p,sch %p,[qdisc %p])\n",skb,sch,PRIV(p));
4185+*/
4186+ return 0;
4187+}
4188+
4189+static int ingress_drop(struct Qdisc *sch)
4190+{
4191+#ifdef DEBUG_INGRESS
4192+ struct ingress_qdisc_data *p = PRIV(sch);
4193+#endif
4194+ DPRINTK("ingress_drop(sch %p,[qdisc %p])\n", sch, p);
4195+ return 0;
4196+}
4197+
4198+
4199+int ingress_init(struct Qdisc *sch,struct rtattr *opt)
4200+{
4201+ struct ingress_qdisc_data *p = PRIV(sch);
4202+
4203+ DPRINTK("ingress_init(sch %p,[qdisc %p],opt %p)\n",sch,p,opt);
4204+ memset(p, 0, sizeof(*p));
4205+ p->filter_list = NULL;
4206+ p->q = &noop_qdisc;
4207+ MOD_INC_USE_COUNT;
4208+ return 0;
4209+}
4210+
4211+
4212+static void ingress_reset(struct Qdisc *sch)
4213+{
4214+ struct ingress_qdisc_data *p = PRIV(sch);
4215+
4216+ DPRINTK("ingress_reset(sch %p,[qdisc %p])\n", sch, p);
4217+
4218+/*
4219+#if 0
4220+*/
4221+/* for future use */
4222+ qdisc_reset(p->q);
4223+/*
4224+#endif
4225+*/
4226+}
4227+
4228+
4229+static void ingress_destroy(struct Qdisc *sch)
4230+{
4231+ struct ingress_qdisc_data *p = PRIV(sch);
4232+ struct tcf_proto *tp;
4233+
4234+ DPRINTK("ingress_destroy(sch %p,[qdisc %p])\n", sch, p);
4235+ while (p->filter_list) {
4236+ tp = p->filter_list;
4237+ p->filter_list = tp->next;
4238+ tp->ops->destroy(tp);
4239+ }
4240+ memset(p, 0, sizeof(*p));
4241+ p->filter_list = NULL;
4242+
4243+#if 0
4244+/* for future use */
4245+ qdisc_destroy(p->q);
4246+#endif
4247+
4248+ MOD_DEC_USE_COUNT;
4249+
4250+}
4251+
4252+
4253+static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb)
4254+{
4255+ unsigned char *b = skb->tail;
4256+ struct rtattr *rta;
4257+
4258+ rta = (struct rtattr *) b;
4259+ RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
4260+ rta->rta_len = skb->tail - b;
4261+ return skb->len;
4262+
4263+rtattr_failure:
4264+ skb_trim(skb, b - skb->data);
4265+ return -1;
4266+}
4267+
4268+static struct Qdisc_class_ops ingress_class_ops =
4269+{
4270+ ingress_graft, /* graft */
4271+ ingress_leaf, /* leaf */
4272+ ingress_get, /* get */
4273+ ingress_put, /* put */
4274+ ingress_change, /* change */
4275+ NULL, /* delete */
4276+ ingress_walk, /* walk */
4277+
4278+ ingress_find_tcf, /* tcf_chain */
4279+ ingress_bind_filter, /* bind_tcf */
4280+ ingress_put, /* unbind_tcf */
4281+
4282+ NULL, /* dump */
4283+};
4284+
4285+struct Qdisc_ops ingress_qdisc_ops =
4286+{
4287+ NULL, /* next */
4288+ &ingress_class_ops, /* cl_ops */
4289+ "ingress",
4290+ sizeof(struct ingress_qdisc_data),
4291+
4292+ ingress_enqueue, /* enqueue */
4293+ ingress_dequeue, /* dequeue */
4294+ ingress_requeue, /* requeue */
4295+ ingress_drop, /* drop */
4296+
4297+ ingress_init, /* init */
4298+ ingress_reset, /* reset */
4299+ ingress_destroy, /* destroy */
4300+ NULL, /* change */
4301+
4302+ ingress_dump, /* dump */
4303+};
4304+
4305+
4306+#ifdef MODULE
4307+int init_module(void)
4308+{
4309+ int ret = 0;
4310+
4311+ if ((ret = register_qdisc(&ingress_qdisc_ops)) < 0) {
4312+ printk("Unable to register Ingress qdisc\n");
4313+ return ret;
4314+ }
4315+
4316+ return ret;
4317+}
4318+
4319+
4320+void cleanup_module(void)
4321+{
4322+ unregister_qdisc(&ingress_qdisc_ops);
4323+}
4324+#endif
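For orientation: the new sch_ingress.c above never delivers packets itself; ingress_dequeue() always returns NULL, and ingress_enqueue() is a pure classification/policing pass run on the receive path that returns one of the 2.2 firewall verdicts (FW_ACCEPT, FW_BLOCK) instead of a NET_XMIT_* code, storing the chosen class minor in skb->tc_index as a side effect. The actual call site is added elsewhere in this patch and is not shown in this section; the fragment below is only an illustrative sketch of how such a caller might consume the verdict, and example_ingress_verdict() is a made-up name.

static int example_ingress_verdict(struct sk_buff *skb, struct Qdisc *ingress_q)
{
	if (ingress_q == NULL)
		return FW_ACCEPT;		/* no ingress qdisc attached */

	/* ingress_enqueue() runs the attached filters/policers and fills
	   skb->tc_index; FW_BLOCK means the policer shot the packet */
	if (ingress_q->enqueue(skb, ingress_q) == FW_BLOCK)
		return FW_BLOCK;		/* caller is expected to drop the skb */

	return FW_ACCEPT;			/* skb continues up the stack */
}

Note that dropping here is the caller's job: unlike the egress qdiscs patched below, ingress_enqueue() never frees the skb itself.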
4325diff -urN ../v2.2.21/linux/net/sched/sch_prio.c linux/net/sched/sch_prio.c
4326--- ../v2.2.21/linux/net/sched/sch_prio.c Sat Oct 21 12:10:50 2000
4327+++ linux/net/sched/sch_prio.c Fri Jul 5 22:06:27 2002
4328@@ -7,6 +7,8 @@
4329 * 2 of the License, or (at your option) any later version.
4330 *
4331 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
4332+ * Fixes: 19990609: J Hadi Salim <hadi@nortelnetworks.com>:
4333+ * Init -- EINVAL when opt undefined
4334 */
4335
4336 #include <linux/config.h>
4337@@ -69,17 +71,18 @@
4338 {
4339 struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
4340 struct Qdisc *qdisc;
4341+ int ret;
4342
4343 qdisc = q->queues[prio_classify(skb, sch)];
4344
4345- if (qdisc->enqueue(skb, qdisc) == 1) {
4346+ if ((ret = qdisc->enqueue(skb, qdisc)) == 0) {
4347 sch->stats.bytes += skb->len;
4348 sch->stats.packets++;
4349 sch->q.qlen++;
4350- return 1;
4351+ return 0;
4352 }
4353 sch->stats.drops++;
4354- return 0;
4355+ return ret;
4356 }
4357
4358
4359@@ -88,15 +91,16 @@
4360 {
4361 struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
4362 struct Qdisc *qdisc;
4363+ int ret;
4364
4365 qdisc = q->queues[prio_classify(skb, sch)];
4366
4367- if (qdisc->ops->requeue(skb, qdisc) == 1) {
4368+ if ((ret = qdisc->ops->requeue(skb, qdisc)) == 0) {
4369 sch->q.qlen++;
4370- return 1;
4371+ return 0;
4372 }
4373 sch->stats.drops++;
4374- return 0;
4375+ return ret;
4376 }
4377
4378
4379@@ -178,7 +182,7 @@
4380 return -EINVAL;
4381 }
4382
4383- start_bh_atomic();
4384+ sch_tree_lock(sch);
4385 q->bands = qopt->bands;
4386 memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
4387
4388@@ -187,7 +191,7 @@
4389 if (child != &noop_qdisc)
4390 qdisc_destroy(child);
4391 }
4392- end_bh_atomic();
4393+ sch_tree_unlock(sch);
4394
4395 for (i=0; i<=TC_PRIO_MAX; i++) {
4396 int band = q->prio2band[i];
4397@@ -195,11 +199,12 @@
4398 struct Qdisc *child;
4399 child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
4400 if (child) {
4401+ sch_tree_lock(sch);
4402 child = xchg(&q->queues[band], child);
4403- synchronize_bh();
4404
4405 if (child != &noop_qdisc)
4406 qdisc_destroy(child);
4407+ sch_tree_unlock(sch);
4408 }
4409 }
4410 }
4411@@ -208,8 +213,6 @@
4412
4413 static int prio_init(struct Qdisc *sch, struct rtattr *opt)
4414 {
4415- static const u8 prio2band[TC_PRIO_MAX+1] =
4416- { 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };
4417 struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
4418 int i;
4419
4420@@ -217,14 +220,7 @@
4421 q->queues[i] = &noop_qdisc;
4422
4423 if (opt == NULL) {
4424- q->bands = 3;
4425- memcpy(q->prio2band, prio2band, sizeof(prio2band));
4426- for (i=0; i<3; i++) {
4427- struct Qdisc *child;
4428- child = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
4429- if (child)
4430- q->queues[i] = child;
4431- }
4432+ return -EINVAL;
4433 } else {
4434 int err;
4435
4436@@ -235,7 +231,6 @@
4437 return 0;
4438 }
4439
4440-#ifdef CONFIG_RTNETLINK
4441 static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
4442 {
4443 struct prio_sched_data *q = (struct prio_sched_data *)sch->data;
4444@@ -251,7 +246,6 @@
4445 skb_trim(skb, b - skb->data);
4446 return -1;
4447 }
4448-#endif
4449
4450 static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
4451 struct Qdisc **old)
4452@@ -265,7 +259,11 @@
4453 if (new == NULL)
4454 new = &noop_qdisc;
4455
4456- *old = xchg(&q->queues[band], new);
4457+ sch_tree_lock(sch);
4458+ *old = q->queues[band];
4459+ q->queues[band] = new;
4460+ qdisc_reset(*old);
4461+ sch_tree_unlock(sch);
4462
4463 return 0;
4464 }
4465@@ -322,7 +320,6 @@
4466 }
4467
4468
4469-#ifdef CONFIG_RTNETLINK
4470 static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb,
4471 struct tcmsg *tcm)
4472 {
4473@@ -330,11 +327,11 @@
4474
4475 if (cl - 1 > q->bands)
4476 return -ENOENT;
4477+ tcm->tcm_handle |= TC_H_MIN(cl);
4478 if (q->queues[cl-1])
4479 tcm->tcm_info = q->queues[cl-1]->handle;
4480 return 0;
4481 }
4482-#endif
4483
4484 static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
4485 {
4486@@ -381,9 +378,7 @@
4487 prio_bind,
4488 prio_put,
4489
4490-#ifdef CONFIG_RTNETLINK
4491 prio_dump_class,
4492-#endif
4493 };
4494
4495 struct Qdisc_ops prio_qdisc_ops =
4496@@ -403,9 +398,7 @@
4497 prio_destroy,
4498 prio_tune,
4499
4500-#ifdef CONFIG_RTNETLINK
4501 prio_dump,
4502-#endif
4503 };
4504
4505 #ifdef MODULE
4506@@ -421,3 +414,4 @@
4507 }
4508
4509 #endif
4510+MODULE_LICENSE("GPL");
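The sch_prio.c hunks above, like the sch_red.c, sch_sfq.c, sch_tbf.c and sch_teql.c hunks that follow, migrate enqueue()/requeue() from the old 2.2 convention of returning 1 on success and 0 on failure to the 2.4-style convention in which 0 (NET_XMIT_SUCCESS) means the packet was queued and a non-zero NET_XMIT_* code names the failure, propagated up from the child qdisc. A minimal sketch of what a caller sees under the new convention; example_xmit() is illustrative only, not part of the patch:

static int example_xmit(struct sk_buff *skb, struct Qdisc *q)
{
	int ret = q->enqueue(skb, q);

	if (ret == 0)			/* NET_XMIT_SUCCESS: packet accepted */
		return 0;

	/* ret now tells the caller why the enqueue failed, e.g. NET_XMIT_DROP
	   or NET_XMIT_CN (congestion drop); in both cases the qdisc has
	   already freed the skb, as the hunks below show. */
	return ret;
}

The other prio changes follow the same modernization theme: sch_tree_lock()/sch_tree_unlock() replace start_bh_atomic()/end_bh_atomic(), the RTNETLINK conditionals are dropped, and prio_init() now refuses a missing option block with -EINVAL instead of silently building a default 3-band setup.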
4511diff -urN ../v2.2.21/linux/net/sched/sch_red.c linux/net/sched/sch_red.c
4512--- ../v2.2.21/linux/net/sched/sch_red.c Sat Oct 21 12:11:29 2000
4513+++ linux/net/sched/sch_red.c Fri Aug 2 02:41:03 2002
4514@@ -10,6 +10,8 @@
4515 *
4516 * Changes:
4517 * J Hadi Salim <hadi@nortel.com> 980914: computation fixes
4518+ * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
4519+ * J Hadi Salim <hadi@nortelnetworks.com> 980816: ECN support
4520 */
4521
4522 #include <linux/config.h>
4523@@ -38,6 +40,9 @@
4524 #include <net/sock.h>
4525 #include <net/pkt_sched.h>
4526
4527+#define RED_ECN_ECT 0x02
4528+#define RED_ECN_CE 0x01
4529+
4530
4531 /* Random Early Detection (RED) algorithm.
4532 =======================================
4533@@ -137,6 +142,7 @@
4534 u32 qth_max; /* Max average length threshold: A scaled */
4535 u32 Rmask;
4536 u32 Scell_max;
4537+ unsigned char flags;
4538 char Wlog; /* log(W) */
4539 char Plog; /* random number bits */
4540 char Scell_log;
4541@@ -148,8 +154,43 @@
4542 u32 qR; /* Cached random number */
4543
4544 psched_time_t qidlestart; /* Start of idle period */
4545+ struct tc_red_xstats st;
4546 };
4547
4548+static int red_ecn_mark(struct sk_buff *skb)
4549+{
4550+ if (skb->nh.raw + 20 > skb->tail)
4551+ return 0;
4552+
4553+ switch (skb->protocol) {
4554+ case __constant_htons(ETH_P_IP):
4555+ {
4556+ u8 tos = skb->nh.iph->tos;
4557+
4558+ if (!(tos & RED_ECN_ECT))
4559+ return 0;
4560+
4561+ if (!(tos & RED_ECN_CE))
4562+ IP_ECN_set_ce(skb->nh.iph);
4563+
4564+ return 1;
4565+ }
4566+
4567+ case __constant_htons(ETH_P_IPV6):
4568+ {
4569+ u32 label = *(u32*)skb->nh.raw;
4570+
4571+ if (!(label & __constant_htonl(RED_ECN_ECT<<20)))
4572+ return 0;
4573+			*(u32*)skb->nh.raw = label | __constant_htonl(RED_ECN_CE<<20);
4574+ return 1;
4575+ }
4576+
4577+ default:
4578+ return 0;
4579+ }
4580+}
4581+
4582 static int
4583 red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
4584 {
4585@@ -159,6 +200,8 @@
4586
4587 if (!PSCHED_IS_PASTPERFECT(q->qidlestart)) {
4588 long us_idle;
4589+ int shift;
4590+
4591 PSCHED_GET_TIME(now);
4592 us_idle = PSCHED_TDIFF_SAFE(now, q->qidlestart, q->Scell_max, 0);
4593 PSCHED_SET_PASTPERFECT(q->qidlestart);
4594@@ -179,7 +222,25 @@
4595 I believe that a simpler model may be used here,
4596 but it is field for experiments.
4597 */
4598- q->qave >>= q->Stab[(us_idle>>q->Scell_log)&0xFF];
4599+ shift = q->Stab[us_idle>>q->Scell_log];
4600+
4601+ if (shift) {
4602+ q->qave >>= shift;
4603+ } else {
4604+ /* Approximate initial part of exponent
4605+ with linear function:
4606+ (1-W)^m ~= 1-mW + ...
4607+
4608+			   It seems to be the best solution to the
4609+			   problem of too coarse exponent tabulation.
4610+ */
4611+
4612+ us_idle = (q->qave * us_idle)>>q->Scell_log;
4613+ if (us_idle < q->qave/2)
4614+ q->qave -= us_idle;
4615+ else
4616+ q->qave >>= 1;
4617+ }
4618 } else {
4619 q->qave += sch->stats.backlog - (q->qave >> q->Wlog);
4620 /* NOTE:
4621@@ -200,18 +261,26 @@
4622 sch->stats.backlog += skb->len;
4623 sch->stats.bytes += skb->len;
4624 sch->stats.packets++;
4625- return 1;
4626+ return NET_XMIT_SUCCESS;
4627+ } else {
4628+ q->st.pdrop++;
4629 }
4630-drop:
4631 kfree_skb(skb);
4632 sch->stats.drops++;
4633- return 0;
4634+ return NET_XMIT_DROP;
4635 }
4636 if (q->qave >= q->qth_max) {
4637 q->qcount = -1;
4638 sch->stats.overlimits++;
4639- goto drop;
4640+mark:
4641+ if (!(q->flags&TC_RED_ECN) || !red_ecn_mark(skb)) {
4642+ q->st.early++;
4643+ goto drop;
4644+ }
4645+ q->st.marked++;
4646+ goto enqueue;
4647 }
4648+
4649 if (++q->qcount) {
4650 /* The formula used below causes questions.
4651
4652@@ -234,10 +303,15 @@
4653 q->qcount = 0;
4654 q->qR = net_random()&q->Rmask;
4655 sch->stats.overlimits++;
4656- goto drop;
4657+ goto mark;
4658 }
4659 q->qR = net_random()&q->Rmask;
4660 goto enqueue;
4661+
4662+drop:
4663+ kfree_skb(skb);
4664+ sch->stats.drops++;
4665+ return NET_XMIT_CN;
4666 }
4667
4668 static int
4669@@ -249,7 +323,7 @@
4670
4671 __skb_queue_head(&sch->q, skb);
4672 sch->stats.backlog += skb->len;
4673- return 1;
4674+ return 0;
4675 }
4676
4677 static struct sk_buff *
4678@@ -277,6 +351,7 @@
4679 if (skb) {
4680 sch->stats.backlog -= skb->len;
4681 sch->stats.drops++;
4682+ q->st.other++;
4683 kfree_skb(skb);
4684 return 1;
4685 }
4686@@ -287,17 +362,15 @@
4687 static void red_reset(struct Qdisc* sch)
4688 {
4689 struct red_sched_data *q = (struct red_sched_data *)sch->data;
4690- struct sk_buff *skb;
4691
4692- while((skb=__skb_dequeue(&sch->q))!=NULL)
4693- kfree_skb(skb);
4694+ __skb_queue_purge(&sch->q);
4695 sch->stats.backlog = 0;
4696 PSCHED_SET_PASTPERFECT(q->qidlestart);
4697 q->qave = 0;
4698 q->qcount = -1;
4699 }
4700
4701-static int red_init(struct Qdisc *sch, struct rtattr *opt)
4702+static int red_change(struct Qdisc *sch, struct rtattr *opt)
4703 {
4704 struct red_sched_data *q = (struct red_sched_data *)sch->data;
4705 struct rtattr *tb[TCA_RED_STAB];
4706@@ -312,6 +385,8 @@
4707
4708 ctl = RTA_DATA(tb[TCA_RED_PARMS-1]);
4709
4710+ sch_tree_lock(sch);
4711+ q->flags = ctl->flags;
4712 q->Wlog = ctl->Wlog;
4713 q->Plog = ctl->Plog;
4714 q->Rmask = ctl->Plog < 32 ? ((1<<ctl->Plog) - 1) : ~0UL;
4715@@ -323,12 +398,34 @@
4716 memcpy(q->Stab, RTA_DATA(tb[TCA_RED_STAB-1]), 256);
4717
4718 q->qcount = -1;
4719- PSCHED_SET_PASTPERFECT(q->qidlestart);
4720- MOD_INC_USE_COUNT;
4721+ if (skb_queue_len(&sch->q) == 0)
4722+ PSCHED_SET_PASTPERFECT(q->qidlestart);
4723+ sch_tree_unlock(sch);
4724 return 0;
4725 }
4726
4727-#ifdef CONFIG_RTNETLINK
4728+static int red_init(struct Qdisc* sch, struct rtattr *opt)
4729+{
4730+ int err;
4731+
4732+ MOD_INC_USE_COUNT;
4733+
4734+ if ((err = red_change(sch, opt)) != 0) {
4735+ MOD_DEC_USE_COUNT;
4736+ }
4737+ return err;
4738+}
4739+
4740+
4741+int red_copy_xstats(struct sk_buff *skb, struct tc_red_xstats *st)
4742+{
4743+ RTA_PUT(skb, TCA_XSTATS, sizeof(*st), st);
4744+ return 0;
4745+
4746+rtattr_failure:
4747+ return 1;
4748+}
4749+
4750 static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
4751 {
4752 struct red_sched_data *q = (struct red_sched_data *)sch->data;
4753@@ -344,16 +441,19 @@
4754 opt.Wlog = q->Wlog;
4755 opt.Plog = q->Plog;
4756 opt.Scell_log = q->Scell_log;
4757+ opt.flags = q->flags;
4758 RTA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt);
4759 rta->rta_len = skb->tail - b;
4760
4761+ if (red_copy_xstats(skb, &q->st))
4762+ goto rtattr_failure;
4763+
4764 return skb->len;
4765
4766 rtattr_failure:
4767 skb_trim(skb, b - skb->data);
4768 return -1;
4769 }
4770-#endif
4771
4772 static void red_destroy(struct Qdisc *sch)
4773 {
4774@@ -375,11 +475,9 @@
4775 red_init,
4776 red_reset,
4777 red_destroy,
4778- NULL /* red_change */,
4779+ red_change,
4780
4781-#ifdef CONFIG_RTNETLINK
4782 red_dump,
4783-#endif
4784 };
4785
4786
4787@@ -394,3 +492,4 @@
4788 unregister_qdisc(&red_qdisc_ops);
4789 }
4790 #endif
4791+MODULE_LICENSE("GPL");
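Two notes on the sch_red.c changes above. First, the idle-link fix: while the queue sits idle, the average backlog is supposed to decay as qave * (1-W)^m, where W is the averaging weight and m the idle time measured in Scell_log-sized cells; the userspace-supplied Stab table delivers that factor as a power-of-two shift, and when the tabulated shift rounds to zero the new code falls back to the first-order expansion mentioned in the comment. Restating that comment as a formula (this is just the standard Taylor argument, nothing beyond what the comment already appeals to):

	(1 - W)^m = 1 - mW + \binom{m}{2} W^2 - \dots \approx 1 - mW, \qquad mW \ll 1

so the code subtracts roughly mW * qave from qave, clamping the step to at most halving qave (the us_idle < q->qave/2 test). Second, with the new TC_RED_ECN flag set, a packet that would have been early-dropped is instead ECN-marked by red_ecn_mark() when the flow advertises ECT, counted in tc_red_xstats.marked rather than early, and enqueued normally; flows without ECT still get the early drop.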
4792diff -urN ../v2.2.21/linux/net/sched/sch_sfq.c linux/net/sched/sch_sfq.c
4793--- ../v2.2.21/linux/net/sched/sch_sfq.c Sat Oct 21 12:10:57 2000
4794+++ linux/net/sched/sch_sfq.c Fri Jul 5 22:06:27 2002
4795@@ -105,6 +105,7 @@
4796 /* Parameters */
4797 int perturb_period;
4798 unsigned quantum; /* Allotment per round: MUST BE >= MTU */
4799+ int limit;
4800
4801 /* Variables */
4802 struct timer_list perturb_timer;
4803@@ -275,14 +276,14 @@
4804 q->tail = x;
4805 }
4806 }
4807- if (++sch->q.qlen < SFQ_DEPTH-1) {
4808+ if (++sch->q.qlen < q->limit-1) {
4809 sch->stats.bytes += skb->len;
4810 sch->stats.packets++;
4811- return 1;
4812+ return 0;
4813 }
4814
4815 sfq_drop(sch);
4816- return 0;
4817+ return NET_XMIT_CN;
4818 }
4819
4820 static int
4821@@ -310,12 +311,12 @@
4822 q->tail = x;
4823 }
4824 }
4825- if (++sch->q.qlen < SFQ_DEPTH-1)
4826- return 1;
4827+ if (++sch->q.qlen < q->limit - 1)
4828+ return 0;
4829
4830 sch->stats.drops++;
4831 sfq_drop(sch);
4832- return 0;
4833+ return NET_XMIT_CN;
4834 }
4835
4836
4837@@ -387,16 +388,21 @@
4838 if (opt->rta_len < RTA_LENGTH(sizeof(*ctl)))
4839 return -EINVAL;
4840
4841- start_bh_atomic();
4842+ sch_tree_lock(sch);
4843 q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
4844 q->perturb_period = ctl->perturb_period*HZ;
4845+ if (ctl->limit)
4846+ q->limit = min_t(u32, ctl->limit, SFQ_DEPTH);
4847+
4848+ while (sch->q.qlen >= q->limit-1)
4849+ sfq_drop(sch);
4850
4851 del_timer(&q->perturb_timer);
4852 if (q->perturb_period) {
4853 q->perturb_timer.expires = jiffies + q->perturb_period;
4854 add_timer(&q->perturb_timer);
4855 }
4856- end_bh_atomic();
4857+ sch_tree_unlock(sch);
4858 return 0;
4859 }
4860
4861@@ -416,6 +422,7 @@
4862 q->dep[i+SFQ_DEPTH].next = i+SFQ_DEPTH;
4863 q->dep[i+SFQ_DEPTH].prev = i+SFQ_DEPTH;
4864 }
4865+ q->limit = SFQ_DEPTH;
4866 q->max_depth = 0;
4867 q->tail = SFQ_DEPTH;
4868 if (opt == NULL) {
4869@@ -439,7 +446,6 @@
4870 MOD_DEC_USE_COUNT;
4871 }
4872
4873-#ifdef CONFIG_RTNETLINK
4874 static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
4875 {
4876 struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
4877@@ -449,9 +455,9 @@
4878 opt.quantum = q->quantum;
4879 opt.perturb_period = q->perturb_period/HZ;
4880
4881- opt.limit = SFQ_DEPTH;
4882+ opt.limit = q->limit;
4883 opt.divisor = SFQ_HASH_DIVISOR;
4884- opt.flows = SFQ_DEPTH;
4885+ opt.flows = q->limit;
4886
4887 RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
4888
4889@@ -461,7 +467,6 @@
4890 skb_trim(skb, b - skb->data);
4891 return -1;
4892 }
4893-#endif
4894
4895 struct Qdisc_ops sfq_qdisc_ops =
4896 {
4897@@ -480,9 +485,7 @@
4898 sfq_destroy,
4899 NULL, /* sfq_change */
4900
4901-#ifdef CONFIG_RTNETLINK
4902 sfq_dump,
4903-#endif
4904 };
4905
4906 #ifdef MODULE
4907@@ -496,3 +499,4 @@
4908 unregister_qdisc(&sfq_qdisc_ops);
4909 }
4910 #endif
4911+MODULE_LICENSE("GPL");
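The SFQ change above turns the hard-wired SFQ_DEPTH bound into a configurable per-qdisc limit (still clamped to SFQ_DEPTH, which is 128 in the stock sources as far as I recall), taken from the limit field of the tc_sfq_qopt option block. A minimal sketch of how a configuration tool might fill that block before sending it down; example_fill_sfq_opt() is a made-up helper and the values are arbitrary, only the field names come from the code above:

static void example_fill_sfq_opt(struct tc_sfq_qopt *opt)
{
	memset(opt, 0, sizeof(*opt));
	opt->quantum        = 1514;	/* bytes per round; 0 means "use the device MTU" */
	opt->perturb_period = 10;	/* seconds between hash re-keying */
	opt->limit          = 64;	/* new in this patch; clamped to SFQ_DEPTH */
}

With a limit set, enqueue/requeue return NET_XMIT_CN once the queue reaches limit-1, and lowering the limit on a live qdisc drops the excess immediately via the sfq_drop() loop in sfq_change().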
4912diff -urN ../v2.2.21/linux/net/sched/sch_tbf.c linux/net/sched/sch_tbf.c
4913--- ../v2.2.21/linux/net/sched/sch_tbf.c Sat Oct 21 12:10:47 2000
4914+++ linux/net/sched/sch_tbf.c Sun Mar 31 03:18:30 2002
4915@@ -66,7 +66,7 @@
4916 N(t+delta) = min{B/R, N(t) + delta}
4917
4918 If the first packet in queue has length S, it may be
4919- transmited only at the time t_* when S/R <= N(t_*),
4920+ transmitted only at the time t_* when S/R <= N(t_*),
4921 and in this case N(t) jumps:
4922
4923 N(t_* + 0) = N(t_* - 0) - S/R.
4924@@ -139,7 +139,7 @@
4925 if ((sch->stats.backlog += skb->len) <= q->limit) {
4926 sch->stats.bytes += skb->len;
4927 sch->stats.packets++;
4928- return 1;
4929+ return 0;
4930 }
4931
4932 /* Drop action: undo the things that we just did,
4933@@ -155,7 +155,7 @@
4934 if (sch->reshape_fail==NULL || sch->reshape_fail(skb, sch))
4935 #endif
4936 kfree_skb(skb);
4937- return 0;
4938+ return NET_XMIT_DROP;
4939 }
4940
4941 static int
4942@@ -163,7 +163,7 @@
4943 {
4944 __skb_queue_head(&sch->q, skb);
4945 sch->stats.backlog += skb->len;
4946- return 1;
4947+ return 0;
4948 }
4949
4950 static int
4951@@ -186,7 +186,7 @@
4952 struct Qdisc *sch = (struct Qdisc*)arg;
4953
4954 sch->flags &= ~TCQ_F_THROTTLED;
4955- qdisc_wakeup(sch->dev);
4956+ netif_schedule(sch->dev);
4957 }
4958
4959 static struct sk_buff *
4960@@ -226,15 +226,13 @@
4961 return skb;
4962 }
4963
4964- if (!sch->dev->tbusy) {
4965- long delay = PSCHED_US2JIFFIE(max(-toks, -ptoks));
4966+ if (!netif_queue_stopped(sch->dev)) {
4967+ long delay = PSCHED_US2JIFFIE(max_t(long, -toks, -ptoks));
4968
4969 if (delay == 0)
4970 delay = 1;
4971
4972- del_timer(&q->wd_timer);
4973- q->wd_timer.expires = jiffies + delay;
4974- add_timer(&q->wd_timer);
4975+ mod_timer(&q->wd_timer, jiffies+delay);
4976 }
4977
4978 /* Maybe we have a shorter packet in the queue,
4979@@ -278,7 +276,7 @@
4980 struct tc_tbf_qopt *qopt;
4981 struct qdisc_rate_table *rtab = NULL;
4982 struct qdisc_rate_table *ptab = NULL;
4983- int max_size;
4984+ int max_size,n;
4985
4986 if (rtattr_parse(tb, TCA_TBF_PTAB, RTA_DATA(opt), RTA_PAYLOAD(opt)) ||
4987 tb[TCA_TBF_PARMS-1] == NULL ||
4988@@ -297,18 +295,21 @@
4989 goto done;
4990 }
4991
4992- max_size = psched_mtu(sch->dev);
4993+ for (n = 0; n < 256; n++)
4994+ if (rtab->data[n] > qopt->buffer) break;
4995+ max_size = (n << qopt->rate.cell_log)-1;
4996 if (ptab) {
4997- int n = max_size>>qopt->peakrate.cell_log;
4998- while (n>0 && ptab->data[n-1] > qopt->mtu) {
4999- max_size -= (1<<qopt->peakrate.cell_log);
5000- n--;
5001- }
5002+ int size;
5003+
5004+ for (n = 0; n < 256; n++)
5005+ if (ptab->data[n] > qopt->mtu) break;
5006+ size = (n << qopt->peakrate.cell_log)-1;
5007+ if (size < max_size) max_size = size;
5008 }
5009- if (rtab->data[max_size>>qopt->rate.cell_log] > qopt->buffer)
5010+ if (max_size < 0)
5011 goto done;
5012
5013- start_bh_atomic();
5014+ sch_tree_lock(sch);
5015 q->limit = qopt->limit;
5016 q->mtu = qopt->mtu;
5017 q->max_size = max_size;
5018@@ -317,7 +318,7 @@
5019 q->ptokens = q->mtu;
5020 rtab = xchg(&q->R_tab, rtab);
5021 ptab = xchg(&q->P_tab, ptab);
5022- end_bh_atomic();
5023+ sch_tree_unlock(sch);
5024 err = 0;
5025 done:
5026 if (rtab)
5027@@ -362,7 +363,6 @@
5028 MOD_DEC_USE_COUNT;
5029 }
5030
5031-#ifdef CONFIG_RTNETLINK
5032 static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
5033 {
5034 struct tbf_sched_data *q = (struct tbf_sched_data *)sch->data;
5035@@ -390,7 +390,6 @@
5036 skb_trim(skb, b - skb->data);
5037 return -1;
5038 }
5039-#endif
5040
5041 struct Qdisc_ops tbf_qdisc_ops =
5042 {
5043@@ -409,9 +408,7 @@
5044 tbf_destroy,
5045 tbf_change,
5046
5047-#ifdef CONFIG_RTNETLINK
5048 tbf_dump,
5049-#endif
5050 };
5051
5052
5053@@ -426,3 +423,4 @@
5054 unregister_qdisc(&tbf_qdisc_ops);
5055 }
5056 #endif
5057+MODULE_LICENSE("GPL");
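The most subtle sch_tbf.c change above is the max_size derivation: instead of assuming the device MTU fits the bucket, the configuration path now scans the rate table for the largest packet whose transmission time still fits in the configured budget, and rejects the setup when even the smallest slot does not fit. A restatement of that scan as a standalone helper, for readability only (example_tbf_max_size() is not part of the patch):

/* rtab->data[n] holds the scaled time needed to send a packet whose size
 * falls into slot n, i.e. sizes up to ((n + 1) << cell_log) - 1 bytes. */
static int example_tbf_max_size(const struct qdisc_rate_table *rtab,
				u32 budget, int cell_log)
{
	int n;

	for (n = 0; n < 256; n++)
		if (rtab->data[n] > budget)
			break;

	/* Largest size still within the budget; -1 means "nothing fits". */
	return (n << cell_log) - 1;
}

The configuration path applies this twice in effect: once against qopt->buffer with the rate table and, when a peak rate is given, once against qopt->mtu with the peak-rate table, keeping the smaller result as q->max_size.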
5058diff -urN ../v2.2.21/linux/net/sched/sch_teql.c linux/net/sched/sch_teql.c
5059--- ../v2.2.21/linux/net/sched/sch_teql.c Sat Oct 21 12:10:47 2000
5060+++ linux/net/sched/sch_teql.c Sun Aug 4 18:06:40 2002
5061@@ -97,13 +97,13 @@
5062 if (q->q.qlen <= dev->tx_queue_len) {
5063 sch->stats.bytes += skb->len;
5064 sch->stats.packets++;
5065- return 1;
5066+ return 0;
5067 }
5068
5069 __skb_unlink(skb, &q->q);
5070 kfree_skb(skb);
5071 sch->stats.drops++;
5072- return 0;
5073+ return NET_XMIT_DROP;
5074 }
5075
5076 static int
5077@@ -112,7 +112,7 @@
5078 struct teql_sched_data *q = (struct teql_sched_data *)sch->data;
5079
5080 __skb_queue_head(&q->q, skb);
5081- return 1;
5082+ return 0;
5083 }
5084
5085 static struct sk_buff *
5086@@ -167,7 +167,9 @@
5087 master->slaves = NEXT_SLAVE(q);
5088 if (q == master->slaves) {
5089 master->slaves = NULL;
5090+ sch_dev_queue_lock(&master->dev);
5091 qdisc_reset(master->dev.qdisc);
5092+ sch_dev_queue_unlock(&master->dev);
5093 }
5094 }
5095 skb_queue_purge(&dat->q);
5096@@ -189,6 +191,9 @@
5097
5098 if (dev->hard_header_len > m->dev.hard_header_len)
5099 return -EINVAL;
5100+
5101+ if (&m->dev == dev)
5102+ return -ELOOP;
5103
5104 q->m = m;
5105