[packages/kernel.git] / htb2_2.2.17.diff
1--- linux-2.2.17orig/net/sched/Config.in Sun Mar 21 16:22:00 1999
2+++ linux-2.2/net/sched/Config.in Sun Sep 2 10:23:27 2001
3@@ -5,6 +5,7 @@ define_bool CONFIG_NETLINK y
4 define_bool CONFIG_RTNETLINK y
5 tristate 'CBQ packet scheduler' CONFIG_NET_SCH_CBQ
6 tristate 'CSZ packet scheduler' CONFIG_NET_SCH_CSZ
7+tristate 'HTB packet scheduler' CONFIG_NET_SCH_HTB
8 #tristate 'H-PFQ packet scheduler' CONFIG_NET_SCH_HPFQ
9 #tristate 'H-FSC packet scheduler' CONFIG_NET_SCH_HFCS
10 tristate 'The simplest PRIO pseudoscheduler' CONFIG_NET_SCH_PRIO
11--- linux-2.2.17orig/net/sched/Makefile Sun Mar 21 16:22:00 1999
12+++ linux-2.2/net/sched/Makefile Tue Oct 23 08:45:48 2001
13@@ -69,6 +69,14 @@ else
14 endif
15 endif
16
17+ifeq ($(CONFIG_NET_SCH_HTB), y)
18+O_OBJS += sch_htb.o
19+else
20+ ifeq ($(CONFIG_NET_SCH_HTB), m)
21+ M_OBJS += sch_htb.o
22+ endif
23+endif
24+
25 ifeq ($(CONFIG_NET_SCH_RED), y)
26 O_OBJS += sch_red.o
27 else
28--- linux-2.2.17orig/net/sched/sch_htb.c Tue Sep 25 16:15:54 2001
29+++ linux-2.2/net/sched/sch_htb.c Fri Dec 21 11:18:10 2001
30@@ -0,0 +1,1215 @@
31+/* vim: ts=8 sw=4
32+ * net/sched/sch_htb.c Hierarchical token bucket
33+ *
34+ * This program is free software; you can redistribute it and/or
35+ * modify it under the terms of the GNU General Public License
36+ * as published by the Free Software Foundation; either version
37+ * 2 of the License, or (at your option) any later version.
38+ *
39+ * Authors: Martin Devera, <devik@cdi.cz>
40+ *
41+ * Credits (in time order):
42+ * Ondrej Kraus, <krauso@barr.cz>
43+ * found missing INIT_QDISC(htb)
44+ * Vladimir Smelhaus, Aamer Akhter, Bert Hubert
45+ * helped a lot to locate nasty class stall bug
46+ * Andi Kleen, Jamal Hadi, Bert Hubert
47+ * code review and helpful comments on shaping
48+ * and many others. thanks.
49+ *
50+ * $Id$
51+ */
52+
53+#include <linux/config.h>
54+#include <linux/module.h>
55+#include <asm/uaccess.h>
56+#include <asm/system.h>
57+#include <asm/bitops.h>
58+#include <linux/types.h>
59+#include <linux/kernel.h>
60+#include <linux/version.h>
61+#include <linux/sched.h>
62+#include <linux/string.h>
63+#include <linux/mm.h>
64+#include <linux/socket.h>
65+#include <linux/sockios.h>
66+#include <linux/in.h>
67+#include <linux/errno.h>
68+#include <linux/interrupt.h>
69+#include <linux/if_ether.h>
70+#include <linux/inet.h>
71+#include <linux/netdevice.h>
72+#include <linux/etherdevice.h>
73+#include <linux/notifier.h>
74+#include <net/ip.h>
75+#include <net/route.h>
76+#include <linux/skbuff.h>
77+#include <net/sock.h>
78+#include <net/pkt_sched.h>
79+
80+/* HTB algorithm.
81+ Author: devik@cdi.cz
82+ =======================================
83+ HTB is like TBF with multiple classes. It is also similar to CBQ because
84+ it allows a priority to be assigned to each class in the hierarchy.
85+ In fact it is another implementation of Floyd's formal sharing.
86+
87+ Levels:
88+ Each class is assigned a level. A leaf ALWAYS has level 0 and root
89+ classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
90+ one less than their parent.
91+*/
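/* Worked example of the level numbering above (illustrative only): with
 * TC_HTB_MAXDEPTH == 4 a root class sits at level 3, an interior child of
 * the root at level 2, and every leaf at level 0 however deep it hangs;
 * htb_change_class() below computes an interior node's level as
 * (its parent's level if it has a parent, else TC_HTB_MAXDEPTH) - 1. */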
92+
93+#define HTB_HSIZE 16 /* classid hash size */
94+#define HTB_EWMAC 2 /* rate average over HTB_EWMAC*HTB_HSIZE sec */
95+#define HTB_DEBUG 1 /* compile debugging support (activated by tc tool) */
96+#define HTB_QLOCK(S) spin_lock_bh(&(S)->dev->queue_lock)
97+#define HTB_QUNLOCK(S) spin_unlock_bh(&(S)->dev->queue_lock)
98+
99+/* ======== Begin of part to be deleted for 2.4 merged one ========= */
100+#if LINUX_VERSION_CODE < 0x20300
101+#define MODULE_LICENSE(X)
102+
103+#define NET_XMIT_SUCCESS 1
104+#define NET_XMIT_DROP 0
105+
106+static inline void __skb_queue_purge(struct sk_buff_head *list)
107+{
108+ struct sk_buff *skb;
109+ while ((skb=__skb_dequeue(list))!=NULL)
110+ kfree_skb(skb);
111+}
112+#define del_timer_sync(t) del_timer(t)
113+
114+#define netif_schedule qdisc_wakeup
115+#define netif_queue_stopped(D) (D->tbusy)
116+#define sch_tree_lock(S) start_bh_atomic()
117+#define sch_tree_unlock(S) end_bh_atomic()
118+#undef HTB_QLOCK
119+#undef HTB_QUNLOCK
120+#define HTB_QLOCK(S)
121+#define HTB_QUNLOCK(S)
122+#ifndef BUG_TRAP
123+#define BUG_TRAP(x) if (!(x)) { printk("Assertion (" #x ") failed at " __FILE__ "(%d):" __FUNCTION__ "\n", __LINE__); }
124+#endif
125+#endif
126+/* ======== End of part to be deleted for 2.4 merged one =========== */
127+
128+/* debugging support; S is subsystem, these are defined:
129+ 0 - netlink messages
130+ 1 - enqueue
131+ 2 - drop & requeue
132+ 3 - dequeue main
133+ 4 - dequeue one prio DRR part
134+ 5 - dequeue class accounting
135+ 6 - dequeue rcache (ready level computation)
136+ 10 - rate estimator
137+ 11 - classifier
138+ 12 - fast dequeue cache
139+
140+ L is level; 0 = none, 1 = basic info, 2 = detailed, 3 = full
141+ the q->debug uint32 contains 16 2-bit fields, one per subsystem, starting
142+ from the LSB
143+ */
144+#if HTB_DEBUG
145+#define HTB_DBG(S,L,FMT,ARG...) if (((q->debug>>(2*S))&3) >= L) \
146+ printk(KERN_DEBUG FMT,##ARG)
147+#else
148+#define HTB_DBG(S,L,FMT,ARG...)
149+#endif
150+
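/* Worked example of the debug encoding above (illustrative values only):
 * to get level-2 messages from subsystem 3 (dequeue main) and level-1
 * messages from subsystem 0 (netlink), tc would pass
 *   debug = (2 << (2*3)) | (1 << (2*0)) == 0x81,
 * so HTB_DBG(3,2,...) and HTB_DBG(0,1,...) print while HTB_DBG(3,3,...)
 * stays silent. */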
151+
152+/* used internally to pass the status of a single class */
153+enum htb_cmode {
154+ HTB_CANT_SEND, /* class can't send and can't borrow */
155+ HTB_MAY_BORROW, /* class can't send but may borrow */
156+ HTB_CAN_SEND /* class can send */
157+};
158+#define HTB_F_INJ 0x10000 /* to mark dequeue level as injected one */
159+
160+/* often used circular list of classes; I didn't use the generic linux
161+ doubly linked list to avoid casts and because I rely on some behaviour
162+ of the insert and delete functions; an item not bound to a list is guaranteed
163+ to have its prev member NULL (we don't mangle the next pointer as we often
164+ need it) */
165+struct htb_litem {
166+ struct htb_class *prev, *next;
167+};
168+/* circular list insert and delete macros; these also maintain the
169+ correct value of the pointer to the list; insert adds the 'new' class
170+ before the 'cl' class using the prev/next member 'list' */
171+#define HTB_INSERTB(list,cl,new) \
172+do { if (!cl) new->list.prev = cl = new; \
173+ new->list.next = cl; new->list.prev = cl->list.prev; \
174+ cl->list.prev->list.next = cl->list.prev = new; } while(0)
175+
176+/* remove 'cl' class from 'list' repairing 'ptr' if not null */
177+#define HTB_DELETE(list,cl,ptr) do { \
178+ if (cl->list.prev) { cl->list.prev->list.next = cl->list.next; \
179+ cl->list.next->list.prev = cl->list.prev; \
180+ if (ptr == cl) ptr = cl->list.next; \
181+ if (ptr == cl) ptr = NULL; cl->list.prev = NULL; } \
182+ else printk(KERN_ERR "htb: DELETE BUG [" #list "," #cl "," #ptr "]\n"); \
183+ } while (0)
184+
185+/* interior & leaf nodes; props specific to leaves are marked L: */
186+struct htb_class
187+{
188+ /* general class parameters */
189+ u32 classid;
190+ struct tc_stats stats; /* generic stats */
191+ struct tc_htb_xstats xstats;/* our special stats */
192+ int refcnt; /* usage count of this class */
193+ struct Qdisc *q; /* L: elem. qdisc */
194+
195+ /* rate measurement counters */
196+ unsigned long rate_bytes,sum_bytes;
197+ unsigned long rate_packets,sum_packets;
198+
199+ /* DRR scheduler parameters */
200+ int quantum; /* L: round quantum computed from rate */
201+ int deficit[TC_HTB_MAXDEPTH]; /* L: deficit for class at level */
202+ char prio; /* L: priority of the class; 0 is the highest */
203+ char aprio; /* L: prio at which we last added to the active list;
204+ used to change priority at runtime */
205+ /* topology */
206+ char level; /* our level (see above) */
207+ char injectd; /* distance from injected parent */
208+ struct htb_class *parent; /* parent class */
209+ struct htb_class *children; /* pointer to children list */
210+ struct htb_litem hlist; /* classid hash list */
211+ struct htb_litem active; /* L: prio level active DRR list */
212+ struct htb_litem sibling; /* sibling list */
213+
214+ /* class attached filters */
215+ struct tcf_proto *filter_list;
216+ int filter_cnt;
217+
218+ /* token bucket parameters */
219+ struct qdisc_rate_table *rate; /* rate table of the class itself */
220+ struct qdisc_rate_table *ceil; /* ceiling rate (limits borrows too) */
221+ long buffer,cbuffer; /* token bucket depth/rate */
222+ long mbuffer; /* max wait time */
223+ long tokens,ctokens; /* current number of tokens */
224+ psched_time_t t_c; /* checkpoint time */
225+
226+ /* walk result cache for leaves */
227+ unsigned long rcache_sn; /* SN of cache validity */
228+ unsigned rc_level; /* victim's level */
229+};
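
/* Usage sketch for the circular-list macros above (hypothetical helper,
 * never called; the real users are htb_change_class() and
 * htb_destroy_class() below): */
static __inline__ void htb_list_example(struct htb_class *a, struct htb_class *b)
{
	struct htb_class *head = NULL;	/* list pointer kept up to date by the macros */
	HTB_INSERTB(sibling, head, a);	/* a starts the circle, head == a */
	HTB_INSERTB(sibling, head, b);	/* b inserted before a */
	HTB_DELETE(sibling, b, head);	/* b unlinked; head repaired if it pointed at b */
}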
230+
231+/* TODO: maybe compute rate when size is too large .. or drop ? */
232+static __inline__ long L2T(struct htb_class *cl,struct qdisc_rate_table *rate,
233+ int size)
234+{
235+ int slot = size >> rate->rate.cell_log;
236+ if (slot > 255) {
237+ cl->xstats.giants++;
238+ slot = 255;
239+ }
240+ return rate->data[slot];
241+}
242+
243+struct htb_sched
244+{
245+ struct htb_class *root; /* root classes circular list */
246+ struct htb_class *hash[HTB_HSIZE]; /* hashed by classid */
247+
248+ /* active classes table; this needs explanation. This table contains
249+ one set of pointers per priority, which is obvious. The set contains
250+ one pointer per class level in the same way as cl->deficit is
251+ independent for each level. This allows us to maintain correct
252+ DRR position independent of borrowing level.
253+ If we used single active/deficit items then DRR fairness would suffer
254+ from frequent class level changes.
255+ Note that htb_[de]activate must be used to update this item
256+ because it needs to keep all pointers in set coherent. */
257+ struct htb_class *active[TC_HTB_NUMPRIO][TC_HTB_MAXDEPTH];
258+
259+ int defcls; /* class where unclassified flows go to */
260+ u32 debug; /* subsystem debug levels */
261+
262+ /* filters for qdisc itself */
263+ struct tcf_proto *filter_list;
264+ int filter_cnt;
265+
266+ unsigned long sn; /* result cache serial number */
267+ int rate2quantum; /* quant = rate / rate2quantum */
268+ psched_time_t now; /* cached dequeue time */
269+ long delay; /* how long to deactivate for */
270+ struct timer_list timer; /* send delay timer */
271+ struct timer_list rttim; /* rate computer timer */
272+ int recmp_bucket; /* which hash bucket to recompute next */
273+
274+ /* cache of last dequeued class */
275+ struct htb_class *last_tx;
276+ int use_dcache;
277+
278+ /* non-shaped skbs; let them go directly through */
279+ struct sk_buff_head direct_queue;
280+ int direct_qlen; /* max qlen of above */
281+
282+ /* statistics (see tc_htb_glob for explanation) */
283+ long deq_rate,deq_rate_c;
284+ long utilz,utilz_c;
285+ long trials,trials_c;
286+ long dcache_hits;
287+ long direct_pkts;
288+};
289+
290+/* compute hash of size HTB_HSIZE for given handle */
291+static __inline__ int htb_hash(u32 h)
292+{
293+#if HTB_HSIZE != 16
294+ #error "Declare new hash for your HTB_HSIZE"
295+#endif
296+ h ^= h>>8; /* stolen from cbq_hash */
297+ h ^= h>>4;
298+ return h & 0xf;
299+}
300+
301+/* find class in global hash table using given handle */
302+static __inline__ struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
303+{
304+ struct htb_sched *q = (struct htb_sched *)sch->data;
305+ int h = htb_hash(handle);
306+ struct htb_class *cl;
307+ if (TC_H_MAJ(handle) != sch->handle) return NULL;
308+ cl = q->hash[h];
309+ if (cl) do {
310+ if (cl->classid == handle) return cl;
311+
312+ } while ((cl = cl->hlist.next) != q->hash[h]);
313+ return NULL;
314+}
315+
316+/* classify packet into class TODO: use inner filters & marks here */
317+static struct htb_class *htb_clasify(struct sk_buff *skb, struct Qdisc *sch)
318+{
319+ struct htb_sched *q = (struct htb_sched *)sch->data;
320+ struct htb_class *cl;
321+ struct tcf_result res;
322+ struct tcf_proto *tcf;
323+
324+ /* allow class selection by setting skb->priority to a valid classid;
325+ note that the nfmark can be used too by attaching an fw filter with no
326+ rules in it */
327+ if (skb->priority == sch->handle)
328+ return NULL; /* X:0 (direct flow) selected */
329+ if ((cl = htb_find(skb->priority,sch)) != NULL)
330+ return cl;
331+
332+ tcf = q->filter_list;
333+ while (tcf && !tc_classify(skb, tcf, &res)) {
334+ if (res.classid == sch->handle)
335+ return NULL; /* X:0 (direct flow) selected */
336+ if ((cl = htb_find(res.classid,sch)) == NULL)
337+ break; /* filter selected invalid classid */
338+ if (!cl->level)
339+ return cl; /* we hit leaf; return it */
340+
341+ /* we have got inner class; apply inner filter chain */
342+ tcf = cl->filter_list;
343+ }
344+ /* classification failed; try to use default class */
345+ return htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle),q->defcls),sch);
346+}
347+
348+/* inserts cl into appropriate active lists (for all levels) */
349+static __inline__ void htb_activate(struct htb_sched *q,struct htb_class *cl)
350+{
351+ if (!cl->active.prev) {
352+ struct htb_class **ap = q->active[(int)(cl->aprio=cl->prio)];
353+ int i = !ap[0];
354+ HTB_INSERTB(active,ap[0],cl);
355+ if (i) /* set also all level pointers */
356+ for (i = 1; i < TC_HTB_MAXDEPTH; i++) ap[i] = ap[0];
357+ }
358+}
359+
360+/* remove cl from active lists; lev is level at which we dequeued
361+ so that we know that active[prio][lev] points to cl */
362+static __inline__ void
363+htb_deactivate(struct htb_sched *q,struct htb_class *cl,int lev)
364+{
365+ int i;
366+ struct htb_class **ap = q->active[(int)cl->aprio];
367+ HTB_DELETE(active,cl,ap[lev]);
368+ if (ap[lev]) {
369+ /* repair other level pointers if they've pointed
370+ to the deleted class */
371+ for (i = 0; i < TC_HTB_MAXDEPTH; i++)
372+ if (ap[i] == cl) ap[i] = ap[lev];
373+ } else
374+ memset(ap,0,sizeof(*ap)*TC_HTB_MAXDEPTH);
375+}
376+
377+static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
378+{
379+ struct htb_sched *q = (struct htb_sched *)sch->data;
380+ struct htb_class *cl = htb_clasify(skb,sch);
381+
382+ if (!cl || !cl->q) {
383+ /* bad class; enqueue to helper queue */
384+ if (q->direct_queue.qlen < q->direct_qlen) {
385+ __skb_queue_tail(&q->direct_queue, skb);
386+ q->direct_pkts++;
387+ } else {
388+ kfree_skb (skb);
389+ sch->stats.drops++;
390+ return NET_XMIT_DROP;
391+ }
392+ } else if (cl->q->enqueue(skb, cl->q) != NET_XMIT_SUCCESS) {
393+ sch->stats.drops++;
394+ cl->stats.drops++;
395+ return NET_XMIT_DROP;
396+ } else {
397+ cl->stats.packets++; cl->stats.bytes += skb->len;
398+ htb_activate (q,cl);
399+ }
400+
401+ sch->q.qlen++;
402+ sch->stats.packets++; sch->stats.bytes += skb->len;
403+ HTB_DBG(1,1,"htb_enq_ok cl=%X skb=%p\n",cl?cl->classid:0,skb);
404+ return NET_XMIT_SUCCESS;
405+}
406+
407+static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
408+{
409+ struct htb_sched *q = (struct htb_sched *)sch->data;
410+ struct htb_class *cl = htb_clasify(skb,sch);
411+
412+ if (!cl || !cl->q) {
413+ /* bad class; enqueue to helper queue */
414+ if (q->direct_queue.qlen < q->direct_qlen) {
415+ __skb_queue_tail(&q->direct_queue, skb);
416+ q->direct_pkts++;
417+ } else {
418+ kfree_skb (skb);
419+ sch->stats.drops++;
420+ return NET_XMIT_DROP;
421+ }
422+ } else if (cl->q->ops->requeue(skb, cl->q) != NET_XMIT_SUCCESS) {
423+ sch->stats.drops++;
424+ cl->stats.drops++;
425+ return NET_XMIT_DROP;
426+ } else
427+ htb_activate (q,cl);
428+
429+ sch->q.qlen++;
430+ HTB_DBG(1,1,"htb_req_ok cl=%X skb=%p\n",cl?cl->classid:0,skb);
431+ return NET_XMIT_SUCCESS;
432+}
433+
434+static void htb_timer(unsigned long arg)
435+{
436+ struct Qdisc *sch = (struct Qdisc*)arg;
437+ sch->flags &= ~TCQ_F_THROTTLED;
438+ wmb();
439+ netif_schedule(sch->dev);
440+}
441+
442+#define RT_GEN(D,R) R+=D-(R/HTB_EWMAC);D=0
443+static void htb_rate_timer(unsigned long arg)
444+{
445+ struct Qdisc *sch = (struct Qdisc*)arg;
446+ struct htb_sched *q = (struct htb_sched *)sch->data;
447+ struct htb_class *cl;
448+
449+ /* lock queue so that we can muck with it */
450+ HTB_QLOCK(sch);
451+ HTB_DBG(10,1,"htb_rttmr j=%ld\n",jiffies);
452+
453+ q->rttim.expires = jiffies + HZ;
454+ add_timer(&q->rttim);
455+
456+ /* scan and recompute one bucket at a time */
457+ if (++q->recmp_bucket >= HTB_HSIZE) q->recmp_bucket = 0;
458+ if ((cl = q->hash[q->recmp_bucket]) != NULL) do {
459+ HTB_DBG(10,2,"htb_rttmr_cl cl=%X sbyte=%lu spkt=%lu\n",cl->classid,cl->sum_bytes,cl->sum_packets);
460+ RT_GEN (cl->sum_bytes,cl->rate_bytes);
461+ RT_GEN (cl->sum_packets,cl->rate_packets);
462+ } while ((cl = cl->hlist.next) != q->hash[q->recmp_bucket]);
463+
464+ /* global stats */
465+ RT_GEN (q->trials_c,q->trials);
466+ RT_GEN (q->utilz_c,q->utilz);
467+ RT_GEN (q->deq_rate_c,q->deq_rate);
468+
469+ HTB_QUNLOCK(sch);
470+}
471+
472+/* test whether class can send or borrow packet */
473+static enum htb_cmode
474+htb_class_mode(struct htb_sched *q, struct htb_class *cl)
475+{
476+ long toks,diff;
477+ diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer, 0);
478+ HTB_DBG(6,3,"htb_cm diff=%ld\n",diff);
479+
480+ /* check whether we are over ceil */
481+ if ((toks = (cl->ctokens + diff)) < 0) {
482+ if (q->delay > -toks || !q->delay) q->delay = -toks;
483+ return HTB_CANT_SEND;
484+ }
485+
486+ /* our regular rate */
487+ if ((toks = (cl->tokens + diff)) >= 0)
488+ return HTB_CAN_SEND;
489+
490+ /* record time when we can transmit */
491+ if (q->delay > -toks || !q->delay) q->delay = -toks;
492+
493+ return HTB_MAY_BORROW;
494+}
495+
496+/* computes (possibly ancestor) class ready to send; cl is leaf;
497+ cl's rc_level is then filled with level we are borrowing at;
498+ it is set to TC_HTB_MAXDEPTH if we can't borrow at all and can be
499+ ORed with HTB_F_INJ if bw was injected. */
500+static void htb_ready_level(struct htb_sched *q,struct htb_class *cl)
501+{
502+ struct htb_class *stack[TC_HTB_MAXDEPTH],**sp = stack;
503+ int level = TC_HTB_MAXDEPTH, injdist = cl->injectd;
504+ enum htb_cmode mode;
505+ HTB_DBG(6,1,"htb_rl cl=%X tok=%ld ctok=%ld buf=%ld cbuf=%ld\n",cl->classid,cl->tokens,cl->ctokens,cl->buffer,cl->cbuffer);
506+
507+ /* traverse tree upward looking for ready class */
508+ for (;;) {
509+ *sp++ = cl; /* push at stack */
510+
511+ /* test mode */
512+ mode = htb_class_mode(q,cl);
513+ HTB_DBG(6,2,"htb_clmod cl=%X m=%d tok=%ld ctok=%ld buf=%ld cbuf=%ld\n",cl->classid,mode,cl->tokens,cl->ctokens,cl->buffer,cl->cbuffer);
514+ if (mode != HTB_MAY_BORROW) {
515+ if (mode == HTB_CAN_SEND) level = cl->level;
516+ break;
517+ }
518+ /* update injdist from current node */
519+ if (injdist > cl->injectd) injdist = cl->injectd;
520+
521+ /* if this is leaf's injector then resolve borrow positively */
522+ if (!injdist--) {
523+ /* don't cache this result in interior nodes */
524+ stack[0]->rc_level = cl->level|HTB_F_INJ;
525+ stack[0]->rcache_sn = q->sn;
526+ return;
527+ }
528+ if ((cl = cl->parent) == NULL) break;
529+ if (q->sn == cl->rcache_sn) {
530+ /* the node has already computed result; use it */
531+ level = cl->rc_level; break;
532+ }
533+ }
534+ while (--sp >= stack) { /* update mode cache */
535+ (*sp)->rcache_sn = q->sn;
536+ (*sp)->rc_level = level;
537+ }
538+}
539+
540+/* pull packet from class and charge to ancestors */
541+static struct sk_buff *
542+htb_dequeue_class(struct Qdisc *sch, struct htb_class *cl)
543+{
544+ struct htb_sched *q = (struct htb_sched *)sch->data;
545+ long toks,diff;
546+ int injecting = cl->rc_level & HTB_F_INJ, injdist = cl->injectd;
547+ int level = cl->rc_level & 0xff;
548+ struct sk_buff *skb = cl->q->dequeue(cl->q);
549+ HTB_DBG(5,1,"htb_deq_cl cl=%X skb=%p lev=%d inj=%d\n",cl->classid,skb,level,injecting);
550+ if (!skb) return NULL;
551+
552+ /* we have got skb, account it to victim and its parents
553+ and also to all ceil estimators under victim */
554+ while (cl) {
555+ diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer, 0);
556+
557+#define HTB_ACCNT(T,B,R) toks = diff + cl->T; \
558+ if (toks > cl->B) toks = cl->B; \
559+ toks -= L2T(cl, cl->R, skb->len); \
560+ if (toks <= -cl->mbuffer) toks = 1-cl->mbuffer; \
561+ cl->T = toks
562+
563+ HTB_ACCNT (ctokens,cbuffer,ceil);
564+ if (cl->level >= level) {
565+ if (cl->level == level) cl->xstats.lends++;
566+ HTB_ACCNT (tokens,buffer,rate);
567+ } else {
568+ cl->xstats.borrows++;
569+ cl->tokens += diff; /* we moved t_c; update tokens */
570+ }
571+ cl->t_c = q->now;
572+ HTB_DBG(5,2,"htb_deq_clp cl=%X clev=%d diff=%ld\n",cl->classid,cl->level,diff);
573+
574+ /* update rate counters */
575+ cl->sum_bytes += skb->len; cl->sum_packets++;
576+
577+ /* update byte stats except for leaves which are already updated */
578+ if (cl->level) {
579+ cl->stats.bytes += skb->len;
580+ cl->stats.packets++;
581+ }
582+ /* finish if we hit stop-class and we are injecting */
583+ if (injecting) {
584+ if (injdist > cl->injectd) injdist = cl->injectd;
585+ if (!injdist--) {
586+ cl->xstats.injects++; break;
587+ }
588+ }
589+ cl = cl->parent;
590+ }
591+ return skb;
592+}
593+
594+/* dequeues packet at given priority borrowing from given level;
595+ if unsuccessful it returns the level at which someone can
596+ dequeue. If it sets level to TC_HTB_MAXDEPTH then no one can. */
597+static struct sk_buff *htb_dequeue_prio(struct Qdisc *sch,int prio,int *level)
598+{
599+ struct sk_buff *skb = NULL;
600+ struct htb_sched *q = (struct htb_sched *)sch->data;
601+ struct htb_class **ap = q->active[prio], *cl = ap[*level];
602+ int done,top = TC_HTB_MAXDEPTH,rclev;
603+
604+ HTB_DBG(4,1,"htb_deq_pr pr=%d lev=%d cl=%X\n",prio,*level,cl->classid);
605+ /* this is DRR algorithm */
606+ do {
607+ done = 1;
608+ do {
609+ /* catch empty classes here; note that we don't remove them
610+ immediately after dequeue but rather delay the removal to the next
611+ DRR round, because if a packet arrives for the just emptied class
612+ then we don't need to remove and re-add it */
613+ if (!cl->q->q.qlen) {
614+ ap[*level] = cl; /* needed for HTB_DELETE in deactivate */
615+ htb_deactivate (q,cl,*level);
616+
617+ HTB_DBG(4,2,"htb_deq_deact cl=%X ncl=%X\n",cl->classid,ap[*level]?ap[*level]->classid:0);
618+ if (ap[*level]) continue;
619+ *level = TC_HTB_MAXDEPTH;
620+ return NULL; /* NO class remains active */
621+ }
622+ /* test whether class can send at all borrowing from level */
623+ if (cl->rcache_sn != q->sn) htb_ready_level(q,cl);
624+ rclev = cl->rc_level & 0xff; /* filter injecting flag out */
625+
626+ HTB_DBG(4,2,"htb_deq_rd cl=%X rc_lev=0x%x dfct=%d qnt=%d\n",
627+ cl->classid,cl->rc_level,cl->deficit[*level],cl->quantum);
628+
629+ if (rclev == TC_HTB_MAXDEPTH) {
630+ /* TODO: overlimit increment here is not proven correct */
631+ if (cl->deficit[*level] > 0) cl->stats.overlimits++;
632+ continue; /* can't send or borrow */
633+ }
634+ /* if we can't send at this level, remember where we can */
635+ if (rclev > *level) {
636+ if (rclev < top) /* keep lowest level */
637+ top = rclev;
638+
639+ HTB_DBG(4,2,"htb_deq_badlev top=%d\n",top);
640+ continue;
641+ }
642+ if (cl->deficit[*level] <= 0) {
643+ /* no allotment left; increase it and try again */
644+ done = 0; cl->deficit[*level] += cl->quantum;
645+ continue;
646+ }
647+ if ((skb = htb_dequeue_class(sch,cl)) == NULL) {
648+ /* nonempty class can't dequeue, so mark it as such;
649+ note that rcache_sn is already set and thus this remarking
650+ will be valid only for the rest of this dequeue; this is
651+ possible if the child class is non work conserving */
652+ cl->rc_level = TC_HTB_MAXDEPTH;
653+
654+ HTB_DBG(4,2,"htb_deq_noskb cl=%X len=%d\n",cl->classid,cl->q->q.qlen);
655+ continue;
656+ }
657+ sch->q.qlen--;
658+ /* prepare next class if we can't stay valid */
659+ if ((cl->deficit[*level] -= skb->len) <= 0) cl = cl->active.next;
660+ else if (q->use_dcache)
661+ q->last_tx = cl; /* cache cl if it still can transmit */
662+ ap[*level] = cl;
663+
664+ HTB_DBG(4,1,"htb_deq_haspkt ncl=%X sqlen=%d\n",cl->classid,sch->q.qlen);
665+ return skb;
666+
667+ } while ((cl = cl->active.next) != ap[*level]);
668+
669+ } while (!done);
670+ *level = top;
671+ HTB_DBG(4,1,"htb_deq_quit top=%d\n",top);
672+ return NULL;
673+}
674+
675+static struct sk_buff *htb_dequeue(struct Qdisc *sch)
676+{
677+ struct sk_buff *skb = NULL;
678+ struct htb_sched *q = (struct htb_sched *)sch->data;
679+ int prio,oklev,okprio = 0 /* avoid unused warning */,lev,i;
680+ struct htb_class *cl;
681+ psched_time_t endt;
682+
683+ HTB_DBG(3,1,"htb_deq dircnt=%d ltx=%X\n",skb_queue_len(&q->direct_queue),
684+ q->last_tx?q->last_tx->classid:0);
685+
686+ /* try to dequeue direct packets as high prio (!) to minimize cpu work */
687+ if ((skb = __skb_dequeue(&q->direct_queue)) != NULL) {
688+ sch->flags &= ~TCQ_F_THROTTLED;
689+ sch->q.qlen--;
690+ return skb;
691+ }
692+
693+ PSCHED_GET_TIME(q->now); /* htb_dequeue_class needs it too */
694+ q->delay = 0; q->sn++;
695+
696+ /* well here I bite CBQ's speed :-) if last dequeued class is
697+ still active and is not in deficit then we can dequeue it again */
698+ if ((cl = q->last_tx) != NULL && cl->q->q.qlen > 0 &&
699+ cl->deficit[cl->rc_level & 0xff] > 0 &&
700+ (skb = htb_dequeue_class(sch,cl)) != NULL) {
701+ sch->q.qlen--;
702+ cl->deficit[cl->rc_level & 0xff] -= skb->len;
703+ sch->flags &= ~TCQ_F_THROTTLED;
704+ q->dcache_hits++;
705+ HTB_DBG(3,1,"htb_deq_hit skb=%p\n",skb);
706+ return skb;
707+ }
708+ q->last_tx = NULL; /* can't use cache ? invalidate */
709+
710+ for (i = 0; i < TC_HTB_MAXDEPTH; i++) {
711+ /* first try: dequeue leaves (level 0) */
712+ oklev = TC_HTB_MAXDEPTH;
713+ q->trials_c++;
714+ for (prio = 0; prio < TC_HTB_NUMPRIO; prio++) {
715+ if (!q->active[prio][0]) continue;
716+ lev = 0; skb = htb_dequeue_prio(sch, prio, &lev);
717+ HTB_DBG(3,2,"htb_deq_1 i=%d p=%d skb=%p blev=%d\n",i,prio,skb,lev);
718+ if (skb) {
719+ sch->flags &= ~TCQ_F_THROTTLED;
720+ goto fin;
721+ }
722+ if (lev < oklev) {
723+ oklev = lev; okprio = prio;
724+ }
725+ }
726+ if (oklev >= TC_HTB_MAXDEPTH) break;
727+ /* second try: use ok level we learned in first try;
728+ it really should succeed */
729+ q->trials_c++;
730+ skb = htb_dequeue_prio(sch, okprio, &oklev);
731+ HTB_DBG(3,2,"htb_deq_2 p=%d lev=%d skb=%p\n",okprio,oklev,skb);
732+ if (skb) {
733+ sch->flags &= ~TCQ_F_THROTTLED;
734+ goto fin;
735+ }
736+ /* probably the qdisc at oklev can't transmit - it is not a good
737+ idea to have TBF as HTB's child! retry with that node
738+ disabled */
739+ }
740+ if (i >= TC_HTB_MAXDEPTH)
741+ printk(KERN_ERR "htb: too many dequeue trials\n");
742+
743+ /* no one gave us a packet; set up the timer if someone wants it */
744+ if (sch->q.qlen && !netif_queue_stopped(sch->dev) && q->delay) {
745+ long delay = PSCHED_US2JIFFIE(q->delay);
746+ if (delay == 0) delay = 1;
747+ if (delay > 5*HZ) {
748+ if (net_ratelimit())
749+ printk(KERN_INFO "HTB delay %ld > 5sec\n", delay);
750+ delay = 5*HZ;
751+ }
752+ del_timer(&q->timer);
753+ q->timer.expires = jiffies + delay;
754+ add_timer(&q->timer);
755+ sch->flags |= TCQ_F_THROTTLED;
756+ sch->stats.overlimits++;
757+ HTB_DBG(3,1,"htb_deq t_delay=%ld\n",delay);
758+ }
759+fin:
760+ do {
761+ static unsigned util = 0; unsigned d;
762+ PSCHED_GET_TIME(endt); q->deq_rate_c++;
763+ d = PSCHED_TDIFF(endt,q->now);
764+ q->utilz_c += d; util += d;
765+#if 0
766+ /* special debug hack */
767+ if (skb) {
768+ memcpy (skb->data+28,_dbg,sizeof(_dbg));
769+ memset (_dbg,0,sizeof(_dbg));
770+ }
771+#endif
772+ } while (0);
773+ HTB_DBG(3,1,"htb_deq_end %s j=%lu skb=%p (28/11)\n",sch->dev->name,jiffies,skb);
774+ return skb;
775+}
776+
777+/* try to drop from each class (by prio) until one succeeds */
778+static int htb_drop(struct Qdisc* sch)
779+{
780+ struct htb_sched *q = (struct htb_sched *)sch->data;
781+ int prio;
782+
783+ for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
784+ struct htb_class *cl = q->active[prio][0];
785+ if (cl) do {
786+ if (cl->q->ops->drop && cl->q->ops->drop(cl->q)) {
787+ sch->q.qlen--;
788+ return 1;
789+ }
790+ } while ((cl = cl->active.next) != q->active[prio][0]);
791+ }
792+ return 0;
793+}
794+
795+/* reset all classes */
796+/* always called under BH & queue lock */
797+static void htb_reset(struct Qdisc* sch)
798+{
799+ struct htb_sched *q = (struct htb_sched *)sch->data;
800+ int i;
801+ HTB_DBG(0,1,"htb_reset sch=%X, handle=%X\n",(int)sch,sch->handle);
802+
803+ for (i = 0; i < HTB_HSIZE; i++) {
804+ struct htb_class *cl = q->hash[i];
805+ if (cl) do {
806+ if (cl->q) qdisc_reset(cl->q);
807+
808+ } while ((cl = cl->hlist.next) != q->hash[i]);
809+ }
810+ sch->flags &= ~TCQ_F_THROTTLED;
811+ del_timer(&q->timer);
812+ __skb_queue_purge(&q->direct_queue);
813+ sch->q.qlen = 0; q->last_tx = NULL;
814+}
815+
816+static int htb_init(struct Qdisc *sch, struct rtattr *opt)
817+{
818+ struct htb_sched *q = (struct htb_sched*)sch->data;
819+ struct rtattr *tb[TCA_HTB_INIT];
820+ struct tc_htb_glob *gopt;
821+
822+ if (!opt ||
823+ rtattr_parse(tb, TCA_HTB_INIT, RTA_DATA(opt), RTA_PAYLOAD(opt)) ||
824+ tb[TCA_HTB_INIT-1] == NULL ||
825+ RTA_PAYLOAD(tb[TCA_HTB_INIT-1]) < sizeof(*gopt))
826+ return -EINVAL;
827+
828+ gopt = RTA_DATA(tb[TCA_HTB_INIT-1]);
829+ memset(q,0,sizeof(*q));
830+ q->debug = gopt->debug;
831+ HTB_DBG(0,1,"htb_init sch=%p handle=%X r2q=%d\n",sch,sch->handle,gopt->rate2quantum);
832+ init_timer(&q->timer);
833+ init_timer(&q->rttim);
834+ skb_queue_head_init(&q->direct_queue);
835+ q->direct_qlen = sch->dev->tx_queue_len;
836+ q->timer.function = htb_timer;
837+ q->timer.data = (unsigned long)sch;
838+ q->rttim.function = htb_rate_timer;
839+ q->rttim.data = (unsigned long)sch;
840+ q->rttim.expires = jiffies + HZ;
841+ add_timer(&q->rttim);
842+ if ((q->rate2quantum = gopt->rate2quantum) < 1)
843+ q->rate2quantum = 1;
844+ q->defcls = gopt->defcls;
845+ q->use_dcache = gopt->use_dcache;
846+
847+ MOD_INC_USE_COUNT;
848+ return 0;
849+}
850+
851+#ifdef CONFIG_RTNETLINK
852+static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
853+{
854+ struct htb_sched *q = (struct htb_sched*)sch->data;
855+ unsigned char *b = skb->tail;
856+ struct rtattr *rta;
857+ struct tc_htb_glob gopt;
858+ HTB_DBG(0,1,"htb_dump sch=%p, handle=%X\n",sch,sch->handle);
859+ /* stats */
860+ HTB_QLOCK(sch);
861+ gopt.deq_rate = q->deq_rate/HTB_EWMAC;
862+ gopt.utilz = q->utilz/HTB_EWMAC;
863+ gopt.trials = q->trials/HTB_EWMAC;
864+ gopt.dcache_hits = q->dcache_hits;
865+ gopt.direct_pkts = q->direct_pkts;
866+
867+ gopt.use_dcache = q->use_dcache;
868+ gopt.rate2quantum = q->rate2quantum;
869+ gopt.defcls = q->defcls;
870+ gopt.debug = q->debug;
871+ rta = (struct rtattr*)b;
872+ RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
873+ RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
874+ rta->rta_len = skb->tail - b;
875+ sch->stats.qlen = sch->q.qlen;
876+ RTA_PUT(skb, TCA_STATS, sizeof(sch->stats), &sch->stats);
877+ HTB_QUNLOCK(sch);
878+ return skb->len;
879+rtattr_failure:
880+ HTB_QUNLOCK(sch);
881+ skb_trim(skb, skb->tail - skb->data);
882+ return -1;
883+}
884+
885+static int
886+htb_dump_class(struct Qdisc *sch, unsigned long arg,
887+ struct sk_buff *skb, struct tcmsg *tcm)
888+{
889+ struct htb_sched *q = (struct htb_sched*)sch->data;
890+ struct htb_class *cl = (struct htb_class*)arg;
891+ unsigned char *b = skb->tail;
892+ struct rtattr *rta;
893+ struct tc_htb_opt opt;
894+
895+ HTB_DBG(0,1,"htb_dump_class handle=%X clid=%X\n",sch->handle,cl->classid);
896+
897+ HTB_QLOCK(sch);
898+ tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT;
899+ tcm->tcm_handle = cl->classid;
900+ if (cl->q) {
901+ tcm->tcm_info = cl->q->handle;
902+ cl->stats.qlen = cl->q->q.qlen;
903+ }
904+
905+ rta = (struct rtattr*)b;
906+ RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
907+
908+ memset (&opt,0,sizeof(opt));
909+
910+ opt.rate = cl->rate->rate; opt.buffer = cl->buffer;
911+ opt.ceil = cl->ceil->rate; opt.cbuffer = cl->cbuffer;
912+ opt.quantum = cl->quantum; opt.prio = cl->prio;
913+ opt.level = cl->level; opt.injectd = cl->injectd;
914+ RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
915+ rta->rta_len = skb->tail - b;
916+
917+ cl->stats.bps = cl->rate_bytes/(HTB_EWMAC*HTB_HSIZE);
918+ cl->stats.pps = cl->rate_packets/(HTB_EWMAC*HTB_HSIZE);
919+
920+ cl->xstats.tokens = cl->tokens;
921+ cl->xstats.ctokens = cl->ctokens;
922+ RTA_PUT(skb, TCA_STATS, sizeof(cl->stats), &cl->stats);
923+ RTA_PUT(skb, TCA_XSTATS, sizeof(cl->xstats), &cl->xstats);
924+ HTB_QUNLOCK(sch);
925+ return skb->len;
926+rtattr_failure:
927+ HTB_QUNLOCK(sch);
928+ skb_trim(skb, b - skb->data);
929+ return -1;
930+}
931+#endif
932+
933+static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
934+ struct Qdisc **old)
935+{
936+ struct htb_class *cl = (struct htb_class*)arg;
937+
938+ if (cl && !cl->level) {
939+ if (new == NULL) {
940+ if ((new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops)) == NULL)
941+ return -ENOBUFS;
942+ }
943+ if ((*old = xchg(&cl->q, new)) != NULL) /* xchg is atomic :-) */
944+ qdisc_reset(*old);
945+ return 0;
946+ }
947+ return -ENOENT;
948+}
949+
950+static struct Qdisc *
951+htb_leaf(struct Qdisc *sch, unsigned long arg)
952+{
953+ struct htb_class *cl = (struct htb_class*)arg;
954+ return cl ? cl->q : NULL;
955+}
956+
957+static unsigned long htb_get(struct Qdisc *sch, u32 classid)
958+{
959+ struct htb_sched *q = (struct htb_sched *)sch->data;
960+ struct htb_class *cl = htb_find(classid,sch);
961+ HTB_DBG(0,1,"htb_get clid=%X q=%p cl=%p ref=%d\n",classid,q,cl,cl?cl->refcnt:0);
962+ if (cl) cl->refcnt++;
963+ return (unsigned long)cl;
964+}
965+
966+static void htb_destroy_filters(struct tcf_proto **fl)
967+{
968+ struct tcf_proto *tp;
969+
970+ while ((tp = *fl) != NULL) {
971+ *fl = tp->next;
972+ tp->ops->destroy(tp);
973+ }
974+}
975+
976+static void htb_destroy_class(struct Qdisc* sch,struct htb_class *cl)
977+{
978+ struct htb_sched *q = (struct htb_sched *)sch->data;
979+ HTB_DBG(0,1,"htb_destrycls clid=%X q=%p ref=%d\n", cl?cl->classid:0,cl->q,cl?cl->refcnt:0);
980+ if (cl->q) qdisc_destroy(cl->q);
981+ qdisc_put_rtab(cl->rate);
982+ qdisc_put_rtab(cl->ceil);
983+#ifdef CONFIG_NET_ESTIMATOR
984+ qdisc_kill_estimator(&cl->stats);
985+#endif
986+ htb_destroy_filters (&cl->filter_list);
987+ /* remove children */
988+ while (cl->children) htb_destroy_class (sch,cl->children);
989+
990+ /* remove class from all lists it is on */
991+ q->last_tx = NULL;
992+ if (cl->hlist.prev)
993+ HTB_DELETE(hlist,cl,q->hash[htb_hash(cl->classid)]);
994+ if (cl->active.prev)
995+ htb_deactivate (q,cl,0);
996+ if (cl->parent)
997+ HTB_DELETE(sibling,cl,cl->parent->children);
998+ else
999+ HTB_DELETE(sibling,cl,q->root);
1000+
1001+ kfree(cl);
1002+}
1003+
1004+/* always called under BH & queue lock */
1005+static void htb_destroy(struct Qdisc* sch)
1006+{
1007+ struct htb_sched *q = (struct htb_sched *)sch->data;
1008+ HTB_DBG(0,1,"htb_destroy q=%p\n",q);
1009+
1010+ del_timer_sync (&q->timer);
1011+ del_timer_sync (&q->rttim);
1012+ while (q->root) htb_destroy_class(sch,q->root);
1013+ htb_destroy_filters(&q->filter_list);
1014+ __skb_queue_purge(&q->direct_queue);
1015+ MOD_DEC_USE_COUNT;
1016+}
1017+
1018+static void htb_put(struct Qdisc *sch, unsigned long arg)
1019+{
1020+ struct htb_sched *q = (struct htb_sched *)sch->data;
1021+ struct htb_class *cl = (struct htb_class*)arg;
1022+ HTB_DBG(0,1,"htb_put q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);
1023+
1024+ if (--cl->refcnt == 0)
1025+ htb_destroy_class(sch,cl);
1026+}
1027+
1028+static int
1029+htb_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **tca,
1030+ unsigned long *arg)
1031+{
1032+ int err = -EINVAL,h;
1033+ struct htb_sched *q = (struct htb_sched *)sch->data;
1034+ struct htb_class *cl = (struct htb_class*)*arg,*parent;
1035+ struct rtattr *opt = tca[TCA_OPTIONS-1];
1036+ struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
1037+ struct rtattr *tb[TCA_HTB_RTAB];
1038+ struct tc_htb_opt *hopt;
1039+
1040+ if (parentid == TC_H_ROOT) parent = NULL;
1041+ else parent = htb_find (parentid,sch);
1042+
1043+ /* extract all subattrs from opt attr */
1044+ if (!opt ||
1045+ rtattr_parse(tb, TCA_HTB_RTAB, RTA_DATA(opt), RTA_PAYLOAD(opt)) ||
1046+ tb[TCA_HTB_PARMS-1] == NULL ||
1047+ RTA_PAYLOAD(tb[TCA_HTB_PARMS-1]) < sizeof(*hopt))
1048+ goto failure;
1049+
1050+ hopt = RTA_DATA(tb[TCA_HTB_PARMS-1]);
1051+ HTB_DBG(0,1,"htb_chg cl=%p, clid=%X, opt/prio=%d, rate=%u, buff=%d, quant=%d\n", cl,cl?cl->classid:0,(int)hopt->prio,hopt->rate.rate,hopt->buffer,hopt->quantum);
1052+ rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB-1]);
1053+ ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB-1]);
1054+ if (!rtab || !ctab) goto failure;
1055+
1056+ if (!cl) { /* new class */
1057+ /* check maximal depth */
1058+ if (parent && parent->parent && parent->parent->level < 2) {
1059+ printk(KERN_ERR "htb: tree is too deep\n");
1060+ goto failure;
1061+ }
1062+ err = -ENOBUFS;
1063+ cl = kmalloc(sizeof(*cl), GFP_KERNEL);
1064+ if (cl == NULL) goto failure;
1065+ memset(cl, 0, sizeof(*cl));
1066+ cl->refcnt = 1; cl->level = 0; /* assume leaf */
1067+
1068+ if (parent && !parent->level) {
1069+ /* turn parent into inner node */
1070+ qdisc_destroy (parent->q); parent->q = &noop_qdisc;
1071+ parent->level = (parent->parent ?
1072+ parent->parent->level : TC_HTB_MAXDEPTH) - 1;
1073+ }
1074+ /* leaf (we) needs elementary qdisc */
1075+ if (!(cl->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops)))
1076+ cl->q = &noop_qdisc;
1077+
1078+ cl->classid = classid; cl->parent = parent;
1079+ cl->tokens = hopt->buffer;
1080+ cl->ctokens = hopt->cbuffer;
1081+ cl->mbuffer = 60000000; /* 1min */
1082+ PSCHED_GET_TIME(cl->t_c);
1083+
1084+ /* attach to the hash list and parent's family */
1085+ sch_tree_lock(sch);
1086+ h = htb_hash(classid);
1087+ if (!cl->hlist.prev)
1088+ HTB_INSERTB(hlist,q->hash[h],cl);
1089+ if (parent)
1090+ HTB_INSERTB(sibling,parent->children,cl);
1091+ else HTB_INSERTB(sibling,q->root,cl);
1092+
1093+ } else sch_tree_lock(sch);
1094+
1095+ q->last_tx = NULL;
1096+ cl->quantum = rtab->rate.rate / q->rate2quantum;
1097+ cl->injectd = hopt->injectd;
1098+ if (cl->quantum < 100) cl->quantum = 100;
1099+ if (cl->quantum > 60000) cl->quantum = 60000;
1100+ if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
1101+ cl->prio = TC_HTB_NUMPRIO - 1;
1102+
1103+ cl->buffer = hopt->buffer;
1104+ cl->cbuffer = hopt->cbuffer;
1105+ if (cl->rate) qdisc_put_rtab(cl->rate); cl->rate = rtab;
1106+ if (cl->ceil) qdisc_put_rtab(cl->ceil); cl->ceil = ctab;
1107+ sch_tree_unlock(sch);
1108+
1109+ *arg = (unsigned long)cl;
1110+ return 0;
1111+
1112+failure:
1113+ if (rtab) qdisc_put_rtab(rtab);
1114+ if (ctab) qdisc_put_rtab(ctab);
1115+ return err;
1116+}
1117+
1118+static int htb_delete(struct Qdisc *sch, unsigned long arg)
1119+{
1120+ struct htb_sched *q = (struct htb_sched *)sch->data;
1121+ struct htb_class *cl = (struct htb_class*)arg;
1122+ HTB_DBG(0,1,"htb_delete q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);
1123+
1124+ if (cl->children || cl->filter_cnt) return -EBUSY;
1125+ sch_tree_lock(sch);
1126+ /* delete from hash and active; remainder in destroy_class */
1127+ if (cl->hlist.prev)
1128+ HTB_DELETE(hlist,cl,q->hash[htb_hash(cl->classid)]);
1129+ if (cl->active.prev)
1130+ htb_deactivate (q,cl,0);
1131+ q->last_tx = NULL;
1132+
1133+ if (--cl->refcnt == 0)
1134+ htb_destroy_class(sch,cl);
1135+
1136+ sch_tree_unlock(sch);
1137+ return 0;
1138+}
1139+
1140+static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
1141+{
1142+ struct htb_sched *q = (struct htb_sched *)sch->data;
1143+ struct htb_class *cl = (struct htb_class *)arg;
1144+ struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;
1145+ HTB_DBG(0,2,"htb_tcf q=%p clid=%X fref=%d fl=%p\n",q,cl?cl->classid:0,cl?cl->filter_cnt:q->filter_cnt,*fl);
1146+ return fl;
1147+}
1148+
1149+static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
1150+ u32 classid)
1151+{
1152+ struct htb_sched *q = (struct htb_sched *)sch->data;
1153+ struct htb_class *cl = htb_find (classid,sch);
1154+ HTB_DBG(0,2,"htb_bind q=%p clid=%X cl=%p fref=%d\n",q,classid,cl,cl?cl->filter_cnt:q->filter_cnt);
1155+ if (cl && !cl->level) return 0;
1156+ if (cl) cl->filter_cnt++; else q->filter_cnt++;
1157+ return (unsigned long)cl;
1158+}
1159+
1160+static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
1161+{
1162+ struct htb_sched *q = (struct htb_sched *)sch->data;
1163+ struct htb_class *cl = (struct htb_class *)arg;
1164+ HTB_DBG(0,2,"htb_unbind q=%p cl=%p fref=%d\n",q,cl,cl?cl->filter_cnt:q->filter_cnt);
1165+ if (cl) cl->filter_cnt--; else q->filter_cnt--;
1166+}
1167+
1168+static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1169+{
1170+ struct htb_sched *q = (struct htb_sched *)sch->data;
1171+ int i;
1172+
1173+ if (arg->stop)
1174+ return;
1175+
1176+ for (i = 0; i < HTB_HSIZE; i++) {
1177+ struct htb_class *cl = q->hash[i];
1178+ if (cl) do {
1179+ if (arg->count < arg->skip) {
1180+ arg->count++;
1181+ continue;
1182+ }
1183+ if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
1184+ arg->stop = 1;
1185+ return;
1186+ }
1187+ arg->count++;
1188+
1189+ } while ((cl = cl->hlist.next) != q->hash[i]);
1190+ }
1191+}
1192+
1193+static struct Qdisc_class_ops htb_class_ops =
1194+{
1195+ htb_graft,
1196+ htb_leaf,
1197+ htb_get,
1198+ htb_put,
1199+ htb_change_class,
1200+ htb_delete,
1201+ htb_walk,
1202+
1203+ htb_find_tcf,
1204+ htb_bind_filter,
1205+ htb_unbind_filter,
1206+
1207+#ifdef CONFIG_RTNETLINK
1208+ htb_dump_class,
1209+#endif
1210+};
1211+
1212+struct Qdisc_ops htb_qdisc_ops =
1213+{
1214+ NULL,
1215+ &htb_class_ops,
1216+ "htb",
1217+ sizeof(struct htb_sched),
1218+
1219+ htb_enqueue,
1220+ htb_dequeue,
1221+ htb_requeue,
1222+ htb_drop,
1223+
1224+ htb_init,
1225+ htb_reset,
1226+ htb_destroy,
1227+ NULL /* htb_change */,
1228+
1229+#ifdef CONFIG_RTNETLINK
1230+ htb_dump,
1231+#endif
1232+};
1233+
1234+#ifdef MODULE
1235+int init_module(void)
1236+{
1237+ return register_qdisc(&htb_qdisc_ops);
1238+}
1239+
1240+void cleanup_module(void)
1241+{
1242+ unregister_qdisc(&htb_qdisc_ops);
1243+}
1244+MODULE_LICENSE("GPL");
1245+#endif
1246--- linux-2.2.17orig/include/linux/pkt_sched.h Tue Apr 28 20:10:10 1998
1247+++ linux-2.2/include/linux/pkt_sched.h Wed Dec 19 19:26:07 2001
1248@@ -190,6 +190,55 @@ struct tc_red_qopt
1249 unsigned char Scell_log; /* cell size for idle damping */
1250 };
1251
1252+/* HTB section */
1253+#define TC_HTB_NUMPRIO 4
1254+#define TC_HTB_MAXDEPTH 4
1255+
1256+struct tc_htb_opt
1257+{
1258+ struct tc_ratespec rate;
1259+ struct tc_ratespec ceil;
1260+ __u32 buffer;
1261+ __u32 cbuffer;
1262+ __u32 quantum; /* out only */
1263+ __u32 level; /* out only */
1264+ __u8 prio;
1265+ __u8 injectd; /* inject class distance */
1266+ __u8 pad[2];
1267+};
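/* Example (illustrative values only) of how userspace would fill these
 * per-class options before handing them to the kernel as the
 * TCA_HTB_PARMS attribute; real tc derives buffer/cbuffer and the rate
 * tables from user input instead of hard-coding them:
 *
 *	struct tc_htb_opt opt;
 *	memset(&opt, 0, sizeof(opt));
 *	opt.rate.rate = 125000;			guaranteed rate, bytes/sec
 *	opt.ceil.rate = 250000;			borrowing ceiling, bytes/sec
 *	opt.buffer = opt.cbuffer = 10000;	burst allowances
 *	opt.prio = 1;				0 is the highest priority
 *	opt.injectd = 0;			inject class distance
 */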
1268+struct tc_htb_glob
1269+{
1270+ __u32 rate2quantum; /* bps->quantum divisor */
1271+ __u32 defcls; /* default class number */
1272+ __u32 use_dcache; /* use dequeue cache ? */
1273+ __u32 debug; /* debug flags */
1274+
1275+
1276+ /* stats */
1277+ __u32 deq_rate; /* dequeue rate */
1278+ __u32 utilz; /* dequeue utilization */
1279+ __u32 trials; /* deq_prio trials per dequeue */
1280+ __u32 dcache_hits;
1281+ __u32 direct_pkts; /* count of non-shaped packets */
1282+};
1283+enum
1284+{
1285+ TCA_HTB_UNSPEC,
1286+ TCA_HTB_PARMS,
1287+ TCA_HTB_INIT,
1288+ TCA_HTB_CTAB,
1289+ TCA_HTB_RTAB,
1290+};
1291+struct tc_htb_xstats
1292+{
1293+ __u32 lends;
1294+ __u32 borrows;
1295+ __u32 giants; /* too big packets (rate will not be accurate) */
1296+ __u32 injects; /* how many times leaf used injected bw */
1297+ __u32 tokens;
1298+ __u32 ctokens;
1299+};
1300+
1301 /* CBQ section */
1302
1303 #define TC_CBQ_MAXPRIO 8
1304--- linux-2.2.17orig/net/sched/sch_api.c Sun Mar 21 16:22:00 1999
1305+++ linux-2.2/net/sched/sch_api.c Fri Nov 9 11:45:19 2001
1306@@ -1120,6 +1120,9 @@ __initfunc(int pktsched_init(void))
1307 #ifdef CONFIG_NET_SCH_CBQ
1308 INIT_QDISC(cbq);
1309 #endif
1310+#ifdef CONFIG_NET_SCH_HTB
1311+ INIT_QDISC(htb);
1312+#endif
1313 #ifdef CONFIG_NET_SCH_CSZ
1314 INIT_QDISC(csz);
1315 #endif