1 --- linux-2.4orig/net/sched/Config.in Sun Jan 13 15:10:25 2002
2 +++ linux-2.4/net/sched/Config.in Sun Jan 13 14:54:48 2002
4 # Traffic control configuration.
6 tristate ' CBQ packet scheduler' CONFIG_NET_SCH_CBQ
7 +tristate ' HTB packet scheduler' CONFIG_NET_SCH_HTB
8 tristate ' CSZ packet scheduler' CONFIG_NET_SCH_CSZ
9 #tristate ' H-PFQ packet scheduler' CONFIG_NET_SCH_HPFQ
10 #tristate ' H-FSC packet scheduler' CONFIG_NET_SCH_HFCS
11 --- linux-2.4orig/net/sched/Makefile Mon Oct 15 16:26:22 2001
12 +++ linux-2.4/net/sched/Makefile Wed Nov 14 13:54:44 2001
13 @@ -16,6 +16,7 @@ obj-$(CONFIG_NET_SCH_CBQ) += sch_cbq.o
14 obj-$(CONFIG_NET_SCH_CSZ) += sch_csz.o
15 obj-$(CONFIG_NET_SCH_HPFQ) += sch_hpfq.o
16 obj-$(CONFIG_NET_SCH_HFSC) += sch_hfsc.o
17 +obj-$(CONFIG_NET_SCH_HTB) += sch_htb.o
18 obj-$(CONFIG_NET_SCH_SFQ) += sch_sfq.o
19 obj-$(CONFIG_NET_SCH_RED) += sch_red.o
20 obj-$(CONFIG_NET_SCH_TBF) += sch_tbf.o
21 --- linux-2.4orig/net/sched/sch_htb.c Sun Oct 21 22:11:45 2001
22 +++ linux-2.4/net/sched/sch_htb.c Sun Jan 13 15:07:13 2002
25 + * net/sched/sch_htb.c Hierarchical token bucket
27 + * This program is free software; you can redistribute it and/or
28 + * modify it under the terms of the GNU General Public License
29 + * as published by the Free Software Foundation; either version
30 + * 2 of the License, or (at your option) any later version.
32 + * Authors: Martin Devera, <devik@cdi.cz>
34 + * Credits (in time order):
35 + * Ondrej Kraus, <krauso@barr.cz>
36 + * found missing INIT_QDISC(htb)
37 + * Vladimir Smelhaus, Aamer Akhter, Bert Hubert
38 + * helped a lot to locate nasty class stall bug
39 + * Andi Kleen, Jamal Hadi, Bert Hubert
40 + * code review and helpful comments on shaping
41 + * and many others. thanks.
46 +#include <linux/config.h>
47 +#include <linux/module.h>
48 +#include <asm/uaccess.h>
49 +#include <asm/system.h>
50 +#include <asm/bitops.h>
51 +#include <linux/types.h>
52 +#include <linux/kernel.h>
53 +#include <linux/version.h>
54 +#include <linux/sched.h>
55 +#include <linux/string.h>
56 +#include <linux/mm.h>
57 +#include <linux/socket.h>
58 +#include <linux/sockios.h>
59 +#include <linux/in.h>
60 +#include <linux/errno.h>
61 +#include <linux/interrupt.h>
62 +#include <linux/if_ether.h>
63 +#include <linux/inet.h>
64 +#include <linux/netdevice.h>
65 +#include <linux/etherdevice.h>
66 +#include <linux/notifier.h>
68 +#include <net/route.h>
69 +#include <linux/skbuff.h>
70 +#include <net/sock.h>
71 +#include <net/pkt_sched.h>
74 + Author: devik@cdi.cz
75 + =======================================
76 + HTB is like TBF with multiple classes. It is also similar to CBQ because
77 + it allows assigning a priority to each class in the hierarchy.
78 + In fact it is another implementation of Floyd's formal sharing.
81 + Each class is assigned a level. A leaf ALWAYS has level 0 and root
82 + classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
83 + one less than their parent.
86 +#define HTB_HSIZE 16 /* classid hash size */
87 +#define HTB_EWMAC 2 /* rate average over HTB_EWMAC*HTB_HSIZE sec */
88 +#define HTB_DEBUG 1 /* compile debugging support (activated by tc tool) */
89 +#define HTB_QLOCK(S) spin_lock_bh(&(S)->dev->queue_lock)
90 +#define HTB_QUNLOCK(S) spin_unlock_bh(&(S)->dev->queue_lock)
92 +/* ======== Begin of part to be deleted for 2.4 merged one ========= */
93 +#if LINUX_VERSION_CODE < 0x20300
94 +#define MODULE_LICENSE(X)
96 +#define NET_XMIT_SUCCESS 1
97 +#define NET_XMIT_DROP 0
99 +static inline void __skb_queue_purge(struct sk_buff_head *list)
101 + struct sk_buff *skb;
102 + while ((skb=__skb_dequeue(list))!=NULL)
105 +#define del_timer_sync(t) del_timer(t)
107 +#define netif_schedule qdisc_wakeup
108 +#define netif_queue_stopped(D) (D->tbusy)
109 +#define sch_tree_lock(S) start_bh_atomic()
110 +#define sch_tree_unlock(S) end_bh_atomic()
113 +#define HTB_QLOCK(S)
114 +#define HTB_QUNLOCK(S)
116 +#define BUG_TRAP(x) if (!(x)) { printk("Assertion (" #x ") failed at " __FILE__ "(%d):" __FUNCTION__ "\n", __LINE__); }
119 +#if LINUX_VERSION_CODE < 0x20411 && !defined(CONFIG_RTNETLINK)
120 +#error "CONFIG_RTNETLINK must be defined"
122 +/* ======== End of part to be deleted for 2.4 merged one =========== */
124 +/* debugging support; S is subsystem, these are defined:
125 + 0 - netlink messages
129 + 4 - dequeue one prio DRR part
130 + 5 - dequeue class accounting
131 + 6 - dequeue rcache (ready level computation)
132 + 10 - rate estimator
134 + 12 - fast dequeue cache
136 + L is level; 0 = none, 1 = basic info, 2 = detailed, 3 = full
137 + q->debug uint32 contains 16 2-bit fields one for subsystem starting
141 +#define HTB_DBG(S,L,FMT,ARG...) if (((q->debug>>(2*S))&3) >= L) \
142 + printk(KERN_DEBUG FMT,##ARG)
144 +#define HTB_DBG(S,L,FMT,ARG...)
148 +/* used internally to pass status of a single class */
150 + HTB_CANT_SEND, /* class can't send and can't borrow */
151 + HTB_MAY_BORROW, /* class can't send but may borrow */
152 + HTB_CAN_SEND /* class can send */
154 +#define HTB_F_INJ 0x10000 /* to mark dequeue level as injected one */
156 +/* often used circular list of classes; I didn't use generic linux
157 + doubly linked list to avoid casts and because I rely on some behaviour
158 + of insert and delete functions; an item not bound to a list is guaranteed
159 + to have prev member NULL (we don't mangle next pointer as we often
162 + struct htb_class *prev, *next;
164 +/* circular list insert and delete macros; these also maintain
165 + correct value of pointer to the list; insert adds 'new' class
166 + before 'cl' class using prev/next member 'list' */
167 +#define HTB_INSERTB(list,cl,new) \
168 +do { if (!cl) new->list.prev = cl = new; \
169 + new->list.next = cl; new->list.prev = cl->list.prev; \
170 + cl->list.prev->list.next = cl->list.prev = new; } while(0)
172 +/* remove 'cl' class from 'list' repairing 'ptr' if not null */
173 +#define HTB_DELETE(list,cl,ptr) do { \
174 + if (cl->list.prev) { cl->list.prev->list.next = cl->list.next; \
175 + cl->list.next->list.prev = cl->list.prev; \
176 + if (ptr == cl) ptr = cl->list.next; \
177 + if (ptr == cl) ptr = NULL; cl->list.prev = NULL; } \
178 + else printk(KERN_ERR "htb: DELETE BUG [" #list "," #cl "," #ptr "]\n"); \
181 +/* interior & leaf nodes; props specific to leaves are marked L: */
184 + /* general class parameters */
186 + struct tc_stats stats; /* generic stats */
187 + struct tc_htb_xstats xstats;/* our special stats */
188 + int refcnt; /* usage count of this class */
189 + struct Qdisc *q; /* L: elem. qdisc */
191 + /* rate measurement counters */
192 + unsigned long rate_bytes,sum_bytes;
193 + unsigned long rate_packets,sum_packets;
195 + /* DRR scheduler parameters */
196 + int quantum; /* L: round quantum computed from rate */
197 + int deficit[TC_HTB_MAXDEPTH]; /* L: deficit for class at level */
198 + char prio; /* L: priority of the class; 0 is the highest */
199 + char aprio; /* L: prio at which we were last adding to active list
200 + it is used to change priority at runtime */
202 + char level; /* our level (see above) */
203 + char injectd; /* distance from injected parent */
204 + struct htb_class *parent; /* parent class */
205 + struct htb_class *children; /* pointer to children list */
206 + struct htb_litem hlist; /* classid hash list */
207 + struct htb_litem active; /* L: prio level active DRR list */
208 + struct htb_litem sibling; /* sibling list */
210 + /* class attached filters */
211 + struct tcf_proto *filter_list;
214 + /* token bucket parameters */
215 + struct qdisc_rate_table *rate; /* rate table of the class itself */
216 + struct qdisc_rate_table *ceil; /* ceiling rate (limits borrows too) */
217 + long buffer,cbuffer; /* token bucket depth/rate */
218 + long mbuffer; /* max wait time */
219 + long tokens,ctokens; /* current number of tokens */
220 + psched_time_t t_c; /* checkpoint time */
222 + /* walk result cache for leaves */
223 + unsigned long rcache_sn; /* SN of cache validity */
224 + unsigned rc_level; /* victim's level */
227 +/* TODO: maybe compute rate when size is too large .. or drop ? */
228 +static __inline__ long L2T(struct htb_class *cl,struct qdisc_rate_table *rate,
231 + int slot = size >> rate->rate.cell_log;
233 + cl->xstats.giants++;
236 + return rate->data[slot];
241 + struct htb_class *root; /* root classes circular list */
242 + struct htb_class *hash[HTB_HSIZE]; /* hashed by classid */
244 + /* active classes table; this needs explanation. This table contains
245 + one set of pointers per priority, it is obvious. The set contains
246 + one pointer per class level in the same way as cl->deficit is
247 + independent for each level. This allows us to maintain correct
248 + DRR position independent of borrowing level.
249 + If we used single active/deficit items then DRR fairness'd suffer
250 + from frequent class level changes.
251 + Note that htb_[de]activate must be used to update this item
252 + because it needs to keep all pointers in set coherent. */
253 + struct htb_class *active[TC_HTB_NUMPRIO][TC_HTB_MAXDEPTH];
255 + int defcls; /* class where unclassified flows go to */
256 + u32 debug; /* subsystem debug levels */
258 + /* filters for qdisc itself */
259 + struct tcf_proto *filter_list;
262 + unsigned long sn; /* result cache serial number */
263 + int rate2quantum; /* quant = rate / rate2quantum */
264 + psched_time_t now; /* cached dequeue time */
265 + long delay; /* how long to deactivate for */
266 + struct timer_list timer; /* send delay timer */
267 + struct timer_list rttim; /* rate computer timer */
268 + int recmp_bucket; /* which hash bucket to recompute next */
270 + /* cache of last dequeued class */
271 + struct htb_class *last_tx;
274 + /* non shapped skbs; let them go directly thru */
275 + struct sk_buff_head direct_queue;
276 + int direct_qlen; /* max qlen of above */
278 + /* statistics (see tc_htb_glob for explanation) */
279 + long deq_rate,deq_rate_c;
280 + long utilz,utilz_c;
281 + long trials,trials_c;
286 +/* compute hash of size HTB_HSIZE for given handle */
287 +static __inline__ int htb_hash(u32 h)
290 + #error "Declare new hash for your HTB_HSIZE"
292 + h ^= h>>8; /* stolen from cbq_hash */
297 +/* find class in global hash table using given handle */
298 +static __inline__ struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
300 + struct htb_sched *q = (struct htb_sched *)sch->data;
301 + int h = htb_hash(handle);
302 + struct htb_class *cl;
303 + if (TC_H_MAJ(handle) != sch->handle) return NULL;
306 + if (cl->classid == handle) return cl;
308 + } while ((cl = cl->hlist.next) != q->hash[h]);
312 +/* classify packet into class TODO: use inner filters & marks here */
313 +static struct htb_class *htb_clasify(struct sk_buff *skb, struct Qdisc *sch)
315 + struct htb_sched *q = (struct htb_sched *)sch->data;
316 + struct htb_class *cl;
317 + struct tcf_result res;
318 + struct tcf_proto *tcf;
320 + /* allow to select class by setting skb->priority to valid classid;
321 + note that nfmark can be used too by attaching filter fw with no
323 + if (skb->priority == sch->handle)
324 + return NULL; /* X:0 (direct flow) selected */
325 + if ((cl = htb_find(skb->priority,sch)) != NULL)
328 + tcf = q->filter_list;
329 + while (tcf && !tc_classify(skb, tcf, &res)) {
330 + if (res.classid == sch->handle)
331 + return NULL; /* X:0 (direct flow) selected */
332 + if ((cl = htb_find(res.classid,sch)) == NULL)
333 + break; /* filter selected invalid classid */
335 + return cl; /* we hit leaf; return it */
337 + /* we have got inner class; apply inner filter chain */
338 + tcf = cl->filter_list;
340 + /* classification failed; try to use default class */
341 + return htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle),q->defcls),sch);
344 +/* inserts cl into appropriate active lists (for all levels) */
345 +static __inline__ void htb_activate(struct htb_sched *q,struct htb_class *cl)
347 + if (!cl->active.prev) {
348 + struct htb_class **ap = q->active[(int)(cl->aprio=cl->prio)];
350 + HTB_INSERTB(active,ap[0],cl);
351 + if (i) /* set also all level pointers */
352 + for (i = 1; i < TC_HTB_MAXDEPTH; i++) ap[i] = ap[0];
356 +/* remove cl from active lists; lev is level at which we dequeued
357 + so that we know that active[prio][lev] points to cl */
358 +static __inline__ void
359 +htb_deactivate(struct htb_sched *q,struct htb_class *cl,int lev)
362 + struct htb_class **ap = q->active[(int)cl->aprio];
363 + HTB_DELETE(active,cl,ap[lev]);
365 + /* repair other level pointers if they've pointed
366 + to the deleted class */
367 + for (i = 0; i < TC_HTB_MAXDEPTH; i++)
368 + if (ap[i] == cl) ap[i] = ap[lev];
370 + memset(ap,0,sizeof(*ap)*TC_HTB_MAXDEPTH);
373 +static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
375 + struct htb_sched *q = (struct htb_sched *)sch->data;
376 + struct htb_class *cl = htb_clasify(skb,sch);
378 + if (!cl || !cl->q) {
379 + /* bad class; enqueue to helper queue */
380 + if (q->direct_queue.qlen < q->direct_qlen) {
381 + __skb_queue_tail(&q->direct_queue, skb);
385 + sch->stats.drops++;
386 + return NET_XMIT_DROP;
388 + } else if (cl->q->enqueue(skb, cl->q) != NET_XMIT_SUCCESS) {
389 + sch->stats.drops++;
391 + return NET_XMIT_DROP;
393 + cl->stats.packets++; cl->stats.bytes += skb->len;
394 + htb_activate (q,cl);
398 + sch->stats.packets++; sch->stats.bytes += skb->len;
399 + HTB_DBG(1,1,"htb_enq_ok cl=%X skb=%p\n",cl?cl->classid:0,skb);
400 + return NET_XMIT_SUCCESS;
403 +static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
405 + struct htb_sched *q = (struct htb_sched *)sch->data;
406 + struct htb_class *cl = htb_clasify(skb,sch);
408 + if (!cl || !cl->q) {
409 + /* bad class; enqueue to helper queue */
410 + if (q->direct_queue.qlen < q->direct_qlen) {
411 + __skb_queue_tail(&q->direct_queue, skb);
415 + sch->stats.drops++;
416 + return NET_XMIT_DROP;
418 + } else if (cl->q->ops->requeue(skb, cl->q) != NET_XMIT_SUCCESS) {
419 + sch->stats.drops++;
421 + return NET_XMIT_DROP;
423 + htb_activate (q,cl);
426 + HTB_DBG(1,1,"htb_req_ok cl=%X skb=%p\n",cl?cl->classid:0,skb);
427 + return NET_XMIT_SUCCESS;
430 +static void htb_timer(unsigned long arg)
432 + struct Qdisc *sch = (struct Qdisc*)arg;
433 + sch->flags &= ~TCQ_F_THROTTLED;
435 + netif_schedule(sch->dev);
438 +#define RT_GEN(D,R) R+=D-(R/HTB_EWMAC);D=0
439 +static void htb_rate_timer(unsigned long arg)
441 + struct Qdisc *sch = (struct Qdisc*)arg;
442 + struct htb_sched *q = (struct htb_sched *)sch->data;
443 + struct htb_class *cl;
445 + /* lock queue so that we can muck with it */
447 + HTB_DBG(10,1,"htb_rttmr j=%ld\n",jiffies);
449 + q->rttim.expires = jiffies + HZ;
450 + add_timer(&q->rttim);
452 + /* scan and recompute one bucket at time */
453 + if (++q->recmp_bucket >= HTB_HSIZE) q->recmp_bucket = 0;
454 + if ((cl = q->hash[q->recmp_bucket]) != NULL) do {
455 + HTB_DBG(10,2,"htb_rttmr_cl cl=%X sbyte=%lu spkt=%lu\n",cl->classid,cl->sum_bytes,cl->sum_packets);
456 + RT_GEN (cl->sum_bytes,cl->rate_bytes);
457 + RT_GEN (cl->sum_packets,cl->rate_packets);
458 + } while ((cl = cl->hlist.next) != q->hash[q->recmp_bucket]);
461 + RT_GEN (q->trials_c,q->trials);
462 + RT_GEN (q->utilz_c,q->utilz);
463 + RT_GEN (q->deq_rate_c,q->deq_rate);
468 +/* test whether class can send or borrow packet */
469 +static enum htb_cmode
470 +htb_class_mode(struct htb_sched *q, struct htb_class *cl)
473 + diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer, 0);
474 + HTB_DBG(6,3,"htb_cm diff=%ld\n",diff);
476 + /* check whether we are over ceil */
477 + if ((toks = (cl->ctokens + diff)) < 0) {
478 + if (q->delay > -toks || !q->delay) q->delay = -toks;
479 + return HTB_CANT_SEND;
482 + /* our regular rate */
483 + if ((toks = (cl->tokens + diff)) >= 0)
484 + return HTB_CAN_SEND;
486 + /* record time when we can transmit */
487 + if (q->delay > -toks || !q->delay) q->delay = -toks;
489 + return HTB_MAY_BORROW;
492 +/* computes (possibly ancestor) class ready to send; cl is leaf;
493 + cl's rc_level is then filled with level we are borrowing at;
494 + it is set to TC_HTB_MAXDEPTH if we can't borrow at all and can be
495 + ORed with HTB_F_INJ if bw was injected. */
496 +static void htb_ready_level(struct htb_sched *q,struct htb_class *cl)
498 + struct htb_class *stack[TC_HTB_MAXDEPTH],**sp = stack;
499 + int level = TC_HTB_MAXDEPTH, injdist = cl->injectd;
500 + enum htb_cmode mode;
501 + HTB_DBG(6,1,"htb_rl cl=%X tok=%ld ctok=%ld buf=%ld cbuf=%ld\n",cl->classid,cl->tokens,cl->ctokens,cl->buffer,cl->cbuffer);
503 + /* traverse tree upward looking for ready class */
505 + *sp++ = cl; /* push at stack */
508 + mode = htb_class_mode(q,cl);
509 + HTB_DBG(6,2,"htb_clmod cl=%X m=%d tok=%ld ctok=%ld buf=%ld cbuf=%ld\n",cl->classid,mode,cl->tokens,cl->ctokens,cl->buffer,cl->cbuffer);
510 + if (mode != HTB_MAY_BORROW) {
511 + if (mode == HTB_CAN_SEND) level = cl->level;
514 + /* update injdist from current node */
515 + if (injdist > cl->injectd) injdist = cl->injectd;
517 + /* if this is leaf's injector then resolve borrow positively */
519 + /* don't cache this result in interior nodes */
520 + stack[0]->rc_level = cl->level|HTB_F_INJ;
521 + stack[0]->rcache_sn = q->sn;
524 + if ((cl = cl->parent) == NULL) break;
525 + if (q->sn == cl->rcache_sn) {
526 + /* the node has already computed result; use it */
527 + level = cl->rc_level; break;
530 + while (--sp >= stack) { /* update mode cache */
531 + (*sp)->rcache_sn = q->sn;
532 + (*sp)->rc_level = level;
536 +/* pull packet from class and charge to ancestors */
537 +static struct sk_buff *
538 +htb_dequeue_class(struct Qdisc *sch, struct htb_class *cl)
540 + struct htb_sched *q = (struct htb_sched *)sch->data;
542 + int injecting = cl->rc_level & HTB_F_INJ, injdist = cl->injectd;
543 + int level = cl->rc_level & 0xff;
544 + struct sk_buff *skb = cl->q->dequeue(cl->q);
545 + HTB_DBG(5,1,"htb_deq_cl cl=%X skb=%p lev=%d inj=%d\n",cl->classid,skb,level,injecting);
546 + if (!skb) return NULL;
548 + /* we have got skb, account it to victim and its parents
549 + and also to all ceil estimators under victim */
551 + diff = PSCHED_TDIFF_SAFE(q->now, cl->t_c, (u32)cl->mbuffer, 0);
553 +#define HTB_ACCNT(T,B,R) toks = diff + cl->T; \
554 + if (toks > cl->B) toks = cl->B; \
555 + toks -= L2T(cl, cl->R, skb->len); \
556 + if (toks <= -cl->mbuffer) toks = 1-cl->mbuffer; \
559 + HTB_ACCNT (ctokens,cbuffer,ceil);
560 + if (cl->level >= level) {
561 + if (cl->level == level) cl->xstats.lends++;
562 + HTB_ACCNT (tokens,buffer,rate);
564 + cl->xstats.borrows++;
565 + cl->tokens += diff; /* we moved t_c; update tokens */
568 + HTB_DBG(5,2,"htb_deq_clp cl=%X clev=%d diff=%ld\n",cl->classid,cl->level,diff);
570 + /* update rate counters */
571 + cl->sum_bytes += skb->len; cl->sum_packets++;
573 + /* update byte stats except for leaves which are already updated */
575 + cl->stats.bytes += skb->len;
576 + cl->stats.packets++;
578 + /* finish if we hit stop-class and we are injecting */
580 + if (injdist > cl->injectd) injdist = cl->injectd;
582 + cl->xstats.injects++; break;
590 +/* dequeues packet at given priority borrowing from given level;
591 + if unsuccessful then it returns the level at which someone can
592 + dequeue. If it sets level to TC_HTB_MAXDEPTH then no one can. */
593 +static struct sk_buff *htb_dequeue_prio(struct Qdisc *sch,int prio,int *level)
595 + struct sk_buff *skb = NULL;
596 + struct htb_sched *q = (struct htb_sched *)sch->data;
597 + struct htb_class **ap = q->active[prio], *cl = ap[*level];
598 + int done,top = TC_HTB_MAXDEPTH,rclev;
600 + HTB_DBG(4,1,"htb_deq_pr pr=%d lev=%d cl=%X\n",prio,*level,cl->classid);
601 + /* this is DRR algorithm */
605 + /* catch empty classes here; note that we don't remove them
606 + immediately after dequeue but rather delay remove to next
607 + DRR round because if packet arrive for just emptied class
608 + then we don't need to remove and again add it */
609 + if (!cl->q->q.qlen) {
610 + ap[*level] = cl; /* needed for HTB_DELETE in deactivate */
611 + htb_deactivate (q,cl,*level);
613 + HTB_DBG(4,2,"htb_deq_deact cl=%X ncl=%X\n",cl->classid,ap[*level]?ap[*level]->classid:0);
614 + if (ap[*level]) continue;
615 + *level = TC_HTB_MAXDEPTH;
616 + return NULL; /* NO class remains active */
618 + /* test whether class can send at all borrowing from level */
619 + if (cl->rcache_sn != q->sn) htb_ready_level(q,cl);
620 + rclev = cl->rc_level & 0xff; /* filter injecting flag out */
622 + HTB_DBG(4,2,"htb_deq_rd cl=%X rc_lev=0x%x dfct=%d qnt=%d\n",
623 + cl->classid,cl->rc_level,cl->deficit[*level],cl->quantum);
625 + if (rclev == TC_HTB_MAXDEPTH) {
626 + /* TODO: overlimit increment here is not proven correct */
627 + if (cl->deficit[*level] > 0) cl->stats.overlimits++;
628 + continue; /* can't send or borrow */
630 + /* if we can't send at this level, remember where we can */
631 + if (rclev > *level) {
632 + if (rclev < top) /* keep lowest level */
635 + HTB_DBG(4,2,"htb_deq_badlev top=%d\n",top);
638 + if (cl->deficit[*level] <= 0) {
639 + /* haven't allotment, increase and try again */
640 + done = 0; cl->deficit[*level] += cl->quantum;
643 + if ((skb = htb_dequeue_class(sch,cl)) == NULL) {
644 + /* nonempty class can't dequeue so that mark it as such;
645 + note that rcache_sn is already set and thus this remarking
646 + will be valid only for rest of this dequeue; this is
647 + possible if child class is non work conserving */
648 + cl->rc_level = TC_HTB_MAXDEPTH;
650 + HTB_DBG(4,2,"htb_deq_noskb cl=%X len=%d\n",cl->classid,cl->q->q.qlen);
654 + /* prepare next class if we can't stay valid */
655 + if ((cl->deficit[*level] -= skb->len) <= 0) cl = cl->active.next;
656 + else if (q->use_dcache)
657 + q->last_tx = cl; /* cache cl if it still can transmit */
660 + HTB_DBG(4,1,"htb_deq_haspkt ncl=%X sqlen=%d\n",cl->classid,sch->q.qlen);
663 + } while ((cl = cl->active.next) != ap[*level]);
667 + HTB_DBG(4,1,"htb_deq_quit top=%d\n",top);
671 +static struct sk_buff *htb_dequeue(struct Qdisc *sch)
673 + struct sk_buff *skb = NULL;
674 + struct htb_sched *q = (struct htb_sched *)sch->data;
675 + int prio,oklev,okprio = 0 /* avoid unused warning */,lev,i;
676 + struct htb_class *cl;
677 + psched_time_t endt;
679 + HTB_DBG(3,1,"htb_deq dircnt=%d ltx=%X\n",skb_queue_len(&q->direct_queue),
680 + q->last_tx?q->last_tx->classid:0);
682 + /* try to dequeue direct packets as high prio (!) to minimize cpu work */
683 + if ((skb = __skb_dequeue(&q->direct_queue)) != NULL) {
684 + sch->flags &= ~TCQ_F_THROTTLED;
689 + PSCHED_GET_TIME(q->now); /* htb_dequeue_class needs it too */
690 + q->delay = 0; q->sn++;
692 + /* well here I bite CBQ's speed :-) if last dequeued class is
693 + still active and is not deficit then we can dequeue it again */
694 + if ((cl = q->last_tx) != NULL && cl->q->q.qlen > 0 &&
695 + cl->deficit[cl->rc_level & 0xff] > 0 &&
696 + (skb = htb_dequeue_class(sch,cl)) != NULL) {
698 + cl->deficit[cl->rc_level & 0xff] -= skb->len;
699 + sch->flags &= ~TCQ_F_THROTTLED;
701 + HTB_DBG(3,1,"htb_deq_hit skb=%p\n",skb);
704 + q->last_tx = NULL; /* can't use cache ? invalidate */
706 + for (i = 0; i < TC_HTB_MAXDEPTH; i++) {
707 + /* first try: dequeue leaves (level 0) */
708 + oklev = TC_HTB_MAXDEPTH;
710 + for (prio = 0; prio < TC_HTB_NUMPRIO; prio++) {
711 + if (!q->active[prio][0]) continue;
712 + lev = 0; skb = htb_dequeue_prio(sch, prio, &lev);
713 + HTB_DBG(3,2,"htb_deq_1 i=%d p=%d skb=%p blev=%d\n",i,prio,skb,lev);
715 + sch->flags &= ~TCQ_F_THROTTLED;
719 + oklev = lev; okprio = prio;
722 + if (oklev >= TC_HTB_MAXDEPTH) break;
723 + /* second try: use ok level we learned in first try;
724 + it really should succeed */
726 + skb = htb_dequeue_prio(sch, okprio, &oklev);
727 + HTB_DBG(3,2,"htb_deq_2 p=%d lev=%d skb=%p\n",okprio,oklev,skb);
729 + sch->flags &= ~TCQ_F_THROTTLED;
732 + /* probably qdisc at oklev can't transmit - it is not good
733 + idea to have TBF as HTB's child ! retry with that node
736 + if (i >= TC_HTB_MAXDEPTH)
737 + printk(KERN_ERR "htb: too many dequeue trials\n");
739 + /* no-one gave us packet, setup timer if someone wants it */
740 + if (sch->q.qlen && !netif_queue_stopped(sch->dev) && q->delay) {
741 + long delay = PSCHED_US2JIFFIE(q->delay);
742 + if (delay == 0) delay = 1;
743 + if (delay > 5*HZ) {
744 + if (net_ratelimit())
745 + printk(KERN_INFO "HTB delay %ld > 5sec\n", delay);
748 + del_timer(&q->timer);
749 + q->timer.expires = jiffies + delay;
750 + add_timer(&q->timer);
751 + sch->flags |= TCQ_F_THROTTLED;
752 + sch->stats.overlimits++;
753 + HTB_DBG(3,1,"htb_deq t_delay=%ld\n",delay);
757 + static unsigned util = 0; unsigned d;
758 + PSCHED_GET_TIME(endt); q->deq_rate_c++;
759 + d = PSCHED_TDIFF(endt,q->now);
760 + q->utilz_c += d; util += d;
762 + /* special debug hack */
764 + memcpy (skb->data+28,_dbg,sizeof(_dbg));
765 + memset (_dbg,0,sizeof(_dbg));
769 + HTB_DBG(3,1,"htb_deq_end %s j=%lu skb=%p (28/11)\n",sch->dev->name,jiffies,skb);
773 +/* try to drop from each class (by prio) until one succeeds */
774 +static int htb_drop(struct Qdisc* sch)
776 + struct htb_sched *q = (struct htb_sched *)sch->data;
779 + for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
780 + struct htb_class *cl = q->active[prio][0];
782 + if (cl->q->ops->drop && cl->q->ops->drop(cl->q)) {
786 + } while ((cl = cl->active.next) != q->active[prio][0]);
791 +/* reset all classes */
792 +/* always called under BH & queue lock */
793 +static void htb_reset(struct Qdisc* sch)
795 + struct htb_sched *q = (struct htb_sched *)sch->data;
797 + HTB_DBG(0,1,"htb_reset sch=%X, handle=%X\n",(int)sch,sch->handle);
799 + for (i = 0; i < HTB_HSIZE; i++) {
800 + struct htb_class *cl = q->hash[i];
802 + if (cl->q) qdisc_reset(cl->q);
804 + } while ((cl = cl->hlist.next) != q->hash[i]);
806 + sch->flags &= ~TCQ_F_THROTTLED;
807 + del_timer(&q->timer);
808 + __skb_queue_purge(&q->direct_queue);
809 + sch->q.qlen = 0; q->last_tx = NULL;
812 +static int htb_init(struct Qdisc *sch, struct rtattr *opt)
814 + struct htb_sched *q = (struct htb_sched*)sch->data;
815 + struct rtattr *tb[TCA_HTB_INIT];
816 + struct tc_htb_glob *gopt;
819 + rtattr_parse(tb, TCA_HTB_INIT, RTA_DATA(opt), RTA_PAYLOAD(opt)) ||
820 + tb[TCA_HTB_INIT-1] == NULL ||
821 + RTA_PAYLOAD(tb[TCA_HTB_INIT-1]) < sizeof(*gopt))
824 + gopt = RTA_DATA(tb[TCA_HTB_INIT-1]);
825 + memset(q,0,sizeof(*q));
826 + q->debug = gopt->debug;
827 + HTB_DBG(0,1,"htb_init sch=%p handle=%X r2q=%d\n",sch,sch->handle,gopt->rate2quantum);
828 + init_timer(&q->timer);
829 + init_timer(&q->rttim);
830 + skb_queue_head_init(&q->direct_queue);
831 + q->direct_qlen = sch->dev->tx_queue_len;
832 + q->timer.function = htb_timer;
833 + q->timer.data = (unsigned long)sch;
834 + q->rttim.function = htb_rate_timer;
835 + q->rttim.data = (unsigned long)sch;
836 + q->rttim.expires = jiffies + HZ;
837 + add_timer(&q->rttim);
838 + if ((q->rate2quantum = gopt->rate2quantum) < 1)
839 + q->rate2quantum = 1;
840 + q->defcls = gopt->defcls;
841 + q->use_dcache = gopt->use_dcache;
847 +static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
849 + struct htb_sched *q = (struct htb_sched*)sch->data;
850 + unsigned char *b = skb->tail;
851 + struct rtattr *rta;
852 + struct tc_htb_glob gopt;
853 + HTB_DBG(0,1,"htb_dump sch=%p, handle=%X\n",sch,sch->handle);
856 + gopt.deq_rate = q->deq_rate/HTB_EWMAC;
857 + gopt.utilz = q->utilz/HTB_EWMAC;
858 + gopt.trials = q->trials/HTB_EWMAC;
859 + gopt.dcache_hits = q->dcache_hits;
860 + gopt.direct_pkts = q->direct_pkts;
862 + gopt.use_dcache = q->use_dcache;
863 + gopt.rate2quantum = q->rate2quantum;
864 + gopt.defcls = q->defcls;
865 + gopt.debug = q->debug;
866 + rta = (struct rtattr*)b;
867 + RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
868 + RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt);
869 + rta->rta_len = skb->tail - b;
870 + sch->stats.qlen = sch->q.qlen;
871 + RTA_PUT(skb, TCA_STATS, sizeof(sch->stats), &sch->stats);
876 + skb_trim(skb, skb->tail - skb->data);
881 +htb_dump_class(struct Qdisc *sch, unsigned long arg,
882 + struct sk_buff *skb, struct tcmsg *tcm)
884 + struct htb_sched *q = (struct htb_sched*)sch->data;
885 + struct htb_class *cl = (struct htb_class*)arg;
886 + unsigned char *b = skb->tail;
887 + struct rtattr *rta;
888 + struct tc_htb_opt opt;
890 + HTB_DBG(0,1,"htb_dump_class handle=%X clid=%X\n",sch->handle,cl->classid);
893 + tcm->tcm_parent = cl->parent ? cl->parent->classid : TC_H_ROOT;
894 + tcm->tcm_handle = cl->classid;
896 + tcm->tcm_info = cl->q->handle;
897 + cl->stats.qlen = cl->q->q.qlen;
900 + rta = (struct rtattr*)b;
901 + RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
903 + memset (&opt,0,sizeof(opt));
905 + opt.rate = cl->rate->rate; opt.buffer = cl->buffer;
906 + opt.ceil = cl->ceil->rate; opt.cbuffer = cl->cbuffer;
907 + opt.quantum = cl->quantum; opt.prio = cl->prio;
908 + opt.level = cl->level; opt.injectd = cl->injectd;
909 + RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt);
910 + rta->rta_len = skb->tail - b;
912 + cl->stats.bps = cl->rate_bytes/(HTB_EWMAC*HTB_HSIZE);
913 + cl->stats.pps = cl->rate_packets/(HTB_EWMAC*HTB_HSIZE);
915 + cl->xstats.tokens = cl->tokens;
916 + cl->xstats.ctokens = cl->ctokens;
917 + RTA_PUT(skb, TCA_STATS, sizeof(cl->stats), &cl->stats);
918 + RTA_PUT(skb, TCA_XSTATS, sizeof(cl->xstats), &cl->xstats);
923 + skb_trim(skb, b - skb->data);
927 +static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
928 + struct Qdisc **old)
930 + struct htb_class *cl = (struct htb_class*)arg;
932 + if (cl && !cl->level) {
934 + if ((new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops)) == NULL)
937 + if ((*old = xchg(&cl->q, new)) != NULL) /* xchg is atomical :-) */
944 +static struct Qdisc *
945 +htb_leaf(struct Qdisc *sch, unsigned long arg)
947 + struct htb_class *cl = (struct htb_class*)arg;
948 + return cl ? cl->q : NULL;
951 +static unsigned long htb_get(struct Qdisc *sch, u32 classid)
953 + struct htb_sched *q = (struct htb_sched *)sch->data;
954 + struct htb_class *cl = htb_find(classid,sch);
955 + HTB_DBG(0,1,"htb_get clid=%X q=%p cl=%p ref=%d\n",classid,q,cl,cl?cl->refcnt:0);
956 + if (cl) cl->refcnt++;
957 + return (unsigned long)cl;
960 +static void htb_destroy_filters(struct tcf_proto **fl)
962 + struct tcf_proto *tp;
964 + while ((tp = *fl) != NULL) {
966 + tp->ops->destroy(tp);
970 +static void htb_destroy_class(struct Qdisc* sch,struct htb_class *cl)
972 + struct htb_sched *q = (struct htb_sched *)sch->data;
973 + HTB_DBG(0,1,"htb_destrycls clid=%X q=%p ref=%d\n", cl?cl->classid:0,cl->q,cl?cl->refcnt:0);
974 + if (cl->q) qdisc_destroy(cl->q);
975 + qdisc_put_rtab(cl->rate);
976 + qdisc_put_rtab(cl->ceil);
977 +#ifdef CONFIG_NET_ESTIMATOR
978 + qdisc_kill_estimator(&cl->stats);
980 + htb_destroy_filters (&cl->filter_list);
981 + /* remove children */
982 + while (cl->children) htb_destroy_class (sch,cl->children);
984 + /* remove class from all lists it is on */
986 + if (cl->hlist.prev)
987 + HTB_DELETE(hlist,cl,q->hash[htb_hash(cl->classid)]);
988 + if (cl->active.prev)
989 + htb_deactivate (q,cl,0);
991 + HTB_DELETE(sibling,cl,cl->parent->children);
993 + HTB_DELETE(sibling,cl,q->root);
998 +/* always called under BH & queue lock */
999 +static void htb_destroy(struct Qdisc* sch)
1001 + struct htb_sched *q = (struct htb_sched *)sch->data;
1002 + HTB_DBG(0,1,"htb_destroy q=%p\n",q);
1004 + del_timer_sync (&q->timer);
1005 + del_timer_sync (&q->rttim);
1006 + while (q->root) htb_destroy_class(sch,q->root);
1007 + htb_destroy_filters(&q->filter_list);
1008 + __skb_queue_purge(&q->direct_queue);
1009 + MOD_DEC_USE_COUNT;
1012 +static void htb_put(struct Qdisc *sch, unsigned long arg)
1014 + struct htb_sched *q = (struct htb_sched *)sch->data;
1015 + struct htb_class *cl = (struct htb_class*)arg;
1016 + HTB_DBG(0,1,"htb_put q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);
1018 + if (--cl->refcnt == 0)
1019 + htb_destroy_class(sch,cl);
1023 +htb_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **tca,
1024 + unsigned long *arg)
1026 + int err = -EINVAL,h;
1027 + struct htb_sched *q = (struct htb_sched *)sch->data;
1028 + struct htb_class *cl = (struct htb_class*)*arg,*parent;
1029 + struct rtattr *opt = tca[TCA_OPTIONS-1];
1030 + struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
1031 + struct rtattr *tb[TCA_HTB_RTAB];
1032 + struct tc_htb_opt *hopt;
1034 + if (parentid == TC_H_ROOT) parent = NULL;
1035 + else parent = htb_find (parentid,sch);
1037 + /* extract all subattrs from opt attr */
1039 + rtattr_parse(tb, TCA_HTB_RTAB, RTA_DATA(opt), RTA_PAYLOAD(opt)) ||
1040 + tb[TCA_HTB_PARMS-1] == NULL ||
1041 + RTA_PAYLOAD(tb[TCA_HTB_PARMS-1]) < sizeof(*hopt))
1044 + hopt = RTA_DATA(tb[TCA_HTB_PARMS-1]);
1045 + HTB_DBG(0,1,"htb_chg cl=%p, clid=%X, opt/prio=%d, rate=%u, buff=%d, quant=%d\n", cl,cl?cl->classid:0,(int)hopt->prio,hopt->rate.rate,hopt->buffer,hopt->quantum);
1046 + rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB-1]);
1047 + ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB-1]);
1048 + if (!rtab || !ctab) goto failure;
1050 + if (!cl) { /* new class */
1051 + /* check maximal depth */
1052 + if (parent && parent->parent && parent->parent->level < 2) {
1053 + printk(KERN_ERR "htb: tree is too deep\n");
1057 + cl = kmalloc(sizeof(*cl), GFP_KERNEL);
1058 + if (cl == NULL) goto failure;
1059 + memset(cl, 0, sizeof(*cl));
1060 + cl->refcnt = 1; cl->level = 0; /* assume leaf */
1062 + if (parent && !parent->level) {
1063 + /* turn parent into inner node */
1064 + qdisc_destroy (parent->q); parent->q = &noop_qdisc;
1065 + parent->level = (parent->parent ?
1066 + parent->parent->level : TC_HTB_MAXDEPTH) - 1;
1068 + /* leaf (we) needs elementary qdisc */
1069 + if (!(cl->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops)))
1070 + cl->q = &noop_qdisc;
1072 + cl->classid = classid; cl->parent = parent;
1073 + cl->tokens = hopt->buffer;
1074 + cl->ctokens = hopt->cbuffer;
1075 + cl->mbuffer = 60000000; /* 1min */
1076 + PSCHED_GET_TIME(cl->t_c);
1078 + /* attach to the hash list and parent's family */
1079 + sch_tree_lock(sch);
1080 + h = htb_hash(classid);
1081 + if (!cl->hlist.prev)
1082 + HTB_INSERTB(hlist,q->hash[h],cl);
1084 + HTB_INSERTB(sibling,parent->children,cl);
1085 + else HTB_INSERTB(sibling,q->root,cl);
1087 + } else sch_tree_lock(sch);
1089 + q->last_tx = NULL;
1090 + cl->quantum = rtab->rate.rate / q->rate2quantum;
1091 + cl->injectd = hopt->injectd;
1092 + if (cl->quantum < 100) cl->quantum = 100;
1093 + if (cl->quantum > 60000) cl->quantum = 60000;
1094 + if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
1095 + cl->prio = TC_HTB_NUMPRIO - 1;
1097 + cl->buffer = hopt->buffer;
1098 + cl->cbuffer = hopt->cbuffer;
1099 + if (cl->rate) qdisc_put_rtab(cl->rate); cl->rate = rtab;
1100 + if (cl->ceil) qdisc_put_rtab(cl->ceil); cl->ceil = ctab;
1101 + sch_tree_unlock(sch);
1103 + *arg = (unsigned long)cl;
1107 + if (rtab) qdisc_put_rtab(rtab);
1108 + if (ctab) qdisc_put_rtab(ctab);
1112 +static int htb_delete(struct Qdisc *sch, unsigned long arg)
1114 + struct htb_sched *q = (struct htb_sched *)sch->data;
1115 + struct htb_class *cl = (struct htb_class*)arg;
1116 + HTB_DBG(0,1,"htb_delete q=%p cl=%X ref=%d\n",q,cl?cl->classid:0,cl?cl->refcnt:0);
1118 + if (cl->children || cl->filter_cnt) return -EBUSY;
1119 + sch_tree_lock(sch);
1120 + /* delete from hash and active; remainder in destroy_class */
1121 + if (cl->hlist.prev)
1122 + HTB_DELETE(hlist,cl,q->hash[htb_hash(cl->classid)]);
1123 + if (cl->active.prev)
1124 + htb_deactivate (q,cl,0);
1125 + q->last_tx = NULL;
1127 + if (--cl->refcnt == 0)
1128 + htb_destroy_class(sch,cl);
1130 + sch_tree_unlock(sch);
1134 +static struct tcf_proto **htb_find_tcf(struct Qdisc *sch, unsigned long arg)
1136 + struct htb_sched *q = (struct htb_sched *)sch->data;
1137 + struct htb_class *cl = (struct htb_class *)arg;
1138 + struct tcf_proto **fl = cl ? &cl->filter_list : &q->filter_list;
1139 + HTB_DBG(0,2,"htb_tcf q=%p clid=%X fref=%d fl=%p\n",q,cl?cl->classid:0,cl?cl->filter_cnt:q->filter_cnt,*fl);
1143 +static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
1146 + struct htb_sched *q = (struct htb_sched *)sch->data;
1147 + struct htb_class *cl = htb_find (classid,sch);
1148 + HTB_DBG(0,2,"htb_bind q=%p clid=%X cl=%p fref=%d\n",q,classid,cl,cl?cl->filter_cnt:q->filter_cnt);
1149 + /*if (cl && !cl->level) return 0;
1150 + The line above used to be there to prevent attaching filters to leaves. But
1151 + at least tc_index filter uses this just to get class for other reasons so
1152 + that we have to allow for it.
1154 + if (cl) cl->filter_cnt++; else q->filter_cnt++;
1155 + return (unsigned long)cl;
1158 +static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
1160 + struct htb_sched *q = (struct htb_sched *)sch->data;
1161 + struct htb_class *cl = (struct htb_class *)arg;
1162 + HTB_DBG(0,2,"htb_unbind q=%p cl=%p fref=%d\n",q,cl,cl?cl->filter_cnt:q->filter_cnt);
1163 + if (cl) cl->filter_cnt--; else q->filter_cnt--;
1166 +static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1168 + struct htb_sched *q = (struct htb_sched *)sch->data;
1174 + for (i = 0; i < HTB_HSIZE; i++) {
1175 + struct htb_class *cl = q->hash[i];
1177 + if (arg->count < arg->skip) {
1181 + if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
1187 + } while ((cl = cl->hlist.next) != q->hash[i]);
1191 +static struct Qdisc_class_ops htb_class_ops =
1203 + htb_unbind_filter,
1208 +struct Qdisc_ops htb_qdisc_ops =
1213 + sizeof(struct htb_sched),
1223 + NULL /* htb_change */,
1229 +int init_module(void)
1231 + return register_qdisc(&htb_qdisc_ops);
1234 +void cleanup_module(void)
1236 + unregister_qdisc(&htb_qdisc_ops);
1238 +MODULE_LICENSE("GPL");
1240 --- linux-2.4orig/include/linux/pkt_sched.h Mon Feb 28 03:45:10 2000
1241 +++ linux-2.4/include/linux/pkt_sched.h Fri Dec 7 18:06:02 2001
1242 @@ -248,6 +248,55 @@ struct tc_gred_sopt
1247 +#define TC_HTB_NUMPRIO 4
1248 +#define TC_HTB_MAXDEPTH 4
1252 + struct tc_ratespec rate;
1253 + struct tc_ratespec ceil;
1256 + __u32 quantum; /* out only */
1257 + __u32 level; /* out only */
1259 + __u8 injectd; /* inject class distance */
1264 + __u32 rate2quantum; /* bps->quantum divisor */
1265 + __u32 defcls; /* default class number */
1266 + __u32 use_dcache; /* use dequeue cache ? */
1267 + __u32 debug; /* debug flags */
1271 + __u32 deq_rate; /* dequeue rate */
1272 + __u32 utilz; /* dequeue utilization */
1273 + __u32 trials; /* deq_prio trials per dequeue */
1274 + __u32 dcache_hits;
1275 + __u32 direct_pkts; /* count of non-shaped packets */
1285 +struct tc_htb_xstats
1289 + __u32 giants; /* too big packets (rate will not be accurate) */
1290 + __u32 injects; /* how many times leaf used injected bw */
1297 #define TC_CBQ_MAXPRIO 8
1298 --- linux-2.4orig/net/sched/sch_api.c Sun Jan 13 15:10:25 2002
1299 +++ linux-2.4/net/sched/sch_api.c Sun Jan 13 14:54:48 2002
1300 @@ -1205,6 +1205,9 @@ int __init pktsched_init(void)
1301 #ifdef CONFIG_NET_SCH_CBQ
1304 +#ifdef CONFIG_NET_SCH_HTB
1307 #ifdef CONFIG_NET_SCH_CSZ