tomato.git: K2.6 patches and updates
release/src-rt/linux/linux-2.6/include/net/sch_generic.h
blob 58a793b77527337cd5118081efe125f133002873

#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table
{
	struct tc_ratespec	rate;
	u32			data[256];	/* transmission time per size bucket, computed by userspace tc */
	struct qdisc_rate_table	*next;
	int			refcnt;
};

struct Qdisc
{
	int			(*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
	struct sk_buff *	(*dequeue)(struct Qdisc *dev);
	unsigned		flags;
#define TCQ_F_BUILTIN	1
#define TCQ_F_THROTTLED	2
#define TCQ_F_INGRESS	4
	int			padded;
	struct Qdisc_ops	*ops;
	u32			handle;
	u32			parent;
	atomic_t		refcnt;
	struct sk_buff_head	q;
	struct net_device	*dev;
	struct list_head	list;

	struct gnet_stats_basic		bstats;
	struct gnet_stats_queue		qstats;
	struct gnet_stats_rate_est	rate_est;
	spinlock_t		*stats_lock;
	struct rcu_head		q_rcu;
	int			(*reshape_fail)(struct sk_buff *skb,
					struct Qdisc *q);

	/* This field is deprecated, but it is still used by CBQ
	 * and it will live until a better solution is invented.
	 */
	struct Qdisc		*__parent;
};
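
/*
 * Illustrative only, not part of the original header: a minimal sketch of
 * how the core hands packets to the hooks above.  In 2.6, dev_queue_xmit()
 * does roughly the following under dev->queue_lock; the function name
 * example_xmit is hypothetical, and qdisc_run() is declared in
 * net/pkt_sched.h, not here.
 */
#if 0
static int example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct Qdisc *q = dev->qdisc;
	int rc = q->enqueue(skb, q);	/* queue (or drop) via the root qdisc */

	qdisc_run(dev);			/* then drain packets out with q->dequeue() */
	return rc;
}
#endif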

struct Qdisc_class_ops
{
	/* Child qdisc manipulation */
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*get)(struct Qdisc *, u32 classid);
	void			(*put)(struct Qdisc *, unsigned long);
	int			(*change)(struct Qdisc *, u32, u32,
					struct rtattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_proto **	(*tcf_chain)(struct Qdisc *, unsigned long);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);
};

struct Qdisc_ops
{
	struct Qdisc_ops	*next;
	struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int			(*enqueue)(struct sk_buff *, struct Qdisc *);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	int			(*requeue)(struct sk_buff *, struct Qdisc *);
	unsigned int		(*drop)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct rtattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct rtattr *arg);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};

struct tcf_result
{
	unsigned long	class;
	u32		classid;
};

struct tcf_proto_ops
{
	struct tcf_proto_ops	*next;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *, struct tcf_proto *,
					struct tcf_result *);
	int			(*init)(struct tcf_proto *);
	void			(*destroy)(struct tcf_proto *);

	unsigned long		(*get)(struct tcf_proto *, u32 handle);
	void			(*put)(struct tcf_proto *, unsigned long);
	int			(*change)(struct tcf_proto *, unsigned long,
					u32 handle, struct rtattr **,
					unsigned long *);
	int			(*delete)(struct tcf_proto *, unsigned long);
	void			(*walk)(struct tcf_proto *, struct tcf_walker *arg);

	/* rtnetlink specific */
	int			(*dump)(struct tcf_proto *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);

	struct module		*owner;
};

struct tcf_proto
{
	/* Fast access part */
	struct tcf_proto	*next;
	void			*root;
	int			(*classify)(struct sk_buff *, struct tcf_proto *,
					struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	struct tcf_proto_ops	*ops;
};
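
/*
 * Illustrative only, not part of the original header: the filters attached
 * to a qdisc or class hang off a singly linked tcf_proto list ordered by
 * prio, and the core walks it until one classifier claims the packet.  This
 * mirrors what tc_classify() does, minus the CONFIG_NET_CLS_ACT
 * reclassification loop; example_classify is a hypothetical name.
 */
#if 0
static int example_classify(struct sk_buff *skb, struct tcf_proto *tp,
			    struct tcf_result *res)
{
	for (; tp; tp = tp->next) {
		int err = tp->classify(skb, tp, res);
		if (err >= 0)
			return err;	/* a filter matched; res holds the class */
	}
	return -1;			/* no filter claimed the packet */
}
#endif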

extern void qdisc_lock_tree(struct net_device *dev);
extern void qdisc_unlock_tree(struct net_device *dev);

#define sch_tree_lock(q)	qdisc_lock_tree((q)->dev)
#define sch_tree_unlock(q)	qdisc_unlock_tree((q)->dev)
#define tcf_tree_lock(tp)	qdisc_lock_tree((tp)->q->dev)
#define tcf_tree_unlock(tp)	qdisc_unlock_tree((tp)->q->dev)

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;

extern void dev_init_scheduler(struct net_device *dev);
extern void dev_shutdown(struct net_device *dev);
extern void dev_activate(struct net_device *dev);
extern void dev_deactivate(struct net_device *dev);
extern void qdisc_reset(struct Qdisc *qdisc);
extern void qdisc_destroy(struct Qdisc *qdisc);
extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
extern struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops);
extern struct Qdisc *qdisc_create_dflt(struct net_device *dev,
				       struct Qdisc_ops *ops, u32 parentid);
extern void tcf_destroy(struct tcf_proto *tp);
extern void tcf_destroy_chain(struct tcf_proto *fl);
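
/*
 * Illustrative only, not part of the original header: a hedged sketch of
 * the usual qdisc lifecycle built from the declarations above.
 * dev_activate() does roughly this when a device comes up with no qdisc
 * configured; example_attach_default is a hypothetical name, and the
 * pfifo_fast_ops reference reflects net/sched/sch_generic.c and may differ
 * in detail in this tree.
 */
#if 0
static void example_attach_default(struct net_device *dev)
{
	struct Qdisc *qdisc;

	qdisc_lock_tree(dev);
	qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops, TC_H_ROOT);
	if (qdisc == NULL)
		qdisc = &noop_qdisc;	/* fall back to the built-in no-op qdisc */
	dev->qdisc_sleeping = qdisc;
	qdisc_unlock_tree(dev);
}
#endif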

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	__skb_queue_tail(list, skb);
	sch->qstats.backlog += skb->len;
	sch->bstats.bytes += skb->len;
	sch->bstats.packets++;

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= skb->len;

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	return __qdisc_dequeue_head(sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue_tail(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= skb->len;

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
	return __qdisc_dequeue_tail(sch, &sch->q);
}

static inline int __qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch,
				  struct sk_buff_head *list)
{
	__skb_queue_head(list, skb);
	sch->qstats.backlog += skb->len;
	sch->qstats.requeues++;

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_requeue(skb, sch, &sch->q);
}

static inline void __qdisc_reset_queue(struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	/*
	 * We do not know the backlog in bytes of this list, it
	 * is up to the caller to correct it
	 */
	skb_queue_purge(list);
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
	__qdisc_reset_queue(sch, &sch->q);
	sch->qstats.backlog = 0;
}

static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
					      struct sk_buff_head *list)
{
	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);

	if (likely(skb != NULL)) {
		unsigned int len = skb->len;
		kfree_skb(skb);
		return len;
	}

	return 0;
}

static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
{
	return __qdisc_queue_drop(sch, &sch->q);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
	kfree_skb(skb);
	sch->qstats.drops++;

	return NET_XMIT_DROP;
}

static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{
	sch->qstats.drops++;

#ifdef CONFIG_NET_CLS_POLICE
	if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
		goto drop;

	return NET_XMIT_SUCCESS;

drop:
#endif
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
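
/*
 * Illustrative only, not part of the original header: a minimal sketch of a
 * classless FIFO qdisc built from Qdisc_ops and the helpers above, close in
 * spirit to net/sched/sch_fifo.c.  The 100-packet bound and the name
 * example_fifo are made up; register_qdisc() is declared in net/pkt_sched.h.
 */
#if 0
static int example_fifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	if (likely(skb_queue_len(&sch->q) < 100))
		return qdisc_enqueue_tail(skb, sch);	/* accounts backlog and bstats */

	return qdisc_drop(skb, sch);			/* over limit: free skb, bump drops */
}

static struct Qdisc_ops example_fifo_ops = {
	.id		= "example_fifo",
	.priv_size	= 0,
	.enqueue	= example_fifo_enqueue,
	.dequeue	= qdisc_dequeue_head,
	.requeue	= qdisc_requeue,
	.drop		= qdisc_queue_drop,
	.reset		= qdisc_reset_queue,
	.owner		= THIS_MODULE,
};

/* A module would then call register_qdisc(&example_fifo_ops) from its init. */
#endif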

/* Lookup a qdisc_rate_table to determine how long it will take to send a
 * packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, int pktlen)
{
	int slot = pktlen + rtab->rate.cell_align;
	if (slot < 0)
		slot = 0;
	slot >>= rtab->rate.cell_log;
	if (slot > 255)
		return rtab->data[255] + 1;
	return rtab->data[slot];
}
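
/*
 * Worked example (added for illustration): with cell_log = 3 and
 * cell_align = 0, a 1500 byte packet falls into slot (1500 + 0) >> 3 = 187,
 * so qdisc_l2t() returns rtab->data[187], the precomputed transmission time
 * for that size bucket.  Sizes beyond the last bucket (slot > 255) return a
 * sentinel one tick past data[255].
 */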

#ifdef CONFIG_NET_CLS_ACT
static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n = skb_clone(skb, gfp_mask);

	if (n) {
		/* clear the clone's traffic-control verdict state so it is
		 * classified from scratch */
		n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
		n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
		n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
	}
	return n;
}
#endif

#endif