/* net/sched/sch_atm.c - ATM VC selection "queueing discipline" */

/* Written 1998-2000 by Werner Almesberger, EPFL ICA */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/atmdev.h>
#include <linux/atmclip.h>
#include <linux/rtnetlink.h>
#include <linux/file.h>		/* for fput */
#include <net/netlink.h>
#include <net/pkt_sched.h>

extern struct socket *sockfd_lookup(int fd, int *err);	/* @@@ fix this */

/*
 * The ATM queuing discipline provides a framework for invoking classifiers
 * (aka "filters"), which in turn select classes of this queuing discipline.
 * Each class maps the flow(s) it is handling to a given VC. Multiple classes
 * may share the same VC.
 *
 * When creating a class, VCs are specified by passing the number of the open
 * socket descriptor by which the calling process references the VC. The kernel
 * keeps the VC open at least until all classes using it are removed.
 * (See the usage sketch below.)
 *
 * In this file, most functions are named atm_tc_* to avoid confusion with all
 * the atm_* in net/atm. This naming convention differs from what's used in the
 * rest of net/sched.
 *
 * Known bugs:
 *  - sometimes messes up the IP stack
 *  - any manipulations besides the few operations described in the README are
 *    untested and likely to crash the system
 *  - should lock the flow while there is data in the queue (?)
 */
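
/*
 * Usage sketch (illustrative only; the values and set-up below are
 * hypothetical, not mandated by this module): a class's VC is supplied by
 * user space as the file descriptor of an already-open ATM socket, carried
 * in the TCA_ATM_FD netlink attribute that atm_tc_change() parses. Roughly:
 *
 *	int fd = socket(PF_ATMPVC, SOCK_DGRAM, 0);
 *	struct sockaddr_atmpvc addr = {
 *		.sap_family   = AF_ATMPVC,
 *		.sap_addr.itf = 0,	// interface number (example value)
 *		.sap_addr.vpi = 0,	// example VPI
 *		.sap_addr.vci = 42,	// example VCI
 *	};
 *	// QoS set-up via setsockopt(fd, SOL_ATM, SO_ATMQOS, ...) omitted
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	// fd is then placed in the TCA_ATM_FD attribute of an RTM_NEWTCLASS
 *	// request (normally built by tc); the kernel end is atm_tc_change().
 */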

#define VCC2FLOW(vcc) ((struct atm_flow_data *) ((vcc)->user_back))

struct atm_flow_data {
	struct Qdisc		*q;		/* FIFO, TBF, etc. */
	struct tcf_proto	*filter_list;
	struct atm_vcc		*vcc;		/* VCC; NULL if VCC is closed */
	void			(*old_pop)(struct atm_vcc *vcc,
					   struct sk_buff *skb); /* chaining */
	struct atm_qdisc_data	*parent;	/* parent qdisc */
	struct socket		*sock;		/* for closing */
	u32			classid;	/* x:y type ID */
	int			ref;		/* reference count */
	struct gnet_stats_basic_packed	bstats;
	struct gnet_stats_queue	qstats;
	struct list_head	list;
	struct atm_flow_data	*excess;	/* flow for excess traffic;
						   NULL to set CLP instead */
	int			hdr_len;
	unsigned char		hdr[0];		/* header data; MUST BE LAST */
};

struct atm_qdisc_data {
	struct atm_flow_data	link;		/* unclassified skbs go here */
	struct list_head	flows;		/* NB: "link" is also on this
						   list */
	struct tasklet_struct	task;		/* dequeue tasklet */
};

/* ------------------------- Class/flow operations ------------------------- */

static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;

	list_for_each_entry(flow, &p->flows, list) {
		if (flow->classid == classid)
			return flow;
	}
	return NULL;
}

static int atm_tc_graft(struct Qdisc *sch, unsigned long arg,
			struct Qdisc *new, struct Qdisc **old)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)arg;

	pr_debug("atm_tc_graft(sch %p,[qdisc %p],flow %p,new %p,old %p)\n",
		sch, p, flow, new, old);
	if (list_empty(&flow->list))
		return -EINVAL;
	if (!new)
		new = &noop_qdisc;
	*old = flow->q;
	flow->q = new;
	if (*old)
		qdisc_reset(*old);
	return 0;
}

static struct Qdisc *atm_tc_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct atm_flow_data *flow = (struct atm_flow_data *)cl;

	pr_debug("atm_tc_leaf(sch %p,flow %p)\n", sch, flow);
	return flow ? flow->q : NULL;
}

static unsigned long atm_tc_get(struct Qdisc *sch, u32 classid)
{
	struct atm_qdisc_data *p __maybe_unused = qdisc_priv(sch);
	struct atm_flow_data *flow;

	pr_debug("atm_tc_get(sch %p,[qdisc %p],classid %x)\n", sch, p, classid);
	flow = lookup_flow(sch, classid);
	if (flow)
		flow->ref++;
	pr_debug("atm_tc_get: flow %p\n", flow);
	return (unsigned long)flow;
}

static unsigned long atm_tc_bind_filter(struct Qdisc *sch,
					unsigned long parent, u32 classid)
{
	return atm_tc_get(sch, classid);
}

/*
 * atm_tc_put handles all destructions, including the ones that are explicitly
 * requested (atm_tc_destroy, etc.). The assumption here is that we never drop
 * anything that still seems to be in use.
 */
static void atm_tc_put(struct Qdisc *sch, unsigned long cl)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)cl;

	pr_debug("atm_tc_put(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
	if (--flow->ref)
		return;
	pr_debug("atm_tc_put: destroying\n");
	list_del_init(&flow->list);
	pr_debug("atm_tc_put: qdisc %p\n", flow->q);
	qdisc_destroy(flow->q);
	tcf_destroy_chain(&flow->filter_list);
	if (flow->sock) {
		pr_debug("atm_tc_put: f_count %ld\n",
			file_count(flow->sock->file));
		flow->vcc->pop = flow->old_pop;
		sockfd_put(flow->sock);
	}
	if (flow->excess)
		atm_tc_put(sch, (unsigned long)flow->excess);
	if (flow != &p->link)
		kfree(flow);
	/*
	 * If flow == &p->link, the qdisc no longer works at this point and
	 * needs to be removed. (By the caller of atm_tc_put.)
	 */
}

static void sch_atm_pop(struct atm_vcc *vcc, struct sk_buff *skb)
{
	struct atm_qdisc_data *p = VCC2FLOW(vcc)->parent;

	pr_debug("sch_atm_pop(vcc %p,skb %p,[qdisc %p])\n", vcc, skb, p);
	VCC2FLOW(vcc)->old_pop(vcc, skb);
	tasklet_schedule(&p->task);
}

static const u8 llc_oui_ip[] = {
	0xaa,			/* DSAP: non-ISO */
	0xaa,			/* SSAP: non-ISO */
	0x03,			/* Ctrl: Unnumbered Information Command PDU */
	0x00,			/* OUI: EtherType */
	0x00, 0x00,
	0x08, 0x00
};				/* Ethertype IP (0800) */
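
/*
 * The eight bytes above form the RFC 1483/2684 LLC/SNAP encapsulation header
 * for an IP (EtherType 0x0800) payload. They are used as the default header
 * when a class is created without an explicit TCA_ATM_HDR attribute (see
 * atm_tc_change() below); sch_atm_dequeue() pushes flow->hdr in front of
 * every packet before handing it to vcc->send().
 */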

static const struct nla_policy atm_policy[TCA_ATM_MAX + 1] = {
	[TCA_ATM_FD]		= { .type = NLA_U32 },
	[TCA_ATM_EXCESS]	= { .type = NLA_U32 },
};

static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
			 struct nlattr **tca, unsigned long *arg)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)*arg;
	struct atm_flow_data *excess = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ATM_MAX + 1];
	struct socket *sock;
	int fd, error, hdr_len;
	void *hdr;

	pr_debug("atm_tc_change(sch %p,[qdisc %p],classid %x,parent %x,"
		"flow %p,opt %p)\n", sch, p, classid, parent, flow, opt);
	/*
	 * The concept of parents doesn't apply for this qdisc.
	 */
	if (parent && parent != TC_H_ROOT && parent != sch->handle)
		return -EINVAL;
	/*
	 * ATM classes cannot be changed. In order to change properties of the
	 * ATM connection, that socket needs to be modified directly (via the
	 * native ATM API). In order to send a flow to a different VC, the old
	 * class needs to be removed and a new one added. (This may be changed
	 * later.)
	 */
	if (flow)
		return -EBUSY;
	if (opt == NULL)
		return -EINVAL;

	error = nla_parse_nested(tb, TCA_ATM_MAX, opt, atm_policy);
	if (error < 0)
		return error;

	if (!tb[TCA_ATM_FD])
		return -EINVAL;
	fd = nla_get_u32(tb[TCA_ATM_FD]);
	pr_debug("atm_tc_change: fd %d\n", fd);
	if (tb[TCA_ATM_HDR]) {
		hdr_len = nla_len(tb[TCA_ATM_HDR]);
		hdr = nla_data(tb[TCA_ATM_HDR]);
	} else {
		hdr_len = RFC1483LLC_LEN;
		hdr = NULL;	/* default LLC/SNAP for IP */
	}
	if (!tb[TCA_ATM_EXCESS])
		excess = NULL;
	else {
		excess = (struct atm_flow_data *)
			atm_tc_get(sch, nla_get_u32(tb[TCA_ATM_EXCESS]));
		if (!excess)
			return -ENOENT;
	}
	pr_debug("atm_tc_change: type %d, payload %d, hdr_len %d\n",
		 opt->nla_type, nla_len(opt), hdr_len);
	sock = sockfd_lookup(fd, &error);
	if (!sock)
		return error;	/* f_count++ */
	pr_debug("atm_tc_change: f_count %ld\n", file_count(sock->file));
	if (sock->ops->family != PF_ATMSVC && sock->ops->family != PF_ATMPVC) {
		error = -EPROTOTYPE;
		goto err_out;
	}
	/* @@@ should check if the socket is really operational or we'll crash
	   on vcc->send */
	if (classid) {
		if (TC_H_MAJ(classid ^ sch->handle)) {
			pr_debug("atm_tc_change: classid mismatch\n");
			error = -EINVAL;
			goto err_out;
		}
		if (!list_empty(&flow->list)) {
			error = -EEXIST;
			goto err_out;
		}
	} else {
		int i;
		unsigned long cl;

		for (i = 1; i < 0x8000; i++) {
			classid = TC_H_MAKE(sch->handle, 0x8000 | i);
			cl = atm_tc_get(sch, classid);
			if (!cl)
				break;
			atm_tc_put(sch, cl);
		}
	}
	pr_debug("atm_tc_change: new id %x\n", classid);
	flow = kzalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL);
	pr_debug("atm_tc_change: flow %p\n", flow);
	if (!flow) {
		error = -ENOBUFS;
		goto err_out;
	}
	flow->filter_list = NULL;
	flow->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
				    &pfifo_qdisc_ops, classid);
	if (!flow->q)
		flow->q = &noop_qdisc;
	pr_debug("atm_tc_change: qdisc %p\n", flow->q);
	flow->sock = sock;
	flow->vcc = ATM_SD(sock);	/* speedup */
	flow->vcc->user_back = flow;
	pr_debug("atm_tc_change: vcc %p\n", flow->vcc);
	flow->old_pop = flow->vcc->pop;
	flow->parent = p;
	flow->vcc->pop = sch_atm_pop;
	flow->classid = classid;
	flow->ref = 1;
	flow->excess = excess;
	list_add(&flow->list, &p->link.list);
	flow->hdr_len = hdr_len;
	if (hdr)
		memcpy(flow->hdr, hdr, hdr_len);
	else
		memcpy(flow->hdr, llc_oui_ip, sizeof(llc_oui_ip));
	*arg = (unsigned long)flow;
	return 0;
err_out:
	if (excess)
		atm_tc_put(sch, (unsigned long)excess);
	sockfd_put(sock);
	return error;
}

static int atm_tc_delete(struct Qdisc *sch, unsigned long arg)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)arg;

	pr_debug("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
	if (list_empty(&flow->list))
		return -EINVAL;
	if (flow->filter_list || flow == &p->link)
		return -EBUSY;
	/*
	 * Reference count must be 2: one for "keepalive" (set at class
	 * creation), and one for the reference held when calling delete.
	 */
	if (flow->ref < 2) {
		printk(KERN_ERR "atm_tc_delete: flow->ref == %d\n", flow->ref);
		return -EINVAL;
	}
	if (flow->ref > 2)
		return -EBUSY;	/* catch references via excess, etc. */
	atm_tc_put(sch, arg);
	return 0;
}

static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;

	pr_debug("atm_tc_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker);
	if (walker->stop)
		return;
	list_for_each_entry(flow, &p->flows, list) {
		if (walker->count >= walker->skip &&
		    walker->fn(sch, (unsigned long)flow, walker) < 0) {
			walker->stop = 1;
			break;
		}
		walker->count++;
	}
}

static struct tcf_proto **atm_tc_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)cl;

	pr_debug("atm_tc_find_tcf(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
	return flow ? &flow->filter_list : &p->link.filter_list;
}

/* --------------------------- Qdisc operations ---------------------------- */

static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;
	struct tcf_result res;
	int result;
	int ret = NET_XMIT_POLICED;

	pr_debug("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
	result = TC_POLICE_OK;	/* be nice to gcc */
	flow = NULL;
	if (TC_H_MAJ(skb->priority) != sch->handle ||
	    !(flow = (struct atm_flow_data *)atm_tc_get(sch, skb->priority))) {
		list_for_each_entry(flow, &p->flows, list) {
			if (flow->filter_list) {
				result = tc_classify_compat(skb,
							    flow->filter_list,
							    &res);
				if (result < 0)
					continue;
				flow = (struct atm_flow_data *)res.class;
				if (!flow)
					flow = lookup_flow(sch, res.classid);
				goto done;
			}
		}
		flow = NULL;
done:
		;
	}
	if (!flow)
		flow = &p->link;
	else {
		if (flow->vcc)
			ATM_SKB(skb)->atm_options = flow->vcc->atm_options;
		/*@@@ looks good ... but it's not supposed to work :-) */
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			kfree_skb(skb);
			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			kfree_skb(skb);
			goto drop;
		case TC_POLICE_RECLASSIFY:
			if (flow->excess)
				flow = flow->excess;
			else
				ATM_SKB(skb)->atm_options |= ATM_ATMOPT_CLP;
			break;
		}
#endif
	}

	ret = qdisc_enqueue(skb, flow->q);
	if (ret != NET_XMIT_SUCCESS) {
drop: __maybe_unused
		if (net_xmit_drop_count(ret)) {
			sch->qstats.drops++;
			if (flow)
				flow->qstats.drops++;
		}
		return ret;
	}
	sch->bstats.bytes += qdisc_pkt_len(skb);
	sch->bstats.packets++;
	flow->bstats.bytes += qdisc_pkt_len(skb);
	flow->bstats.packets++;
	/*
	 * Okay, this may seem weird. We pretend we've dropped the packet if
	 * it goes via ATM. The reason for this is that the outer qdisc
	 * expects to be able to q->dequeue the packet later on if we return
	 * success at this place. Also, sch->q.qlen needs to reflect whether
	 * there is a packet eligible for dequeuing or not. Note that the
	 * statistics of the outer qdisc are necessarily wrong because of all
	 * this. There's currently no correct solution for this.
	 */
	if (flow == &p->link) {
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	tasklet_schedule(&p->task);
	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}

/*
 * Dequeue packets and send them over ATM. Note that we quite deliberately
 * avoid checking net_device's flow control here, simply because sch_atm
 * uses its own channels, which have nothing to do with any CLIP/LANE/or
 * non-ATM interfaces.
 */

static void sch_atm_dequeue(unsigned long data)
{
	struct Qdisc *sch = (struct Qdisc *)data;
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;
	struct sk_buff *skb;

	pr_debug("sch_atm_dequeue(sch %p,[qdisc %p])\n", sch, p);
	list_for_each_entry(flow, &p->flows, list) {
		if (flow == &p->link)
			continue;
		/*
		 * If traffic is properly shaped, this won't generate nasty
		 * little bursts. Otherwise, it may ... (but that's okay)
		 */
		while ((skb = flow->q->ops->peek(flow->q))) {
			if (!atm_may_send(flow->vcc, skb->truesize))
				break;

			skb = qdisc_dequeue_peeked(flow->q);
			if (unlikely(!skb))
				break;

			pr_debug("atm_tc_dequeue: sending on class %p\n", flow);
			/* remove any LL header somebody else has attached */
			skb_pull(skb, skb_network_offset(skb));
			if (skb_headroom(skb) < flow->hdr_len) {
				struct sk_buff *new;

				new = skb_realloc_headroom(skb, flow->hdr_len);
				dev_kfree_skb(skb);
				if (!new)
					continue;
				skb = new;
			}
			pr_debug("sch_atm_dequeue: ip %p, data %p\n",
				 skb_network_header(skb), skb->data);
			ATM_SKB(skb)->vcc = flow->vcc;
			memcpy(skb_push(skb, flow->hdr_len), flow->hdr,
			       flow->hdr_len);
			atomic_add(skb->truesize,
				   &sk_atm(flow->vcc)->sk_wmem_alloc);
			/* atm.atm_options are already set by atm_tc_enqueue */
			flow->vcc->send(flow->vcc, skb);
		}
	}
}

static struct sk_buff *atm_tc_dequeue(struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct sk_buff *skb;

	pr_debug("atm_tc_dequeue(sch %p,[qdisc %p])\n", sch, p);
	tasklet_schedule(&p->task);
	skb = qdisc_dequeue_peeked(p->link.q);
	if (skb)
		sch->q.qlen--;
	return skb;
}

static struct sk_buff *atm_tc_peek(struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);

	pr_debug("atm_tc_peek(sch %p,[qdisc %p])\n", sch, p);

	return p->link.q->ops->peek(p->link.q);
}

static unsigned int atm_tc_drop(struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;
	unsigned int len;

	pr_debug("atm_tc_drop(sch %p,[qdisc %p])\n", sch, p);
	list_for_each_entry(flow, &p->flows, list) {
		if (flow->q->ops->drop && (len = flow->q->ops->drop(flow->q)))
			return len;
	}
	return 0;
}

static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);

	pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
	INIT_LIST_HEAD(&p->flows);
	INIT_LIST_HEAD(&p->link.list);
	list_add(&p->link.list, &p->flows);
	p->link.q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
				      &pfifo_qdisc_ops, sch->handle);
	if (!p->link.q)
		p->link.q = &noop_qdisc;
	pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
	p->link.filter_list = NULL;
	p->link.vcc = NULL;
	p->link.sock = NULL;
	p->link.classid = sch->handle;
	p->link.ref = 1;
	tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch);
	return 0;
}

static void atm_tc_reset(struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;

	pr_debug("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p);
	list_for_each_entry(flow, &p->flows, list)
		qdisc_reset(flow->q);
	sch->q.qlen = 0;
}

static void atm_tc_destroy(struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow, *tmp;

	pr_debug("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p);
	list_for_each_entry(flow, &p->flows, list)
		tcf_destroy_chain(&flow->filter_list);

	list_for_each_entry_safe(flow, tmp, &p->flows, list) {
		if (flow->ref > 1)
			printk(KERN_ERR "atm_destroy: %p->ref = %d\n", flow,
			       flow->ref);
		atm_tc_put(sch, (unsigned long)flow);
	}
	tasklet_kill(&p->task);
}

static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)cl;
	struct nlattr *nest;

	pr_debug("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n",
		sch, p, flow, skb, tcm);
	if (list_empty(&flow->list))
		return -EINVAL;
	tcm->tcm_handle = flow->classid;
	tcm->tcm_info = flow->q->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	NLA_PUT(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr);
	if (flow->vcc) {
		struct sockaddr_atmpvc pvc;
		int state;

		pvc.sap_family = AF_ATMPVC;
		pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
		pvc.sap_addr.vpi = flow->vcc->vpi;
		pvc.sap_addr.vci = flow->vcc->vci;
		NLA_PUT(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc);
		state = ATM_VF2VS(flow->vcc->flags);
		NLA_PUT_U32(skb, TCA_ATM_STATE, state);
	}
	if (flow->excess)
		NLA_PUT_U32(skb, TCA_ATM_EXCESS, flow->classid);
	else {
		NLA_PUT_U32(skb, TCA_ATM_EXCESS, 0);
	}

	nla_nest_end(skb, nest);
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}
static int
atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
			struct gnet_dump *d)
{
	struct atm_flow_data *flow = (struct atm_flow_data *)arg;

	flow->qstats.qlen = flow->q->q.qlen;

	if (gnet_stats_copy_basic(d, &flow->bstats) < 0 ||
	    gnet_stats_copy_queue(d, &flow->qstats) < 0)
		return -1;

	return 0;
}

static int atm_tc_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	return 0;
}

static const struct Qdisc_class_ops atm_class_ops = {
	.graft		= atm_tc_graft,
	.leaf		= atm_tc_leaf,
	.get		= atm_tc_get,
	.put		= atm_tc_put,
	.change		= atm_tc_change,
	.delete		= atm_tc_delete,
	.walk		= atm_tc_walk,
	.tcf_chain	= atm_tc_find_tcf,
	.bind_tcf	= atm_tc_bind_filter,
	.unbind_tcf	= atm_tc_put,
	.dump		= atm_tc_dump_class,
	.dump_stats	= atm_tc_dump_class_stats,
};

static struct Qdisc_ops atm_qdisc_ops __read_mostly = {
	.cl_ops		= &atm_class_ops,
	.id		= "atm",
	.priv_size	= sizeof(struct atm_qdisc_data),
	.enqueue	= atm_tc_enqueue,
	.dequeue	= atm_tc_dequeue,
	.peek		= atm_tc_peek,
	.drop		= atm_tc_drop,
	.init		= atm_tc_init,
	.reset		= atm_tc_reset,
	.destroy	= atm_tc_destroy,
	.dump		= atm_tc_dump,
	.owner		= THIS_MODULE,
};

static int __init atm_init(void)
{
	return register_qdisc(&atm_qdisc_ops);
}

static void __exit atm_exit(void)
{
	unregister_qdisc(&atm_qdisc_ops);
}

module_init(atm_init)
module_exit(atm_exit)
MODULE_LICENSE("GPL");