Resurrect code mistakenly #ifdef'd out before.
[dragonfly.git] / sys / net / altq / altq_cbq.c
blob60cd22f4d9c469e59ef316eca20a4b2f2d7b8b61
1 /* $KAME: altq_cbq.c,v 1.20 2004/04/17 10:54:48 kjc Exp $ */
2 /* $DragonFly: src/sys/net/altq/altq_cbq.c,v 1.6 2006/12/22 23:44:55 swildner Exp $ */
4 /*
5 * Copyright (c) Sun Microsystems, Inc. 1993-1998 All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the SMCC Technology
21 * Development Group at Sun Microsystems, Inc.
23 * 4. The name of the Sun Microsystems, Inc nor may not be used to endorse or
24 * promote products derived from this software without specific prior
25 * written permission.
27 * SUN MICROSYSTEMS DOES NOT CLAIM MERCHANTABILITY OF THIS SOFTWARE OR THE
28 * SUITABILITY OF THIS SOFTWARE FOR ANY PARTICULAR PURPOSE. The software is
29 * provided "as is" without express or implied warranty of any kind.
 * These notices must be retained in any copies of any part of this software.
 */
34 #include "opt_altq.h"
35 #include "opt_inet.h"
36 #include "opt_inet6.h"
38 #ifdef ALTQ_CBQ /* cbq is enabled by ALTQ_CBQ option in opt_altq.h */
40 #include <sys/param.h>
41 #include <sys/malloc.h>
42 #include <sys/mbuf.h>
43 #include <sys/socket.h>
44 #include <sys/systm.h>
45 #include <sys/proc.h>
46 #include <sys/callout.h>
47 #include <sys/errno.h>
48 #include <sys/time.h>
49 #include <sys/thread.h>
51 #include <net/if.h>
52 #include <net/ifq_var.h>
53 #include <netinet/in.h>
55 #include <net/pf/pfvar.h>
56 #include <net/altq/altq.h>
57 #include <net/altq/altq_cbq.h>
59 #include <sys/thread2.h>
/*
 * Forward Declarations.
 */
64 static int cbq_class_destroy(cbq_state_t *, struct rm_class *);
65 static struct rm_class *clh_to_clp(cbq_state_t *, uint32_t);
66 static int cbq_clear_interface(cbq_state_t *);
67 static int cbq_request(struct ifaltq *, int, void *);
68 static int cbq_enqueue(struct ifaltq *, struct mbuf *,
69 struct altq_pktattr *);
70 static struct mbuf *cbq_dequeue(struct ifaltq *, struct mbuf *, int);
71 static void cbqrestart(struct ifaltq *);
72 static void get_class_stats(class_stats_t *, struct rm_class *);
73 static void cbq_purge(cbq_state_t *);
76 * int
77 * cbq_class_destroy(cbq_mod_state_t *, struct rm_class *) - This
78 * function destroys a given traffic class. Before destroying
79 * the class, all traffic for that class is released.
81 static int
82 cbq_class_destroy(cbq_state_t *cbqp, struct rm_class *cl)
84 int i;
86 /* delete the class */
87 rmc_delete_class(&cbqp->ifnp, cl);
90 * free the class handle
92 for (i = 0; i < CBQ_MAX_CLASSES; i++)
93 if (cbqp->cbq_class_tbl[i] == cl)
94 cbqp->cbq_class_tbl[i] = NULL;
96 if (cl == cbqp->ifnp.root_)
97 cbqp->ifnp.root_ = NULL;
98 if (cl == cbqp->ifnp.default_)
99 cbqp->ifnp.default_ = NULL;
100 return (0);
103 /* convert class handle to class pointer */
104 static struct rm_class *
105 clh_to_clp(cbq_state_t *cbqp, uint32_t chandle)
107 int i;
108 struct rm_class *cl;
110 if (chandle == 0)
111 return (NULL);
113 * first, try optimistically the slot matching the lower bits of
114 * the handle. if it fails, do the linear table search.
116 i = chandle % CBQ_MAX_CLASSES;
117 if ((cl = cbqp->cbq_class_tbl[i]) != NULL &&
118 cl->stats_.handle == chandle)
119 return (cl);
120 for (i = 0; i < CBQ_MAX_CLASSES; i++)
121 if ((cl = cbqp->cbq_class_tbl[i]) != NULL &&
122 cl->stats_.handle == chandle)
123 return (cl);
124 return (NULL);
127 static int
128 cbq_clear_interface(cbq_state_t *cbqp)
130 int again, i;
131 struct rm_class *cl;
133 /* clear out the classes now */
134 do {
135 again = 0;
136 for (i = 0; i < CBQ_MAX_CLASSES; i++) {
137 if ((cl = cbqp->cbq_class_tbl[i]) != NULL) {
138 if (is_a_parent_class(cl))
139 again++;
140 else {
141 cbq_class_destroy(cbqp, cl);
142 cbqp->cbq_class_tbl[i] = NULL;
143 if (cl == cbqp->ifnp.root_)
144 cbqp->ifnp.root_ = NULL;
145 if (cl == cbqp->ifnp.default_)
146 cbqp->ifnp.default_ = NULL;
150 } while (again);
152 return (0);
155 static int
156 cbq_request(struct ifaltq *ifq, int req, void *arg)
158 cbq_state_t *cbqp = (cbq_state_t *)ifq->altq_disc;
160 crit_enter();
161 switch (req) {
162 case ALTRQ_PURGE:
163 cbq_purge(cbqp);
164 break;
166 crit_exit();
167 return (0);
170 /* copy the stats info in rm_class to class_states_t */
171 static void
172 get_class_stats(class_stats_t *statsp, struct rm_class *cl)
174 statsp->xmit_cnt = cl->stats_.xmit_cnt;
175 statsp->drop_cnt = cl->stats_.drop_cnt;
176 statsp->over = cl->stats_.over;
177 statsp->borrows = cl->stats_.borrows;
178 statsp->overactions = cl->stats_.overactions;
179 statsp->delays = cl->stats_.delays;
181 statsp->depth = cl->depth_;
182 statsp->priority = cl->pri_;
183 statsp->maxidle = cl->maxidle_;
184 statsp->minidle = cl->minidle_;
185 statsp->offtime = cl->offtime_;
186 statsp->qmax = qlimit(cl->q_);
187 statsp->ns_per_byte = cl->ns_per_byte_;
188 statsp->wrr_allot = cl->w_allotment_;
189 statsp->qcnt = qlen(cl->q_);
190 statsp->avgidle = cl->avgidle_;
192 statsp->qtype = qtype(cl->q_);
193 #ifdef ALTQ_RED
194 if (q_is_red(cl->q_))
195 red_getstats(cl->red_, &statsp->red[0]);
196 #endif
197 #ifdef ALTQ_RIO
198 if (q_is_rio(cl->q_))
199 rio_getstats((rio_t *)cl->red_, &statsp->red[0]);
200 #endif
204 cbq_pfattach(struct pf_altq *a)
206 struct ifnet *ifp;
207 int error;
209 if ((ifp = ifunit(a->ifname)) == NULL || a->altq_disc == NULL)
210 return (EINVAL);
211 crit_enter();
212 error = altq_attach(&ifp->if_snd, ALTQT_CBQ, a->altq_disc,
213 cbq_enqueue, cbq_dequeue, cbq_request, NULL, NULL);
214 crit_exit();
215 return (error);
219 cbq_add_altq(struct pf_altq *a)
221 cbq_state_t *cbqp;
222 struct ifnet *ifp;
224 if ((ifp = ifunit(a->ifname)) == NULL)
225 return (EINVAL);
226 if (!ifq_is_ready(&ifp->if_snd))
227 return (ENODEV);
229 /* allocate and initialize cbq_state_t */
230 cbqp = kmalloc(sizeof(*cbqp), M_ALTQ, M_WAITOK | M_ZERO);
231 callout_init(&cbqp->cbq_callout);
232 cbqp->cbq_qlen = 0;
233 cbqp->ifnp.ifq_ = &ifp->if_snd; /* keep the ifq */
234 ifq_purge(&ifp->if_snd);
236 /* keep the state in pf_altq */
237 a->altq_disc = cbqp;
239 return (0);
243 cbq_remove_altq(struct pf_altq *a)
245 cbq_state_t *cbqp;
247 if ((cbqp = a->altq_disc) == NULL)
248 return (EINVAL);
249 a->altq_disc = NULL;
251 cbq_clear_interface(cbqp);
253 if (cbqp->ifnp.default_)
254 cbq_class_destroy(cbqp, cbqp->ifnp.default_);
255 if (cbqp->ifnp.root_)
256 cbq_class_destroy(cbqp, cbqp->ifnp.root_);
258 /* deallocate cbq_state_t */
259 kfree(cbqp, M_ALTQ);
261 return (0);
265 cbq_add_queue(struct pf_altq *a)
267 struct rm_class *borrow, *parent;
268 cbq_state_t *cbqp;
269 struct rm_class *cl;
270 struct cbq_opts *opts;
271 int i;
273 if ((cbqp = a->altq_disc) == NULL)
274 return (EINVAL);
275 if (a->qid == 0)
276 return (EINVAL);
279 * find a free slot in the class table. if the slot matching
280 * the lower bits of qid is free, use this slot. otherwise,
281 * use the first free slot.
283 i = a->qid % CBQ_MAX_CLASSES;
284 if (cbqp->cbq_class_tbl[i] != NULL) {
285 for (i = 0; i < CBQ_MAX_CLASSES; i++)
286 if (cbqp->cbq_class_tbl[i] == NULL)
287 break;
288 if (i == CBQ_MAX_CLASSES)
289 return (EINVAL);
292 opts = &a->pq_u.cbq_opts;
293 /* check parameters */
294 if (a->priority >= CBQ_MAXPRI)
295 return (EINVAL);
297 /* Get pointers to parent and borrow classes. */
298 parent = clh_to_clp(cbqp, a->parent_qid);
299 if (opts->flags & CBQCLF_BORROW)
300 borrow = parent;
301 else
302 borrow = NULL;
305 * A class must borrow from it's parent or it can not
306 * borrow at all. Hence, borrow can be null.
308 if (parent == NULL && (opts->flags & CBQCLF_ROOTCLASS) == 0) {
309 kprintf("cbq_add_queue: no parent class!\n");
310 return (EINVAL);
313 if ((borrow != parent) && (borrow != NULL)) {
314 kprintf("cbq_add_class: borrow class != parent\n");
315 return (EINVAL);
319 * check parameters
321 switch (opts->flags & CBQCLF_CLASSMASK) {
322 case CBQCLF_ROOTCLASS:
323 if (parent != NULL)
324 return (EINVAL);
325 if (cbqp->ifnp.root_)
326 return (EINVAL);
327 break;
328 case CBQCLF_DEFCLASS:
329 if (cbqp->ifnp.default_)
330 return (EINVAL);
331 break;
332 case 0:
333 if (a->qid == 0)
334 return (EINVAL);
335 break;
336 default:
337 /* more than two flags bits set */
338 return (EINVAL);
342 * create a class. if this is a root class, initialize the
343 * interface.
345 if ((opts->flags & CBQCLF_CLASSMASK) == CBQCLF_ROOTCLASS) {
346 rmc_init(cbqp->ifnp.ifq_, &cbqp->ifnp, opts->ns_per_byte,
347 cbqrestart, a->qlimit, RM_MAXQUEUED,
348 opts->maxidle, opts->minidle, opts->offtime,
349 opts->flags);
350 cl = cbqp->ifnp.root_;
351 } else {
352 cl = rmc_newclass(a->priority,
353 &cbqp->ifnp, opts->ns_per_byte,
354 rmc_delay_action, a->qlimit, parent, borrow,
355 opts->maxidle, opts->minidle, opts->offtime,
356 opts->pktsize, opts->flags);
358 if (cl == NULL)
359 return (ENOMEM);
361 /* return handle to user space. */
362 cl->stats_.handle = a->qid;
363 cl->stats_.depth = cl->depth_;
365 /* save the allocated class */
366 cbqp->cbq_class_tbl[i] = cl;
368 if ((opts->flags & CBQCLF_CLASSMASK) == CBQCLF_DEFCLASS)
369 cbqp->ifnp.default_ = cl;
371 return (0);
375 cbq_remove_queue(struct pf_altq *a)
377 struct rm_class *cl;
378 cbq_state_t *cbqp;
379 int i;
381 if ((cbqp = a->altq_disc) == NULL)
382 return (EINVAL);
384 if ((cl = clh_to_clp(cbqp, a->qid)) == NULL)
385 return (EINVAL);
387 /* if we are a parent class, then return an error. */
388 if (is_a_parent_class(cl))
389 return (EINVAL);
391 /* delete the class */
392 rmc_delete_class(&cbqp->ifnp, cl);
395 * free the class handle
397 for (i = 0; i < CBQ_MAX_CLASSES; i++)
398 if (cbqp->cbq_class_tbl[i] == cl) {
399 cbqp->cbq_class_tbl[i] = NULL;
400 if (cl == cbqp->ifnp.root_)
401 cbqp->ifnp.root_ = NULL;
402 if (cl == cbqp->ifnp.default_)
403 cbqp->ifnp.default_ = NULL;
404 break;
407 return (0);
411 cbq_getqstats(struct pf_altq *a, void *ubuf, int *nbytes)
413 cbq_state_t *cbqp;
414 struct rm_class *cl;
415 class_stats_t stats;
416 int error = 0;
418 if ((cbqp = altq_lookup(a->ifname, ALTQT_CBQ)) == NULL)
419 return (EBADF);
421 if ((cl = clh_to_clp(cbqp, a->qid)) == NULL)
422 return (EINVAL);
424 if (*nbytes < sizeof(stats))
425 return (EINVAL);
427 get_class_stats(&stats, cl);
429 if ((error = copyout((caddr_t)&stats, ubuf, sizeof(stats))) != 0)
430 return (error);
431 *nbytes = sizeof(stats);
432 return (0);
436 * int
437 * cbq_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pattr)
438 * - Queue data packets.
440 * cbq_enqueue is set to ifp->if_altqenqueue and called by an upper
441 * layer (e.g. ether_output). cbq_enqueue queues the given packet
442 * to the cbq, then invokes the driver's start routine.
444 * Returns: 0 if the queueing is successful.
445 * ENOBUFS if a packet dropping occurred as a result of
446 * the queueing.
449 static int
450 cbq_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
452 cbq_state_t *cbqp = (cbq_state_t *)ifq->altq_disc;
453 struct rm_class *cl;
454 int len;
456 /* grab class set by classifier */
457 if ((m->m_flags & M_PKTHDR) == 0) {
458 /* should not happen */
459 if_printf(ifq->altq_ifp, "altq: packet does not have pkthdr\n");
460 m_freem(m);
461 return (ENOBUFS);
463 if (m->m_pkthdr.fw_flags & ALTQ_MBUF_TAGGED)
464 cl = clh_to_clp(cbqp, m->m_pkthdr.altq_qid);
465 else
466 cl = NULL;
467 if (cl == NULL) {
468 cl = cbqp->ifnp.default_;
469 if (cl == NULL) {
470 m_freem(m);
471 return (ENOBUFS);
474 crit_enter();
475 cl->pktattr_ = NULL;
476 len = m_pktlen(m);
477 if (rmc_queue_packet(cl, m) != 0) {
478 /* drop occurred. some mbuf was freed in rmc_queue_packet. */
479 PKTCNTR_ADD(&cl->stats_.drop_cnt, len);
480 crit_exit();
481 return (ENOBUFS);
484 /* successfully queued. */
485 ++cbqp->cbq_qlen;
486 ++ifq->ifq_len;
487 crit_exit();
488 return (0);
491 static struct mbuf *
492 cbq_dequeue(struct ifaltq *ifq, struct mbuf *mpolled, int op)
494 cbq_state_t *cbqp = (cbq_state_t *)ifq->altq_disc;
495 struct mbuf *m;
497 crit_enter();
498 m = rmc_dequeue_next(&cbqp->ifnp, op);
500 if (m && op == ALTDQ_REMOVE) {
501 --cbqp->cbq_qlen; /* decrement # of packets in cbq */
502 --ifq->ifq_len;
504 /* Update the class. */
505 rmc_update_class_util(&cbqp->ifnp);
507 crit_exit();
508 KKASSERT(mpolled == NULL || mpolled == m);
509 return (m);
513 * void
514 * cbqrestart(queue_t *) - Restart sending of data.
515 * called from rmc_restart in a critical section via timeout after waking up
516 * a suspended class.
517 * Returns: NONE
520 static void
521 cbqrestart(struct ifaltq *ifq)
523 cbq_state_t *cbqp;
524 struct ifnet *ifp;
526 if (!ifq_is_enabled(ifq))
527 /* cbq must have been detached */
528 return;
530 if ((cbqp = (cbq_state_t *)ifq->altq_disc) == NULL)
531 /* should not happen */
532 return;
534 ifp = ifq->altq_ifp;
535 if (ifp->if_start &&
536 cbqp->cbq_qlen > 0 && (ifp->if_flags & IFF_OACTIVE) == 0)
537 (*ifp->if_start)(ifp);
540 static void
541 cbq_purge(cbq_state_t *cbqp)
543 struct rm_class *cl;
544 int i;
545 for (i = 0; i < CBQ_MAX_CLASSES; i++) {
546 if ((cl = cbqp->cbq_class_tbl[i]) != NULL)
547 rmc_dropall(cl);
549 if (ifq_is_enabled(cbqp->ifnp.ifq_))
550 cbqp->ifnp.ifq_->ifq_len = 0;
553 #endif /* ALTQ_CBQ */