/*-
 * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _NET_IFQ_VAR_H_
#define _NET_IFQ_VAR_H_

#ifndef _KERNEL
#error "This file should not be included by userland programs."
#endif

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_SERIALIZE_H_
#include <sys/serialize.h>
#endif
#ifndef _SYS_MBUF_H_
#include <sys/mbuf.h>
#endif
#ifndef _NET_IF_VAR_H_
#include <net/if_var.h>
#endif
#ifndef _NET_ALTQ_IF_ALTQ_H_
#include <net/altq/if_altq.h>
#endif

#define ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq) \
	KASSERT(ifsq_get_ifp((ifsq)) == (ifp) && \
	    ifsq_get_index((ifsq)) == ALTQ_SUBQ_INDEX_DEFAULT, \
	    ("not ifp's default subqueue"));

struct ifaltq;
struct ifaltq_subque;

/*
 * Subqueue watchdog
 */
typedef void	(*ifsq_watchdog_t)(struct ifaltq_subque *);

struct ifsubq_watchdog {
	struct callout	wd_callout;
	int		wd_timer;
	struct ifaltq_subque *wd_subq;
	ifsq_watchdog_t	wd_watchdog;
};

/*
 * Support for "classic" ALTQ interfaces.
 */
int		ifsq_classic_enqueue(struct ifaltq_subque *, struct mbuf *,
		    struct altq_pktattr *);
struct mbuf	*ifsq_classic_dequeue(struct ifaltq_subque *, int);
int		ifsq_classic_request(struct ifaltq_subque *, int, void *);
void		ifq_set_classic(struct ifaltq *);

void		ifq_set_maxlen(struct ifaltq *, int);
void		ifq_set_methods(struct ifaltq *, altq_mapsubq_t,
		    ifsq_enqueue_t, ifsq_dequeue_t, ifsq_request_t);
int		ifq_mapsubq_default(struct ifaltq *, int);
int		ifq_mapsubq_mask(struct ifaltq *, int);
int		ifq_mapsubq_modulo(struct ifaltq *, int);
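
/*
 * Illustrative sketch, not part of the original header: assuming the
 * mask mapper reduces a cpuid with a simple AND against the mask
 * installed via ifq_set_subq_mask(), a power-of-2 subqueue count maps
 * cpus onto subqueues like so:
 *
 *	ifq_set_subq_cnt(ifq, 4);
 *	ifq_set_subq_mask(ifq, 4 - 1);
 *	idx = ifq_mapsubq_mask(ifq, mycpuid);	(effectively mycpuid & 3)
 *
 * ifq_mapsubq_modulo() is the non-power-of-2 counterpart, keyed by the
 * divisor installed through ifq_set_subq_divisor().
 */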

void		ifsq_devstart(struct ifaltq_subque *ifsq);
void		ifsq_devstart_sched(struct ifaltq_subque *ifsq);

void		ifsq_watchdog_init(struct ifsubq_watchdog *,
		    struct ifaltq_subque *, ifsq_watchdog_t);
void		ifsq_watchdog_start(struct ifsubq_watchdog *);
void		ifsq_watchdog_stop(struct ifsubq_watchdog *);
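
/*
 * Hypothetical usage sketch; foo_watchdog(), foo_init(), foo_stop() and
 * the softc fields are illustrative only.  A driver arms wd_timer from
 * its transmit path and the callback fires if the timer counts down to
 * zero without a transmit completion:
 *
 *	static void
 *	foo_watchdog(struct ifaltq_subque *ifsq)
 *	{
 *		... reset the hardware, then restart transmission ...
 *	}
 *
 *	ifsq_watchdog_init(&sc->foo_wd,
 *	    ifq_get_subq_default(&ifp->if_snd), foo_watchdog);
 *	ifsq_watchdog_start(&sc->foo_wd);	(from foo_init())
 *	ifsq_watchdog_stop(&sc->foo_wd);	(from foo_stop())
 */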

/*
 * Dispatch a packet to an interface.
 */
int		ifq_dispatch(struct ifnet *, struct mbuf *,
		    struct altq_pktattr *);
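
/*
 * Illustrative output-path sketch (not part of the original header):
 * the caller classifies the packet for ALTQ first, then lets
 * ifq_dispatch() take care of subqueue selection, enqueue and the
 * (possibly scheduled) interface start:
 *
 *	struct altq_pktattr pktattr;
 *
 *	ifq_classify(&ifp->if_snd, m, AF_INET, &pktattr);
 *	error = ifq_dispatch(ifp, m, &pktattr);
 */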

#ifdef ALTQ

static __inline int
ifq_is_enabled(struct ifaltq *_ifq)
{
	return(_ifq->altq_flags & ALTQF_ENABLED);
}

static __inline int
ifq_is_attached(struct ifaltq *_ifq)
{
	return(_ifq->altq_disc != NULL);
}

#else	/* !ALTQ */

static __inline int
ifq_is_enabled(struct ifaltq *_ifq)
{
	return(0);
}

static __inline int
ifq_is_attached(struct ifaltq *_ifq)
{
	return(0);
}

#endif	/* ALTQ */

static __inline int
ifq_is_ready(struct ifaltq *_ifq)
{
	return(_ifq->altq_flags & ALTQF_READY);
}

static __inline void
ifq_set_ready(struct ifaltq *_ifq)
{
	_ifq->altq_flags |= ALTQF_READY;
}

/*
 * Subqueue lock must be held
 */
static __inline int
ifsq_enqueue_locked(struct ifaltq_subque *_ifsq, struct mbuf *_m,
    struct altq_pktattr *_pa)
{
#ifdef ALTQ
	if (!ifq_is_enabled(_ifsq->ifsq_altq))
		return ifsq_classic_enqueue(_ifsq, _m, _pa);
	else
#endif
	return _ifsq->ifsq_enqueue(_ifsq, _m, _pa);
}

static __inline int
ifsq_enqueue(struct ifaltq_subque *_ifsq, struct mbuf *_m,
    struct altq_pktattr *_pa)
{
	int _error;

	ALTQ_SQ_LOCK(_ifsq);
	_error = ifsq_enqueue_locked(_ifsq, _m, _pa);
	ALTQ_SQ_UNLOCK(_ifsq);
	return _error;
}

static __inline struct mbuf *
ifsq_dequeue(struct ifaltq_subque *_ifsq)
{
	struct mbuf *_m;

	ALTQ_SQ_LOCK(_ifsq);
	if (_ifsq->ifsq_prepended != NULL) {
		_m = _ifsq->ifsq_prepended;
		_ifsq->ifsq_prepended = NULL;
		ALTQ_SQ_CNTR_DEC(_ifsq, _m->m_pkthdr.len);
		ALTQ_SQ_UNLOCK(_ifsq);
		return _m;
	}

#ifdef ALTQ
	if (_ifsq->ifsq_altq->altq_tbr != NULL)
		_m = tbr_dequeue(_ifsq, ALTDQ_REMOVE);
	else if (!ifq_is_enabled(_ifsq->ifsq_altq))
		_m = ifsq_classic_dequeue(_ifsq, ALTDQ_REMOVE);
	else
#endif
	_m = _ifsq->ifsq_dequeue(_ifsq, ALTDQ_REMOVE);
	ALTQ_SQ_UNLOCK(_ifsq);
	return _m;
}

/*
 * Subqueue lock must be held
 */
static __inline struct mbuf *
ifsq_poll_locked(struct ifaltq_subque *_ifsq)
{
	if (_ifsq->ifsq_prepended != NULL)
		return _ifsq->ifsq_prepended;

#ifdef ALTQ
	if (_ifsq->ifsq_altq->altq_tbr != NULL)
		return tbr_dequeue(_ifsq, ALTDQ_POLL);
	else if (!ifq_is_enabled(_ifsq->ifsq_altq))
		return ifsq_classic_dequeue(_ifsq, ALTDQ_POLL);
	else
#endif
	return _ifsq->ifsq_dequeue(_ifsq, ALTDQ_POLL);
}

static __inline struct mbuf *
ifsq_poll(struct ifaltq_subque *_ifsq)
{
	struct mbuf *_m;

	ALTQ_SQ_LOCK(_ifsq);
	_m = ifsq_poll_locked(_ifsq);
	ALTQ_SQ_UNLOCK(_ifsq);
	return _m;
}

static __inline int
ifsq_poll_pktlen(struct ifaltq_subque *_ifsq)
{
	struct mbuf *_m;
	int _len = 0;

	ALTQ_SQ_LOCK(_ifsq);

	_m = ifsq_poll_locked(_ifsq);
	if (_m != NULL) {
		M_ASSERTPKTHDR(_m);
		_len = _m->m_pkthdr.len;
	}

	ALTQ_SQ_UNLOCK(_ifsq);

	return _len;
}
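
/*
 * Illustrative driver pattern (FOO_TXD_SPARE() is a hypothetical
 * helper): peek at the head packet's length first, so a packet that
 * no longer fits into the TX ring is left queued instead of being
 * dequeued and then stalled:
 *
 *	if (ifsq_poll_pktlen(ifsq) > FOO_TXD_SPARE(sc)) {
 *		ifsq_set_oactive(ifsq);
 *		return;
 *	}
 *	m = ifsq_dequeue(ifsq);
 */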

/*
 * Subqueue lock must be held
 */
static __inline void
ifsq_purge_locked(struct ifaltq_subque *_ifsq)
{
	if (_ifsq->ifsq_prepended != NULL) {
		ALTQ_SQ_CNTR_DEC(_ifsq, _ifsq->ifsq_prepended->m_pkthdr.len);
		m_freem(_ifsq->ifsq_prepended);
		_ifsq->ifsq_prepended = NULL;
	}

#ifdef ALTQ
	if (!ifq_is_enabled(_ifsq->ifsq_altq))
		ifsq_classic_request(_ifsq, ALTRQ_PURGE, NULL);
	else
#endif
	_ifsq->ifsq_request(_ifsq, ALTRQ_PURGE, NULL);
}

static __inline void
ifsq_purge(struct ifaltq_subque *_ifsq)
{
	ALTQ_SQ_LOCK(_ifsq);
	ifsq_purge_locked(_ifsq);
	ALTQ_SQ_UNLOCK(_ifsq);
}
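
/*
 * Illustrative sketch: a driver stop routine would typically drain
 * the subqueue and, with the hardware serializer held, clear the
 * flow-control state:
 *
 *	ifsq_purge(ifsq);
 *	ifsq_clr_oactive(ifsq);
 */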

static __inline void
ifq_lock_all(struct ifaltq *_ifq)
{
	int _q;

	for (_q = 0; _q < _ifq->altq_subq_cnt; ++_q)
		ALTQ_SQ_LOCK(&_ifq->altq_subq[_q]);
}

static __inline void
ifq_unlock_all(struct ifaltq *_ifq)
{
	int _q;

	for (_q = _ifq->altq_subq_cnt - 1; _q >= 0; --_q)
		ALTQ_SQ_UNLOCK(&_ifq->altq_subq[_q]);
}

/*
 * All of the subqueue locks must be held
 */
static __inline void
ifq_purge_all_locked(struct ifaltq *_ifq)
{
	int _q;

	for (_q = 0; _q < _ifq->altq_subq_cnt; ++_q)
		ifsq_purge_locked(&_ifq->altq_subq[_q]);
}

static __inline void
ifq_purge_all(struct ifaltq *_ifq)
{
	ifq_lock_all(_ifq);
	ifq_purge_all_locked(_ifq);
	ifq_unlock_all(_ifq);
}

static __inline void
ifq_classify(struct ifaltq *_ifq, struct mbuf *_m, uint8_t _af,
    struct altq_pktattr *_pa)
{
#ifdef ALTQ
	if (ifq_is_enabled(_ifq)) {
		_pa->pattr_af = _af;
		_pa->pattr_hdr = mtod(_m, caddr_t);
		if (ifq_is_enabled(_ifq) &&
		    (_ifq->altq_flags & ALTQF_CLASSIFY)) {
			/* XXX default subqueue */
			struct ifaltq_subque *_ifsq =
			    &_ifq->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT];

			ALTQ_SQ_LOCK(_ifsq);
			if (ifq_is_enabled(_ifq) &&
			    (_ifq->altq_flags & ALTQF_CLASSIFY))
				_ifq->altq_classify(_ifq, _m, _pa);
			ALTQ_SQ_UNLOCK(_ifsq);
		}
	}
#endif
}

static __inline void
ifsq_prepend(struct ifaltq_subque *_ifsq, struct mbuf *_m)
{
	ALTQ_SQ_LOCK(_ifsq);
	KASSERT(_ifsq->ifsq_prepended == NULL, ("pending prepended mbuf"));
	_ifsq->ifsq_prepended = _m;
	ALTQ_SQ_CNTR_INC(_ifsq, _m->m_pkthdr.len);
	ALTQ_SQ_UNLOCK(_ifsq);
}
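
/*
 * Illustrative pattern (foo_encap() is hypothetical and assumed not
 * to free the mbuf on failure): a transmit path that discovers only
 * after dequeueing that the packet cannot be sent right now may push
 * it back, keeping it at the head of the subqueue:
 *
 *	m = ifsq_dequeue(ifsq);
 *	if (m != NULL && foo_encap(sc, m) != 0) {
 *		ifsq_prepend(ifsq, m);
 *		ifsq_set_oactive(ifsq);
 *	}
 */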

/*
 * Subqueue hardware serializer must be held
 */
static __inline void
ifsq_set_oactive(struct ifaltq_subque *_ifsq)
{
	_ifsq->ifsq_hw_oactive = 1;
}

/*
 * Subqueue hardware serializer must be held
 */
static __inline void
ifsq_clr_oactive(struct ifaltq_subque *_ifsq)
{
	_ifsq->ifsq_hw_oactive = 0;
}

/*
 * Subqueue hardware serializer must be held
 */
static __inline int
ifsq_is_oactive(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_hw_oactive;
}
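
/*
 * Illustrative if_start skeleton; foo_start() and foo_encap() are
 * hypothetical, and foo_encap() is assumed to consume the mbuf on
 * both success and failure.  The subqueue hardware serializer is
 * held when if_start is invoked, so the oactive accessors above may
 * be used directly:
 *
 *	static void
 *	foo_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
 *	{
 *		struct foo_softc *sc = ifp->if_softc;
 *		struct mbuf *m;
 *
 *		ASSERT_ALTQ_SQ_SERIALIZED_HW(ifsq);
 *		if (ifsq_is_oactive(ifsq))
 *			return;
 *		while ((m = ifsq_dequeue(ifsq)) != NULL) {
 *			if (foo_encap(sc, m)) {
 *				ifsq_set_oactive(ifsq);
 *				break;
 *			}
 *		}
 *	}
 */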

/*
 * Hand a packet to the interface's default subqueue.
 *
 * The default subqueue hardware serializer must be held.  If the
 * subqueue hardware serializer is not held yet, ifq_dispatch()
 * should be used to get better performance.
 */
static __inline int
ifq_handoff(struct ifnet *_ifp, struct mbuf *_m, struct altq_pktattr *_pa)
{
	struct ifaltq_subque *_ifsq;
	int _error;
	int _qid = ALTQ_SUBQ_INDEX_DEFAULT; /* XXX default subqueue */

	_ifsq = &_ifp->if_snd.altq_subq[_qid];

	ASSERT_ALTQ_SQ_SERIALIZED_HW(_ifsq);
	_error = ifsq_enqueue(_ifsq, _m, _pa);
	if (_error == 0) {
		IFNET_STAT_INC(_ifp, obytes, _m->m_pkthdr.len);
		if (_m->m_flags & M_MCAST)
			IFNET_STAT_INC(_ifp, omcasts, 1);
		if (!ifsq_is_oactive(_ifsq))
			(*_ifp->if_start)(_ifp, _ifsq);
	} else {
		IFNET_STAT_INC(_ifp, oqdrops, 1);
	}
	return(_error);
}
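
/*
 * Illustrative sketch (NULL pktattr assumes no ALTQ classification is
 * wanted): from a context that already holds the default subqueue's
 * hardware serializer, e.g. an interrupt handler looping a packet
 * back to its own interface:
 *
 *	ASSERT_ALTQ_SQ_SERIALIZED_HW(ifq_get_subq_default(&ifp->if_snd));
 *	error = ifq_handoff(ifp, m, NULL);
 */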

static __inline int
ifsq_is_empty(const struct ifaltq_subque *_ifsq)
{
	return(_ifsq->ifsq_len == 0);
}

/*
 * Subqueue lock must be held
 */
static __inline int
ifsq_data_ready(struct ifaltq_subque *_ifsq)
{
#ifdef ALTQ
	if (_ifsq->ifsq_altq->altq_tbr != NULL)
		return (ifsq_poll_locked(_ifsq) != NULL);
	else
#endif
	return !ifsq_is_empty(_ifsq);
}

/*
 * Subqueue lock must be held
 */
static __inline int
ifsq_is_started(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_started;
}

/*
 * Subqueue lock must be held
 */
static __inline void
ifsq_set_started(struct ifaltq_subque *_ifsq)
{
	_ifsq->ifsq_started = 1;
}

/*
 * Subqueue lock must be held
 */
static __inline void
ifsq_clr_started(struct ifaltq_subque *_ifsq)
{
	_ifsq->ifsq_started = 0;
}

static __inline struct ifsubq_stage *
ifsq_get_stage(struct ifaltq_subque *_ifsq, int _cpuid)
{
	return &_ifsq->ifsq_stage[_cpuid];
}

static __inline int
ifsq_get_cpuid(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_cpuid;
}

static __inline void
ifsq_set_cpuid(struct ifaltq_subque *_ifsq, int _cpuid)
{
	KASSERT(_cpuid >= 0 && _cpuid < ncpus,
	    ("invalid ifsq_cpuid %d", _cpuid));
	_ifsq->ifsq_cpuid = _cpuid;
}

static __inline struct lwkt_msg *
ifsq_get_ifstart_lmsg(struct ifaltq_subque *_ifsq, int _cpuid)
{
	return &_ifsq->ifsq_ifstart_nmsg[_cpuid].lmsg;
}

static __inline int
ifsq_get_index(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_index;
}

static __inline void
ifsq_set_priv(struct ifaltq_subque *_ifsq, void *_priv)
{
	_ifsq->ifsq_hw_priv = _priv;
}

static __inline void *
ifsq_get_priv(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_hw_priv;
}

static __inline struct ifnet *
ifsq_get_ifp(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_ifp;
}

static __inline void
ifsq_set_hw_serialize(struct ifaltq_subque *_ifsq,
    struct lwkt_serialize *_hwslz)
{
	KASSERT(_hwslz != NULL, ("NULL hw serialize"));
	KASSERT(_ifsq->ifsq_hw_serialize == NULL,
	    ("hw serialize has been setup"));
	_ifsq->ifsq_hw_serialize = _hwslz;
}

static __inline void
ifsq_serialize_hw(struct ifaltq_subque *_ifsq)
{
	lwkt_serialize_enter(_ifsq->ifsq_hw_serialize);
}

static __inline void
ifsq_deserialize_hw(struct ifaltq_subque *_ifsq)
{
	lwkt_serialize_exit(_ifsq->ifsq_hw_serialize);
}

static __inline int
ifsq_tryserialize_hw(struct ifaltq_subque *_ifsq)
{
	return lwkt_serialize_try(_ifsq->ifsq_hw_serialize);
}
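
/*
 * Illustrative attach-time wiring for one subqueue; the "foo" softc
 * and ring fields are hypothetical.  Each subqueue is pointed at the
 * serializer protecting its TX ring and pinned to the cpu servicing
 * that ring's interrupt:
 *
 *	ifsq = ifq_get_subq(&ifp->if_snd, i);
 *	ifsq_set_priv(ifsq, &sc->foo_tx_ring[i]);
 *	ifsq_set_hw_serialize(ifsq, &sc->foo_tx_ring[i].tx_slz);
 *	ifsq_set_cpuid(ifsq, sc->foo_tx_ring[i].tx_intr_cpu);
 */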

static __inline struct ifaltq_subque *
ifq_get_subq_default(const struct ifaltq *_ifq)
{
	return &_ifq->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT];
}

static __inline struct ifaltq_subque *
ifq_get_subq(const struct ifaltq *_ifq, int _idx)
{
	KASSERT(_idx >= 0 && _idx < _ifq->altq_subq_cnt,
	    ("invalid qid %d", _idx));
	return &_ifq->altq_subq[_idx];
}

static __inline struct ifaltq_subque *
ifq_map_subq(struct ifaltq *_ifq, int _cpuid)
{
	int _idx = _ifq->altq_mapsubq(_ifq, _cpuid);
	return ifq_get_subq(_ifq, _idx);
}

static __inline void
ifq_set_subq_cnt(struct ifaltq *_ifq, int _cnt)
{
	_ifq->altq_subq_cnt = _cnt;
}

static __inline void
ifq_set_subq_mask(struct ifaltq *_ifq, uint32_t _mask)
{
	KASSERT(((_mask + 1) & _mask) == 0, ("invalid mask %08x", _mask));
	_ifq->altq_subq_mappriv = _mask;
}

static __inline void
ifq_set_subq_divisor(struct ifaltq *_ifq, uint32_t _divisor)
{
	KASSERT(_divisor > 0, ("invalid divisor %u", _divisor));
	KASSERT(_divisor <= _ifq->altq_subq_cnt,
	    ("invalid divisor %u, max %d", _divisor, _ifq->altq_subq_cnt));
	_ifq->altq_subq_mappriv = _divisor;
}
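
/*
 * Illustrative multi-TX-queue setup sketch; FOO_NTXQ is hypothetical
 * and assumed to be a power of 2.  The driver publishes its subqueue
 * count, keeps the classic enqueue/dequeue/request methods, and maps
 * cpus onto subqueues with the mask mapper:
 *
 *	ifq_set_subq_cnt(&ifp->if_snd, FOO_NTXQ);
 *	ifq_set_subq_mask(&ifp->if_snd, FOO_NTXQ - 1);
 *	ifq_set_methods(&ifp->if_snd, ifq_mapsubq_mask,
 *	    ifsq_classic_enqueue, ifsq_classic_dequeue,
 *	    ifsq_classic_request);
 */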

/* COMPAT */
static __inline int
ifq_is_oactive(const struct ifaltq *_ifq)
{
	return ifsq_is_oactive(ifq_get_subq_default(_ifq));
}

/* COMPAT */
static __inline void
ifq_set_oactive(struct ifaltq *_ifq)
{
	ifsq_set_oactive(ifq_get_subq_default(_ifq));
}

/* COMPAT */
static __inline void
ifq_clr_oactive(struct ifaltq *_ifq)
{
	ifsq_clr_oactive(ifq_get_subq_default(_ifq));
}

/* COMPAT */
static __inline int
ifq_is_empty(struct ifaltq *_ifq)
{
	return ifsq_is_empty(ifq_get_subq_default(_ifq));
}

/* COMPAT */
static __inline void
ifq_purge(struct ifaltq *_ifq)
{
	ifsq_purge(ifq_get_subq_default(_ifq));
}

/* COMPAT */
static __inline struct mbuf *
ifq_dequeue(struct ifaltq *_ifq)
{
	return ifsq_dequeue(ifq_get_subq_default(_ifq));
}

/* COMPAT */
static __inline void
ifq_prepend(struct ifaltq *_ifq, struct mbuf *_m)
{
	ifsq_prepend(ifq_get_subq_default(_ifq), _m);
}

/* COMPAT */
static __inline void
ifq_set_cpuid(struct ifaltq *_ifq, int _cpuid)
{
	KASSERT(_ifq->altq_subq_cnt == 1,
	    ("invalid subqueue count %d", _ifq->altq_subq_cnt));
	ifsq_set_cpuid(ifq_get_subq_default(_ifq), _cpuid);
}

/* COMPAT */
static __inline void
ifq_set_hw_serialize(struct ifaltq *_ifq, struct lwkt_serialize *_hwslz)
{
	KASSERT(_ifq->altq_subq_cnt == 1,
	    ("invalid subqueue count %d", _ifq->altq_subq_cnt));
	ifsq_set_hw_serialize(ifq_get_subq_default(_ifq), _hwslz);
}
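
/*
 * Illustrative sketch: a legacy single-TX-queue driver can stay
 * subqueue agnostic and use the COMPAT wrappers against if_snd
 * directly (FOO_TXD_CNT, foo_intr_cpu and foo_slz are hypothetical):
 *
 *	ifq_set_maxlen(&ifp->if_snd, FOO_TXD_CNT - 1);
 *	ifq_set_ready(&ifp->if_snd);
 *	...
 *	ifq_set_cpuid(&ifp->if_snd, sc->foo_intr_cpu);
 *	ifq_set_hw_serialize(&ifp->if_snd, &sc->foo_slz);
 */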

#endif	/* _NET_IFQ_VAR_H_ */