2 * Copyright (c) 2005 The DragonFly Project. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in
12 * the documentation and/or other materials provided with the
14 * 3. Neither the name of The DragonFly Project nor the names of its
15 * contributors may be used to endorse or promote products derived
16 * from this software without specific, prior written permission.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
21 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
22 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
24 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
25 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
26 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
28 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #ifndef _NET_IFQ_VAR_H_
33 #define _NET_IFQ_VAR_H_
36 #error "This file should not be included by userland programs."
40 #include <sys/systm.h>
42 #ifndef _SYS_THREAD2_H_
43 #include <sys/thread2.h>
45 #ifndef _SYS_SERIALIZE_H_
46 #include <sys/serialize.h>
51 #ifndef _NET_IF_VAR_H_
52 #include <net/if_var.h>
54 #ifndef _NET_ALTQ_IF_ALTQ_H_
55 #include <net/altq/if_altq.h>
/*
 * Assert that the given subqueue is the default subqueue of its interface.
 */
#define ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq) \
        KASSERT(ifsq_get_ifp((ifsq)) == (ifp) && \
            ifsq_get_index((ifsq)) == ALTQ_SUBQ_INDEX_DEFAULT, \
            ("not ifp's default subqueue"));
/* Watchdog timeout callback; invoked with the watched subqueue. */
typedef void    (*ifsq_watchdog_t)(struct ifaltq_subque *);
71 struct ifsubq_watchdog
{
72 struct callout wd_callout
;
74 struct ifaltq_subque
*wd_subq
;
75 ifsq_watchdog_t wd_watchdog
;
79 * Support for "classic" ALTQ interfaces.
81 int ifsq_classic_enqueue(struct ifaltq_subque
*, struct mbuf
*,
82 struct altq_pktattr
*);
83 struct mbuf
*ifsq_classic_dequeue(struct ifaltq_subque
*, int);
84 int ifsq_classic_request(struct ifaltq_subque
*, int, void *);
85 void ifq_set_classic(struct ifaltq
*);
87 void ifq_set_maxlen(struct ifaltq
*, int);
88 void ifq_set_methods(struct ifaltq
*, altq_mapsubq_t
,
89 ifsq_enqueue_t
, ifsq_dequeue_t
, ifsq_request_t
);
90 int ifq_mapsubq_default(struct ifaltq
*, int);
91 int ifq_mapsubq_mask(struct ifaltq
*, int);
92 int ifq_mapsubq_modulo(struct ifaltq
*, int);
94 void ifsq_devstart(struct ifaltq_subque
*ifsq
);
95 void ifsq_devstart_sched(struct ifaltq_subque
*ifsq
);
97 void ifsq_watchdog_init(struct ifsubq_watchdog
*,
98 struct ifaltq_subque
*, ifsq_watchdog_t
);
99 void ifsq_watchdog_start(struct ifsubq_watchdog
*);
100 void ifsq_watchdog_stop(struct ifsubq_watchdog
*);
103 * Dispatch a packet to an interface.
105 int ifq_dispatch(struct ifnet
*, struct mbuf
*,
106 struct altq_pktattr
*);
#ifdef ALTQ
/* Is ALTQ packet scheduling currently enabled on this ifq? */
static __inline int
ifq_is_enabled(struct ifaltq *_ifq)
{
        return(_ifq->altq_flags & ALTQF_ENABLED);
}

/* Is an ALTQ discipline attached to this ifq? */
static __inline int
ifq_is_attached(struct ifaltq *_ifq)
{
        return(_ifq->altq_disc != NULL);
}
#else   /* !ALTQ */
/* Without ALTQ compiled in, scheduling is never enabled nor attached. */
static __inline int
ifq_is_enabled(struct ifaltq *_ifq)
{
        return(0);
}

static __inline int
ifq_is_attached(struct ifaltq *_ifq)
{
        return(0);
}
#endif  /* ALTQ */
139 ifq_is_ready(struct ifaltq
*_ifq
)
141 return(_ifq
->altq_flags
& ALTQF_READY
);
145 ifq_set_ready(struct ifaltq
*_ifq
)
147 _ifq
->altq_flags
|= ALTQF_READY
;
151 * Subqueue lock must be held
154 ifsq_enqueue_locked(struct ifaltq_subque
*_ifsq
, struct mbuf
*_m
,
155 struct altq_pktattr
*_pa
)
158 if (!ifq_is_enabled(_ifsq
->ifsq_altq
))
159 return ifsq_classic_enqueue(_ifsq
, _m
, _pa
);
162 return _ifsq
->ifsq_enqueue(_ifsq
, _m
, _pa
);
/* Enqueue _m on _ifsq, taking the subqueue lock around the operation. */
static __inline int
ifsq_enqueue(struct ifaltq_subque *_ifsq, struct mbuf *_m,
    struct altq_pktattr *_pa)
{
        int _error;

        ALTQ_SQ_LOCK(_ifsq);
        _error = ifsq_enqueue_locked(_ifsq, _m, _pa);
        ALTQ_SQ_UNLOCK(_ifsq);
        return _error;
}
177 static __inline
struct mbuf
*
178 ifsq_dequeue(struct ifaltq_subque
*_ifsq
)
183 if (_ifsq
->ifsq_prepended
!= NULL
) {
184 _m
= _ifsq
->ifsq_prepended
;
185 _ifsq
->ifsq_prepended
= NULL
;
186 ALTQ_SQ_CNTR_DEC(_ifsq
, _m
->m_pkthdr
.len
);
187 ALTQ_SQ_UNLOCK(_ifsq
);
192 if (_ifsq
->ifsq_altq
->altq_tbr
!= NULL
)
193 _m
= tbr_dequeue(_ifsq
, ALTDQ_REMOVE
);
194 else if (!ifq_is_enabled(_ifsq
->ifsq_altq
))
195 _m
= ifsq_classic_dequeue(_ifsq
, ALTDQ_REMOVE
);
198 _m
= _ifsq
->ifsq_dequeue(_ifsq
, ALTDQ_REMOVE
);
199 ALTQ_SQ_UNLOCK(_ifsq
);
204 * Subqueue lock must be held
206 static __inline
struct mbuf
*
207 ifsq_poll_locked(struct ifaltq_subque
*_ifsq
)
209 if (_ifsq
->ifsq_prepended
!= NULL
)
210 return _ifsq
->ifsq_prepended
;
213 if (_ifsq
->ifsq_altq
->altq_tbr
!= NULL
)
214 return tbr_dequeue(_ifsq
, ALTDQ_POLL
);
215 else if (!ifq_is_enabled(_ifsq
->ifsq_altq
))
216 return ifsq_classic_dequeue(_ifsq
, ALTDQ_POLL
);
219 return _ifsq
->ifsq_dequeue(_ifsq
, ALTDQ_POLL
);
/* Peek at the next packet without removing it from the subqueue. */
static __inline struct mbuf *
ifsq_poll(struct ifaltq_subque *_ifsq)
{
        struct mbuf *_m;

        ALTQ_SQ_LOCK(_ifsq);
        _m = ifsq_poll_locked(_ifsq);
        ALTQ_SQ_UNLOCK(_ifsq);
        return _m;
}
234 ifsq_poll_pktlen(struct ifaltq_subque
*_ifsq
)
241 _m
= ifsq_poll_locked(_ifsq
);
244 _len
= _m
->m_pkthdr
.len
;
247 ALTQ_SQ_UNLOCK(_ifsq
);
253 * Subqueue lock must be held
256 ifsq_purge_locked(struct ifaltq_subque
*_ifsq
)
258 if (_ifsq
->ifsq_prepended
!= NULL
) {
259 ALTQ_SQ_CNTR_DEC(_ifsq
, _ifsq
->ifsq_prepended
->m_pkthdr
.len
);
260 m_freem(_ifsq
->ifsq_prepended
);
261 _ifsq
->ifsq_prepended
= NULL
;
265 if (!ifq_is_enabled(_ifsq
->ifsq_altq
))
266 ifsq_classic_request(_ifsq
, ALTRQ_PURGE
, NULL
);
269 _ifsq
->ifsq_request(_ifsq
, ALTRQ_PURGE
, NULL
);
/* Drop every queued packet, taking the subqueue lock. */
static __inline void
ifsq_purge(struct ifaltq_subque *_ifsq)
{
        ALTQ_SQ_LOCK(_ifsq);
        ifsq_purge_locked(_ifsq);
        ALTQ_SQ_UNLOCK(_ifsq);
}
281 ifq_lock_all(struct ifaltq
*_ifq
)
285 for (_q
= 0; _q
< _ifq
->altq_subq_cnt
; ++_q
)
286 ALTQ_SQ_LOCK(&_ifq
->altq_subq
[_q
]);
290 ifq_unlock_all(struct ifaltq
*_ifq
)
294 for (_q
= _ifq
->altq_subq_cnt
- 1; _q
>= 0; --_q
)
295 ALTQ_SQ_UNLOCK(&_ifq
->altq_subq
[_q
]);
299 * All of the subqueue locks must be held
302 ifq_purge_all_locked(struct ifaltq
*_ifq
)
306 for (_q
= 0; _q
< _ifq
->altq_subq_cnt
; ++_q
)
307 ifsq_purge_locked(&_ifq
->altq_subq
[_q
]);
/* Purge all subqueues, holding every subqueue lock across the purge. */
static __inline void
ifq_purge_all(struct ifaltq *_ifq)
{
        ifq_lock_all(_ifq);
        ifq_purge_all_locked(_ifq);
        ifq_unlock_all(_ifq);
}
319 ifq_classify(struct ifaltq
*_ifq
, struct mbuf
*_m
, uint8_t _af
,
320 struct altq_pktattr
*_pa
)
323 if (ifq_is_enabled(_ifq
)) {
325 _pa
->pattr_hdr
= mtod(_m
, caddr_t
);
326 if (ifq_is_enabled(_ifq
) &&
327 (_ifq
->altq_flags
& ALTQF_CLASSIFY
)) {
328 /* XXX default subqueue */
329 struct ifaltq_subque
*_ifsq
=
330 &_ifq
->altq_subq
[ALTQ_SUBQ_INDEX_DEFAULT
];
333 if (ifq_is_enabled(_ifq
) &&
334 (_ifq
->altq_flags
& ALTQF_CLASSIFY
))
335 _ifq
->altq_classify(_ifq
, _m
, _pa
);
336 ALTQ_SQ_UNLOCK(_ifsq
);
343 ifsq_prepend(struct ifaltq_subque
*_ifsq
, struct mbuf
*_m
)
346 KASSERT(_ifsq
->ifsq_prepended
== NULL
, ("pending prepended mbuf"));
347 _ifsq
->ifsq_prepended
= _m
;
348 ALTQ_SQ_CNTR_INC(_ifsq
, _m
->m_pkthdr
.len
);
349 ALTQ_SQ_UNLOCK(_ifsq
);
353 * Subqueue hardware serializer must be held
356 ifsq_set_oactive(struct ifaltq_subque
*_ifsq
)
358 _ifsq
->ifsq_hw_oactive
= 1;
362 * Subqueue hardware serializer must be held
365 ifsq_clr_oactive(struct ifaltq_subque
*_ifsq
)
367 _ifsq
->ifsq_hw_oactive
= 0;
371 * Subqueue hardware serializer must be held
374 ifsq_is_oactive(const struct ifaltq_subque
*_ifsq
)
376 return _ifsq
->ifsq_hw_oactive
;
380 * Hand a packet to the interface's default subqueue.
382 * The default subqueue hardware serializer must be held. If the
383 * subqueue hardware serializer is not held yet, ifq_dispatch()
384 * should be used to get better performance.
387 ifq_handoff(struct ifnet
*_ifp
, struct mbuf
*_m
, struct altq_pktattr
*_pa
)
389 struct ifaltq_subque
*_ifsq
;
391 int _qid
= ALTQ_SUBQ_INDEX_DEFAULT
; /* XXX default subqueue */
393 _ifsq
= &_ifp
->if_snd
.altq_subq
[_qid
];
395 ASSERT_ALTQ_SQ_SERIALIZED_HW(_ifsq
);
396 _error
= ifsq_enqueue(_ifsq
, _m
, _pa
);
398 IFNET_STAT_INC(_ifp
, obytes
, _m
->m_pkthdr
.len
);
399 if (_m
->m_flags
& M_MCAST
)
400 IFNET_STAT_INC(_ifp
, omcasts
, 1);
401 if (!ifsq_is_oactive(_ifsq
))
402 (*_ifp
->if_start
)(_ifp
, _ifsq
);
404 IFNET_STAT_INC(_ifp
, oqdrops
, 1);
410 ifsq_is_empty(const struct ifaltq_subque
*_ifsq
)
412 return(_ifsq
->ifsq_len
== 0);
/*
 * Subqueue lock must be held
 */
static __inline int
ifsq_data_ready(struct ifaltq_subque *_ifsq)
{
#ifdef ALTQ
        /*
         * With a token bucket regulator a queued packet may not be
         * transmittable yet, so poll rather than testing the length.
         */
        if (_ifsq->ifsq_altq->altq_tbr != NULL)
                return (ifsq_poll_locked(_ifsq) != NULL);
        else
#endif
        return !ifsq_is_empty(_ifsq);
}
430 * Subqueue lock must be held
433 ifsq_is_started(const struct ifaltq_subque
*_ifsq
)
435 return _ifsq
->ifsq_started
;
439 * Subqueue lock must be held
442 ifsq_set_started(struct ifaltq_subque
*_ifsq
)
444 _ifsq
->ifsq_started
= 1;
448 * Subqueue lock must be held
451 ifsq_clr_started(struct ifaltq_subque
*_ifsq
)
453 _ifsq
->ifsq_started
= 0;
456 static __inline
struct ifsubq_stage
*
457 ifsq_get_stage(struct ifaltq_subque
*_ifsq
, int _cpuid
)
459 return &_ifsq
->ifsq_stage
[_cpuid
];
463 ifsq_get_cpuid(const struct ifaltq_subque
*_ifsq
)
465 return _ifsq
->ifsq_cpuid
;
469 ifsq_set_cpuid(struct ifaltq_subque
*_ifsq
, int _cpuid
)
471 KASSERT(_cpuid
>= 0 && _cpuid
< ncpus
,
472 ("invalid ifsq_cpuid %d", _cpuid
));
473 _ifsq
->ifsq_cpuid
= _cpuid
;
476 static __inline
struct lwkt_msg
*
477 ifsq_get_ifstart_lmsg(struct ifaltq_subque
*_ifsq
, int _cpuid
)
479 return &_ifsq
->ifsq_ifstart_nmsg
[_cpuid
].lmsg
;
483 ifsq_get_index(const struct ifaltq_subque
*_ifsq
)
485 return _ifsq
->ifsq_index
;
489 ifsq_set_priv(struct ifaltq_subque
*_ifsq
, void *_priv
)
491 _ifsq
->ifsq_hw_priv
= _priv
;
494 static __inline
void *
495 ifsq_get_priv(const struct ifaltq_subque
*_ifsq
)
497 return _ifsq
->ifsq_hw_priv
;
500 static __inline
struct ifnet
*
501 ifsq_get_ifp(const struct ifaltq_subque
*_ifsq
)
503 return _ifsq
->ifsq_ifp
;
507 ifsq_set_hw_serialize(struct ifaltq_subque
*_ifsq
,
508 struct lwkt_serialize
*_hwslz
)
510 KASSERT(_hwslz
!= NULL
, ("NULL hw serialize"));
511 KASSERT(_ifsq
->ifsq_hw_serialize
== NULL
,
512 ("hw serialize has been setup"));
513 _ifsq
->ifsq_hw_serialize
= _hwslz
;
517 ifsq_serialize_hw(struct ifaltq_subque
*_ifsq
)
519 lwkt_serialize_enter(_ifsq
->ifsq_hw_serialize
);
523 ifsq_deserialize_hw(struct ifaltq_subque
*_ifsq
)
525 lwkt_serialize_exit(_ifsq
->ifsq_hw_serialize
);
529 ifsq_tryserialize_hw(struct ifaltq_subque
*_ifsq
)
531 return lwkt_serialize_try(_ifsq
->ifsq_hw_serialize
);
534 static __inline
struct ifaltq_subque
*
535 ifq_get_subq_default(const struct ifaltq
*_ifq
)
537 return &_ifq
->altq_subq
[ALTQ_SUBQ_INDEX_DEFAULT
];
540 static __inline
struct ifaltq_subque
*
541 ifq_get_subq(const struct ifaltq
*_ifq
, int _idx
)
543 KASSERT(_idx
>= 0 && _idx
< _ifq
->altq_subq_cnt
,
544 ("invalid qid %d", _idx
));
545 return &_ifq
->altq_subq
[_idx
];
548 static __inline
struct ifaltq_subque
*
549 ifq_map_subq(struct ifaltq
*_ifq
, int _cpuid
)
551 int _idx
= _ifq
->altq_mapsubq(_ifq
, _cpuid
);
552 return ifq_get_subq(_ifq
, _idx
);
556 ifq_set_subq_cnt(struct ifaltq
*_ifq
, int _cnt
)
558 _ifq
->altq_subq_cnt
= _cnt
;
562 ifq_set_subq_mask(struct ifaltq
*_ifq
, uint32_t _mask
)
565 KASSERT(((_mask
+ 1) & _mask
) == 0, ("invalid mask %08x", _mask
));
566 _ifq
->altq_subq_mappriv
= _mask
;
570 ifq_set_subq_divisor(struct ifaltq
*_ifq
, uint32_t _divisor
)
573 KASSERT(_divisor
> 0, ("invalid divisor %u", _divisor
));
574 KASSERT(_divisor
<= _ifq
->altq_subq_cnt
,
575 ("invalid divisor %u, max %d", _divisor
, _ifq
->altq_subq_cnt
));
576 _ifq
->altq_subq_mappriv
= _divisor
;
/* Convenience wrapper: oactive state of the default subqueue. */
static __inline int
ifq_is_oactive(const struct ifaltq *_ifq)
{
        return ifsq_is_oactive(ifq_get_subq_default(_ifq));
}
/* Convenience wrapper: mark the default subqueue output-active. */
static __inline void
ifq_set_oactive(struct ifaltq *_ifq)
{
        ifsq_set_oactive(ifq_get_subq_default(_ifq));
}
/* Convenience wrapper: clear output-active on the default subqueue. */
static __inline void
ifq_clr_oactive(struct ifaltq *_ifq)
{
        ifsq_clr_oactive(ifq_get_subq_default(_ifq));
}
/* Convenience wrapper: is the default subqueue empty? */
static __inline int
ifq_is_empty(struct ifaltq *_ifq)
{
        return ifsq_is_empty(ifq_get_subq_default(_ifq));
}
/* Convenience wrapper: purge the default subqueue. */
static __inline void
ifq_purge(struct ifaltq *_ifq)
{
        ifsq_purge(ifq_get_subq_default(_ifq));
}
/* Convenience wrapper: dequeue from the default subqueue. */
static __inline struct mbuf *
ifq_dequeue(struct ifaltq *_ifq)
{
        return ifsq_dequeue(ifq_get_subq_default(_ifq));
}
/* Convenience wrapper: prepend _m on the default subqueue. */
static __inline void
ifq_prepend(struct ifaltq *_ifq, struct mbuf *_m)
{
        ifsq_prepend(ifq_get_subq_default(_ifq), _m);
}
630 ifq_set_cpuid(struct ifaltq
*_ifq
, int _cpuid
)
632 KASSERT(_ifq
->altq_subq_cnt
== 1,
633 ("invalid subqueue count %d", _ifq
->altq_subq_cnt
));
634 ifsq_set_cpuid(ifq_get_subq_default(_ifq
), _cpuid
);
639 ifq_set_hw_serialize(struct ifaltq
*_ifq
, struct lwkt_serialize
*_hwslz
)
641 KASSERT(_ifq
->altq_subq_cnt
== 1,
642 ("invalid subqueue count %d", _ifq
->altq_subq_cnt
));
643 ifsq_set_hw_serialize(ifq_get_subq_default(_ifq
), _hwslz
);
646 #endif /* _NET_IFQ_VAR_H_ */