/*
 * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _NET_IFQ_VAR_H_
#define	_NET_IFQ_VAR_H_

#ifndef _KERNEL
#error "This file should not be included by userland programs."
#endif

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_SERIALIZE_H_
#include <sys/serialize.h>
#endif
#ifndef _SYS_MBUF_H_
#include <sys/mbuf.h>
#endif
#ifndef _NET_IF_VAR_H_
#include <net/if_var.h>
#endif
#ifndef _NET_ALTQ_IF_ALTQ_H_
#include <net/altq/if_altq.h>
#endif

#define ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq) \
	KASSERT(ifsq_get_ifp((ifsq)) == (ifp) && \
	    ifsq_get_index((ifsq)) == ALTQ_SUBQ_INDEX_DEFAULT, \
	    ("not ifp's default subqueue"));

#define ASSERT_ALTQ_SQ_SERIALIZED_HW(ifsq) \
	ASSERT_SERIALIZED((ifsq)->ifsq_hw_serialize)

#define ASSERT_ALTQ_SQ_NOT_SERIALIZED_HW(ifsq) \
	ASSERT_NOT_SERIALIZED((ifsq)->ifsq_hw_serialize)

typedef void	(*ifsq_watchdog_t)(struct ifaltq_subque *);

struct ifsubq_watchdog {
	struct callout	wd_callout;
	int		wd_timer;
	struct ifaltq_subque *wd_subq;
	ifsq_watchdog_t	wd_watchdog;
};

/*
 * Support for "classic" ALTQ interfaces.
 */
int		ifsq_classic_enqueue(struct ifaltq_subque *, struct mbuf *,
		    struct altq_pktattr *);
struct mbuf	*ifsq_classic_dequeue(struct ifaltq_subque *, int);
int		ifsq_classic_request(struct ifaltq_subque *, int, void *);
void		ifq_set_classic(struct ifaltq *);

void		ifq_set_maxlen(struct ifaltq *, int);
void		ifq_set_methods(struct ifaltq *, altq_mapsubq_t,
		    ifsq_enqueue_t, ifsq_dequeue_t, ifsq_request_t);
int		ifq_mapsubq_default(struct ifaltq *, int);
int		ifq_mapsubq_modulo(struct ifaltq *, int);

void		ifsq_devstart(struct ifaltq_subque *ifsq);
void		ifsq_devstart_sched(struct ifaltq_subque *ifsq);

void		ifsq_watchdog_init(struct ifsubq_watchdog *,
		    struct ifaltq_subque *, ifsq_watchdog_t);
void		ifsq_watchdog_start(struct ifsubq_watchdog *);
void		ifsq_watchdog_stop(struct ifsubq_watchdog *);
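
/*
 * Example (illustrative sketch, not part of the KPI): a driver would
 * typically embed a struct ifsubq_watchdog in its softc, point it at
 * the default subqueue, and arm it when transmission begins.  The
 * softc layout and the xx_watchdog() callback below are hypothetical.
 *
 *	static void
 *	xx_watchdog(struct ifaltq_subque *ifsq)
 *	{
 *		// reset the hung hardware, then kick the subqueue
 *		ifsq_devstart_sched(ifsq);
 *	}
 *
 *	// during attach:
 *	ifsq_watchdog_init(&sc->sc_wd,
 *	    ifq_get_subq_default(&ifp->if_snd), xx_watchdog);
 *	// when the first packet is handed to the hardware:
 *	ifsq_watchdog_start(&sc->sc_wd);
 *	// on stop/detach:
 *	ifsq_watchdog_stop(&sc->sc_wd);
 */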

/*
 * Dispatch a packet to an interface.
 */
int		ifq_dispatch(struct ifnet *, struct mbuf *,
		    struct altq_pktattr *);
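
/*
 * Example (illustrative sketch): the normal transmit path from a
 * protocol's output routine classifies the packet and hands it to
 * ifq_dispatch(), which maps the packet to a subqueue and serializes
 * to the hardware as needed.  The surrounding variables are assumed.
 *
 *	struct altq_pktattr pktattr;
 *
 *	ifq_classify(&ifp->if_snd, m, AF_INET, &pktattr);
 *	error = ifq_dispatch(ifp, m, &pktattr);
 */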

#ifdef ALTQ

static __inline int
ifq_is_enabled(struct ifaltq *_ifq)
{
	return(_ifq->altq_flags & ALTQF_ENABLED);
}

static __inline int
ifq_is_attached(struct ifaltq *_ifq)
{
	return(_ifq->altq_disc != NULL);
}

#else	/* !ALTQ */

static __inline int
ifq_is_enabled(struct ifaltq *_ifq)
{
	return(0);
}

static __inline int
ifq_is_attached(struct ifaltq *_ifq)
{
	return(0);
}

#endif	/* ALTQ */

static __inline int
ifq_is_ready(struct ifaltq *_ifq)
{
	return(_ifq->altq_flags & ALTQF_READY);
}

static __inline void
ifq_set_ready(struct ifaltq *_ifq)
{
	_ifq->altq_flags |= ALTQF_READY;
}

/*
 * Subqueue lock must be held
 */
static __inline int
ifsq_enqueue_locked(struct ifaltq_subque *_ifsq, struct mbuf *_m,
    struct altq_pktattr *_pa)
{
#ifdef ALTQ
	if (!ifq_is_enabled(_ifsq->ifsq_altq))
		return ifsq_classic_enqueue(_ifsq, _m, _pa);
	else
#endif
	return _ifsq->ifsq_enqueue(_ifsq, _m, _pa);
}

static __inline int
ifsq_enqueue(struct ifaltq_subque *_ifsq, struct mbuf *_m,
    struct altq_pktattr *_pa)
{
	int _error;

	ALTQ_SQ_LOCK(_ifsq);
	_error = ifsq_enqueue_locked(_ifsq, _m, _pa);
	ALTQ_SQ_UNLOCK(_ifsq);
	return _error;
}

static __inline struct mbuf *
ifsq_dequeue(struct ifaltq_subque *_ifsq)
{
	struct mbuf *_m;

	ALTQ_SQ_LOCK(_ifsq);
	if (_ifsq->ifsq_prepended != NULL) {
		_m = _ifsq->ifsq_prepended;
		_ifsq->ifsq_prepended = NULL;
		ALTQ_SQ_CNTR_DEC(_ifsq, _m->m_pkthdr.len);
		ALTQ_SQ_UNLOCK(_ifsq);
		return _m;
	}

#ifdef ALTQ
	if (_ifsq->ifsq_altq->altq_tbr != NULL)
		_m = tbr_dequeue(_ifsq, ALTDQ_REMOVE);
	else if (!ifq_is_enabled(_ifsq->ifsq_altq))
		_m = ifsq_classic_dequeue(_ifsq, ALTDQ_REMOVE);
	else
#endif
	_m = _ifsq->ifsq_dequeue(_ifsq, ALTDQ_REMOVE);
	ALTQ_SQ_UNLOCK(_ifsq);
	return _m;
}

/*
 * Subqueue lock must be held
 */
static __inline struct mbuf *
ifsq_poll_locked(struct ifaltq_subque *_ifsq)
{
	if (_ifsq->ifsq_prepended != NULL)
		return _ifsq->ifsq_prepended;

#ifdef ALTQ
	if (_ifsq->ifsq_altq->altq_tbr != NULL)
		return tbr_dequeue(_ifsq, ALTDQ_POLL);
	else if (!ifq_is_enabled(_ifsq->ifsq_altq))
		return ifsq_classic_dequeue(_ifsq, ALTDQ_POLL);
	else
#endif
	return _ifsq->ifsq_dequeue(_ifsq, ALTDQ_POLL);
}

static __inline struct mbuf *
ifsq_poll(struct ifaltq_subque *_ifsq)
{
	struct mbuf *_m;

	ALTQ_SQ_LOCK(_ifsq);
	_m = ifsq_poll_locked(_ifsq);
	ALTQ_SQ_UNLOCK(_ifsq);
	return _m;
}

static __inline int
ifsq_poll_pktlen(struct ifaltq_subque *_ifsq)
{
	struct mbuf *_m;
	int _len = 0;

	ALTQ_SQ_LOCK(_ifsq);

	_m = ifsq_poll_locked(_ifsq);
	if (_m != NULL)
		_len = _m->m_pkthdr.len;

	ALTQ_SQ_UNLOCK(_ifsq);

	return _len;
}

/*
 * Subqueue lock must be held
 */
static __inline void
ifsq_purge_locked(struct ifaltq_subque *_ifsq)
{
	if (_ifsq->ifsq_prepended != NULL) {
		ALTQ_SQ_CNTR_DEC(_ifsq, _ifsq->ifsq_prepended->m_pkthdr.len);
		m_freem(_ifsq->ifsq_prepended);
		_ifsq->ifsq_prepended = NULL;
	}

#ifdef ALTQ
	if (!ifq_is_enabled(_ifsq->ifsq_altq))
		ifsq_classic_request(_ifsq, ALTRQ_PURGE, NULL);
	else
#endif
	_ifsq->ifsq_request(_ifsq, ALTRQ_PURGE, NULL);
}

static __inline void
ifsq_purge(struct ifaltq_subque *_ifsq)
{
	ALTQ_SQ_LOCK(_ifsq);
	ifsq_purge_locked(_ifsq);
	ALTQ_SQ_UNLOCK(_ifsq);
}

static __inline void
ifq_lock_all(struct ifaltq *_ifq)
{
	int _q;

	for (_q = 0; _q < _ifq->altq_subq_cnt; ++_q)
		ALTQ_SQ_LOCK(&_ifq->altq_subq[_q]);
}

static __inline void
ifq_unlock_all(struct ifaltq *_ifq)
{
	int _q;

	/* Unlock in the reverse order of ifq_lock_all() */
	for (_q = _ifq->altq_subq_cnt - 1; _q >= 0; --_q)
		ALTQ_SQ_UNLOCK(&_ifq->altq_subq[_q]);
}

/*
 * All of the subqueue locks must be held
 */
static __inline void
ifq_purge_all_locked(struct ifaltq *_ifq)
{
	int _q;

	for (_q = 0; _q < _ifq->altq_subq_cnt; ++_q)
		ifsq_purge_locked(&_ifq->altq_subq[_q]);
}

static __inline void
ifq_purge_all(struct ifaltq *_ifq)
{
	ifq_lock_all(_ifq);
	ifq_purge_all_locked(_ifq);
	ifq_unlock_all(_ifq);
}

static __inline void
ifq_classify(struct ifaltq *_ifq, struct mbuf *_m, uint8_t _af,
    struct altq_pktattr *_pa)
{
#ifdef ALTQ
	if (ifq_is_enabled(_ifq)) {
		_pa->pattr_af = _af;
		_pa->pattr_hdr = mtod(_m, caddr_t);
		if (ifq_is_enabled(_ifq) &&
		    (_ifq->altq_flags & ALTQF_CLASSIFY)) {
			/* XXX default subqueue */
			struct ifaltq_subque *_ifsq =
			    &_ifq->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT];

			ALTQ_SQ_LOCK(_ifsq);
			if (ifq_is_enabled(_ifq) &&
			    (_ifq->altq_flags & ALTQF_CLASSIFY))
				_ifq->altq_classify(_ifq, _m, _pa);
			ALTQ_SQ_UNLOCK(_ifsq);
		}
	}
#endif
}

static __inline void
ifsq_prepend(struct ifaltq_subque *_ifsq, struct mbuf *_m)
{
	ALTQ_SQ_LOCK(_ifsq);
	KASSERT(_ifsq->ifsq_prepended == NULL, ("pending prepended mbuf"));
	_ifsq->ifsq_prepended = _m;
	ALTQ_SQ_CNTR_INC(_ifsq, _m->m_pkthdr.len);
	ALTQ_SQ_UNLOCK(_ifsq);
}

/*
 * Subqueue hardware serializer must be held
 */
static __inline void
ifsq_set_oactive(struct ifaltq_subque *_ifsq)
{
	_ifsq->ifsq_hw_oactive = 1;
}

/*
 * Subqueue hardware serializer must be held
 */
static __inline void
ifsq_clr_oactive(struct ifaltq_subque *_ifsq)
{
	_ifsq->ifsq_hw_oactive = 0;
}

/*
 * Subqueue hardware serializer must be held
 */
static __inline int
ifsq_is_oactive(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_hw_oactive;
}
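
/*
 * Example (illustrative sketch): a typical driver if_start loop drains
 * the subqueue while the hardware has room, and marks the subqueue
 * "output active" when the TX ring fills up.  xx_softc, xx_encap() and
 * the ring accounting are hypothetical driver details.
 *
 *	static void
 *	xx_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
 *	{
 *		struct xx_softc *sc = ifp->if_softc;
 *		struct mbuf *m;
 *
 *		ASSERT_ALTQ_SQ_SERIALIZED_HW(ifsq);
 *		while (!ifsq_is_empty(ifsq)) {
 *			if (sc->sc_txdesc_avail == 0) {
 *				ifsq_set_oactive(ifsq);
 *				break;
 *			}
 *			m = ifsq_dequeue(ifsq);
 *			if (m == NULL)
 *				break;
 *			xx_encap(sc, m);
 *		}
 *	}
 *
 * The TX-completion interrupt would later call ifsq_clr_oactive() and
 * restart transmission, e.g. via ifsq_devstart().
 */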

/*
 * Hand a packet to the interface's default subqueue.
 *
 * The default subqueue hardware serializer must be held.  If the
 * subqueue hardware serializer is not held yet, ifq_dispatch()
 * should be used to get better performance.
 */
static __inline int
ifq_handoff(struct ifnet *_ifp, struct mbuf *_m, struct altq_pktattr *_pa)
{
	struct ifaltq_subque *_ifsq;
	int _error;
	int _qid = ALTQ_SUBQ_INDEX_DEFAULT; /* XXX default subqueue */

	_ifsq = &_ifp->if_snd.altq_subq[_qid];

	ASSERT_ALTQ_SQ_SERIALIZED_HW(_ifsq);
	_error = ifsq_enqueue(_ifsq, _m, _pa);
	if (_error == 0) {
		IFNET_STAT_INC(_ifp, obytes, _m->m_pkthdr.len);
		if (_m->m_flags & M_MCAST)
			IFNET_STAT_INC(_ifp, omcasts, 1);
		if (!ifsq_is_oactive(_ifsq))
			(*_ifp->if_start)(_ifp, _ifsq);
	} else {
		IFNET_STAT_INC(_ifp, oqdrops, 1);
	}
	return(_error);
}
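
/*
 * Example (illustrative sketch): ifq_handoff() may only be called with
 * the default subqueue's hardware serializer held, e.g.:
 *
 *	struct ifaltq_subque *ifsq = ifq_get_subq_default(&ifp->if_snd);
 *
 *	ifsq_serialize_hw(ifsq);
 *	error = ifq_handoff(ifp, m, NULL);
 *	ifsq_deserialize_hw(ifsq);
 *
 * Code paths that do not already hold the serializer should prefer
 * ifq_dispatch(), per the comment above.
 */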

static __inline int
ifsq_is_empty(const struct ifaltq_subque *_ifsq)
{
	return(_ifsq->ifsq_len == 0);
}

/*
 * Subqueue lock must be held
 */
static __inline int
ifsq_data_ready(struct ifaltq_subque *_ifsq)
{
#ifdef ALTQ
	if (_ifsq->ifsq_altq->altq_tbr != NULL)
		return (ifsq_poll_locked(_ifsq) != NULL);
	else
#endif
	return !ifsq_is_empty(_ifsq);
}

/*
 * Subqueue lock must be held
 */
static __inline int
ifsq_is_started(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_started;
}

/*
 * Subqueue lock must be held
 */
static __inline void
ifsq_set_started(struct ifaltq_subque *_ifsq)
{
	_ifsq->ifsq_started = 1;
}

/*
 * Subqueue lock must be held
 */
static __inline void
ifsq_clr_started(struct ifaltq_subque *_ifsq)
{
	_ifsq->ifsq_started = 0;
}

static __inline struct ifsubq_stage *
ifsq_get_stage(struct ifaltq_subque *_ifsq, int _cpuid)
{
	return &_ifsq->ifsq_stage[_cpuid];
}

static __inline int
ifsq_get_cpuid(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_cpuid;
}

static __inline void
ifsq_set_cpuid(struct ifaltq_subque *_ifsq, int _cpuid)
{
	KASSERT(_cpuid >= 0 && _cpuid < ncpus,
	    ("invalid ifsq_cpuid %d", _cpuid));
	_ifsq->ifsq_cpuid = _cpuid;
}

static __inline struct lwkt_msg *
ifsq_get_ifstart_lmsg(struct ifaltq_subque *_ifsq, int _cpuid)
{
	return &_ifsq->ifsq_ifstart_nmsg[_cpuid].lmsg;
}

static __inline int
ifsq_get_index(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_index;
}

static __inline void
ifsq_set_priv(struct ifaltq_subque *_ifsq, void *_priv)
{
	_ifsq->ifsq_hw_priv = _priv;
}

static __inline void *
ifsq_get_priv(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_hw_priv;
}

static __inline struct ifnet *
ifsq_get_ifp(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_ifp;
}

static __inline void
ifsq_set_hw_serialize(struct ifaltq_subque *_ifsq,
    struct lwkt_serialize *_hwslz)
{
	KASSERT(_hwslz != NULL, ("NULL hw serialize"));
	KASSERT(_ifsq->ifsq_hw_serialize == NULL,
	    ("hw serialize has been setup"));
	_ifsq->ifsq_hw_serialize = _hwslz;
}

static __inline void
ifsq_serialize_hw(struct ifaltq_subque *_ifsq)
{
	lwkt_serialize_enter(_ifsq->ifsq_hw_serialize);
}

static __inline void
ifsq_deserialize_hw(struct ifaltq_subque *_ifsq)
{
	lwkt_serialize_exit(_ifsq->ifsq_hw_serialize);
}

static __inline int
ifsq_tryserialize_hw(struct ifaltq_subque *_ifsq)
{
	return lwkt_serialize_try(_ifsq->ifsq_hw_serialize);
}
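
/*
 * Example (illustrative sketch): during attach a driver installs one of
 * its own serializers as the subqueue's hardware serializer, and later
 * brackets TX-ring access with the enter/exit helpers.  The "sc" softc
 * and its serializer field are hypothetical.
 *
 *	// attach:
 *	ifsq_set_hw_serialize(ifq_get_subq_default(&ifp->if_snd),
 *	    &sc->sc_tx_serialize);
 *
 *	// later, around hardware access:
 *	ifsq_serialize_hw(ifsq);
 *	// ... touch the TX ring ...
 *	ifsq_deserialize_hw(ifsq);
 */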

static __inline struct ifaltq_subque *
ifq_get_subq_default(const struct ifaltq *_ifq)
{
	return &_ifq->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT];
}

static __inline struct ifaltq_subque *
ifq_get_subq(const struct ifaltq *_ifq, int _idx)
{
	KASSERT(_idx >= 0 && _idx < _ifq->altq_subq_cnt,
	    ("invalid qid %d", _idx));
	return &_ifq->altq_subq[_idx];
}

static __inline struct ifaltq_subque *
ifq_map_subq(struct ifaltq *_ifq, int _cpuid)
{
	int _idx = _ifq->altq_mapsubq(_ifq, _cpuid);
	return ifq_get_subq(_ifq, _idx);
}

static __inline void
ifq_set_subq_cnt(struct ifaltq *_ifq, int _cnt)
{
	_ifq->altq_subq_cnt = _cnt;
}

static __inline void
ifq_set_subq_divisor(struct ifaltq *_ifq, uint32_t _divisor)
{
	KASSERT(_divisor > 0, ("invalid divisor %u", _divisor));
	KASSERT(_divisor <= _ifq->altq_subq_cnt,
	    ("invalid divisor %u, max %d", _divisor, _ifq->altq_subq_cnt));
	_ifq->altq_subq_mappriv = _divisor;
}
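
/*
 * Example (illustrative sketch): a multi-queue driver can spread
 * transmit traffic across its subqueues by installing the modulo
 * mapper and sizing the divisor to the number of TX rings actually in
 * use; XX_TXRING_MAX and sc->sc_tx_ring_inuse are hypothetical.
 *
 *	ifq_set_subq_cnt(&ifp->if_snd, XX_TXRING_MAX);
 *	ifp->if_snd.altq_mapsubq = ifq_mapsubq_modulo;
 *	ifq_set_subq_divisor(&ifp->if_snd, sc->sc_tx_ring_inuse);
 *
 * ifq_map_subq(&ifp->if_snd, cpuid) then resolves to subqueue
 * (cpuid % divisor).
 */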

/* COMPAT */
static __inline int
ifq_is_oactive(const struct ifaltq *_ifq)
{
	return ifsq_is_oactive(ifq_get_subq_default(_ifq));
}

/* COMPAT */
static __inline void
ifq_set_oactive(struct ifaltq *_ifq)
{
	ifsq_set_oactive(ifq_get_subq_default(_ifq));
}

/* COMPAT */
static __inline void
ifq_clr_oactive(struct ifaltq *_ifq)
{
	ifsq_clr_oactive(ifq_get_subq_default(_ifq));
}

/* COMPAT */
static __inline int
ifq_is_empty(struct ifaltq *_ifq)
{
	return ifsq_is_empty(ifq_get_subq_default(_ifq));
}

/* COMPAT */
static __inline void
ifq_purge(struct ifaltq *_ifq)
{
	ifsq_purge(ifq_get_subq_default(_ifq));
}

/* COMPAT */
static __inline struct mbuf *
ifq_dequeue(struct ifaltq *_ifq)
{
	return ifsq_dequeue(ifq_get_subq_default(_ifq));
}

/* COMPAT */
static __inline void
ifq_prepend(struct ifaltq *_ifq, struct mbuf *_m)
{
	ifsq_prepend(ifq_get_subq_default(_ifq), _m);
}

/* COMPAT */
static __inline void
ifq_set_cpuid(struct ifaltq *_ifq, int _cpuid)
{
	KASSERT(_ifq->altq_subq_cnt == 1,
	    ("invalid subqueue count %d", _ifq->altq_subq_cnt));
	ifsq_set_cpuid(ifq_get_subq_default(_ifq), _cpuid);
}

/* COMPAT */
static __inline void
ifq_set_hw_serialize(struct ifaltq *_ifq, struct lwkt_serialize *_hwslz)
{
	KASSERT(_ifq->altq_subq_cnt == 1,
	    ("invalid subqueue count %d", _ifq->altq_subq_cnt));
	ifsq_set_hw_serialize(ifq_get_subq_default(_ifq), _hwslz);
}

#endif	/* _NET_IFQ_VAR_H_ */