/*
 * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef _NET_IFQ_VAR_H_
#define	_NET_IFQ_VAR_H_

#ifndef _KERNEL
#error "This file should not be included by userland programs."
#endif

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_THREAD2_H_
#include <sys/thread2.h>
#endif
#ifndef _SYS_SERIALIZE_H_
#include <sys/serialize.h>
#endif
#ifndef _SYS_MBUF_H_
#include <sys/mbuf.h>
#endif
#ifndef _NET_IF_VAR_H_
#include <net/if_var.h>
#endif
#ifndef _NET_ALTQ_IF_ALTQ_H_
#include <net/altq/if_altq.h>
#endif
/* Assert that the subqueue is the interface's default subqueue. */
#define ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq) \
	KASSERT(ifsq_get_ifp((ifsq)) == (ifp) && \
	    ifsq_get_index((ifsq)) == ALTQ_SUBQ_INDEX_DEFAULT, \
	    ("not ifp's default subqueue"));
typedef void	(*ifsq_watchdog_t)(struct ifaltq_subque *);
71 struct ifsubq_watchdog
{
72 struct callout wd_callout
;
74 struct ifaltq_subque
*wd_subq
;
75 ifsq_watchdog_t wd_watchdog
;
/*
 * Support for "classic" ALTQ interfaces.
 */
int		ifsq_classic_enqueue(struct ifaltq_subque *, struct mbuf *,
		    struct altq_pktattr *);
struct mbuf	*ifsq_classic_dequeue(struct ifaltq_subque *, int);
int		ifsq_classic_request(struct ifaltq_subque *, int, void *);
void		ifq_set_classic(struct ifaltq *);

void		ifq_set_maxlen(struct ifaltq *, int);
void		ifq_set_methods(struct ifaltq *, altq_mapsubq_t,
		    ifsq_enqueue_t, ifsq_dequeue_t, ifsq_request_t);
int		ifq_mapsubq_default(struct ifaltq *, int);
int		ifq_mapsubq_mask(struct ifaltq *, int);

void		ifsq_devstart(struct ifaltq_subque *ifsq);
void		ifsq_devstart_sched(struct ifaltq_subque *ifsq);

void		ifsq_watchdog_init(struct ifsubq_watchdog *,
		    struct ifaltq_subque *, ifsq_watchdog_t);
void		ifsq_watchdog_start(struct ifsubq_watchdog *);
void		ifsq_watchdog_stop(struct ifsubq_watchdog *);

/*
 * Dispatch a packet to an interface.
 */
int		ifq_dispatch(struct ifnet *, struct mbuf *,
		    struct altq_pktattr *);
#ifdef ALTQ
/*
 * Return non-zero if ALTQ is enabled on this interface queue.
 */
static __inline int
ifq_is_enabled(struct ifaltq *_ifq)
{
	return(_ifq->altq_flags & ALTQF_ENABLED);
}

/*
 * Return non-zero if an ALTQ discipline is attached to this queue.
 */
static __inline int
ifq_is_attached(struct ifaltq *_ifq)
{
	return(_ifq->altq_disc != NULL);
}
#else	/* !ALTQ */
/* Without ALTQ compiled in, queues are never enabled/attached. */
static __inline int
ifq_is_enabled(struct ifaltq *_ifq)
{
	return(0);
}

static __inline int
ifq_is_attached(struct ifaltq *_ifq)
{
	return(0);
}
#endif	/* ALTQ */
138 ifq_is_ready(struct ifaltq
*_ifq
)
140 return(_ifq
->altq_flags
& ALTQF_READY
);
144 ifq_set_ready(struct ifaltq
*_ifq
)
146 _ifq
->altq_flags
|= ALTQF_READY
;
150 * Subqueue lock must be held
153 ifsq_enqueue_locked(struct ifaltq_subque
*_ifsq
, struct mbuf
*_m
,
154 struct altq_pktattr
*_pa
)
157 if (!ifq_is_enabled(_ifsq
->ifsq_altq
))
158 return ifsq_classic_enqueue(_ifsq
, _m
, _pa
);
161 return _ifsq
->ifsq_enqueue(_ifsq
, _m
, _pa
);
/*
 * Locked wrapper around ifsq_enqueue_locked(); acquires and releases
 * the subqueue lock around the enqueue.
 */
static __inline int
ifsq_enqueue(struct ifaltq_subque *_ifsq, struct mbuf *_m,
    struct altq_pktattr *_pa)
{
	int _error;

	ALTQ_SQ_LOCK(_ifsq);
	_error = ifsq_enqueue_locked(_ifsq, _m, _pa);
	ALTQ_SQ_UNLOCK(_ifsq);
	return _error;
}
176 static __inline
struct mbuf
*
177 ifsq_dequeue(struct ifaltq_subque
*_ifsq
)
182 if (_ifsq
->ifsq_prepended
!= NULL
) {
183 _m
= _ifsq
->ifsq_prepended
;
184 _ifsq
->ifsq_prepended
= NULL
;
185 ALTQ_SQ_CNTR_DEC(_ifsq
, _m
->m_pkthdr
.len
);
186 ALTQ_SQ_UNLOCK(_ifsq
);
191 if (_ifsq
->ifsq_altq
->altq_tbr
!= NULL
)
192 _m
= tbr_dequeue(_ifsq
, ALTDQ_REMOVE
);
193 else if (!ifq_is_enabled(_ifsq
->ifsq_altq
))
194 _m
= ifsq_classic_dequeue(_ifsq
, ALTDQ_REMOVE
);
197 _m
= _ifsq
->ifsq_dequeue(_ifsq
, ALTDQ_REMOVE
);
198 ALTQ_SQ_UNLOCK(_ifsq
);
203 * Subqueue lock must be held
205 static __inline
struct mbuf
*
206 ifsq_poll_locked(struct ifaltq_subque
*_ifsq
)
208 if (_ifsq
->ifsq_prepended
!= NULL
)
209 return _ifsq
->ifsq_prepended
;
212 if (_ifsq
->ifsq_altq
->altq_tbr
!= NULL
)
213 return tbr_dequeue(_ifsq
, ALTDQ_POLL
);
214 else if (!ifq_is_enabled(_ifsq
->ifsq_altq
))
215 return ifsq_classic_dequeue(_ifsq
, ALTDQ_POLL
);
218 return _ifsq
->ifsq_dequeue(_ifsq
, ALTDQ_POLL
);
/*
 * Locked wrapper around ifsq_poll_locked().
 */
static __inline struct mbuf *
ifsq_poll(struct ifaltq_subque *_ifsq)
{
	struct mbuf *_m;

	ALTQ_SQ_LOCK(_ifsq);
	_m = ifsq_poll_locked(_ifsq);
	ALTQ_SQ_UNLOCK(_ifsq);
	return _m;
}
233 ifsq_poll_pktlen(struct ifaltq_subque
*_ifsq
)
240 _m
= ifsq_poll_locked(_ifsq
);
243 _len
= _m
->m_pkthdr
.len
;
246 ALTQ_SQ_UNLOCK(_ifsq
);
252 * Subqueue lock must be held
255 ifsq_purge_locked(struct ifaltq_subque
*_ifsq
)
257 if (_ifsq
->ifsq_prepended
!= NULL
) {
258 ALTQ_SQ_CNTR_DEC(_ifsq
, _ifsq
->ifsq_prepended
->m_pkthdr
.len
);
259 m_freem(_ifsq
->ifsq_prepended
);
260 _ifsq
->ifsq_prepended
= NULL
;
264 if (!ifq_is_enabled(_ifsq
->ifsq_altq
))
265 ifsq_classic_request(_ifsq
, ALTRQ_PURGE
, NULL
);
268 _ifsq
->ifsq_request(_ifsq
, ALTRQ_PURGE
, NULL
);
/*
 * Locked wrapper around ifsq_purge_locked().
 */
static __inline void
ifsq_purge(struct ifaltq_subque *_ifsq)
{
	ALTQ_SQ_LOCK(_ifsq);
	ifsq_purge_locked(_ifsq);
	ALTQ_SQ_UNLOCK(_ifsq);
}
280 ifq_lock_all(struct ifaltq
*_ifq
)
284 for (_q
= 0; _q
< _ifq
->altq_subq_cnt
; ++_q
)
285 ALTQ_SQ_LOCK(&_ifq
->altq_subq
[_q
]);
289 ifq_unlock_all(struct ifaltq
*_ifq
)
293 for (_q
= _ifq
->altq_subq_cnt
- 1; _q
>= 0; --_q
)
294 ALTQ_SQ_UNLOCK(&_ifq
->altq_subq
[_q
]);
298 * All of the subqueue locks must be held
301 ifq_purge_all_locked(struct ifaltq
*_ifq
)
305 for (_q
= 0; _q
< _ifq
->altq_subq_cnt
; ++_q
)
306 ifsq_purge_locked(&_ifq
->altq_subq
[_q
]);
310 ifq_purge_all(struct ifaltq
*_ifq
)
313 ifq_purge_all_locked(_ifq
);
314 ifq_unlock_all(_ifq
);
318 ifq_classify(struct ifaltq
*_ifq
, struct mbuf
*_m
, uint8_t _af
,
319 struct altq_pktattr
*_pa
)
322 if (ifq_is_enabled(_ifq
)) {
324 _pa
->pattr_hdr
= mtod(_m
, caddr_t
);
325 if (ifq_is_enabled(_ifq
) &&
326 (_ifq
->altq_flags
& ALTQF_CLASSIFY
)) {
327 /* XXX default subqueue */
328 struct ifaltq_subque
*_ifsq
=
329 &_ifq
->altq_subq
[ALTQ_SUBQ_INDEX_DEFAULT
];
332 if (ifq_is_enabled(_ifq
) &&
333 (_ifq
->altq_flags
& ALTQF_CLASSIFY
))
334 _ifq
->altq_classify(_ifq
, _m
, _pa
);
335 ALTQ_SQ_UNLOCK(_ifsq
);
342 ifsq_prepend(struct ifaltq_subque
*_ifsq
, struct mbuf
*_m
)
345 KASSERT(_ifsq
->ifsq_prepended
== NULL
, ("pending prepended mbuf"));
346 _ifsq
->ifsq_prepended
= _m
;
347 ALTQ_SQ_CNTR_INC(_ifsq
, _m
->m_pkthdr
.len
);
348 ALTQ_SQ_UNLOCK(_ifsq
);
352 * Subqueue hardware serializer must be held
355 ifsq_set_oactive(struct ifaltq_subque
*_ifsq
)
357 _ifsq
->ifsq_hw_oactive
= 1;
361 * Subqueue hardware serializer must be held
364 ifsq_clr_oactive(struct ifaltq_subque
*_ifsq
)
366 _ifsq
->ifsq_hw_oactive
= 0;
370 * Subqueue hardware serializer must be held
373 ifsq_is_oactive(const struct ifaltq_subque
*_ifsq
)
375 return _ifsq
->ifsq_hw_oactive
;
379 * Hand a packet to the interface's default subqueue.
381 * The default subqueue hardware serializer must be held. If the
382 * subqueue hardware serializer is not held yet, ifq_dispatch()
383 * should be used to get better performance.
386 ifq_handoff(struct ifnet
*_ifp
, struct mbuf
*_m
, struct altq_pktattr
*_pa
)
388 struct ifaltq_subque
*_ifsq
;
390 int _qid
= ALTQ_SUBQ_INDEX_DEFAULT
; /* XXX default subqueue */
392 _ifsq
= &_ifp
->if_snd
.altq_subq
[_qid
];
394 ASSERT_ALTQ_SQ_SERIALIZED_HW(_ifsq
);
395 _error
= ifsq_enqueue(_ifsq
, _m
, _pa
);
397 IFNET_STAT_INC(_ifp
, obytes
, _m
->m_pkthdr
.len
);
398 if (_m
->m_flags
& M_MCAST
)
399 IFNET_STAT_INC(_ifp
, omcasts
, 1);
400 if (!ifsq_is_oactive(_ifsq
))
401 (*_ifp
->if_start
)(_ifp
, _ifsq
);
407 ifsq_is_empty(const struct ifaltq_subque
*_ifsq
)
409 return(_ifsq
->ifsq_len
== 0);
413 * Subqueue lock must be held
416 ifsq_data_ready(struct ifaltq_subque
*_ifsq
)
419 if (_ifsq
->ifsq_altq
->altq_tbr
!= NULL
)
420 return (ifsq_poll_locked(_ifsq
) != NULL
);
423 return !ifsq_is_empty(_ifsq
);
427 * Subqueue lock must be held
430 ifsq_is_started(const struct ifaltq_subque
*_ifsq
)
432 return _ifsq
->ifsq_started
;
436 * Subqueue lock must be held
439 ifsq_set_started(struct ifaltq_subque
*_ifsq
)
441 _ifsq
->ifsq_started
= 1;
445 * Subqueue lock must be held
448 ifsq_clr_started(struct ifaltq_subque
*_ifsq
)
450 _ifsq
->ifsq_started
= 0;
453 static __inline
struct ifsubq_stage
*
454 ifsq_get_stage(struct ifaltq_subque
*_ifsq
, int _cpuid
)
456 return &_ifsq
->ifsq_stage
[_cpuid
];
460 ifsq_get_cpuid(const struct ifaltq_subque
*_ifsq
)
462 return _ifsq
->ifsq_cpuid
;
466 ifsq_set_cpuid(struct ifaltq_subque
*_ifsq
, int _cpuid
)
468 KASSERT(_cpuid
>= 0 && _cpuid
< ncpus
,
469 ("invalid ifsq_cpuid %d", _cpuid
));
470 _ifsq
->ifsq_cpuid
= _cpuid
;
473 static __inline
struct lwkt_msg
*
474 ifsq_get_ifstart_lmsg(struct ifaltq_subque
*_ifsq
, int _cpuid
)
476 return &_ifsq
->ifsq_ifstart_nmsg
[_cpuid
].lmsg
;
480 ifsq_get_index(const struct ifaltq_subque
*_ifsq
)
482 return _ifsq
->ifsq_index
;
486 ifsq_set_priv(struct ifaltq_subque
*_ifsq
, void *_priv
)
488 _ifsq
->ifsq_hw_priv
= _priv
;
491 static __inline
void *
492 ifsq_get_priv(const struct ifaltq_subque
*_ifsq
)
494 return _ifsq
->ifsq_hw_priv
;
497 static __inline
struct ifnet
*
498 ifsq_get_ifp(const struct ifaltq_subque
*_ifsq
)
500 return _ifsq
->ifsq_ifp
;
504 ifsq_set_hw_serialize(struct ifaltq_subque
*_ifsq
,
505 struct lwkt_serialize
*_hwslz
)
507 KASSERT(_hwslz
!= NULL
, ("NULL hw serialize"));
508 KASSERT(_ifsq
->ifsq_hw_serialize
== NULL
,
509 ("hw serialize has been setup"));
510 _ifsq
->ifsq_hw_serialize
= _hwslz
;
514 ifsq_serialize_hw(struct ifaltq_subque
*_ifsq
)
516 lwkt_serialize_enter(_ifsq
->ifsq_hw_serialize
);
520 ifsq_deserialize_hw(struct ifaltq_subque
*_ifsq
)
522 lwkt_serialize_exit(_ifsq
->ifsq_hw_serialize
);
526 ifsq_tryserialize_hw(struct ifaltq_subque
*_ifsq
)
528 return lwkt_serialize_try(_ifsq
->ifsq_hw_serialize
);
531 static __inline
struct ifaltq_subque
*
532 ifq_get_subq_default(const struct ifaltq
*_ifq
)
534 return &_ifq
->altq_subq
[ALTQ_SUBQ_INDEX_DEFAULT
];
537 static __inline
struct ifaltq_subque
*
538 ifq_get_subq(const struct ifaltq
*_ifq
, int _idx
)
540 KASSERT(_idx
>= 0 && _idx
< _ifq
->altq_subq_cnt
,
541 ("invalid qid %d", _idx
));
542 return &_ifq
->altq_subq
[_idx
];
545 static __inline
struct ifaltq_subque
*
546 ifq_map_subq(struct ifaltq
*_ifq
, int _cpuid
)
548 int _idx
= _ifq
->altq_mapsubq(_ifq
, _cpuid
);
549 return ifq_get_subq(_ifq
, _idx
);
553 ifq_set_subq_cnt(struct ifaltq
*_ifq
, int _cnt
)
555 _ifq
->altq_subq_cnt
= _cnt
;
559 ifq_set_subq_mask(struct ifaltq
*_ifq
, uint32_t _mask
)
561 KASSERT(((_mask
+ 1) & _mask
) == 0, ("invalid mask %08x", _mask
));
562 _ifq
->altq_subq_mask
= _mask
;
/*
 * ifq_*() wrappers below operate on the interface queue's default
 * subqueue.
 */
static __inline int
ifq_is_oactive(const struct ifaltq *_ifq)
{
	return ifsq_is_oactive(ifq_get_subq_default(_ifq));
}

static __inline void
ifq_set_oactive(struct ifaltq *_ifq)
{
	ifsq_set_oactive(ifq_get_subq_default(_ifq));
}

static __inline void
ifq_clr_oactive(struct ifaltq *_ifq)
{
	ifsq_clr_oactive(ifq_get_subq_default(_ifq));
}

static __inline int
ifq_is_empty(struct ifaltq *_ifq)
{
	return ifsq_is_empty(ifq_get_subq_default(_ifq));
}

static __inline void
ifq_purge(struct ifaltq *_ifq)
{
	ifsq_purge(ifq_get_subq_default(_ifq));
}

static __inline struct mbuf *
ifq_dequeue(struct ifaltq *_ifq)
{
	return ifsq_dequeue(ifq_get_subq_default(_ifq));
}

static __inline void
ifq_prepend(struct ifaltq *_ifq, struct mbuf *_m)
{
	ifsq_prepend(ifq_get_subq_default(_ifq), _m);
}
616 ifq_set_cpuid(struct ifaltq
*_ifq
, int _cpuid
)
618 KASSERT(_ifq
->altq_subq_cnt
== 1,
619 ("invalid subqueue count %d", _ifq
->altq_subq_cnt
));
620 ifsq_set_cpuid(ifq_get_subq_default(_ifq
), _cpuid
);
625 ifq_set_hw_serialize(struct ifaltq
*_ifq
, struct lwkt_serialize
*_hwslz
)
627 KASSERT(_ifq
->altq_subq_cnt
== 1,
628 ("invalid subqueue count %d", _ifq
->altq_subq_cnt
));
629 ifsq_set_hw_serialize(ifq_get_subq_default(_ifq
), _hwslz
);
#endif	/* _NET_IFQ_VAR_H_ */