/*
 * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _NET_IFQ_VAR_H_
#define _NET_IFQ_VAR_H_

#ifndef _KERNEL
#error "This file should not be included by userland programs."
#endif

#ifndef _SYS_SYSTM_H_
#include <sys/systm.h>
#endif
#ifndef _SYS_SERIALIZE_H_
#include <sys/serialize.h>
#endif
#ifndef _SYS_MBUF_H_
#include <sys/mbuf.h>
#endif
#ifndef _NET_IF_VAR_H_
#include <net/if_var.h>
#endif
#ifndef _NET_ALTQ_IF_ALTQ_H_
#include <net/altq/if_altq.h>
#endif

#define ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq) \
	KASSERT(ifsq_get_ifp((ifsq)) == (ifp) && \
	    ifsq_get_index((ifsq)) == ALTQ_SUBQ_INDEX_DEFAULT, \
	    ("not ifp's default subqueue"));

typedef void	(*ifsq_watchdog_t)(struct ifaltq_subque *);

struct ifsubq_watchdog {
	struct callout	wd_callout;
	int		wd_timer;
	int		wd_flags;
	struct ifaltq_subque *wd_subq;
	ifsq_watchdog_t	wd_watchdog;
};

#define IF_WDOG_ALLTICKS	0x00000001
#define IF_WDOG_LASTTICK	0x00000002

/*
 * Support for "classic" ALTQ interfaces.
 */
int		ifsq_classic_enqueue(struct ifaltq_subque *, struct mbuf *,
		    struct altq_pktattr *);
struct mbuf	*ifsq_classic_dequeue(struct ifaltq_subque *, int);
int		ifsq_classic_request(struct ifaltq_subque *, int, void *);
void		ifq_set_classic(struct ifaltq *);

void		ifq_set_maxlen(struct ifaltq *, int);
void		ifq_set_methods(struct ifaltq *, altq_mapsubq_t,
		    ifsq_enqueue_t, ifsq_dequeue_t, ifsq_request_t);
int		ifq_mapsubq_default(struct ifaltq *, int);
int		ifq_mapsubq_modulo(struct ifaltq *, int);

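/*
 * Usage sketch (illustrative, not part of this header): during attach a
 * driver typically sizes the send queue and marks it ready before
 * ether_ifattach(); ifq_set_classic() installs the default FIFO methods,
 * e.g. when no ALTQ discipline is attached.  "sc" and MYDRV_TX_DESC_CNT
 * are assumed driver-local names, not APIs defined here.
 *
 *	struct ifnet *ifp = &sc->arpcom.ac_if;
 *
 *	ifq_set_maxlen(&ifp->if_snd, MYDRV_TX_DESC_CNT - 1);
 *	ifq_set_ready(&ifp->if_snd);
 */
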
void		ifsq_devstart(struct ifaltq_subque *ifsq);
void		ifsq_devstart_sched(struct ifaltq_subque *ifsq);

void		ifsq_watchdog_init(struct ifsubq_watchdog *,
		    struct ifaltq_subque *, ifsq_watchdog_t, int);
void		ifsq_watchdog_start(struct ifsubq_watchdog *);
void		ifsq_watchdog_stop(struct ifsubq_watchdog *);
void		ifsq_watchdog_set_count(struct ifsubq_watchdog *, int);

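/*
 * Usage sketch (illustrative): a driver embeds a struct ifsubq_watchdog
 * in its softc, initializes it once against the default subqueue, and
 * arms a countdown whenever transmit descriptors are outstanding.
 * mydrv_watchdog, sc->mydrv_wdog and the 5 second count are assumptions
 * for the example; the trailing init argument is taken to be IF_WDOG_*
 * flags.
 *
 *	ifsq_watchdog_init(&sc->mydrv_wdog,
 *	    ifq_get_subq_default(&ifp->if_snd), mydrv_watchdog,
 *	    IF_WDOG_LASTTICK);
 *	...
 *	ifsq_watchdog_start(&sc->mydrv_wdog);		(interface up)
 *	ifsq_watchdog_set_count(&sc->mydrv_wdog, 5);	(TX posted)
 *	ifsq_watchdog_stop(&sc->mydrv_wdog);		(interface down)
 */
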
/*
 * Dispatch a packet to an interface.
 */
int		ifq_dispatch(struct ifnet *, struct mbuf *,
		    struct altq_pktattr *);

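/*
 * Sketch of the expected calling pattern (illustrative): the protocol
 * output path classifies the packet for ALTQ and then dispatches it;
 * ifq_dispatch() selects the subqueue and starts the hardware itself,
 * so the caller need not hold the subqueue's hardware serializer.
 *
 *	struct altq_pktattr pktattr;
 *
 *	ifq_classify(&ifp->if_snd, m, AF_INET, &pktattr);
 *	error = ifq_dispatch(ifp, m, &pktattr);
 */
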
#ifdef ALTQ

static __inline int
ifq_is_enabled(struct ifaltq *_ifq)
{
	return(_ifq->altq_flags & ALTQF_ENABLED);
}

static __inline int
ifq_is_attached(struct ifaltq *_ifq)
{
	return(_ifq->altq_disc != NULL);
}

#else	/* !ALTQ */

static __inline int
ifq_is_enabled(struct ifaltq *_ifq)
{
	return(0);
}

static __inline int
ifq_is_attached(struct ifaltq *_ifq)
{
	return(0);
}

#endif	/* ALTQ */

static __inline int
ifq_is_ready(struct ifaltq *_ifq)
{
	return(_ifq->altq_flags & ALTQF_READY);
}

static __inline void
ifq_set_ready(struct ifaltq *_ifq)
{
	_ifq->altq_flags |= ALTQF_READY;
}

/*
 * Subqueue lock must be held
 */
static __inline int
ifsq_enqueue_locked(struct ifaltq_subque *_ifsq, struct mbuf *_m,
    struct altq_pktattr *_pa)
{
#ifdef ALTQ
	if (!ifq_is_enabled(_ifsq->ifsq_altq))
		return ifsq_classic_enqueue(_ifsq, _m, _pa);
	else
#endif
	return _ifsq->ifsq_enqueue(_ifsq, _m, _pa);
}

static __inline int
ifsq_enqueue(struct ifaltq_subque *_ifsq, struct mbuf *_m,
    struct altq_pktattr *_pa)
{
	int _error;

	ALTQ_SQ_LOCK(_ifsq);
	_error = ifsq_enqueue_locked(_ifsq, _m, _pa);
	ALTQ_SQ_UNLOCK(_ifsq);
	return _error;
}

static __inline struct mbuf *
ifsq_dequeue(struct ifaltq_subque *_ifsq)
{
	struct mbuf *_m;

	ALTQ_SQ_LOCK(_ifsq);
	if (_ifsq->ifsq_prepended != NULL) {
		_m = _ifsq->ifsq_prepended;
		_ifsq->ifsq_prepended = NULL;
		ALTQ_SQ_CNTR_DEC(_ifsq, _m->m_pkthdr.len);
		ALTQ_SQ_UNLOCK(_ifsq);
		return _m;
	}

#ifdef ALTQ
	if (_ifsq->ifsq_altq->altq_tbr != NULL)
		_m = tbr_dequeue(_ifsq, ALTDQ_REMOVE);
	else if (!ifq_is_enabled(_ifsq->ifsq_altq))
		_m = ifsq_classic_dequeue(_ifsq, ALTDQ_REMOVE);
	else
#endif
	_m = _ifsq->ifsq_dequeue(_ifsq, ALTDQ_REMOVE);
	ALTQ_SQ_UNLOCK(_ifsq);
	return _m;
}

/*
 * Subqueue lock must be held
 */
static __inline struct mbuf *
ifsq_poll_locked(struct ifaltq_subque *_ifsq)
{
	if (_ifsq->ifsq_prepended != NULL)
		return _ifsq->ifsq_prepended;

#ifdef ALTQ
	if (_ifsq->ifsq_altq->altq_tbr != NULL)
		return tbr_dequeue(_ifsq, ALTDQ_POLL);
	else if (!ifq_is_enabled(_ifsq->ifsq_altq))
		return ifsq_classic_dequeue(_ifsq, ALTDQ_POLL);
	else
#endif
	return _ifsq->ifsq_dequeue(_ifsq, ALTDQ_POLL);
}

static __inline struct mbuf *
ifsq_poll(struct ifaltq_subque *_ifsq)
{
	struct mbuf *_m;

	ALTQ_SQ_LOCK(_ifsq);
	_m = ifsq_poll_locked(_ifsq);
	ALTQ_SQ_UNLOCK(_ifsq);
	return _m;
}

static __inline int
ifsq_poll_pktlen(struct ifaltq_subque *_ifsq)
{
	struct mbuf *_m;
	int _len = 0;

	ALTQ_SQ_LOCK(_ifsq);

	_m = ifsq_poll_locked(_ifsq);
	if (_m != NULL)
		_len = _m->m_pkthdr.len;

	ALTQ_SQ_UNLOCK(_ifsq);

	return _len;
}

/*
 * Subqueue lock must be held
 */
static __inline void
ifsq_purge_locked(struct ifaltq_subque *_ifsq)
{
	if (_ifsq->ifsq_prepended != NULL) {
		ALTQ_SQ_CNTR_DEC(_ifsq, _ifsq->ifsq_prepended->m_pkthdr.len);
		m_freem(_ifsq->ifsq_prepended);
		_ifsq->ifsq_prepended = NULL;
	}

#ifdef ALTQ
	if (!ifq_is_enabled(_ifsq->ifsq_altq))
		ifsq_classic_request(_ifsq, ALTRQ_PURGE, NULL);
	else
#endif
	_ifsq->ifsq_request(_ifsq, ALTRQ_PURGE, NULL);
}

static __inline void
ifsq_purge(struct ifaltq_subque *_ifsq)
{
	ALTQ_SQ_LOCK(_ifsq);
	ifsq_purge_locked(_ifsq);
	ALTQ_SQ_UNLOCK(_ifsq);
}

static __inline void
ifq_lock_all(struct ifaltq *_ifq)
{
	int _q;

	for (_q = 0; _q < _ifq->altq_subq_cnt; ++_q)
		ALTQ_SQ_LOCK(&_ifq->altq_subq[_q]);
}

static __inline void
ifq_unlock_all(struct ifaltq *_ifq)
{
	int _q;

	for (_q = _ifq->altq_subq_cnt - 1; _q >= 0; --_q)
		ALTQ_SQ_UNLOCK(&_ifq->altq_subq[_q]);
}

/*
 * All of the subqueue locks must be held
 */
static __inline void
ifq_purge_all_locked(struct ifaltq *_ifq)
{
	int _q;

	for (_q = 0; _q < _ifq->altq_subq_cnt; ++_q)
		ifsq_purge_locked(&_ifq->altq_subq[_q]);
}

static __inline void
ifq_purge_all(struct ifaltq *_ifq)
{
	ifq_lock_all(_ifq);
	ifq_purge_all_locked(_ifq);
	ifq_unlock_all(_ifq);
}

static __inline void
ifq_classify(struct ifaltq *_ifq, struct mbuf *_m, uint8_t _af,
    struct altq_pktattr *_pa)
{
#ifdef ALTQ
	if (ifq_is_enabled(_ifq)) {
		_pa->pattr_af = _af;
		_pa->pattr_hdr = mtod(_m, caddr_t);
		if (ifq_is_enabled(_ifq) &&
		    (_ifq->altq_flags & ALTQF_CLASSIFY)) {
			/* XXX default subqueue */
			struct ifaltq_subque *_ifsq =
			    &_ifq->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT];

			ALTQ_SQ_LOCK(_ifsq);
			if (ifq_is_enabled(_ifq) &&
			    (_ifq->altq_flags & ALTQF_CLASSIFY))
				_ifq->altq_classify(_ifq, _m, _pa);
			ALTQ_SQ_UNLOCK(_ifsq);
		}
	}
#endif
}

static __inline void
ifsq_prepend(struct ifaltq_subque *_ifsq, struct mbuf *_m)
{
	ALTQ_SQ_LOCK(_ifsq);
	KASSERT(_ifsq->ifsq_prepended == NULL, ("pending prepended mbuf"));
	_ifsq->ifsq_prepended = _m;
	ALTQ_SQ_CNTR_INC(_ifsq, _m->m_pkthdr.len);
	ALTQ_SQ_UNLOCK(_ifsq);
}

/*
 * Subqueue hardware serializer must be held
 */
static __inline void
ifsq_set_oactive(struct ifaltq_subque *_ifsq)
{
	_ifsq->ifsq_hw_oactive = 1;
}

/*
 * Subqueue hardware serializer must be held
 */
static __inline void
ifsq_clr_oactive(struct ifaltq_subque *_ifsq)
{
	_ifsq->ifsq_hw_oactive = 0;
}

/*
 * Subqueue hardware serializer must be held
 */
static __inline int
ifsq_is_oactive(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_hw_oactive;
}

/*
 * Hand a packet to the interface's default subqueue.
 *
 * The default subqueue hardware serializer must be held.  If the
 * subqueue hardware serializer is not held yet, ifq_dispatch()
 * should be used to get better performance.
 */
static __inline int
ifq_handoff(struct ifnet *_ifp, struct mbuf *_m, struct altq_pktattr *_pa)
{
	struct ifaltq_subque *_ifsq;
	int _error;
	int _qid = ALTQ_SUBQ_INDEX_DEFAULT;	/* XXX default subqueue */

	_ifsq = &_ifp->if_snd.altq_subq[_qid];

	ASSERT_ALTQ_SQ_SERIALIZED_HW(_ifsq);
	_error = ifsq_enqueue(_ifsq, _m, _pa);
	if (_error == 0) {
		IFNET_STAT_INC(_ifp, obytes, _m->m_pkthdr.len);
		if (_m->m_flags & M_MCAST)
			IFNET_STAT_INC(_ifp, omcasts, 1);
		if (!ifsq_is_oactive(_ifsq))
			(*_ifp->if_start)(_ifp, _ifsq);
	} else {
		IFNET_STAT_INC(_ifp, oqdrops, 1);
	}
	return(_error);
}

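/*
 * Usage sketch (illustrative): a caller that already holds, or first
 * takes, the default subqueue's hardware serializer can hand packets
 * off directly, using only helpers from this header:
 *
 *	struct ifaltq_subque *_ifsq =
 *	    ifq_get_subq_default(&ifp->if_snd);
 *
 *	ifsq_serialize_hw(_ifsq);
 *	error = ifq_handoff(ifp, m, NULL);
 *	ifsq_deserialize_hw(_ifsq);
 *
 * A NULL altq_pktattr is assumed here to mean the packet was not run
 * through ifq_classify() beforehand.
 */
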
static __inline int
ifsq_is_empty(const struct ifaltq_subque *_ifsq)
{
	return(_ifsq->ifsq_len == 0);
}

/*
 * Subqueue lock must be held
 */
static __inline int
ifsq_data_ready(struct ifaltq_subque *_ifsq)
{
#ifdef ALTQ
	if (_ifsq->ifsq_altq->altq_tbr != NULL)
		return (ifsq_poll_locked(_ifsq) != NULL);
	else
#endif
	return !ifsq_is_empty(_ifsq);
}

/*
 * Subqueue lock must be held
 */
static __inline int
ifsq_is_started(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_started;
}

/*
 * Subqueue lock must be held
 */
static __inline void
ifsq_set_started(struct ifaltq_subque *_ifsq)
{
	_ifsq->ifsq_started = 1;
}

/*
 * Subqueue lock must be held
 */
static __inline void
ifsq_clr_started(struct ifaltq_subque *_ifsq)
{
	_ifsq->ifsq_started = 0;
}

static __inline struct ifsubq_stage *
ifsq_get_stage(struct ifaltq_subque *_ifsq, int _cpuid)
{
	return &_ifsq->ifsq_stage[_cpuid];
}

static __inline int
ifsq_get_cpuid(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_cpuid;
}

static __inline void
ifsq_set_cpuid(struct ifaltq_subque *_ifsq, int _cpuid)
{
	KASSERT(_cpuid >= 0 && _cpuid < ncpus,
	    ("invalid ifsq_cpuid %d", _cpuid));
	_ifsq->ifsq_cpuid = _cpuid;
}

static __inline struct lwkt_msg *
ifsq_get_ifstart_lmsg(struct ifaltq_subque *_ifsq, int _cpuid)
{
	return &_ifsq->ifsq_ifstart_nmsg[_cpuid].lmsg;
}

static __inline int
ifsq_get_index(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_index;
}

static __inline void
ifsq_set_priv(struct ifaltq_subque *_ifsq, void *_priv)
{
	_ifsq->ifsq_hw_priv = _priv;
}

static __inline void *
ifsq_get_priv(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_hw_priv;
}

static __inline struct ifnet *
ifsq_get_ifp(const struct ifaltq_subque *_ifsq)
{
	return _ifsq->ifsq_ifp;
}

static __inline void
ifsq_set_hw_serialize(struct ifaltq_subque *_ifsq,
    struct lwkt_serialize *_hwslz)
{
	KASSERT(_hwslz != NULL, ("NULL hw serialize"));
	KASSERT(_ifsq->ifsq_hw_serialize == NULL,
	    ("hw serialize has been setup"));
	_ifsq->ifsq_hw_serialize = _hwslz;
}

static __inline void
ifsq_serialize_hw(struct ifaltq_subque *_ifsq)
{
	lwkt_serialize_enter(_ifsq->ifsq_hw_serialize);
}

static __inline void
ifsq_deserialize_hw(struct ifaltq_subque *_ifsq)
{
	lwkt_serialize_exit(_ifsq->ifsq_hw_serialize);
}

static __inline int
ifsq_tryserialize_hw(struct ifaltq_subque *_ifsq)
{
	return lwkt_serialize_try(_ifsq->ifsq_hw_serialize);
}

static __inline struct ifaltq_subque *
ifq_get_subq_default(const struct ifaltq *_ifq)
{
	return &_ifq->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT];
}

static __inline struct ifaltq_subque *
ifq_get_subq(const struct ifaltq *_ifq, int _idx)
{
	KASSERT(_idx >= 0 && _idx < _ifq->altq_subq_cnt,
	    ("invalid qid %d", _idx));
	return &_ifq->altq_subq[_idx];
}

static __inline struct ifaltq_subque *
ifq_map_subq(struct ifaltq *_ifq, int _cpuid)
{
	int _idx = _ifq->altq_mapsubq(_ifq, _cpuid);

	return ifq_get_subq(_ifq, _idx);
}

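/*
 * Usage sketch (illustrative): a multi-queue transmit path maps the
 * current CPU onto a subqueue, so traffic from one CPU consistently
 * targets one hardware ring (mycpuid is the per-cpu globaldata index):
 *
 *	struct ifaltq_subque *_ifsq =
 *	    ifq_map_subq(&ifp->if_snd, mycpuid);
 */
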
static __inline void
ifq_set_subq_cnt(struct ifaltq *_ifq, int _cnt)
{
	_ifq->altq_subq_cnt = _cnt;
}

static __inline void
ifq_set_subq_divisor(struct ifaltq *_ifq, uint32_t _divisor)
{
	KASSERT(_divisor > 0, ("invalid divisor %u", _divisor));
	KASSERT(_divisor <= _ifq->altq_subq_cnt,
	    ("invalid divisor %u, max %d", _divisor, _ifq->altq_subq_cnt));
	_ifq->altq_subq_mappriv = _divisor;
}

static __inline int
ifq_is_oactive(const struct ifaltq *_ifq)
{
	return ifsq_is_oactive(ifq_get_subq_default(_ifq));
}

static __inline void
ifq_set_oactive(struct ifaltq *_ifq)
{
	ifsq_set_oactive(ifq_get_subq_default(_ifq));
}

static __inline void
ifq_clr_oactive(struct ifaltq *_ifq)
{
	ifsq_clr_oactive(ifq_get_subq_default(_ifq));
}

static __inline int
ifq_is_empty(struct ifaltq *_ifq)
{
	return ifsq_is_empty(ifq_get_subq_default(_ifq));
}

static __inline void
ifq_purge(struct ifaltq *_ifq)
{
	ifsq_purge(ifq_get_subq_default(_ifq));
}

static __inline struct mbuf *
ifq_dequeue(struct ifaltq *_ifq)
{
	return ifsq_dequeue(ifq_get_subq_default(_ifq));
}

static __inline void
ifq_prepend(struct ifaltq *_ifq, struct mbuf *_m)
{
	ifsq_prepend(ifq_get_subq_default(_ifq), _m);
}

static __inline void
ifq_set_cpuid(struct ifaltq *_ifq, int _cpuid)
{
	KASSERT(_ifq->altq_subq_cnt == 1,
	    ("invalid subqueue count %d", _ifq->altq_subq_cnt));
	ifsq_set_cpuid(ifq_get_subq_default(_ifq), _cpuid);
}

static __inline void
ifq_set_hw_serialize(struct ifaltq *_ifq, struct lwkt_serialize *_hwslz)
{
	KASSERT(_ifq->altq_subq_cnt == 1,
	    ("invalid subqueue count %d", _ifq->altq_subq_cnt));
	ifsq_set_hw_serialize(ifq_get_subq_default(_ifq), _hwslz);
}

#endif	/* _NET_IFQ_VAR_H_ */