/*	$KAME: if_altq.h,v 1.11 2003/07/10 12:07:50 kjc Exp $	*/

/*
 * Copyright (C) 1997-2003
 *	Sony Computer Science Laboratories Inc.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY SONY CSL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL SONY CSL OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
28 #ifndef _NET_ALTQ_IF_ALTQ_H_
29 #define _NET_ALTQ_IF_ALTQ_H_
31 #ifndef _SYS_SERIALIZE_H_
32 #include <sys/serialize.h>
35 /* Default subqueue */
36 #define ALTQ_SUBQ_INDEX_DEFAULT 0
/*
 * Callback types implemented by each queueing discipline:
 * - altq_mapsubq_t: map a cpu/index to a subqueue index
 * - ifsq_enqueue_t: enqueue an mbuf (with its packet attributes)
 * - ifsq_dequeue_t: dequeue (or poll, see ALTDQ_*) the next mbuf
 * - ifsq_request_t: control request against the subqueue (see ALTRQ_*)
 */
typedef int (*altq_mapsubq_t)(struct ifaltq *, int);

typedef int (*ifsq_enqueue_t)(struct ifaltq_subque *, struct mbuf *,
    struct altq_pktattr *);
typedef struct mbuf *(*ifsq_dequeue_t)(struct ifaltq_subque *, int);
typedef int (*ifsq_request_t)(struct ifaltq_subque *, int, void *);
/*
 * Per-cpu packet staging state for a subqueue; packets may be briefly
 * staged here before the subqueue's if_start is scheduled.
 * NOTE(review): the struct's opening brace and the stg_cnt/stg_len/
 * stg_flags fields were lost in this copy; restored from upstream
 * if_altq.h -- verify against the repository.
 */
struct ifsubq_stage {
	struct ifaltq_subque *stg_subq;	/* back pointer to owning subqueue */
	int		stg_cnt;	/* staged packet count */
	int		stg_len;	/* staged byte count */
	uint32_t	stg_flags;	/* IFSQ_STAGE_FLAG_* */
	TAILQ_ENTRY(ifsubq_stage) stg_link;
};

#define IFSQ_STAGE_FLAG_QUED	0x1
#define IFSQ_STAGE_FLAG_SCHED	0x2
62 struct ifaltq_subque
{
63 struct lwkt_serialize ifsq_lock
;
66 struct ifaltq
*ifsq_altq
;
67 struct ifnet
*ifsq_ifp
;
68 void *ifsq_hw_priv
; /* hw private data */
70 struct mbuf
*ifsq_prio_head
;
71 struct mbuf
*ifsq_prio_tail
;
72 struct mbuf
*ifsq_norm_head
;
73 struct mbuf
*ifsq_norm_tail
;
76 int ifsq_len
; /* packet counter */
78 int ifsq_bcnt
; /* byte counter */
81 ifsq_enqueue_t ifsq_enqueue
;
82 ifsq_dequeue_t ifsq_dequeue
;
83 ifsq_request_t ifsq_request
;
85 struct lwkt_serialize
*ifsq_hw_serialize
;
87 struct mbuf
*ifsq_prepended
;/* mbuf dequeued, but not yet xmit */
88 int ifsq_started
; /* ifnet.if_start interlock */
89 int ifsq_hw_oactive
;/* hw too busy, protected by driver */
90 int ifsq_cpuid
; /* owner cpu */
91 struct ifsubq_stage
*ifsq_stage
;/* packet staging information */
92 struct netmsg_base
*ifsq_ifstart_nmsg
;
93 /* percpu msgs to sched if_start */
/* Subqueue lock: an adaptive lwkt serializer around ifsq_lock. */
#define ALTQ_SQ_ASSERT_LOCKED(ifsq)	ASSERT_SERIALIZED(&(ifsq)->ifsq_lock)
#define ALTQ_SQ_LOCK_INIT(ifsq)		lwkt_serialize_init(&(ifsq)->ifsq_lock)
#define ALTQ_SQ_LOCK(ifsq) \
	lwkt_serialize_adaptive_enter(&(ifsq)->ifsq_lock)
#define ALTQ_SQ_UNLOCK(ifsq)		lwkt_serialize_exit(&(ifsq)->ifsq_lock)

/* Assertions on the driver-supplied hardware serializer. */
#define ASSERT_ALTQ_SQ_SERIALIZED_HW(ifsq) \
	ASSERT_SERIALIZED((ifsq)->ifsq_hw_serialize)
#define ASSERT_ALTQ_SQ_NOT_SERIALIZED_HW(ifsq) \
	ASSERT_NOT_SERIALIZED((ifsq)->ifsq_hw_serialize)
/* Bump the subqueue packet counter (caller holds the subqueue lock). */
#define ALTQ_SQ_PKTCNT_INC(ifsq) \
do { \
	(ifsq)->ifsq_len++; \
} while (0)
/* Drop the subqueue packet counter; underflow is a kernel assertion. */
#define ALTQ_SQ_PKTCNT_DEC(ifsq) \
do { \
	KASSERT((ifsq)->ifsq_len > 0, ("invalid packet count")); \
	(ifsq)->ifsq_len--; \
} while (0)
/* Account one enqueued packet of 'bcnt' bytes. */
#define ALTQ_SQ_CNTR_INC(ifsq, bcnt) \
do { \
	ALTQ_SQ_PKTCNT_INC((ifsq)); \
	(ifsq)->ifsq_bcnt += (bcnt); \
} while (0)
/* Account one dequeued packet of 'bcnt' bytes; underflow asserts. */
#define ALTQ_SQ_CNTR_DEC(ifsq, bcnt) \
do { \
	ALTQ_SQ_PKTCNT_DEC((ifsq)); \
	KASSERT((ifsq)->ifsq_bcnt >= (bcnt), ("invalid byte count")); \
	(ifsq)->ifsq_bcnt -= (bcnt); \
} while (0)
/* Zero both packet and byte counters (e.g. after a purge). */
#define ALTQ_SQ_CNTR_RESET(ifsq) \
do { \
	(ifsq)->ifsq_len = 0; \
	(ifsq)->ifsq_bcnt = 0; \
} while (0)
/* Account one enqueued high-priority packet of 'bcnt' bytes. */
#define ALTQ_SQ_PRIO_CNTR_INC(ifsq, bcnt) \
do { \
	(ifsq)->ifsq_prio_len++; \
	(ifsq)->ifsq_prio_bcnt += (bcnt); \
} while (0)
/* Account one dequeued high-priority packet; underflow asserts. */
#define ALTQ_SQ_PRIO_CNTR_DEC(ifsq, bcnt) \
do { \
	KASSERT((ifsq)->ifsq_prio_len > 0, \
	    ("invalid prio packet count")); \
	(ifsq)->ifsq_prio_len--; \
	KASSERT((ifsq)->ifsq_prio_bcnt >= (bcnt), \
	    ("invalid prio byte count")); \
	(ifsq)->ifsq_prio_bcnt -= (bcnt); \
} while (0)
158 * Structure defining a queue for a network interface.
161 /* alternate queueing related fields */
162 int altq_type
; /* discipline type */
163 int altq_flags
; /* flags (e.g. ready, in-use) */
164 void *altq_disc
; /* for discipline-specific use */
165 struct ifnet
*altq_ifp
; /* back pointer to interface */
167 /* classifier fields */
168 void *altq_clfier
; /* classifier-specific use */
169 void *(*altq_classify
)(struct ifaltq
*, struct mbuf
*,
170 struct altq_pktattr
*);
172 /* token bucket regulator */
173 struct tb_regulator
*altq_tbr
;
175 /* Sub-queues mapping */
176 altq_mapsubq_t altq_mapsubq
;
177 uint32_t altq_subq_mask
;
181 struct ifaltq_subque
*altq_subq
;
/* Lock the whole queue via its default subqueue's lock. */
#define ALTQ_LOCK(ifq) \
	ALTQ_SQ_LOCK(&(ifq)->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT])
#define ALTQ_UNLOCK(ifq) \
	ALTQ_SQ_UNLOCK(&(ifq)->altq_subq[ALTQ_SUBQ_INDEX_DEFAULT])
198 * packet attributes used by queueing disciplines.
199 * pattr_class is a discipline-dependent scheduling class that is
200 * set by a classifier.
201 * pattr_hdr and pattr_af may be used by a discipline to access
202 * the header within a mbuf. (e.g. ECN needs to update the CE bit)
203 * note that pattr_hdr could be stale after m_pullup, though link
204 * layer output routines usually don't use m_pullup. link-level
205 * compression also invalidates these fields. thus, pattr_hdr needs
206 * to be verified when a discipline touches the header.
208 struct altq_pktattr
{
209 void *pattr_class
; /* sched class set by classifier */
210 int pattr_af
; /* address family */
211 caddr_t pattr_hdr
; /* saved header position in mbuf */
/*
 * a token-bucket regulator limits the rate that a network driver can
 * dequeue packets from the output queue.
 * modern cards are able to buffer a large amount of packets and dequeue
 * too many packets at a time.  this bursty dequeue behavior makes it
 * impossible to schedule packets by queueing disciplines.
 * a token-bucket is used to control the burst size in a device
 * independent manner.
 */
struct tb_regulator {
	int64_t		tbr_rate;	/* (scaled) token bucket rate */
	int64_t		tbr_depth;	/* (scaled) token bucket depth */

	int64_t		tbr_token;	/* (scaled) current token */
	int64_t		tbr_filluptime;	/* (scaled) time to fill up bucket */
	uint64_t	tbr_last;	/* last time token was updated */

	int		tbr_lastop;	/* last dequeue operation type
					   needed for poll-and-dequeue */
};
/* altq_flags */
#define ALTQF_READY	 0x01	/* driver supports alternate queueing */
#define ALTQF_ENABLED	 0x02	/* altq is in use */
#define ALTQF_CLASSIFY	 0x04	/* classify packets */
#define ALTQF_DRIVER1	 0x40	/* driver specific */

/* if_altqflags set internally only: */
#define ALTQF_CANTCHANGE	(ALTQF_READY)

/* altq_dequeue 2nd arg */
#define ALTDQ_REMOVE		1	/* dequeue mbuf from the queue */
#define ALTDQ_POLL		2	/* don't dequeue mbuf from the queue */

/* altq request types (currently only purge is defined) */
#define ALTRQ_PURGE		1	/* purge all packets */
251 int altq_attach(struct ifaltq
*, int, void *, altq_mapsubq_t
,
252 ifsq_enqueue_t
, ifsq_dequeue_t
, ifsq_request_t
, void *,
253 void *(*)(struct ifaltq
*, struct mbuf
*, struct altq_pktattr
*));
254 int altq_detach(struct ifaltq
*);
255 int altq_enable(struct ifaltq
*);
256 int altq_disable(struct ifaltq
*);
257 struct mbuf
*tbr_dequeue(struct ifaltq_subque
*, int);
258 extern int (*altq_input
)(struct mbuf
*, int);
261 #endif /* _NET_ALTQ_IF_ALTQ_H_ */