Commit (truncated): "Add fairq to altq. Fairq is a fair queueing algorithm with bandwidth …"
Path: dragonfly.git / sys / sys / mbuf.h
Blob: 8d0e67bbafd31a439a594f71b1af37483fe0906d
1 /*
2 * Copyright (c) 2004 The DragonFly Project. All rights reserved.
4 * Copyright (c) 1982, 1986, 1988, 1993
5 * The Regents of the University of California. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by the University of
18 * California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
35 * @(#)mbuf.h 8.5 (Berkeley) 2/19/95
36 * $FreeBSD: src/sys/sys/mbuf.h,v 1.44.2.17 2003/04/15 06:15:02 silby Exp $
37 * $DragonFly: src/sys/sys/mbuf.h,v 1.47 2008/04/06 18:58:10 dillon Exp $
40 #ifndef _SYS_MBUF_H_
41 #define _SYS_MBUF_H_
43 #if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)
45 #ifndef _SYS_TYPES_H_
46 #include <sys/types.h>
47 #endif
48 #ifndef _SYS_PARAM_H_
49 #include <sys/param.h>
50 #endif
51 #ifndef _SYS_QUEUE_H_
52 #include <sys/queue.h>
53 #endif
54 #ifndef _NET_NETISR_H_
55 #include <net/netisr.h>
56 #endif
/*
 * Mbufs are of a single size, MSIZE (machine/param.h), which
 * includes overhead.  An mbuf may add a single "mbuf cluster" of size
 * MCLBYTES (also in machine/param.h), which has no additional overhead
 * and is used instead of the internal data area; this is done when
 * at least MINCLSIZE of data must be stored.
 */
#define MLEN		(MSIZE - sizeof(struct m_hdr))	/* normal data len */
#define MHLEN		(MLEN - sizeof(struct pkthdr))	/* data len w/pkthdr */
#define MINCLSIZE	(MHLEN + 1)	/* smallest amount to put in cluster */
#define M_MAXCOMPRESS	(MHLEN / 2)	/* max amount to copy for compression */
/*
 * Macros for type conversion:
 * mtod(m, t)	-- Convert mbuf pointer to data pointer of correct type.
 * mtocl(x)	-- Convert pointer within cluster to cluster index #.
 * cltom(x)	-- Convert cluster # to ptr to beginning of cluster.
 */
#define mtod(m, t)	((t)((m)->m_data))
79 * Header present at the beginning of every mbuf.
81 struct m_hdr {
82 struct mbuf *mh_next; /* next buffer in chain */
83 struct mbuf *mh_nextpkt; /* next chain in queue/record */
84 caddr_t mh_data; /* location of data */
85 int mh_len; /* amount of data in this mbuf */
86 int mh_flags; /* flags; see below */
87 short mh_type; /* type of data in this mbuf */
88 short mh_pad; /* padding */
89 struct netmsg_packet mh_netmsg; /* hardware->proto stack msg */
93 * Packet tag structure (see below for details).
95 struct m_tag {
96 SLIST_ENTRY(m_tag) m_tag_link; /* List of packet tags */
97 u_int16_t m_tag_id; /* Tag ID */
98 u_int16_t m_tag_len; /* Length of data */
99 u_int32_t m_tag_cookie; /* ABI/Module ID */
102 SLIST_HEAD(packet_tags, m_tag);
105 * Record/packet header in first mbuf of chain; valid only if M_PKTHDR is set.
107 * Be careful: The fields have been carefully ordered to avoid hidden padding.
108 * Keep this in mind, when adding or removing fields!
110 struct pkthdr {
111 struct ifnet *rcvif; /* rcv interface */
112 int len; /* total packet length */
113 struct packet_tags tags; /* list of packet tags */
115 /* variables for ip and tcp reassembly */
116 void *header; /* pointer to packet header */
118 /* variables for hardware checksum */
119 int csum_flags; /* flags regarding checksum */
120 int csum_data; /* data field used by csum routines */
122 /* firewall flags */
123 uint32_t fw_flags; /* flags for PF */
125 /* variables for PF processing */
126 uint16_t pf_tag; /* PF tag id */
127 uint8_t pf_routed; /* PF routing counter */
129 /* variables for ALTQ processing */
130 uint8_t ecn_af; /* address family for ECN */
131 uint32_t altq_qid; /* queue id */
132 uint32_t altq_state_hash; /* identifies 'connections' */
134 uint16_t ether_vlantag; /* ethernet 802.1p+q vlan tag */
135 uint16_t pad; /* explicit padding */
139 * Description of external storage mapped into mbuf; valid only if M_EXT is set.
141 struct m_ext {
142 caddr_t ext_buf; /* start of buffer */
143 void (*ext_free)(void *);
144 u_int ext_size; /* size of buffer, for ext_free */
145 void (*ext_ref)(void *);
146 void *ext_arg;
150 * The core of the mbuf object along with some shortcut defines for
151 * practical purposes.
153 struct mbuf {
154 struct m_hdr m_hdr;
155 union {
156 struct {
157 struct pkthdr MH_pkthdr; /* M_PKTHDR set */
158 union {
159 struct m_ext MH_ext; /* M_EXT set */
160 char MH_databuf[MHLEN];
161 } MH_dat;
162 } MH;
163 char M_databuf[MLEN]; /* !M_PKTHDR, !M_EXT */
164 } M_dat;
166 #define m_next m_hdr.mh_next
167 #define m_len m_hdr.mh_len
168 #define m_data m_hdr.mh_data
169 #define m_type m_hdr.mh_type
170 #define m_flags m_hdr.mh_flags
171 #define m_nextpkt m_hdr.mh_nextpkt
172 #define m_pkthdr M_dat.MH.MH_pkthdr
173 #define m_ext M_dat.MH.MH_dat.MH_ext
174 #define m_pktdat M_dat.MH.MH_dat.MH_databuf
175 #define m_dat M_dat.M_databuf
/*
 * Code that uses m_act should be converted to use m_nextpkt
 * instead; m_act is historical and deprecated.
 */
#define m_act		m_nextpkt

/*
 * mbuf flags.
 */
#define M_EXT		0x0001	/* has associated external storage */
#define M_PKTHDR	0x0002	/* start of record */
#define M_EOR		0x0004	/* end of record */
#define M_PROTO1	0x0008	/* protocol-specific */
#define M_PROTO2	0x0010	/* protocol-specific */
#define M_PROTO3	0x0020	/* protocol-specific */
#define M_PROTO4	0x0040	/* protocol-specific */
#define M_PROTO5	0x0080	/* protocol-specific */

/*
 * mbuf pkthdr flags (also stored in m_flags).
 */
#define M_BCAST		0x0100	/* send/received as link-level broadcast */
#define M_MCAST		0x0200	/* send/received as link-level multicast */
#define M_FRAG		0x0400	/* packet is a fragment of a larger packet */
#define M_FIRSTFRAG	0x0800	/* packet is first fragment */
#define M_LASTFRAG	0x1000	/* packet is last fragment */
#define M_CLCACHE	0x2000	/* mbuf allocated from the cluster cache */
#define M_EXT_CLUSTER	0x4000	/* standard cluster else special */
#define M_PHCACHE	0x8000	/* mbuf allocated from the pkt header cache */
#define M_NOTIFICATION	0x10000	/* notification event */
#define M_VLANTAG	0x20000	/* ether_vlantag is valid */

/*
 * Flags copied when copying m_pkthdr.
 * (Fixed: M_PROTO1 was listed twice; redundant duplicate removed.)
 */
#define M_COPYFLAGS	(M_PKTHDR|M_EOR|M_PROTO1|M_PROTO2|M_PROTO3 | \
			 M_PROTO4|M_PROTO5|M_BCAST|M_MCAST|M_FRAG | \
			 M_FIRSTFRAG|M_LASTFRAG|M_VLANTAG)
/*
 * Flags indicating hw checksum support and sw checksum requirements.
 */
#define CSUM_IP			0x0001	/* will csum IP */
#define CSUM_TCP		0x0002	/* will csum TCP */
#define CSUM_UDP		0x0004	/* will csum UDP */
#define CSUM_IP_FRAGS		0x0008	/* will csum IP fragments */
#define CSUM_FRAGMENT		0x0010	/* will do IP fragmentation */

#define CSUM_IP_CHECKED		0x0100	/* did csum IP */
#define CSUM_IP_VALID		0x0200	/*   ... the csum is valid */
#define CSUM_DATA_VALID		0x0400	/* csum_data field is valid */
#define CSUM_PSEUDO_HDR		0x0800	/* csum_data has pseudo hdr */
#define CSUM_FRAG_NOT_CHECKED	0x1000	/* did _not_ csum fragment
					 * NB: This flag is only used
					 * by IP defragmenter.
					 */

#define CSUM_DELAY_DATA		(CSUM_TCP | CSUM_UDP)
#define CSUM_DELAY_IP		(CSUM_IP)	/* XXX add ipv6 here too? */
/*
 * Flags indicating PF processing status.
 */
#define FW_MBUF_GENERATED	0x00000001
#define PF_MBUF_TAGGED		0x00000002	/* pf_tag field is valid */
#define PF_MBUF_ROUTED		0x00000004	/* pf_routed field is valid */
#define PF_MBUF_TRANSLATE_LOCALHOST \
				0x00000008
#define PF_MBUF_FRAGCACHE	0x00000010
#define ALTQ_MBUF_TAGGED	0x00000020	/* altq_qid is valid */
/* Aliases: both PF and IPFW mark self-generated packets with the same bit. */
#define PF_MBUF_GENERATED	FW_MBUF_GENERATED
#define IPFW_MBUF_GENERATED	FW_MBUF_GENERATED
#define DUMMYNET_MBUF_TAGGED	0x00000080
#define ALTQ_MBUF_STATE_HASHED	0x00000100	/* altq_state_hash is valid */
/*
 * mbuf types.
 */
#define MT_FREE		0	/* should be on free list */
#define MT_DATA		1	/* dynamic (data) allocation */
#define MT_HEADER	2	/* packet header */
#define MT_SONAME	3	/* socket name */
#define MT_TAG		4	/* volatile metadata associated to pkts */
#define MT_CONTROL	5	/* extra-data protocol message */
#define MT_OOBDATA	6	/* expedited data */
#define MT_NTYPES	7	/* number of mbuf types for mbtypes[] */
265 * General mbuf allocator statistics structure.
267 struct mbstat {
268 u_long m_mbufs; /* mbufs obtained from page pool */
269 u_long m_clusters; /* clusters obtained from page pool */
270 u_long m_spare; /* spare field */
271 u_long m_clfree; /* free clusters */
272 u_long m_drops; /* times failed to find space */
273 u_long m_wait; /* times waited for space */
274 u_long m_drain; /* times drained protocols for space */
275 u_long m_mcfail; /* times m_copym failed */
276 u_long m_mpfail; /* times m_pullup failed */
277 u_long m_msize; /* length of an mbuf */
278 u_long m_mclbytes; /* length of an mbuf cluster */
279 u_long m_minclsize; /* min length of data to allocate a cluster */
280 u_long m_mlen; /* length of data in an mbuf */
281 u_long m_mhlen; /* length of data in a header mbuf */
/*
 * Flags specifying how an allocation should be made.
 */
#define MB_DONTWAIT	0x4
#define MB_TRYWAIT	0x8
#define MB_WAIT		MB_TRYWAIT

/*
 * Mbuf to Malloc Flag Conversion.
 */
#define MBTOM(how)	((how) & MB_TRYWAIT ? M_WAITOK : M_NOWAIT)

/*
 * These are identifying numbers passed to the m_mballoc_wait function,
 * allowing us to determine whether the call came from an MGETHDR or
 * an MGET.
 */
#define MGETHDR_C	1
#define MGET_C		2
/*
 * mbuf allocation/deallocation macros (YYY deprecated, too big):
 *
 *	MGET(struct mbuf *m, int how, int type)
 *		allocates an mbuf and initializes it to contain internal data.
 *
 *	MGETHDR(struct mbuf *m, int how, int type)
 *		allocates an mbuf and initializes it to contain a packet header
 *		and internal data.
 */
#define MGET(m, how, type) do {						\
	(m) = m_get((how), (type));					\
} while (0)

#define MGETHDR(m, how, type) do {					\
	(m) = m_gethdr((how), (type));					\
} while (0)

/*
 * MCLGET adds such clusters to a normal mbuf.  The flag M_EXT is set upon
 * success.
 * Deprecated.  Use m_getcl() or m_getl() instead.
 */
#define MCLGET(m, how) do {						\
	m_mclget((m), (how));						\
} while (0)

/*
 * NB: M_COPY_PKTHDR is deprecated; use either M_MOVE_PKTHDR
 *     or m_dup_pkthdr.
 */

/*
 * Move mbuf pkthdr from "from" to "to".
 * from should have M_PKTHDR set, and to must be empty.
 * from no longer has a pkthdr after this operation.
 */
#define M_MOVE_PKTHDR(_to, _from)	m_move_pkthdr((_to), (_from))
/*
 * Set the m_data pointer of a newly-allocated mbuf (m_get/MGET) to place
 * an object of the specified size at the end of the mbuf, longword aligned.
 */
#define M_ALIGN(m, len) do {						\
	(m)->m_data += (MLEN - (len)) & ~(sizeof(long) - 1);		\
} while (0)

/*
 * As above, for mbufs allocated with m_gethdr/MGETHDR
 * or initialized by M_COPY_PKTHDR.
 */
#define MH_ALIGN(m, len) do {						\
	(m)->m_data += (MHLEN - (len)) & ~(sizeof(long) - 1);		\
} while (0)

/*
 * Check if we can write to an mbuf.
 */
#define M_EXT_WRITABLE(m)	(m_sharecount(m) == 1)
#define M_WRITABLE(m)	(!((m)->m_flags & M_EXT) || M_EXT_WRITABLE(m))

/*
 * Check if the supplied mbuf has a packet header, or else panic.
 */
#define M_ASSERTPKTHDR(m)						\
	KASSERT(m != NULL && m->m_flags & M_PKTHDR,			\
		("%s: invalid mbuf or no mbuf packet header!", __func__))
/*
 * Compute the amount of space available before the current start of data.
 * The M_EXT_WRITABLE() is a temporary, conservative safety measure: the burden
 * of checking writability of the mbuf data area rests solely with the caller.
 */
#define M_LEADINGSPACE(m)						\
	((m)->m_flags & M_EXT ?						\
	    (M_EXT_WRITABLE(m) ? (m)->m_data - (m)->m_ext.ext_buf : 0) : \
	    (m)->m_flags & M_PKTHDR ? (m)->m_data - (m)->m_pktdat :	\
	    (m)->m_data - (m)->m_dat)

/*
 * Compute the amount of space available after the end of data in an mbuf.
 * The M_WRITABLE() is a temporary, conservative safety measure: the burden
 * of checking writability of the mbuf data area rests solely with the caller.
 */
#define M_TRAILINGSPACE(m)						\
	((m)->m_flags & M_EXT ?						\
	    (M_WRITABLE(m) ? (m)->m_ext.ext_buf + (m)->m_ext.ext_size	\
		- ((m)->m_data + (m)->m_len) : 0) :			\
	    &(m)->m_dat[MLEN] - ((m)->m_data + (m)->m_len))
/*
 * Arrange to prepend space of size plen to mbuf m.
 * If a new mbuf must be allocated, how specifies whether to wait.
 * If how is MB_DONTWAIT and allocation fails, the original mbuf chain
 * is freed and m is set to NULL.
 */
#define M_PREPEND(m, plen, how) do {					\
	struct mbuf **_mmp = &(m);					\
	struct mbuf *_mm = *_mmp;					\
	int _mplen = (plen);						\
	int __mhow = (how);						\
									\
	if (M_LEADINGSPACE(_mm) >= _mplen) {				\
		_mm->m_data -= _mplen;					\
		_mm->m_len += _mplen;					\
	} else								\
		_mm = m_prepend(_mm, _mplen, __mhow);			\
	if (_mm != NULL && _mm->m_flags & M_PKTHDR)			\
		_mm->m_pkthdr.len += _mplen;				\
	*_mmp = _mm;							\
} while (0)

/* Length to m_copy to copy all. */
#define M_COPYALL	1000000000

/* Compatibility with 4.3 */
#define m_copy(m, o, l)	m_copym((m), (o), (l), MB_DONTWAIT)
422 #ifdef _KERNEL
423 extern u_int m_clalloc_wid; /* mbuf cluster wait count */
424 extern u_int m_mballoc_wid; /* mbuf wait count */
425 extern int max_linkhdr; /* largest link-level header */
426 extern int max_protohdr; /* largest protocol header */
427 extern int max_hdr; /* largest link+protocol header */
428 extern int max_datalen; /* MHLEN - max_hdr */
429 extern int mbuf_wait; /* mbuf sleep time */
430 extern int nmbclusters;
431 extern int nmbufs;
433 struct uio;
435 void m_adj(struct mbuf *, int);
436 void m_cat(struct mbuf *, struct mbuf *);
437 u_int m_countm(struct mbuf *m, struct mbuf **lastm, u_int *mbcnt);
438 void m_copyback(struct mbuf *, int, int, caddr_t);
439 void m_copydata(const struct mbuf *, int, int, caddr_t);
440 struct mbuf *m_copym(const struct mbuf *, int, int, int);
441 struct mbuf *m_copypacket(struct mbuf *, int);
442 struct mbuf *m_defrag(struct mbuf *, int);
443 struct mbuf *m_defrag_nofree(struct mbuf *, int);
444 struct mbuf *m_devget(char *, int, int, struct ifnet *,
445 void (*copy)(volatile const void *, volatile void *, size_t));
446 struct mbuf *m_dup(struct mbuf *, int);
447 int m_dup_pkthdr(struct mbuf *, const struct mbuf *, int);
448 struct mbuf *m_free(struct mbuf *);
449 void m_freem(struct mbuf *);
450 struct mbuf *m_get(int, int);
451 struct mbuf *m_getc(int len, int how, int type);
452 struct mbuf *m_getcl(int how, short type, int flags);
453 struct mbuf *m_getclr(int, int);
454 struct mbuf *m_gethdr(int, int);
455 struct mbuf *m_getm(struct mbuf *, int, int, int);
456 struct mbuf *m_last(struct mbuf *m);
457 u_int m_lengthm(struct mbuf *m, struct mbuf **lastm);
458 void m_move_pkthdr(struct mbuf *, struct mbuf *);
459 struct mbuf *m_prepend(struct mbuf *, int, int);
460 void m_print(const struct mbuf *m);
461 struct mbuf *m_pulldown(struct mbuf *, int, int, int *);
462 struct mbuf *m_pullup(struct mbuf *, int);
463 struct mbuf *m_split(struct mbuf *, int, int);
464 struct mbuf *m_uiomove(struct uio *);
465 void m_mclget(struct mbuf *m, int how);
466 int m_sharecount(struct mbuf *m);
467 void m_chtype(struct mbuf *m, int type);
469 #ifdef MBUF_DEBUG
470 void mbuftrackid(struct mbuf *, int);
471 #else
472 #define mbuftrackid(m, id) /* empty */
473 #endif
476 * Allocate the right type of mbuf for the desired total length.
477 * The mbuf returned does not necessarily cover the entire requested length.
478 * This function follows mbuf chaining policy of allowing MINCLSIZE
479 * amount of chained mbufs.
481 static __inline struct mbuf *
482 m_getl(int len, int how, int type, int flags, int *psize)
484 struct mbuf *m;
485 int size;
487 if (len >= MINCLSIZE) {
488 m = m_getcl(how, type, flags);
489 size = MCLBYTES;
490 } else if (flags & M_PKTHDR) {
491 m = m_gethdr(how, type);
492 size = MHLEN;
493 } else {
494 m = m_get(how, type);
495 size = MLEN;
497 if (psize != NULL)
498 *psize = size;
499 return (m);
503 * Get a single mbuf that covers the requested number of bytes.
504 * This function does not create mbuf chains. It explicitly marks
505 * places in the code that abuse mbufs for contiguous data buffers.
507 static __inline struct mbuf *
508 m_getb(int len, int how, int type, int flags)
510 struct mbuf *m;
511 int mbufsize = (flags & M_PKTHDR) ? MHLEN : MLEN;
513 if (len > mbufsize)
514 m = m_getcl(how, type, flags);
515 else if (flags & M_PKTHDR)
516 m = m_gethdr(how, type);
517 else
518 m = m_get(how, type);
519 return (m);
/*
 * Packets may have annotations attached by affixing a list
 * of "packet tags" to the pkthdr structure.  Packet tags are
 * dynamically allocated semi-opaque data structures that have
 * a fixed header (struct m_tag) that specifies the size of the
 * memory block and a <cookie,type> pair that identifies it.
 * The cookie is a 32-bit unique unsigned value used to identify
 * a module or ABI.  By convention this value is chosen as the
 * date+time that the module is created, expressed as the number of
 * seconds since the epoch (e.g. using date -u +'%s').  The type value
 * is an ABI/module-specific value that identifies a particular annotation
 * and is private to the module.  For compatibility with systems
 * like openbsd that define packet tags w/o an ABI/module cookie,
 * the value PACKET_ABI_COMPAT is used to implement m_tag_get and
 * m_tag_find compatibility shim functions and several tag types are
 * defined below.  Users that do not require compatibility should use
 * a private cookie value so that packet tag-related definitions
 * can be maintained privately.
 *
 * Note that the packet tag returned by m_tag_alloc has the default
 * memory alignment implemented by kmalloc.  To reference private data
 * one can use a construct like:
 *
 *	struct m_tag *mtag = m_tag_alloc(...);
 *	struct foo *p = m_tag_data(mtag);
 *
 * if the alignment of struct m_tag is sufficient for referencing members
 * of struct foo.  Otherwise it is necessary to embed struct m_tag within
 * the private data structure to insure proper alignment; e.g.
 *
 *	struct foo {
 *		struct m_tag	tag;
 *		...
 *	};
 *	struct foo *p = (struct foo *)m_tag_alloc(...);
 *	struct m_tag *mtag = &p->tag;
 */
#define PACKET_TAG_NONE				0  /* Nadda */

/* Packet tag for use with PACKET_ABI_COMPAT */
#define PACKET_TAG_IPSEC_IN_DONE		1  /* IPsec applied, in */
						   /* struct tdb_indent */
#define PACKET_TAG_IPSEC_OUT_DONE		2  /* IPsec applied, out */
						   /* struct tdb_indent */
#define PACKET_TAG_IPSEC_IN_CRYPTO_DONE		3  /* NIC IPsec crypto done */
						   /* struct tdb_indent, never added */
#define PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED	4  /* NIC IPsec crypto req'ed */
						   /* struct tdb_indent, never added */
#define PACKET_TAG_IPSEC_PENDING_TDB		5  /* Reminder to do IPsec */
						   /* struct tdb_indent, never added */
#define PACKET_TAG_ENCAP			6  /* Encap. processing */
						   /* struct ifnet *, the GIF interface */
#define PACKET_TAG_IPSEC_HISTORY		7  /* IPSEC history */
						   /* struct ipsec_history */
#define PACKET_TAG_IPV6_INPUT			8  /* IPV6 input processing */
						   /* struct ip6aux */
#define PACKET_TAG_IPFW_DIVERT			9  /* divert info */
						   /* uint16_t */
#define PACKET_TAG_DUMMYNET			15 /* dummynet info */
						   /* struct dn_pkt */
#define PACKET_TAG_CARP				28 /* CARP info */

/*
 * As a temporary and low impact solution to replace the even uglier
 * approach used so far in some parts of the network stack (which relies
 * on global variables), packet tag-like annotations are stored in MT_TAG
 * mbufs (or lookalikes) prepended to the actual mbuf chain.
 *
 *	m_type	= MT_TAG
 *	m_flags = m_tag_id
 *	m_next	= next buffer in chain.
 *
 * BE VERY CAREFUL not to pass these blocks to the mbuf handling routines.
 */
#define _m_tag_id	m_hdr.mh_flags

/* Packet tags used in the FreeBSD network stack */
#define PACKET_TAG_IPFORWARD			18 /* ipforward info */
						   /* struct sockaddr_in * as m_data */
603 /* Packet tag routines */
604 struct m_tag *m_tag_alloc(u_int32_t, int, int, int);
605 void m_tag_free(struct m_tag *);
606 void m_tag_prepend(struct mbuf *, struct m_tag *);
607 void m_tag_unlink(struct mbuf *, struct m_tag *);
608 void m_tag_delete(struct mbuf *, struct m_tag *);
609 void m_tag_delete_chain(struct mbuf *);
610 struct m_tag *m_tag_locate(struct mbuf *, u_int32_t, int, struct m_tag *);
611 struct m_tag *m_tag_copy(struct m_tag *, int);
612 int m_tag_copy_chain(struct mbuf *, const struct mbuf *, int);
613 void m_tag_init(struct mbuf *);
614 struct m_tag *m_tag_first(struct mbuf *);
615 struct m_tag *m_tag_next(struct mbuf *, struct m_tag *);
617 /* these are for openbsd compatibility */
618 #define MTAG_ABI_COMPAT 0 /* compatibility ABI */
620 static __inline void *
621 m_tag_data(struct m_tag *tag)
623 return ((void *)(tag + 1));
626 static __inline struct m_tag *
627 m_tag_get(int type, int length, int wait)
629 return m_tag_alloc(MTAG_ABI_COMPAT, type, length, wait);
632 static __inline struct m_tag *
633 m_tag_find(struct mbuf *m, int type, struct m_tag *start)
635 return m_tag_locate(m, MTAG_ABI_COMPAT, type, start);
638 #endif /* _KERNEL */
640 #endif /* _KERNEL || _KERNEL_STRUCTURES */
641 #endif /* !_SYS_MBUF_H_ */