/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)mbuf.h	8.5 (Berkeley) 2/19/95
 * $FreeBSD: src/sys/sys/mbuf.h,v 1.44.2.17 2003/04/15 06:15:02 silby Exp $
 * $DragonFly: src/sys/sys/mbuf.h,v 1.54 2008/10/19 08:39:55 sephe Exp $
 */

#ifndef _SYS_MBUF_H_
#define	_SYS_MBUF_H_

#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_PARAM_H_
#include <sys/param.h>
#endif
#ifndef _SYS_QUEUE_H_
#include <sys/queue.h>
#endif
#ifndef _NET_NETISR_H_
#include <net/netisr.h>
#endif

/*
 * Mbufs are of a single size, MSIZE (machine/param.h), which
 * includes overhead.  An mbuf may add a single "mbuf cluster" of size
 * MCLBYTES (also in machine/param.h), which has no additional overhead
 * and is used instead of the internal data area; this is done when
 * at least MINCLSIZE of data must be stored.
 */
#define	MLEN		(MSIZE - sizeof(struct m_hdr))	/* normal data len */
#define	MHLEN		(MLEN - sizeof(struct pkthdr))	/* data len w/pkthdr */
#define	MINCLSIZE	(MHLEN + 1)	/* smallest amount to put in cluster */
#define	M_MAXCOMPRESS	(MHLEN / 2)	/* max amount to copy for compression */

/*
 * Macros for type conversion:
 * mtod(m, t)	-- Convert mbuf pointer to data pointer of correct type.
 * mtocl(x)	-- Convert pointer within cluster to cluster index #
 * cltom(x)	-- Convert cluster # to ptr to beginning of cluster
 */
#define	mtod(m, t)	((t)((m)->m_data))
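
/*
 * Illustrative sketch of mtod() usage: it merely casts m_data, so the
 * caller must first make the bytes contiguous (e.g. via m_pullup()).
 * "struct ip" is used here only as an example type.
 *
 *	m = m_pullup(m, sizeof(struct ip));
 *	if (m == NULL)
 *		return;
 *	ip = mtod(m, struct ip *);
 */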

/*
 * Header present at the beginning of every mbuf.
 */
struct m_hdr {
	struct	mbuf *mh_next;		/* next buffer in chain */
	struct	mbuf *mh_nextpkt;	/* next chain in queue/record */
	caddr_t	mh_data;		/* location of data */
	int	mh_len;			/* amount of data in this mbuf */
	int	mh_flags;		/* flags; see below */
	short	mh_type;		/* type of data in this mbuf */
	short	mh_pad;			/* padding */
	struct	netmsg_packet mh_netmsg;	/* hardware->proto stack msg */
};

/*
 * Packet tag structure (see below for details).
 */
struct m_tag {
	SLIST_ENTRY(m_tag) m_tag_link;	/* List of packet tags */
	u_int16_t	m_tag_id;	/* Tag ID */
	u_int16_t	m_tag_len;	/* Length of data */
	u_int32_t	m_tag_cookie;	/* ABI/Module ID */
};

SLIST_HEAD(packet_tags, m_tag);

/*
 * Record/packet header in first mbuf of chain; valid only if M_PKTHDR is set.
 *
 * Be careful: The fields have been carefully ordered to avoid hidden padding.
 *	       Keep this in mind when adding or removing fields!
 */
struct pkthdr {
	struct	ifnet *rcvif;		/* rcv interface */
	int	len;			/* total packet length */
	struct	packet_tags tags;	/* list of packet tags */

	/* variables for ip and tcp reassembly */
	void	*header;		/* pointer to packet header */

	/* variables for hardware checksum */
	int	csum_flags;		/* flags regarding checksum */
	int	csum_data;		/* data field used by csum routines */

	/* firewall flags */
	uint32_t fw_flags;		/* flags for PF */

	/* variables for PF processing */
	uint16_t pf_tag;		/* PF tag id */
	uint8_t	pf_routed;		/* PF routing counter */

	/* variables for ALTQ processing */
	uint8_t	ecn_af;			/* address family for ECN */
	uint32_t altq_qid;		/* queue id */
	uint32_t altq_state_hash;	/* identifies 'connections' */

	uint16_t ether_vlantag;		/* ethernet 802.1p+q vlan tag */
	uint16_t hash;			/* packet hash */
};

/*
 * Description of external storage mapped into mbuf; valid only if M_EXT is set.
 */
struct m_ext {
	caddr_t	ext_buf;		/* start of buffer */
	void	(*ext_free)(void *);
	u_int	ext_size;		/* size of buffer, for ext_free */
	void	(*ext_ref)(void *);
	void	*ext_arg;
};

/*
 * The core of the mbuf object along with some shortcut defines for
 * practical purposes.
 */
struct mbuf {
	struct	m_hdr m_hdr;
	union {
		struct {
			struct	pkthdr MH_pkthdr;	/* M_PKTHDR set */
			union {
				struct	m_ext MH_ext;	/* M_EXT set */
				char	MH_databuf[MHLEN];
			} MH_dat;
		} MH;
		char	M_databuf[MLEN];		/* !M_PKTHDR, !M_EXT */
	} M_dat;
};

#define	m_next		m_hdr.mh_next
#define	m_len		m_hdr.mh_len
#define	m_data		m_hdr.mh_data
#define	m_type		m_hdr.mh_type
#define	m_flags		m_hdr.mh_flags
#define	m_nextpkt	m_hdr.mh_nextpkt
#define	m_pkthdr	M_dat.MH.MH_pkthdr
#define	m_ext		M_dat.MH.MH_dat.MH_ext
#define	m_pktdat	M_dat.MH.MH_dat.MH_databuf
#define	m_dat		M_dat.M_databuf

/*
 * Code that uses m_act should be converted to use m_nextpkt
 * instead; m_act is historical and deprecated.
 */
#define	m_act		m_nextpkt

/*
 * mbuf flags.
 */
#define	M_EXT		0x0001	/* has associated external storage */
#define	M_PKTHDR	0x0002	/* start of record */
#define	M_EOR		0x0004	/* end of record */
#define	M_PROTO1	0x0008	/* protocol-specific */
#define	M_PROTO2	0x0010	/* protocol-specific */
#define	M_PROTO3	0x0020	/* protocol-specific */
#define	M_PROTO4	0x0040	/* protocol-specific */
#define	M_PROTO5	0x0080	/* protocol-specific */

/*
 * mbuf pkthdr flags (also stored in m_flags).
 */
#define	M_BCAST		0x0100	/* send/received as link-level broadcast */
#define	M_MCAST		0x0200	/* send/received as link-level multicast */
#define	M_FRAG		0x0400	/* packet is a fragment of a larger packet */
#define	M_FIRSTFRAG	0x0800	/* packet is first fragment */
#define	M_LASTFRAG	0x1000	/* packet is last fragment */
#define	M_CLCACHE	0x2000	/* mbuf allocated from the cluster cache */
#define	M_EXT_CLUSTER	0x4000	/* standard cluster else special */
#define	M_PHCACHE	0x8000	/* mbuf allocated from the pkt header cache */
#define	M_NOTIFICATION	0x10000	/* notification event */
#define	M_VLANTAG	0x20000	/* ether_vlantag is valid */
#define	M_MPLSLABELED	0x40000	/* packet is mpls labeled */
#define	M_LENCHECKED	0x80000	/* packet proto lengths are checked */
#define	M_HASH		0x100000 /* hash field in pkthdr is valid */

/*
 * Flags copied when copying m_pkthdr.
 */
#define	M_COPYFLAGS	(M_PKTHDR | M_EOR | M_PROTO1 | M_PROTO2 | M_PROTO3 | \
			 M_PROTO4 | M_PROTO5 | M_BCAST | M_MCAST | M_FRAG | \
			 M_FIRSTFRAG | M_LASTFRAG | M_VLANTAG | M_MPLSLABELED | \
			 M_LENCHECKED | M_HASH)

/*
 * Flags indicating hw checksum support and sw checksum requirements.
 */
#define	CSUM_IP			0x0001		/* will csum IP */
#define	CSUM_TCP		0x0002		/* will csum TCP */
#define	CSUM_UDP		0x0004		/* will csum UDP */
#define	CSUM_IP_FRAGS		0x0008		/* will csum IP fragments */
#define	CSUM_FRAGMENT		0x0010		/* will do IP fragmentation */

#define	CSUM_IP_CHECKED		0x0100		/* did csum IP */
#define	CSUM_IP_VALID		0x0200		/*   ... the csum is valid */
#define	CSUM_DATA_VALID		0x0400		/* csum_data field is valid */
#define	CSUM_PSEUDO_HDR		0x0800		/* csum_data has pseudo hdr */
#define	CSUM_FRAG_NOT_CHECKED	0x1000		/* did _not_ csum fragment
						 * NB: This flag is only used
						 * by IP defragmenter.
						 */

#define	CSUM_DELAY_DATA		(CSUM_TCP | CSUM_UDP)
#define	CSUM_DELAY_IP		(CSUM_IP)	/* XXX add ipv6 here too? */

/*
 * Flags indicating PF processing status
 */
#define	FW_MBUF_GENERATED	0x00000001
#define	PF_MBUF_TAGGED		0x00000002	/* pf_tag field is valid */
#define	PF_MBUF_ROUTED		0x00000004	/* pf_routed field is valid */
#define	PF_MBUF_TRANSLATE_LOCALHOST \
				0x00000008
#define	PF_MBUF_FRAGCACHE	0x00000010
#define	ALTQ_MBUF_TAGGED	0x00000020	/* altq_qid is valid */
#define	IPFORWARD_MBUF_TAGGED	0x00000040
#define	DUMMYNET_MBUF_TAGGED	0x00000080
#define	ALTQ_MBUF_STATE_HASHED	0x00000100
#define	FW_MBUF_REDISPATCH	0x00000200
#define	PF_MBUF_GENERATED	FW_MBUF_GENERATED
#define	IPFW_MBUF_GENERATED	FW_MBUF_GENERATED

/*
 * mbuf types.
 */
#define	MT_FREE		0	/* should be on free list */
#define	MT_DATA		1	/* dynamic (data) allocation */
#define	MT_HEADER	2	/* packet header */
#define	MT_SONAME	3	/* socket name */
/*	4 was MT_TAG */
#define	MT_CONTROL	5	/* extra-data protocol message */
#define	MT_OOBDATA	6	/* expedited data */
#define	MT_NTYPES	7	/* number of mbuf types for mbtypes[] */

struct mbuf_chain {
	struct mbuf	*mc_head;
	struct mbuf	*mc_tail;
};

/*
 * General mbuf allocator statistics structure.
 */
struct mbstat {
	u_long	m_mbufs;	/* mbufs obtained from page pool */
	u_long	m_clusters;	/* clusters obtained from page pool */
	u_long	m_spare;	/* spare field */
	u_long	m_clfree;	/* free clusters */
	u_long	m_drops;	/* times failed to find space */
	u_long	m_wait;		/* times waited for space */
	u_long	m_drain;	/* times drained protocols for space */
	u_long	m_mcfail;	/* times m_copym failed */
	u_long	m_mpfail;	/* times m_pullup failed */
	u_long	m_msize;	/* length of an mbuf */
	u_long	m_mclbytes;	/* length of an mbuf cluster */
	u_long	m_minclsize;	/* min length of data to allocate a cluster */
	u_long	m_mlen;		/* length of data in an mbuf */
	u_long	m_mhlen;	/* length of data in a header mbuf */
};

/*
 * Flags specifying how an allocation should be made.
 */
#define	MB_DONTWAIT	0x4
#define	MB_TRYWAIT	0x8
#define	MB_WAIT		MB_TRYWAIT

/*
 * Mbuf to Malloc Flag Conversion.
 */
#define	MBTOM(how)	((how) & MB_TRYWAIT ? M_WAITOK : M_NOWAIT)

/*
 * These are identifying numbers passed to the m_mballoc_wait function,
 * allowing us to determine whether the call came from an MGETHDR or
 * an MGET.
 */
#define	MGETHDR_C	1
#define	MGET_C		2

/*
 * mbuf allocation/deallocation macros (YYY deprecated, too big):
 *
 *	MGET(struct mbuf *m, int how, int type)
 * allocates an mbuf and initializes it to contain internal data.
 *
 *	MGETHDR(struct mbuf *m, int how, int type)
 * allocates an mbuf and initializes it to contain a packet header
 * and internal data.
 */
#define	MGET(m, how, type) do {						\
	(m) = m_get((how), (type));					\
} while (0)

#define	MGETHDR(m, how, type) do {					\
	(m) = m_gethdr((how), (type));					\
} while (0)

/*
 * MCLGET adds such clusters to a normal mbuf.  The flag M_EXT is set upon
 * success.
 * Deprecated.  Use m_getcl() or m_getl() instead.
 */
#define	MCLGET(m, how) do {						\
	m_mclget((m), (how));						\
} while (0)

/*
 * NB: M_COPY_PKTHDR is deprecated; use either M_MOVE_PKTHDR
 *     or m_dup_pkthdr.
 */
/*
 * Move mbuf pkthdr from "from" to "to".
 * from should have M_PKTHDR set, and to must be empty.
 * from no longer has a pkthdr after this operation.
 */
#define	M_MOVE_PKTHDR(_to, _from)	m_move_pkthdr((_to), (_from))

/*
 * Set the m_data pointer of a newly-allocated mbuf (m_get/MGET) to place
 * an object of the specified size at the end of the mbuf, longword aligned.
 */
#define	M_ALIGN(m, len) do {						\
	(m)->m_data += (MLEN - (len)) & ~(sizeof(long) - 1);		\
} while (0)

/*
 * As above, for mbufs allocated with m_gethdr/MGETHDR
 * or initialized by M_COPY_PKTHDR.
 */
#define	MH_ALIGN(m, len) do {						\
	(m)->m_data += (MHLEN - (len)) & ~(sizeof(long) - 1);		\
} while (0)
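
/*
 * Illustrative sketch of MH_ALIGN(): reserve space for a small reply
 * structure at the end of a fresh header mbuf, so that link and protocol
 * headers can later be prepended cheaply.  "struct tcpiphdr" is used here
 * only as an example size.
 *
 *	MGETHDR(m, MB_DONTWAIT, MT_HEADER);
 *	if (m == NULL)
 *		return;
 *	MH_ALIGN(m, sizeof(struct tcpiphdr));
 *	m->m_len = sizeof(struct tcpiphdr);
 */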

/*
 * Check if we can write to an mbuf.
 */
#define	M_EXT_WRITABLE(m)	(m_sharecount(m) == 1)
#define	M_WRITABLE(m)	(!((m)->m_flags & M_EXT) || M_EXT_WRITABLE(m))

/*
 * Check if the supplied mbuf has a packet header, or else panic.
 */
#define	M_ASSERTPKTHDR(m)						\
	KASSERT(m != NULL && m->m_flags & M_PKTHDR,			\
		("%s: invalid mbuf or no mbuf packet header!", __func__))

/*
 * Compute the amount of space available before the current start of data.
 * The M_EXT_WRITABLE() is a temporary, conservative safety measure: the burden
 * of checking writability of the mbuf data area rests solely with the caller.
 */
#define	M_LEADINGSPACE(m)						\
	((m)->m_flags & M_EXT ?						\
	    (M_EXT_WRITABLE(m) ? (m)->m_data - (m)->m_ext.ext_buf : 0):\
	    (m)->m_flags & M_PKTHDR ? (m)->m_data - (m)->m_pktdat :	\
	    (m)->m_data - (m)->m_dat)

/*
 * Compute the amount of space available after the end of data in an mbuf.
 * The M_WRITABLE() is a temporary, conservative safety measure: the burden
 * of checking writability of the mbuf data area rests solely with the caller.
 */
#define	M_TRAILINGSPACE(m)						\
	((m)->m_flags & M_EXT ?						\
	    (M_WRITABLE(m) ? (m)->m_ext.ext_buf + (m)->m_ext.ext_size	\
		- ((m)->m_data + (m)->m_len) : 0) :			\
	    &(m)->m_dat[MLEN] - ((m)->m_data + (m)->m_len))
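
/*
 * Illustrative sketch of M_WRITABLE()/M_TRAILINGSPACE(): append "pad"
 * zero bytes in place only when the mbuf is safe to write and has room.
 * "last" and "pad" are example names, not part of this API.
 *
 *	if (M_WRITABLE(last) && M_TRAILINGSPACE(last) >= pad) {
 *		bzero(mtod(last, caddr_t) + last->m_len, pad);
 *		last->m_len += pad;
 *	}
 */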

/*
 * Arrange to prepend space of size plen to mbuf m.
 * If a new mbuf must be allocated, how specifies whether to wait.
 * If how is MB_DONTWAIT and allocation fails, the original mbuf chain
 * is freed and m is set to NULL.
 */
#define	M_PREPEND(m, plen, how) do {					\
	struct mbuf **_mmp = &(m);					\
	struct mbuf *_mm = *_mmp;					\
	int _mplen = (plen);						\
	int __mhow = (how);						\
									\
	if (M_LEADINGSPACE(_mm) >= _mplen) {				\
		_mm->m_data -= _mplen;					\
		_mm->m_len += _mplen;					\
	} else								\
		_mm = m_prepend(_mm, _mplen, __mhow);			\
	if (_mm != NULL && _mm->m_flags & M_PKTHDR)			\
		_mm->m_pkthdr.len += _mplen;				\
	*_mmp = _mm;							\
} while (0)
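
/*
 * Illustrative sketch of M_PREPEND(): grow the front of a packet by a
 * hypothetical 8-byte encapsulation header before filling it in.
 *
 *	M_PREPEND(m, 8, MB_DONTWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);
 */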

/* Length to m_copy to copy all. */
#define	M_COPYALL	1000000000

/* Compatibility with 4.3 */
#define	m_copy(m, o, l)	m_copym((m), (o), (l), MB_DONTWAIT)

#ifdef	_KERNEL
extern	u_int	 m_clalloc_wid;	/* mbuf cluster wait count */
extern	u_int	 m_mballoc_wid;	/* mbuf wait count */
extern	int	 max_linkhdr;	/* largest link-level header */
extern	int	 max_protohdr;	/* largest protocol header */
extern	int	 max_hdr;	/* largest link+protocol header */
extern	int	 max_datalen;	/* MHLEN - max_hdr */
extern	int	 mbuf_wait;	/* mbuf sleep time */
extern	int	 nmbclusters;
extern	int	 nmbufs;

struct uio;

void		 m_adj(struct mbuf *, int);
void		 m_cat(struct mbuf *, struct mbuf *);
u_int		 m_countm(struct mbuf *m, struct mbuf **lastm, u_int *mbcnt);
void		 m_copyback(struct mbuf *, int, int, caddr_t);
void		 m_copydata(const struct mbuf *, int, int, caddr_t);
struct	mbuf	*m_copym(const struct mbuf *, int, int, int);
struct	mbuf	*m_copypacket(struct mbuf *, int);
struct	mbuf	*m_defrag(struct mbuf *, int);
struct	mbuf	*m_defrag_nofree(struct mbuf *, int);
struct	mbuf	*m_devget(char *, int, int, struct ifnet *,
		    void (*copy)(volatile const void *, volatile void *, size_t));
struct	mbuf	*m_dup(struct mbuf *, int);
int		 m_dup_pkthdr(struct mbuf *, const struct mbuf *, int);
struct	mbuf	*m_free(struct mbuf *);
void		 m_freem(struct mbuf *);
struct	mbuf	*m_get(int, int);
struct	mbuf	*m_getc(int len, int how, int type);
struct	mbuf	*m_getcl(int how, short type, int flags);
struct	mbuf	*m_getclr(int, int);
struct	mbuf	*m_gethdr(int, int);
struct	mbuf	*m_getm(struct mbuf *, int, int, int);
struct	mbuf	*m_last(struct mbuf *m);
u_int		 m_lengthm(struct mbuf *m, struct mbuf **lastm);
void		 m_move_pkthdr(struct mbuf *, struct mbuf *);
struct	mbuf	*m_prepend(struct mbuf *, int, int);
void		 m_print(const struct mbuf *m);
struct	mbuf	*m_pulldown(struct mbuf *, int, int, int *);
struct	mbuf	*m_pullup(struct mbuf *, int);
struct	mbuf	*m_split(struct mbuf *, int, int);
struct	mbuf	*m_uiomove(struct uio *);
void		 m_mclget(struct mbuf *m, int how);
int		 m_sharecount(struct mbuf *m);
void		 m_chtype(struct mbuf *m, int type);
int		 m_devpad(struct mbuf *m, int padto);

#ifdef MBUF_DEBUG
void		 mbuftrackid(struct mbuf *, int);
#else
#define	mbuftrackid(m, id)	/* empty */
#endif

/*
 * Allocate the right type of mbuf for the desired total length.
 * The mbuf returned does not necessarily cover the entire requested length.
 * This function follows the mbuf chaining policy of allowing MINCLSIZE
 * amount of chained mbufs.
 */
static __inline struct mbuf *
m_getl(int len, int how, int type, int flags, int *psize)
{
	struct mbuf *m;
	int size;

	if (len >= MINCLSIZE) {
		m = m_getcl(how, type, flags);
		size = MCLBYTES;
	} else if (flags & M_PKTHDR) {
		m = m_gethdr(how, type);
		size = MHLEN;
	} else {
		m = m_get(how, type);
		size = MLEN;
	}
	if (psize != NULL)
		*psize = size;
	return (m);
}
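
/*
 * Illustrative sketch of m_getl(): obtain a single mbuf (cluster-backed
 * when the request is large enough) and record how much data it can hold.
 * "len" is an example variable.
 *
 *	struct mbuf *m;
 *	int size;
 *
 *	m = m_getl(len, MB_DONTWAIT, MT_DATA, M_PKTHDR, &size);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = min(len, size);
 */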

/*
 * Get a single mbuf that covers the requested number of bytes.
 * This function does not create mbuf chains.  It explicitly marks
 * places in the code that abuse mbufs for contiguous data buffers.
 */
static __inline struct mbuf *
m_getb(int len, int how, int type, int flags)
{
	struct mbuf *m;
	int mbufsize = (flags & M_PKTHDR) ? MHLEN : MLEN;

	if (len > mbufsize)
		m = m_getcl(how, type, flags);
	else if (flags & M_PKTHDR)
		m = m_gethdr(how, type);
	else
		m = m_get(how, type);
	return (m);
}

/*
 * Packets may have annotations attached by affixing a list
 * of "packet tags" to the pkthdr structure.  Packet tags are
 * dynamically allocated semi-opaque data structures that have
 * a fixed header (struct m_tag) that specifies the size of the
 * memory block and a <cookie,type> pair that identifies it.
 * The cookie is a 32-bit unique unsigned value used to identify
 * a module or ABI.  By convention this value is chosen as the
 * date+time that the module is created, expressed as the number of
 * seconds since the epoch (e.g. using date -u +'%s').  The type value
 * is an ABI/module-specific value that identifies a particular annotation
 * and is private to the module.  For compatibility with systems
 * like OpenBSD that define packet tags w/o an ABI/module cookie,
 * the value PACKET_ABI_COMPAT is used to implement m_tag_get and
 * m_tag_find compatibility shim functions and several tag types are
 * defined below.  Users that do not require compatibility should use
 * a private cookie value so that packet tag-related definitions
 * can be maintained privately.
 *
 * Note that the packet tag returned by m_tag_alloc has the default
 * memory alignment implemented by kmalloc.  To reference private data
 * one can use a construct like:
 *
 *	struct m_tag *mtag = m_tag_alloc(...);
 *	struct foo *p = m_tag_data(mtag);
 *
 * if the alignment of struct m_tag is sufficient for referencing members
 * of struct foo.  Otherwise it is necessary to embed struct m_tag within
 * the private data structure to ensure proper alignment; e.g.
 *
 *	struct foo {
 *		struct m_tag	tag;
 *		...
 *	};
 *	struct foo *p = (struct foo *)m_tag_alloc(...);
 *	struct m_tag *mtag = &p->tag;
 */

#define	PACKET_TAG_NONE				0  /* Nadda */

/* Packet tag for use with PACKET_ABI_COMPAT */
#define	PACKET_TAG_IPSEC_IN_DONE		1  /* IPsec applied, in */
	/* struct tdb_indent */
#define	PACKET_TAG_IPSEC_OUT_DONE		2  /* IPsec applied, out */
	/* struct tdb_indent */
#define	PACKET_TAG_IPSEC_IN_CRYPTO_DONE		3  /* NIC IPsec crypto done */
	/* struct tdb_indent, never added */
#define	PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED	4  /* NIC IPsec crypto req'ed */
	/* struct tdb_indent, never added */
#define	PACKET_TAG_IPSEC_PENDING_TDB		5  /* Reminder to do IPsec */
	/* struct tdb_indent, never added */
#define	PACKET_TAG_ENCAP			6  /* Encap. processing */
	/* struct ifnet *, the GIF interface */
#define	PACKET_TAG_IPSEC_HISTORY		7  /* IPSEC history */
	/* struct ipsec_history */
#define	PACKET_TAG_IPV6_INPUT			8  /* IPV6 input processing */
	/* struct ip6aux */
#define	PACKET_TAG_IPFW_DIVERT			9  /* divert info */
	/* struct divert_info */
#define	PACKET_TAG_DUMMYNET			15 /* dummynet info */
	/* struct dn_pkt */
#define	PACKET_TAG_IPFORWARD			18 /* ipforward info */
	/* struct sockaddr_in */
#define	PACKET_TAG_IPSRCRT			27 /* IP srcrt opts */
	/* struct ip_srcrt_opt */
#define	PACKET_TAG_CARP				28 /* CARP info */

/* Packet tag routines */
struct	m_tag	*m_tag_alloc(u_int32_t, int, int, int);
void		 m_tag_free(struct m_tag *);
void		 m_tag_prepend(struct mbuf *, struct m_tag *);
void		 m_tag_unlink(struct mbuf *, struct m_tag *);
void		 m_tag_delete(struct mbuf *, struct m_tag *);
void		 m_tag_delete_chain(struct mbuf *);
struct	m_tag	*m_tag_locate(struct mbuf *, u_int32_t, int, struct m_tag *);
struct	m_tag	*m_tag_copy(struct m_tag *, int);
int		 m_tag_copy_chain(struct mbuf *, const struct mbuf *, int);
void		 m_tag_init(struct mbuf *);
struct	m_tag	*m_tag_first(struct mbuf *);
struct	m_tag	*m_tag_next(struct mbuf *, struct m_tag *);

/* these are for openbsd compatibility */
#define	MTAG_ABI_COMPAT	0		/* compatibility ABI */

static __inline void *
m_tag_data(struct m_tag *tag)
{
	return ((void *)(tag + 1));
}

static __inline struct m_tag *
m_tag_get(int type, int length, int wait)
{
	return m_tag_alloc(MTAG_ABI_COMPAT, type, length, wait);
}

static __inline struct m_tag *
m_tag_find(struct mbuf *m, int type, struct m_tag *start)
{
	return m_tag_locate(m, MTAG_ABI_COMPAT, type, start);
}
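
/*
 * Illustrative sketch of the compatibility shims: attach a small private
 * annotation to a packet and find it again later.  MYTAG_TYPE and
 * struct my_note are hypothetical names used only for this example.
 *
 *	struct m_tag *mtag;
 *
 *	mtag = m_tag_get(MYTAG_TYPE, sizeof(struct my_note), MB_DONTWAIT);
 *	if (mtag != NULL) {
 *		((struct my_note *)m_tag_data(mtag))->value = 1;
 *		m_tag_prepend(m, mtag);
 *	}
 *	...
 *	mtag = m_tag_find(m, MYTAG_TYPE, NULL);
 */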

#endif	/* _KERNEL */

#endif	/* _KERNEL || _KERNEL_STRUCTURES */
#endif	/* !_SYS_MBUF_H_ */