/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)mbuf.h	8.5 (Berkeley) 2/19/95
 * $FreeBSD: src/sys/sys/mbuf.h,v 1.44.2.17 2003/04/15 06:15:02 silby Exp $
 */

#ifndef _SYS_MBUF_H_
#define _SYS_MBUF_H_

#if defined(_KERNEL) || defined(_KERNEL_STRUCTURES)

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_PARAM_H_
#include <sys/param.h>
#endif
#ifndef _SYS_QUEUE_H_
#include <sys/queue.h>
#endif
#ifndef _NET_NETISR_H_
#include <net/netisr.h>
#endif
#ifndef _NET_ETHERNET_H_
#include <net/ethernet.h>
#endif
/*
 * Mbufs are of a single size, MSIZE (machine/param.h), which
 * includes overhead.  An mbuf may add a single "mbuf cluster" of size
 * MCLBYTES (also in machine/param.h), which has no additional overhead
 * and is used instead of the internal data area; this is done when
 * at least MINCLSIZE of data must be stored.
 */
#define	MLEN		(MSIZE - sizeof(struct m_hdr))	/* normal data len */
#define	MHLEN		(MLEN - sizeof(struct pkthdr))	/* data len w/pkthdr */
#define	MINCLSIZE	(MHLEN + 1)	/* smallest amount to put in cluster */
#define	M_MAXCOMPRESS	(MHLEN / 2)	/* max amount to copy for compression */
/*
 * Macros for type conversion:
 * mtod(m, t)		-- Convert mbuf pointer to data pointer of correct type.
 * mtodoff(m, t, off)	-- Convert mbuf pointer at the specified offset to data
 *			   pointer of correct type.
 */
#define	mtod(m, t)		((t)((m)->m_data))
#define	mtodoff(m, t, off)	((t)((m)->m_data + (off)))
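
/*
 * Illustrative sketch (not part of the original header): a typical use of
 * mtod() is to overlay a protocol header on the data area once it is known
 * to be contiguous, e.g. in an input path (struct ip from <netinet/ip.h>
 * is only an example here):
 *
 *	struct ip *ip;
 *
 *	if (m->m_len < sizeof(struct ip) &&
 *	    (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *		return;
 *	ip = mtod(m, struct ip *);
 */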
/*
 * Header present at the beginning of every mbuf.
 */
struct m_hdr {
	struct	mbuf *mh_next;		/* next buffer in chain */
	union {
		struct	mbuf *mh_nextpkt;	/* next chain in queue/record */
		STAILQ_ENTRY(mbuf) mh_stailqpkt;
	};
	caddr_t	mh_data;		/* location of data */
	int	mh_len;			/* amount of data in this mbuf */
	int	mh_flags;		/* flags; see below */
	short	mh_type;		/* type of data in this mbuf */
	short	mh_pad;			/* padding */
	/* XXX implicit 4 bytes padding on x86_64 */
#ifdef MBUF_DEBUG
	const char *mh_lastfunc;
#endif
	union {
		struct netmsg_packet mhm_pkt;	/* hardware->proto stack msg */
		struct netmsg_pru_send mhm_snd;	/* usrspace->proto stack msg */
		struct netmsg_inarp mhm_arp;	/* proto stack arpinput msg */
		struct netmsg_ctlinput mhm_ctl;	/* proto stack ctlinput msg */
	} mh_msgu;
};

#define	mh_netmsg	mh_msgu.mhm_pkt
#define	mh_sndmsg	mh_msgu.mhm_snd
#define	mh_arpmsg	mh_msgu.mhm_arp
#define	mh_ctlmsg	mh_msgu.mhm_ctl

/* pf stuff */
struct pkthdr_pf {
	void		*hdr;		/* saved hdr pos in mbuf, for ECN */
	void		*statekey;	/* pf stackside statekey */
	u_int		 rtableid;	/* alternate routing table id */
	uint32_t	 qid;		/* queue id */
	uint16_t	 tag;		/* tag id */
	uint8_t		 flags;
	uint8_t		 routed;
	uint32_t	 state_hash;	/* identifies 'connections' */
	uint8_t		 ecn_af;	/* for altq_red */
	uint8_t		 unused01;
	uint8_t		 unused02;
	uint8_t		 unused03;
	/* XXX implicit 4 bytes padding on x86_64 */
};

/* pkthdr_pf.flags */
#define	PF_TAG_GENERATED		0x01
#define	PF_TAG_FRAGCACHE		0x02
#define	PF_TAG_TRANSLATE_LOCALHOST	0x04
#define	PF_TAG_DIVERTED			0x08
#define	PF_TAG_DIVERTED_PACKET		0x10
#define	PF_TAG_REROUTE			0x20

/*
 * Packet tag structure (see below for details).
 */
struct m_tag {
	SLIST_ENTRY(m_tag)	m_tag_link;	/* List of packet tags */
	uint16_t		m_tag_id;	/* Tag ID */
	uint16_t		m_tag_len;	/* Length of data */
	uint32_t		m_tag_cookie;	/* ABI/Module ID */
};

SLIST_HEAD(packet_tags, m_tag);

/*
 * Record/packet header in first mbuf of chain; valid only if M_PKTHDR is set.
 *
 * Be careful: The fields have been carefully ordered to avoid hidden padding.
 *	       Keep this in mind when adding or removing fields!
 */
struct pkthdr {
	struct	ifnet *rcvif;		/* rcv interface */
	struct	packet_tags tags;	/* list of packet tags */

	/* variables for ip and tcp reassembly */
	void	*header;		/* pointer to packet header */
	int	len;			/* total packet length */

	/* variables for hardware checksum */
	int	csum_flags;		/* flags regarding checksum */
	int	csum_data;		/* data field used by csum routines */
	uint16_t csum_iphlen;		/* IP header length */
					/* valid if CSUM IP|UDP|TCP|TSO */
	uint8_t	csum_thlen;		/* TCP/UDP header length */
					/* valid if CSUM UDP|TCP|TSO */
	uint8_t	csum_lhlen;		/* link header length */

	uint16_t tso_segsz;		/* TSO segment size */
	uint16_t ether_vlantag;		/* ethernet 802.1p+q vlan tag */

	uint16_t hash;			/* packet hash */
	uint16_t unused1;		/* reserved for route table id */
	uint32_t unused2;		/* reserved for codel timestamp */

	uint16_t wlan_seqno;		/* IEEE 802.11 seq no. */
	/*
	 * Valid if BRIDGE_MBUF_TAGGED is set in fw_flags, records
	 * the original ether source address (if compatible).
	 */
	uint8_t	ether_br_shost[ETHER_ADDR_LEN];

	/* firewall flags */
	uint32_t fw_flags;		/* flags for FW */

	/* variables for PF processing */
	struct pkthdr_pf pf;		/* structure for PF */
};

/*
 * Description of external storage mapped into mbuf; valid only if M_EXT is set.
 */
struct m_ext {
	caddr_t	ext_buf;		/* start of buffer */
	void	(*ext_free)(void *);
	u_int	ext_size;		/* size of buffer, for ext_free */
	void	(*ext_ref)(void *);
	void	*ext_arg;
};

/*
 * The core of the mbuf object along with some shortcut defines for
 * practical purposes.
 */
struct mbuf {
	struct	m_hdr m_hdr;
	union {
		struct {
			struct	pkthdr MH_pkthdr;	/* M_PKTHDR set */
			union {
				struct	m_ext MH_ext;	/* M_EXT set */
				char	MH_databuf[MHLEN];
			} MH_dat;
		} MH;
		char	M_databuf[MLEN];		/* !M_PKTHDR, !M_EXT */
	} M_dat;
};

#define	m_next		m_hdr.mh_next
#define	m_len		m_hdr.mh_len
#define	m_data		m_hdr.mh_data
#define	m_type		m_hdr.mh_type
#define	m_flags		m_hdr.mh_flags
#define	m_nextpkt	m_hdr.mh_nextpkt
#define	m_stailqpkt	m_hdr.mh_stailqpkt
#define	m_pkthdr	M_dat.MH.MH_pkthdr
#define	m_ext		M_dat.MH.MH_dat.MH_ext
#define	m_pktdat	M_dat.MH.MH_dat.MH_databuf
#define	m_dat		M_dat.M_databuf

/*
 * Code that uses m_act should be converted to use m_nextpkt
 * instead; m_act is historical and deprecated.
 */
#define	m_act		m_nextpkt

/*
 * mbuf flags.
 */
#define	M_EXT		0x0001	/* has associated external storage */
#define	M_PKTHDR	0x0002	/* start of record */
#define	M_EOR		0x0004	/* end of record */
#define	M_PROTO1	0x0008	/* protocol-specific */
#define	M_PROTO2	0x0010	/* protocol-specific */
#define	M_PROTO3	0x0020	/* protocol-specific */
#define	M_PROTO4	0x0040	/* protocol-specific */
#define	M_PROTO5	0x0080	/* protocol-specific */

/*
 * mbuf pkthdr flags (also stored in m_flags).
 */
#define	M_BCAST		0x0100	/* send/received as link-level broadcast */
#define	M_MCAST		0x0200	/* send/received as link-level multicast */
#define	M_FRAG		0x0400	/* packet is a fragment of a larger packet */
#define	M_FIRSTFRAG	0x0800	/* packet is first fragment */
#define	M_LASTFRAG	0x1000	/* packet is last fragment */
#define	M_CLCACHE	0x2000	/* mbuf allocated from the cluster cache */
#define	M_EXT_CLUSTER	0x4000	/* standard cluster else special */
#define	M_PHCACHE	0x8000	/* mbuf allocated from the pkt header cache */
#define	M_UNUSED16	0x10000	/* was: notification event (SCTP) */
#define	M_VLANTAG	0x20000	/* ether_vlantag is valid */
#define	M_MPLSLABELED	0x40000	/* packet is mpls labeled */
#define	M_LENCHECKED	0x80000	/* packet proto lengths are checked */
#define	M_HASH		0x100000	/* hash field in pkthdr is valid */
#define	M_PROTO6	0x200000	/* protocol-specific */
#define	M_PROTO7	0x400000	/* protocol-specific */
#define	M_PROTO8	0x800000	/* protocol-specific */
#define	M_CKHASH	0x1000000	/* hash needs software verification */
#define	M_PRIO		0x2000000	/* high priority mbuf */
#define	M_SOLOCKED	0x4000000	/* locked by userland for read() */

/*
 * Flags copied when copying m_pkthdr.
 */
#define	M_COPYFLAGS	(M_PKTHDR|M_EOR|M_PROTO1|M_PROTO2|M_PROTO3 | \
			 M_PROTO4|M_PROTO5|M_PROTO6|M_PROTO7|M_PROTO8 | \
			 M_BCAST|M_MCAST|M_FRAG|M_FIRSTFRAG|M_LASTFRAG | \
			 M_VLANTAG|M_MPLSLABELED | \
			 M_LENCHECKED|M_HASH|M_CKHASH|M_PRIO)

/*
 * Flags indicating hw checksum support and sw checksum requirements.
 */
#define	CSUM_IP			0x0001	/* will csum IP */
#define	CSUM_TCP		0x0002	/* will csum TCP */
#define	CSUM_UDP		0x0004	/* will csum UDP */
#define	CSUM_IP_FRAGS		0x0008	/* will csum IP fragments */
#define	CSUM_FRAGMENT		0x0010	/* will do IP fragmentation */

#define	CSUM_IP_CHECKED		0x0100	/* did csum IP */
#define	CSUM_IP_VALID		0x0200	/* ... the csum is valid */
#define	CSUM_DATA_VALID		0x0400	/* csum_data field is valid */
#define	CSUM_PSEUDO_HDR		0x0800	/* csum_data has pseudo hdr */
#define	CSUM_FRAG_NOT_CHECKED	0x1000	/* did _not_ csum fragment
					 * NB: This flag is only used
					 * by IP defragmenter.
					 */
#define	CSUM_TSO		0x2000	/* will do TCP segmentation */

#define	CSUM_DELAY_DATA		(CSUM_TCP | CSUM_UDP)
#define	CSUM_DELAY_IP		(CSUM_IP)	/* XXX add ipv6 here too? */
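
/*
 * Illustrative sketch (not part of the original header): on input a protocol
 * may consult these bits to avoid recomputing checksums in software when the
 * NIC already verified them.  The csum_data convention shown (final sum with
 * CSUM_PSEUDO_HDR) follows the usual BSD-derived usage and is an assumption:
 *
 *	if ((m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) &&
 *	    (m->m_pkthdr.csum_flags & CSUM_IP_VALID) == 0)
 *		goto drop;				(bad IP header csum)
 *	if ((m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
 *	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR))
 *		csum = m->m_pkthdr.csum_data ^ 0xffff;	(0 means valid)
 */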

/*
 * Flags indicating PF processing status.
 */
#define	FW_MBUF_GENERATED	0x00000001
#define	PF_MBUF_STRUCTURE	0x00000002	/* m_pkthdr.pf valid */
#define	PF_MBUF_ROUTED		0x00000004	/* pf_routed field is valid */
#define	PF_MBUF_TAGGED		0x00000008
#define	XX_MBUF_UNUSED10	0x00000010
#define	XX_MBUF_UNUSED20	0x00000020
#define	IPFORWARD_MBUF_TAGGED	0x00000040
#define	DUMMYNET_MBUF_TAGGED	0x00000080
#define	BRIDGE_MBUF_TAGGED	0x00000100
#define	FW_MBUF_REDISPATCH	0x00000200

#define	IPFW_MBUF_GENERATED	FW_MBUF_GENERATED

/*
 * mbuf types.
 */
#define	MT_FREE		0	/* should be on free list */
#define	MT_DATA		1	/* dynamic (data) allocation */
#define	MT_HEADER	2	/* packet header */
#define	MT_SONAME	3	/* socket name */
				/* 4 was MT_TAG */
#define	MT_CONTROL	5	/* extra-data protocol message */
#define	MT_OOBDATA	6	/* expedited data */
#define	MT_NTYPES	7	/* number of mbuf types for mbtypes[] */

/*
 * General mbuf allocator statistics structure.
 *
 * NOTE: Make sure this struct's size is a multiple of the cache line size.
 */
struct mbstat {
	u_long	m_mbufs;	/* mbufs obtained from page pool */
	u_long	m_clusters;	/* clusters obtained from page pool */
	u_long	m_jclusters;	/* jclusters obtained from page pool */
	u_long	m_clfree;	/* free clusters */
	u_long	m_drops;	/* times failed to find space */
	u_long	m_wait;		/* times waited for space */
	u_long	m_drain;	/* times drained protocols for space */
	u_long	m_mcfail;	/* times m_copym failed */
	u_long	m_mpfail;	/* times m_pullup failed */
	u_long	m_msize;	/* length of an mbuf */
	u_long	m_mclbytes;	/* length of an mbuf cluster */
	u_long	m_mjumpagesize;	/* length of a jumbo mbuf cluster */
	u_long	m_minclsize;	/* min length of data to allocate a cluster */
	u_long	m_mlen;		/* length of data in an mbuf */
	u_long	m_mhlen;	/* length of data in a header mbuf */
	u_long	m_pad;		/* pad to cache line size (64B) */
};

/*
 * objcache(9) ocflags sanitizing.
 */
#define	MB_OCFLAG(how)	((how) & M_WAITOK ? M_WAITOK : M_NOWAIT)

/*
 * These are identifying numbers passed to the m_mballoc_wait function,
 * allowing us to determine whether the call came from an MGETHDR or
 * an MGET.
 */
#define	MGETHDR_C	1
#define	MGET_C		2

/*
 * mbuf allocation/deallocation macros (YYY deprecated, too big):
 *
 *	MGET(struct mbuf *m, int how, int type)
 * allocates an mbuf and initializes it to contain internal data.
 *
 *	MGETHDR(struct mbuf *m, int how, int type)
 * allocates an mbuf and initializes it to contain a packet header
 * and internal data.
 */
#define	MGET(m, how, type) do {					\
	(m) = m_get((how), (type));				\
} while (0)

#define	MGETHDR(m, how, type) do {				\
	(m) = m_gethdr((how), (type));				\
} while (0)

/*
 * MCLGET adds an mbuf cluster to a normal mbuf; the flag M_EXT is set
 * upon success.
 * Deprecated.  Use m_getcl() or m_getl() instead.
 */
#define	MCLGET(m, how) do {					\
	m_mclget((m), (how));					\
} while (0)
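
/*
 * Illustrative sketch (not part of the original header): the preferred way
 * to obtain a packet-header mbuf with an attached cluster is a single
 * m_getcl() call; the older MGETHDR()+MCLGET() two-step form is shown only
 * for contrast:
 *
 *	struct mbuf *m;
 *
 *	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *
 *	MGETHDR(m, M_NOWAIT, MT_DATA);		(deprecated style)
 *	if (m != NULL) {
 *		MCLGET(m, M_NOWAIT);
 *		if ((m->m_flags & M_EXT) == 0) {
 *			m_freem(m);
 *			m = NULL;
 *		}
 *	}
 */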

/*
 * NB: M_COPY_PKTHDR is deprecated; use either M_MOVE_PKTHDR
 *     or m_dup_pkthdr.
 */

/*
 * Move mbuf pkthdr from "from" to "to".
 * from should have M_PKTHDR set, and to must be empty.
 * from no longer has a pkthdr after this operation.
 */
#define	M_MOVE_PKTHDR(_to, _from)	m_move_pkthdr((_to), (_from))

/*
 * Set the m_data pointer of a newly-allocated mbuf (m_get/MGET) to place
 * an object of the specified size at the end of the mbuf, longword aligned.
 */
#define	M_ALIGN(m, len) do {					\
	(m)->m_data += (MLEN - (len)) & ~(sizeof(long) - 1);	\
} while (0)

/*
 * As above, for mbufs allocated with m_gethdr/MGETHDR
 * or initialized by M_COPY_PKTHDR.
 */
#define	MH_ALIGN(m, len) do {					\
	(m)->m_data += (MHLEN - (len)) & ~(sizeof(long) - 1);	\
} while (0)
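
/*
 * Illustrative sketch (not part of the original header): MH_ALIGN() is
 * typically used right after allocating a header mbuf to place a small,
 * fixed-size object at the end of the data area, so that later
 * M_PREPEND()s find leading space (struct tcphdr is only an example):
 *
 *	struct mbuf *m;
 *
 *	MGETHDR(m, M_NOWAIT, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	MH_ALIGN(m, sizeof(struct tcphdr));
 *	m->m_len = sizeof(struct tcphdr);
 *	... fill in the header via mtod(m, struct tcphdr *) ...
 */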

/*
 * Check if we can write to an mbuf.
 */
#define	M_EXT_WRITABLE(m)	(m_sharecount(m) == 1)
#define	M_WRITABLE(m)		(!((m)->m_flags & M_EXT) || M_EXT_WRITABLE(m))

/*
 * Check if the supplied mbuf has a packet header, or else panic.
 */
#define	M_ASSERTPKTHDR(m)					\
	KASSERT(m != NULL && m->m_flags & M_PKTHDR,		\
		("%s: invalid mbuf or no mbuf packet header!", __func__))

/*
 * Compute the amount of space available before the current start of data.
 * The M_EXT_WRITABLE() is a temporary, conservative safety measure: the burden
 * of checking writability of the mbuf data area rests solely with the caller.
 */
#define	M_LEADINGSPACE(m)						\
	((m)->m_flags & M_EXT ?						\
	    (M_EXT_WRITABLE(m) ? (m)->m_data - (m)->m_ext.ext_buf : 0) :	\
	    (m)->m_flags & M_PKTHDR ? (m)->m_data - (m)->m_pktdat :	\
	    (m)->m_data - (m)->m_dat)

/*
 * Compute the amount of space available after the end of data in an mbuf.
 * The M_WRITABLE() is a temporary, conservative safety measure: the burden
 * of checking writability of the mbuf data area rests solely with the caller.
 */
#define	M_TRAILINGSPACE(m)						\
	((m)->m_flags & M_EXT ?						\
	    (M_WRITABLE(m) ? (m)->m_ext.ext_buf + (m)->m_ext.ext_size	\
		- ((m)->m_data + (m)->m_len) : 0) :			\
	    &(m)->m_dat[MLEN] - ((m)->m_data + (m)->m_len))
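
/*
 * Illustrative sketch (not part of the original header): an append path can
 * test M_TRAILINGSPACE() before copying into the current mbuf and fall back
 * to chaining another one; m_append() declared below wraps this pattern:
 *
 *	if (M_TRAILINGSPACE(m) >= len) {
 *		bcopy(src, mtod(m, caddr_t) + m->m_len, len);
 *		m->m_len += len;
 *	} else {
 *		... allocate a new mbuf via m_getl() and chain it ...
 *	}
 */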

/*
 * Arrange to prepend space of size plen to mbuf m.
 * If a new mbuf must be allocated, how specifies whether to wait.
 * If how is M_NOWAIT and allocation fails, the original mbuf chain
 * is freed and m is set to NULL.
 */
#define	M_PREPEND(m, plen, how) do {					\
	struct mbuf **_mmp = &(m);					\
	struct mbuf *_mm = *_mmp;					\
	int _mplen = (plen);						\
	int __mhow = (how);						\
									\
	if (M_LEADINGSPACE(_mm) >= _mplen) {				\
		_mm->m_data -= _mplen;					\
		_mm->m_len += _mplen;					\
	} else								\
		_mm = m_prepend(_mm, _mplen, __mhow);			\
	if (_mm != NULL && (_mm->m_flags & M_PKTHDR))			\
		_mm->m_pkthdr.len += _mplen;				\
	*_mmp = _mm;							\
} while (0)
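
/*
 * Illustrative sketch (not part of the original header): prepending
 * link-level framing in an output path (struct ether_header comes from
 * <net/ethernet.h>, included above):
 *
 *	struct ether_header *eh;
 *
 *	M_PREPEND(m, sizeof(struct ether_header), M_NOWAIT);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	eh = mtod(m, struct ether_header *);
 *	... fill in eh->ether_dhost, eh->ether_shost, eh->ether_type ...
 */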

/* Length to m_copy to copy all. */
#define	M_COPYALL	1000000000

/* Compatibility with 4.3 */
#define	m_copy(m, o, l)	m_copym((m), (o), (l), M_NOWAIT)

#ifdef _KERNEL
extern u_int	 m_clalloc_wid;	/* mbuf cluster wait count */
extern u_int	 m_mballoc_wid;	/* mbuf wait count */
extern int	 max_linkhdr;	/* largest link-level header */
extern int	 max_protohdr;	/* largest protocol header */
extern int	 max_hdr;	/* largest link+protocol header */
extern int	 max_datalen;	/* MHLEN - max_hdr */
extern int	 mbuf_wait;	/* mbuf sleep time */
extern int	 nmbclusters;
extern int	 nmbufs;

struct uio;

void		 mcl_inclimit(int);
void		 mjcl_inclimit(int);
void		 mb_inclimit(int);
void		 m_adj(struct mbuf *, int);
void		 m_align(struct mbuf *, int);
int		 m_apply(struct mbuf *, int, int,
		    int (*)(void *, void *, u_int), void *);
int		 m_append(struct mbuf *, int, c_caddr_t);
void		 m_cat(struct mbuf *, struct mbuf *);
u_int		 m_countm(struct mbuf *m, struct mbuf **lastm, u_int *mbcnt);
void		 m_copyback(struct mbuf *, int, int, caddr_t);
void		 m_copydata(const struct mbuf *, int, int, caddr_t);
struct mbuf	*m_copym(const struct mbuf *, int, int, int);
struct mbuf	*m_copypacket(struct mbuf *, int);
struct mbuf	*m_defrag(struct mbuf *, int);
struct mbuf	*m_defrag_nofree(struct mbuf *, int);
struct mbuf	*m_devget(char *, int, int, struct ifnet *);
struct mbuf	*m_dup(struct mbuf *, int);
struct mbuf	*m_dup_data(struct mbuf *, int);
int		 m_dup_pkthdr(struct mbuf *, const struct mbuf *, int);
void		 m_extadd(struct mbuf *, caddr_t, u_int, void (*)(void *),
		    void (*)(void *), void *);
#ifdef MBUF_DEBUG
struct mbuf	*_m_free(struct mbuf *, const char *name);
void		 _m_freem(struct mbuf *, const char *name);
#else
struct mbuf	*m_free(struct mbuf *);
void		 m_freem(struct mbuf *);
#endif
struct mbuf	*m_get(int, int);
struct mbuf	*m_getc(int len, int how, int type);
struct mbuf	*m_getcl(int how, short type, int flags);
struct mbuf	*m_getjcl(int how, short type, int flags, size_t size);
struct mbuf	*m_getclr(int, int);
struct mbuf	*m_gethdr(int, int);
struct mbuf	*m_getm(struct mbuf *, int, int, int);
struct mbuf	*m_getptr(struct mbuf *, int, int *);
struct mbuf	*m_last(struct mbuf *m);
u_int		 m_lengthm(struct mbuf *m, struct mbuf **lastm);
void		 m_move_pkthdr(struct mbuf *, struct mbuf *);
struct mbuf	*m_prepend(struct mbuf *, int, int);
void		 m_print(const struct mbuf *m);
struct mbuf	*m_pulldown(struct mbuf *, int, int, int *);
struct mbuf	*m_pullup(struct mbuf *, int);
struct mbuf	*m_split(struct mbuf *, int, int);
struct mbuf	*m_uiomove(struct uio *);
struct mbuf	*m_unshare(struct mbuf *, int);
void		 m_mclget(struct mbuf *m, int how);
int		 m_sharecount(struct mbuf *m);
void		 m_chtype(struct mbuf *m, int type);
int		 m_devpad(struct mbuf *m, int padto);

#ifdef MBUF_DEBUG

void		 mbuftrackid(struct mbuf *, int);

#define	m_free(m)	_m_free(m, __func__)
#define	m_freem(m)	_m_freem(m, __func__)

#else

#define	mbuftrackid(m, id)	/* empty */

#endif

static __inline void
m_sethash(struct mbuf *m, uint16_t hash)
{
	m->m_flags |= M_HASH;
	m->m_pkthdr.hash = hash;
}
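
/*
 * Illustrative sketch (not part of the original header): a driver receive
 * path that obtains an RSS/Toeplitz hash from the NIC can record it so
 * M_HASH consumers need not rehash the packet (hw_hash is a hypothetical
 * driver-supplied value):
 *
 *	m_sethash(m, hw_hash & 0xffff);
 */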

/*
 * Allocate the right type of mbuf for the desired total length.
 * The mbuf returned does not necessarily cover the entire requested length.
 * This function follows the mbuf chaining policy of allowing up to
 * MINCLSIZE bytes of data in chained plain mbufs before a cluster is used.
 */
static __inline struct mbuf *
m_getl(int len, int how, int type, int flags, int *psize)
{
	struct mbuf *m;
	int size;

	if (len >= MINCLSIZE) {
		m = m_getcl(how, type, flags);
		size = MCLBYTES;
	} else if (flags & M_PKTHDR) {
		m = m_gethdr(how, type);
		size = MHLEN;
	} else {
		m = m_get(how, type);
		size = MLEN;
	}
	if (psize != NULL)
		*psize = size;
	return (m);
}
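
/*
 * Illustrative sketch (not part of the original header): a receive path can
 * let m_getl() pick between a plain mbuf and a cluster for a frame of
 * frame_len bytes (frame_len is a hypothetical variable):
 *
 *	struct mbuf *m;
 *	int size;
 *
 *	m = m_getl(frame_len, M_NOWAIT, MT_DATA, M_PKTHDR, &size);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = m->m_pkthdr.len = frame_len;	(assumes frame_len <= size)
 */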

static __inline struct mbuf *
m_getlj(int len, int how, int type, int flags, int *psize)
{
	if (len > MCLBYTES) {
		struct mbuf *m;

		m = m_getjcl(how, type, flags, MJUMPAGESIZE);
		if (psize != NULL)
			*psize = MJUMPAGESIZE;
		return m;
	}
	return m_getl(len, how, type, flags, psize);
}

/*
 * Get a single mbuf that covers the requested number of bytes.
 * This function does not create mbuf chains.  It explicitly marks
 * places in the code that abuse mbufs for contiguous data buffers.
 */
static __inline struct mbuf *
m_getb(int len, int how, int type, int flags)
{
	struct mbuf *m;
	int mbufsize = (flags & M_PKTHDR) ? MHLEN : MLEN;

	if (len > mbufsize)
		m = m_getcl(how, type, flags);
	else if (flags & M_PKTHDR)
		m = m_gethdr(how, type);
	else
		m = m_get(how, type);
	return (m);
}

/*
 * Packets may have annotations attached by affixing a list
 * of "packet tags" to the pkthdr structure.  Packet tags are
 * dynamically allocated semi-opaque data structures that have
 * a fixed header (struct m_tag) that specifies the size of the
 * memory block and a <cookie,type> pair that identifies it.
 * The cookie is a 32-bit unique unsigned value used to identify
 * a module or ABI.  By convention this value is chosen as the
 * date+time that the module is created, expressed as the number of
 * seconds since the epoch (e.g. using date -u +'%s').  The type value
 * is an ABI/module-specific value that identifies a particular annotation
 * and is private to the module.  For compatibility with systems
 * like OpenBSD that define packet tags w/o an ABI/module cookie,
 * the value PACKET_ABI_COMPAT is used to implement m_tag_get and
 * m_tag_find compatibility shim functions and several tag types are
 * defined below.  Users that do not require compatibility should use
 * a private cookie value so that packet tag-related definitions
 * can be maintained privately.
 *
 * Note that the packet tag returned by m_tag_alloc has the default
 * memory alignment implemented by kmalloc.  To reference private data
 * one can use a construct like:
 *
 *	struct m_tag *mtag = m_tag_alloc(...);
 *	struct foo *p = m_tag_data(mtag);
 *
 * if the alignment of struct m_tag is sufficient for referencing members
 * of struct foo.  Otherwise it is necessary to embed struct m_tag within
 * the private data structure to ensure proper alignment; e.g.
 *
 *	struct foo {
 *		struct m_tag	tag;
 *		...
 *	};
 *	struct foo *p = (struct foo *)m_tag_alloc(...);
 *	struct m_tag *mtag = &p->tag;
 */

#define	PACKET_TAG_NONE				0  /* Nadda */

/* Packet tags for use with PACKET_ABI_COMPAT */
#define	PACKET_TAG_IPSEC_IN_DONE		1  /* IPsec applied, in */
						/* struct tdb_indent */
#define	PACKET_TAG_IPSEC_OUT_DONE		2  /* IPsec applied, out */
						/* struct tdb_indent */
#define	PACKET_TAG_IPSEC_IN_CRYPTO_DONE		3  /* NIC IPsec crypto done */
						/* struct tdb_indent, never added */
#define	PACKET_TAG_IPSEC_OUT_CRYPTO_NEEDED	4  /* NIC IPsec crypto req'ed */
						/* struct tdb_indent, never added */
#define	PACKET_TAG_IPSEC_PENDING_TDB		5  /* Reminder to do IPsec */
						/* struct tdb_indent, never added */
#define	PACKET_TAG_ENCAP			6  /* Encap. processing */
						/* struct ifnet *, the GIF interface */
#define	PACKET_TAG_IPSEC_HISTORY		7  /* IPSEC history */
						/* struct ipsec_history */
#define	PACKET_TAG_IPV6_INPUT			8  /* IPV6 input processing */
						/* struct ip6aux */
#define	PACKET_TAG_IPFW_DIVERT			9  /* divert info */
						/* struct divert_info */
#define	PACKET_TAG_DUMMYNET			15 /* dummynet info */
						/* struct dn_pkt */
#define	PACKET_TAG_IPFORWARD			18 /* ipforward info */
						/* struct sockaddr_in */
#define	PACKET_TAG_IPSRCRT			27 /* IP srcrt opts */
						/* struct ip_srcrt_opt */
#define	PACKET_TAG_CARP				28 /* CARP info */
						/* struct pf_mtag */
#define	PACKET_TAG_PF				29 /* PF info */

#define	PACKET_TAG_PF_DIVERT			0x0200 /* pf(4) diverted packet */

/* Packet tag routines */
struct m_tag	*m_tag_alloc(uint32_t, int, int, int);
void		 m_tag_free(struct m_tag *);
void		 m_tag_prepend(struct mbuf *, struct m_tag *);
void		 m_tag_unlink(struct mbuf *, struct m_tag *);
void		 m_tag_delete(struct mbuf *, struct m_tag *);
void		 m_tag_delete_chain(struct mbuf *);
struct m_tag	*m_tag_locate(struct mbuf *, uint32_t, int, struct m_tag *);
struct m_tag	*m_tag_copy(struct m_tag *, int);
int		 m_tag_copy_chain(struct mbuf *, const struct mbuf *, int);
void		 m_tag_init(struct mbuf *);
struct m_tag	*m_tag_first(struct mbuf *);
struct m_tag	*m_tag_next(struct mbuf *, struct m_tag *);

/* These are for OpenBSD compatibility. */
#define	MTAG_ABI_COMPAT	0		/* compatibility ABI */

static __inline void *
m_tag_data(struct m_tag *tag)
{
	return ((void *)(tag + 1));
}

static __inline struct m_tag *
m_tag_get(int type, int length, int wait)
{
	return m_tag_alloc(MTAG_ABI_COMPAT, type, length, wait);
}

static __inline struct m_tag *
m_tag_find(struct mbuf *m, int type, struct m_tag *start)
{
	return m_tag_locate(m, MTAG_ABI_COMPAT, type, start);
}
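
/*
 * Illustrative sketch (not part of the original header): attaching and later
 * retrieving a compatibility-cookie tag.  MYTAG_TYPE and struct mytag_data
 * are hypothetical names local to the consuming module:
 *
 *	struct m_tag *mtag;
 *
 *	mtag = m_tag_get(MYTAG_TYPE, sizeof(struct mytag_data), M_NOWAIT);
 *	if (mtag != NULL) {
 *		((struct mytag_data *)m_tag_data(mtag))->field = value;
 *		m_tag_prepend(m, mtag);
 *	}
 *	...
 *	mtag = m_tag_find(m, MYTAG_TYPE, NULL);
 *	if (mtag != NULL)
 *		value = ((struct mytag_data *)m_tag_data(mtag))->field;
 */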

struct mbufq {
	STAILQ_HEAD(, mbuf)	mq_head;
	int			mq_len;
	int			mq_maxlen;
};

static inline void
mbufq_init(struct mbufq *mq, int maxlen)
{
	STAILQ_INIT(&mq->mq_head);
	mq->mq_maxlen = maxlen;
	mq->mq_len = 0;
}

static inline struct mbuf *
mbufq_flush(struct mbufq *mq)
{
	struct mbuf *m;

	m = STAILQ_FIRST(&mq->mq_head);
	STAILQ_INIT(&mq->mq_head);
	mq->mq_len = 0;
	return (m);
}

static inline void
mbufq_drain(struct mbufq *mq)
{
	struct mbuf *m, *n;

	n = mbufq_flush(mq);
	while ((m = n) != NULL) {
		n = STAILQ_NEXT(m, m_stailqpkt);
		m_freem(m);
	}
}

static inline struct mbuf *
mbufq_first(const struct mbufq *mq)
{
	return (STAILQ_FIRST(&mq->mq_head));
}

static inline struct mbuf *
mbufq_last(const struct mbufq *mq)
{
	return (STAILQ_LAST(&mq->mq_head, mbuf, m_stailqpkt));
}

static inline int
mbufq_full(const struct mbufq *mq)
{
	return (mq->mq_len >= mq->mq_maxlen);
}

static inline int
mbufq_len(const struct mbufq *mq)
{
	return (mq->mq_len);
}

static inline int
mbufq_enqueue(struct mbufq *mq, struct mbuf *m)
{
	if (mbufq_full(mq))
		return (ENOBUFS);
	STAILQ_INSERT_TAIL(&mq->mq_head, m, m_stailqpkt);
	mq->mq_len++;
	return (0);
}

static inline struct mbuf *
mbufq_dequeue(struct mbufq *mq)
{
	struct mbuf *m;

	m = STAILQ_FIRST(&mq->mq_head);
	if (m) {
		STAILQ_REMOVE_HEAD(&mq->mq_head, m_stailqpkt);
		m->m_nextpkt = NULL;
		mq->mq_len--;
	}
	return (m);
}

static inline void
mbufq_prepend(struct mbufq *mq, struct mbuf *m)
{
	STAILQ_INSERT_HEAD(&mq->mq_head, m, m_stailqpkt);
	mq->mq_len++;
}
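
/*
 * Illustrative sketch (not part of the original header): a simple use of the
 * mbufq API, e.g. a driver staging received packets before handing them to
 * the stack:
 *
 *	struct mbufq q;
 *	struct mbuf *m;
 *
 *	mbufq_init(&q, 128);			(bound the backlog)
 *	if (mbufq_enqueue(&q, m) != 0)
 *		m_freem(m);			(queue full, ENOBUFS)
 *	while ((m = mbufq_dequeue(&q)) != NULL)
 *		... process and free m ...
 *	mbufq_drain(&q);			(frees anything left over)
 */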

#endif	/* _KERNEL */

#endif	/* _KERNEL || _KERNEL_STRUCTURES */
#endif	/* !_SYS_MBUF_H_ */