/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
 * $DragonFly: src/sys/kern/uipc_mbuf.c,v 1.70 2008/11/20 14:21:01 sephe Exp $
 */
#include "opt_param.h"
#include "opt_mbuf_stress_test.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/objcache.h>
#include <sys/protosw.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/thread2.h>

#include <machine/atomic.h>
#include <machine/limits.h>

#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
/*
 * mbuf cluster meta-data
 */

/*
 * mbuf tracking for debugging purposes
 */
static MALLOC_DEFINE(M_MTRACK, "mtrack", "mtrack");

RB_HEAD(mbuf_rb_tree, mbtrack);
RB_PROTOTYPE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *);

	RB_ENTRY(mbtrack) rb_node;

mbtrack_cmp(struct mbtrack *mb1, struct mbtrack *mb2)

RB_GENERATE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *, m);

struct mbuf_rb_tree	mbuf_track_root;

mbuftrack(struct mbuf *m)

	mbt = kmalloc(sizeof(*mbt), M_MTRACK, M_INTWAIT|M_ZERO);
	if (mbuf_rb_tree_RB_INSERT(&mbuf_track_root, mbt))
		panic("mbuftrack: mbuf %p already being tracked\n", m);

mbufuntrack(struct mbuf *m)

	mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
		kprintf("mbufuntrack: mbuf %p was not tracked\n", m);
		mbuf_rb_tree_RB_REMOVE(&mbuf_track_root, mbt);
		kfree(mbt, M_MTRACK);

mbuftrackid(struct mbuf *m, int trackid)

		mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
			mbt->trackid = trackid;

mbuftrack_callback(struct mbtrack *mbt, void *arg)

	struct sysctl_req *req = arg;

	ksnprintf(buf, sizeof(buf), "mbuf %p track %d\n", mbt->m, mbt->trackid);

	error = SYSCTL_OUT(req, buf, strlen(buf));

mbuftrack_show(SYSCTL_HANDLER_ARGS)

	error = mbuf_rb_tree_RB_SCAN(&mbuf_track_root, NULL,
				     mbuftrack_callback, req);

SYSCTL_PROC(_kern_ipc, OID_AUTO, showmbufs, CTLFLAG_RD|CTLTYPE_STRING,
	    0, 0, mbuftrack_show, "A", "Show all in-use mbufs");
#define mbufuntrack(m)

static void mbinit(void *);
SYSINIT(mbuf, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, mbinit, NULL)

static u_long	mbtypes[SMP_MAXCPU][MT_NTYPES];

static struct mbstat mbstat[SMP_MAXCPU];

#ifdef MBUF_STRESS_TEST
int	m_defragrandomfailures;
#endif

struct objcache *mbuf_cache, *mbufphdr_cache;
struct objcache *mclmeta_cache;
struct objcache *mbufcluster_cache, *mbufphdrcluster_cache;

SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	   &max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	   &max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	   &max_datalen, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,

static int do_mbstat(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_kern_ipc, KIPC_MBSTAT, mbstat, CTLTYPE_STRUCT|CTLFLAG_RD,
	    0, 0, do_mbstat, "S,mbstat", "");

static int do_mbtypes(SYSCTL_HANDLER_ARGS);

SYSCTL_PROC(_kern_ipc, OID_AUTO, mbtypes, CTLTYPE_ULONG|CTLFLAG_RD,
	    0, 0, do_mbtypes, "LU", "");
do_mbstat(SYSCTL_HANDLER_ARGS)

	struct mbstat mbstat_total;
	struct mbstat *mbstat_totalp;

	bzero(&mbstat_total, sizeof(mbstat_total));
	mbstat_totalp = &mbstat_total;

	for (i = 0; i < ncpus; i++)
		mbstat_total.m_mbufs += mbstat[i].m_mbufs;
		mbstat_total.m_clusters += mbstat[i].m_clusters;
		mbstat_total.m_spare += mbstat[i].m_spare;
		mbstat_total.m_clfree += mbstat[i].m_clfree;
		mbstat_total.m_drops += mbstat[i].m_drops;
		mbstat_total.m_wait += mbstat[i].m_wait;
		mbstat_total.m_drain += mbstat[i].m_drain;
		mbstat_total.m_mcfail += mbstat[i].m_mcfail;
		mbstat_total.m_mpfail += mbstat[i].m_mpfail;

	/*
	 * The following fields are not cumulative fields so just
	 * get their values once.
	 */
	mbstat_total.m_msize = mbstat[0].m_msize;
	mbstat_total.m_mclbytes = mbstat[0].m_mclbytes;
	mbstat_total.m_minclsize = mbstat[0].m_minclsize;
	mbstat_total.m_mlen = mbstat[0].m_mlen;
	mbstat_total.m_mhlen = mbstat[0].m_mhlen;

	return(sysctl_handle_opaque(oidp, mbstat_totalp, sizeof(mbstat_total), req));

do_mbtypes(SYSCTL_HANDLER_ARGS)

	u_long totals[MT_NTYPES];

	for (i = 0; i < MT_NTYPES; i++)

	for (i = 0; i < ncpus; i++)
		for (j = 0; j < MT_NTYPES; j++)
			totals[j] += mbtypes[i][j];

	return(sysctl_handle_opaque(oidp, totals, sizeof(totals), req));
/*
 * These are read-only because we do not currently have any code
 * to adjust the objcache limits after the fact.  The variables
 * may only be set as boot-time tunables.
 */
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
	   &nmbclusters, 0, "Maximum number of mbuf clusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
	   "Maximum number of mbufs available");

SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
	   &m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
	   &m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
	   &m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
	   &m_defragfailure, 0, "");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
	   &m_defragrandomfailures, 0, "");
#endif

static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");
static MALLOC_DEFINE(M_MBUFCL, "mbufcl", "mbufcl");
static MALLOC_DEFINE(M_MCLMETA, "mclmeta", "mclmeta");

static void m_reclaim (void);
static void m_mclref(void *arg);
static void m_mclfree(void *arg);

#define NMBCLUSTERS	(512 + maxusers * 16)
#define NMBUFS		(nmbclusters * 2)

/*
 * Perform sanity checks of tunables declared above.
 */
tunable_mbinit(void *dummy)

	/*
	 * This has to be done before VM init.
	 */
	nmbclusters = NMBCLUSTERS;
	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);

	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);

	if (nmbufs < nmbclusters * 2)
		nmbufs = nmbclusters * 2;

SYSINIT(tunable_mbinit, SI_BOOT1_TUNABLES, SI_ORDER_ANY,
	tunable_mbinit, NULL);

/* "number of clusters of pages" */
/*
 * The mbuf object cache only guarantees that m_next and m_nextpkt are
 * NULL and that m_data points to the beginning of the data area.  In
 * particular, m_len and m_pkthdr.len are uninitialized.  It is the
 * responsibility of the caller to initialize those fields before use.
 */
static boolean_t __inline
mbuf_ctor(void *obj, void *private, int ocflags)

	struct mbuf *m = obj;

	m->m_data = m->m_dat;
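
/*
 * Illustrative sketch, not from the original sources: a typical caller
 * honoring the constructor contract described above.  Because the object
 * cache leaves m_len and m_pkthdr.len uninitialized, the caller sets both
 * before using the data area.  The function name example_small_pkt() is
 * made up for this example.
 */
static struct mbuf *
example_small_pkt(const void *src, int len)
{
	struct mbuf *m;

	if (len > MHLEN)		/* payload must fit in the header mbuf */
		return (NULL);
	m = m_gethdr(MB_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m->m_len = len;			/* caller-initialized, see note above */
	m->m_pkthdr.len = len;
	bcopy(src, mtod(m, void *), len);
	return (m);
}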
/*
 * Initialize the mbuf and the packet header fields.
 */
mbufphdr_ctor(void *obj, void *private, int ocflags)

	struct mbuf *m = obj;

	m->m_data = m->m_pktdat;
	m->m_flags = M_PKTHDR | M_PHCACHE;

	m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
	SLIST_INIT(&m->m_pkthdr.tags);
	m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
	m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */
/*
 * A mbcluster object consists of a 2K (MCLBYTES) cluster and a refcount.
 */
mclmeta_ctor(void *obj, void *private, int ocflags)

	struct mbcluster *cl = obj;

	if (ocflags & M_NOWAIT)
		buf = kmalloc(MCLBYTES, M_MBUFCL, M_NOWAIT | M_ZERO);
	else
		buf = kmalloc(MCLBYTES, M_MBUFCL, M_INTWAIT | M_ZERO);

mclmeta_dtor(void *obj, void *private)

	struct mbcluster *mcl = obj;

	KKASSERT(mcl->mcl_refs == 0);
	kfree(mcl->mcl_data, M_MBUFCL);
linkcluster(struct mbuf *m, struct mbcluster *cl)

	/*
	 * Add the cluster to the mbuf.  The caller will detect that the
	 * mbuf now has an attached cluster.
	 */
	m->m_ext.ext_arg = cl;
	m->m_ext.ext_buf = cl->mcl_data;
	m->m_ext.ext_ref = m_mclref;
	m->m_ext.ext_free = m_mclfree;
	m->m_ext.ext_size = MCLBYTES;
	atomic_add_int(&cl->mcl_refs, 1);

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT | M_EXT_CLUSTER;
mbufphdrcluster_ctor(void *obj, void *private, int ocflags)

	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbufphdr_ctor(obj, private, ocflags);
	cl = objcache_get(mclmeta_cache, ocflags);
	m->m_flags |= M_CLCACHE;

mbufcluster_ctor(void *obj, void *private, int ocflags)

	struct mbuf *m = obj;
	struct mbcluster *cl;

	mbuf_ctor(obj, private, ocflags);
	cl = objcache_get(mclmeta_cache, ocflags);
	m->m_flags |= M_CLCACHE;
/*
 * Used for both the cluster and cluster PHDR caches.
 *
 * The mbuf may have lost its cluster due to sharing; deal
 * with the situation by checking M_EXT.
 */
mbufcluster_dtor(void *obj, void *private)

	struct mbuf *m = obj;
	struct mbcluster *mcl;

	if (m->m_flags & M_EXT) {
		KKASSERT((m->m_flags & M_EXT_CLUSTER) != 0);
		mcl = m->m_ext.ext_arg;
		KKASSERT(mcl->mcl_refs == 1);
		objcache_put(mclmeta_cache, mcl);

struct objcache_malloc_args mbuf_malloc_args = { MSIZE, M_MBUF };
struct objcache_malloc_args mclmeta_malloc_args =
	{ sizeof(struct mbcluster), M_MCLMETA };
	int mb_limit, cl_limit;

	/*
	 * Initialize statistics
	 */
	for (i = 0; i < ncpus; i++) {
		atomic_set_long_nonlocked(&mbstat[i].m_msize, MSIZE);
		atomic_set_long_nonlocked(&mbstat[i].m_mclbytes, MCLBYTES);
		atomic_set_long_nonlocked(&mbstat[i].m_minclsize, MINCLSIZE);
		atomic_set_long_nonlocked(&mbstat[i].m_mlen, MLEN);
		atomic_set_long_nonlocked(&mbstat[i].m_mhlen, MHLEN);
	/*
	 * Create object caches and save cluster limits, which will
	 * be used to adjust backing kmalloc pools' limit later.
	 */
	mb_limit = cl_limit = 0;
	mbuf_cache = objcache_create("mbuf", &limit, 0,
	    mbuf_ctor, NULL, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);

	mbufphdr_cache = objcache_create("mbuf pkt hdr", &limit, 64,
	    mbufphdr_ctor, NULL, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);

	cl_limit = nmbclusters;
	mclmeta_cache = objcache_create("cluster mbuf", &cl_limit, 0,
	    mclmeta_ctor, mclmeta_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);

	mbufcluster_cache = objcache_create("mbuf + cluster", &limit, 0,
	    mbufcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);

	mbufphdrcluster_cache = objcache_create("mbuf pkt hdr + cluster",
	    &limit, 64, mbufphdrcluster_ctor, mbufcluster_dtor, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);

	/*
	 * Adjust backing kmalloc pools' limit
	 *
	 * NOTE: We raise the limit by another 1/8 to take the effect
	 * of loosememuse into account.
	 */
	cl_limit += cl_limit / 8;
	kmalloc_raise_limit(mclmeta_malloc_args.mtype,
			    mclmeta_malloc_args.objsize * cl_limit);
	kmalloc_raise_limit(M_MBUFCL, MCLBYTES * cl_limit);

	mb_limit += mb_limit / 8;
	kmalloc_raise_limit(mbuf_malloc_args.mtype,
			    mbuf_malloc_args.objsize * mb_limit);
/*
 * Return the number of references to this mbuf's data.  0 is returned
 * if the mbuf is not M_EXT, a reference count is returned if it is
 * M_EXT | M_EXT_CLUSTER, and 99 is returned if it is a special M_EXT.
 */
m_sharecount(struct mbuf *m)

	switch (m->m_flags & (M_EXT | M_EXT_CLUSTER)) {
	case M_EXT | M_EXT_CLUSTER:
		return (((struct mbcluster *)m->m_ext.ext_arg)->mcl_refs);

	return (0);		/* to shut up compiler */
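
/*
 * Illustrative sketch, not from the original sources: using m_sharecount()
 * to decide whether an mbuf's cluster data may be modified in place.  A
 * count above 1 (or the special value 99) means the storage is shared and
 * must be treated as read-only.  example_data_is_private() is a made-up
 * helper name.
 */
static int
example_data_is_private(struct mbuf *m)
{
	/* 0: no cluster attached, 1: cluster with a single reference */
	return (m_sharecount(m) <= 1);
}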
/*
 * change mbuf to new type
 */
m_chtype(struct mbuf *m, int type)

	struct globaldata *gd = mycpu;

	atomic_add_long_nonlocked(&mbtypes[gd->gd_cpuid][type], 1);
	atomic_subtract_long_nonlocked(&mbtypes[gd->gd_cpuid][m->m_type], 1);
	atomic_set_short_nonlocked(&m->m_type, type);
	SLIST_FOREACH(dp, &domains, dom_next) {
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {

	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_drain, 1);
updatestats(struct mbuf *m, int type)

	struct globaldata *gd = mycpu;

	atomic_add_long_nonlocked(&mbtypes[gd->gd_cpuid][type], 1);
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mbufs, 1);
m_get(int how, int type)

	int ocf = MBTOM(how);

	m = objcache_get(mbuf_cache, ocf);

		if ((how & MB_TRYWAIT) && ntries++ == 0) {
			struct objcache *reclaimlist[] = {
				mbufcluster_cache, mbufphdrcluster_cache
			};
			const int nreclaims = __arysize(reclaimlist);

			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))

	updatestats(m, type);
m_gethdr(int how, int type)

	int ocf = MBTOM(how);

	m = objcache_get(mbufphdr_cache, ocf);

		if ((how & MB_TRYWAIT) && ntries++ == 0) {
			struct objcache *reclaimlist[] = {
				mbufcluster_cache, mbufphdrcluster_cache
			};
			const int nreclaims = __arysize(reclaimlist);

			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))

	updatestats(m, type);
/*
 * Get a mbuf (not a mbuf cluster!) and zero it.
 */
m_getclr(int how, int type)

	m = m_get(how, type);
		bzero(m->m_data, MLEN);
/*
 * Returns an mbuf with an attached cluster.
 * Because many network drivers use this kind of buffer a lot, it is
 * convenient to keep a small pool of free buffers of this kind.
 * Even a small size such as 10 gives about 10% improvement in the
 * forwarding rate in a bridge or router.
 */
m_getcl(int how, short type, int flags)

	int ocflags = MBTOM(how);

	if (flags & M_PKTHDR)
		m = objcache_get(mbufphdrcluster_cache, ocflags);
	else
		m = objcache_get(mbufcluster_cache, ocflags);

		if ((how & MB_TRYWAIT) && ntries++ == 0) {
			struct objcache *reclaimlist[1];

			if (flags & M_PKTHDR)
				reclaimlist[0] = mbufcluster_cache;
			else
				reclaimlist[0] = mbufphdrcluster_cache;
			if (!objcache_reclaimlist(reclaimlist, 1, ocflags))

	atomic_add_long_nonlocked(&mbtypes[mycpu->gd_cpuid][type], 1);
	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
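
/*
 * Illustrative sketch, not from the original sources: allocating a
 * packet-header mbuf with an attached cluster via m_getcl(), giving
 * MCLBYTES of contiguous space at m_data, then releasing it.  The lengths
 * must be initialized by the caller.  example_getcl_usage() is a made-up
 * name.
 */
static void
example_getcl_usage(void)
{
	struct mbuf *m;

	m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return;
	m->m_len = MCLBYTES;
	m->m_pkthdr.len = MCLBYTES;
	bzero(mtod(m, void *), MCLBYTES);
	m_freem(m);
}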
/*
 * Allocate chain of requested length.
 */
m_getc(int len, int how, int type)

	struct mbuf *n, *nfirst = NULL, **ntail = &nfirst;

		n = m_getl(len, how, type, 0, &nsize);
/*
 * Allocate len-worth of mbufs and/or mbuf clusters (whatever fits best)
 * and return a pointer to the head of the allocated chain.  If m0 is
 * non-null, then we assume that it is a single mbuf or an mbuf chain to
 * which we want len bytes worth of mbufs and/or clusters attached, and so
 * if we succeed in allocating it, we will just return a pointer to m0.
 *
 * If we happen to fail at any point during the allocation, we will free
 * up everything we have already allocated and return NULL.
 *
 * Deprecated.  Use m_getc() and m_cat() instead.
 */
m_getm(struct mbuf *m0, int len, int type, int how)

	nfirst = m_getc(len, how, type);

		m_last(m0)->m_next = nfirst;
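
/*
 * Illustrative sketch, not from the original sources: the replacement
 * pattern recommended above -- build a chain with m_getc() and append it
 * with m_cat() instead of calling the deprecated m_getm().  Note that
 * m_cat() does not update m_pkthdr.len, so the caller adjusts it itself.
 * example_append() is a made-up name.
 */
static void
example_append(struct mbuf *pkt, int extra)
{
	struct mbuf *tail;

	tail = m_getc(extra, MB_WAIT, MT_DATA);
	if (tail != NULL) {
		m_cat(pkt, tail);
		pkt->m_pkthdr.len += extra;
	}
}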
/*
 * Adds a cluster to a normal mbuf, M_EXT is set on success.
 * Deprecated.  Use m_getcl() instead.
 */
m_mclget(struct mbuf *m, int how)

	struct mbcluster *mcl;

	KKASSERT((m->m_flags & M_EXT) == 0);
	mcl = objcache_get(mclmeta_cache, MBTOM(how));
		atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
/*
 * Updates to mbcluster must be MPSAFE.  Only an entity which already has
 * a reference to the cluster can ref it, so we are in no danger of
 * racing an add with a subtract.  But the operation must still be atomic
 * since multiple entities may have a reference on the cluster.
 *
 * m_mclfree() is almost the same but it must contend with two entities
 * freeing the cluster at the same time.  If there is only one reference
 * count we are the only entity referencing the cluster and no further
 * locking is required.  Otherwise we must protect against a race to 0
 * with the serializer.
 */
m_mclref(void *arg)

	struct mbcluster *mcl = arg;

	atomic_add_int(&mcl->mcl_refs, 1);

/*
 * When dereferencing a cluster we have to deal with an N->0 race, where
 * N entities free their references simultaneously.  To do this we use
 * atomic_fetchadd_int().
 */
m_mclfree(void *arg)

	struct mbcluster *mcl = arg;

	if (atomic_fetchadd_int(&mcl->mcl_refs, -1) == 1)
		objcache_put(mclmeta_cache, mcl);
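
/*
 * Illustrative sketch, not from the original sources: how a holder drops
 * its cluster reference through the ext_free hook installed by
 * linkcluster().  The N->0 race described above is resolved inside
 * m_mclfree(): atomic_fetchadd_int() returns the pre-decrement value, so
 * exactly one of the racing callers sees 1 and performs the objcache_put().
 * example_drop_cluster() is a made-up name.
 */
static void
example_drop_cluster(struct mbuf *m)
{
	/* drop this mbuf's reference; the last dropper frees the cluster */
	m->m_ext.ext_free(m->m_ext.ext_arg);
	m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
}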
/*
 * Free a single mbuf and any associated external storage.  The successor,
 * if any, is returned.
 *
 * We do need to check non-first mbufs for m_aux, since some existing
 * code does not call M_PREPEND properly.
 * (example: call to bpf_mtap from drivers)
 */
m_free(struct mbuf *m)

	struct globaldata *gd = mycpu;

	KASSERT(m->m_type != MT_FREE, ("freeing free mbuf %p", m));
	atomic_subtract_long_nonlocked(&mbtypes[gd->gd_cpuid][m->m_type], 1);

	/*
	 * Make sure the mbuf is in constructed state before returning it
	 */
	KKASSERT(m->m_nextpkt == NULL);
	if (m->m_nextpkt != NULL) {
		static int afewtimes = 10;

		if (afewtimes-- > 0) {
			kprintf("mfree: m->m_nextpkt != NULL\n");

	if (m->m_flags & M_PKTHDR) {
		m_tag_delete_chain(m);		/* eliminate XXX JH */

	m->m_flags &= (M_EXT | M_EXT_CLUSTER | M_CLCACHE | M_PHCACHE);
	/*
	 * Clean the M_PKTHDR state so we can return the mbuf to its original
	 * cache.  This is based on the PHCACHE flag which tells us whether
	 * the mbuf was originally allocated out of a packet-header cache
	 * or a non-packet-header cache.
	 */
	if (m->m_flags & M_PHCACHE) {
		m->m_flags |= M_PKTHDR;
		m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
		m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
		m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */
		SLIST_INIT(&m->m_pkthdr.tags);

	/*
	 * Handle remaining flags combinations.  M_CLCACHE tells us whether
	 * the mbuf was originally allocated from a cluster cache or not,
	 * and is totally separate from whether the mbuf is currently
	 * associated with a cluster.
	 */
	switch(m->m_flags & (M_CLCACHE | M_EXT | M_EXT_CLUSTER)) {
	case M_CLCACHE | M_EXT | M_EXT_CLUSTER:
		/*
		 * mbuf+cluster cache case.  The mbuf was allocated from the
		 * combined mbuf_cluster cache and can be returned to the
		 * cache if the cluster hasn't been shared.
		 */
		if (m_sharecount(m) == 1) {
			/*
			 * The cluster has not been shared, we can just
			 * reset the data pointer and return the mbuf
			 * to the cluster cache.  Note that the reference
			 * count is left intact (it is still associated with
			 * the cluster).
			 */
			m->m_data = m->m_ext.ext_buf;
			if (m->m_flags & M_PHCACHE)
				objcache_put(mbufphdrcluster_cache, m);
			else
				objcache_put(mbufcluster_cache, m);
			atomic_subtract_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
		} else {
			/*
			 * Hell.  Someone else has a ref on this cluster;
			 * we have to disconnect it, which means we can't
			 * put it back into the mbufcluster_cache -- we
			 * have to destroy the mbuf.
			 *
			 * Other mbuf references to the cluster will typically
			 * be M_EXT | M_EXT_CLUSTER but without M_CLCACHE.
			 *
			 * XXX we could try to connect another cluster to
			 */
			m->m_ext.ext_free(m->m_ext.ext_arg);
			m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
			if (m->m_flags & M_PHCACHE)
				objcache_dtor(mbufphdrcluster_cache, m);
			else
				objcache_dtor(mbufcluster_cache, m);

	case M_EXT | M_EXT_CLUSTER:
		/*
		 * Normal cluster associated with an mbuf that was allocated
		 * from the normal mbuf pool rather than the cluster pool.
		 * The cluster has to be independently disassociated from the
		 * mbuf.
		 */
		if (m_sharecount(m) == 1)
			atomic_subtract_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_clusters, 1);
		/*
		 * Normal cluster association case, disconnect the cluster from
		 * the mbuf.  The cluster may or may not be custom.
		 */
		m->m_ext.ext_free(m->m_ext.ext_arg);
		m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);

		/*
		 * return the mbuf to the mbuf cache.
		 */
		if (m->m_flags & M_PHCACHE) {
			m->m_data = m->m_pktdat;
			objcache_put(mbufphdr_cache, m);
		} else {
			m->m_data = m->m_dat;
			objcache_put(mbuf_cache, m);
		}
		atomic_subtract_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mbufs, 1);

		panic("bad mbuf flags %p %08x\n", m, m->m_flags);
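
/*
 * Illustrative sketch, not from the original sources: since m_free()
 * returns the successor mbuf, a chain can be released one mbuf at a time,
 * which is the loop m_freem() performs for the caller.
 * example_free_chain() is a made-up name.
 */
static void
example_free_chain(struct mbuf *m)
{
	while (m != NULL)
		m = m_free(m);		/* equivalent to m_freem(m) */
}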
m_freem(struct mbuf *m)
/*
 * mbuf utility routines
 */

/*
 * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain and
 * copy junk along.
 */
m_prepend(struct mbuf *m, int len, int how)

	if (m->m_flags & M_PKTHDR)
		mn = m_gethdr(how, m->m_type);
	else
		mn = m_get(how, m->m_type);

	if (m->m_flags & M_PKTHDR)
		M_MOVE_PKTHDR(mn, m);
/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of MB_WAIT/MB_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
m_copym(const struct mbuf *m, int off0, int len, int wait)

	struct mbuf *n, **np;

	KASSERT(off >= 0, ("m_copym, negative off %d", off));
	KASSERT(len >= 0, ("m_copym, negative len %d", len));
	if (off == 0 && m->m_flags & M_PKTHDR)

		KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));

			KASSERT(len == M_COPYALL,
			    ("m_copym, length > size of mbuf chain"));

		/*
		 * Because we are sharing any cluster attachment below,
		 * be sure to get an mbuf that does not have a cluster
		 * associated with it.
		 */
			n = m_gethdr(wait, m->m_type);
			n = m_get(wait, m->m_type);

			if (!m_dup_pkthdr(n, m, wait))

			if (len == M_COPYALL)
				n->m_pkthdr.len -= off0;
			else
				n->m_pkthdr.len = len;

		n->m_len = min(len, m->m_len - off);
		if (m->m_flags & M_EXT) {
			KKASSERT((n->m_flags & M_EXT) == 0);
			n->m_data = m->m_data + off;
			m->m_ext.ext_ref(m->m_ext.ext_arg);
			n->m_ext = m->m_ext;
			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
		} else {
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		}
		if (len != M_COPYALL)

	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);

	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
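
/*
 * Illustrative sketch, not from the original sources: taking a read-only
 * copy of a whole chain with m_copym().  Cluster data is shared by
 * reference, so the copy must not be written to (see the comment above).
 * example_readonly_copy() is a made-up name.
 */
static struct mbuf *
example_readonly_copy(const struct mbuf *m)
{
	return (m_copym(m, 0, M_COPYALL, MB_DONTWAIT));
}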
/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies also have the room available.
 */
m_copypacket(struct mbuf *m, int how)

	struct mbuf *top, *n, *o;

	n = m_gethdr(how, m->m_type);

	if (!m_dup_pkthdr(n, m, how))

	n->m_len = m->m_len;
	if (m->m_flags & M_EXT) {
		KKASSERT((n->m_flags & M_EXT) == 0);
		n->m_data = m->m_data;
		m->m_ext.ext_ref(m->m_ext.ext_arg);
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
	} else {
		n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
		bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
	}

		o = m_get(how, m->m_type);

		n->m_len = m->m_len;
		if (m->m_flags & M_EXT) {
			KKASSERT((n->m_flags & M_EXT) == 0);
			n->m_data = m->m_data;
			m->m_ext.ext_ref(m->m_ext.ext_arg);
			n->m_ext = m->m_ext;
			n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
		} else {
			bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
		}

	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
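
/*
 * Illustrative sketch, not from the original sources: flattening the first
 * hdrlen bytes of a chain into a caller-supplied buffer with m_copydata(),
 * which copies across mbuf boundaries.  The buffer size is an arbitrary
 * example value; example_peek_header() is a made-up name.
 */
static void
example_peek_header(const struct mbuf *m, int hdrlen)
{
	char buf[128];

	if (hdrlen > (int)sizeof(buf))
		return;
	m_copydata(m, 0, hdrlen, buf);
}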
/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
m_dup(struct mbuf *m, int how)

	struct mbuf **p, *top = NULL;
	int remain, moff, nsize;

	KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __func__));

	/* While there's more data, get a new mbuf, tack it on, and fill it */
	remain = m->m_pkthdr.len;
	while (remain > 0 || top == NULL) {	/* allow m->m_pkthdr.len == 0 */
		/* Get the next new mbuf */
		n = m_getl(remain, how, m->m_type, top == NULL ? M_PKTHDR : 0,
		    &nsize);

			if (!m_dup_pkthdr(n, m, how))

		/* Link it into the new chain */

		/* Copy data from original mbuf(s) into new mbuf */
		while (n->m_len < nsize && m != NULL) {
			int chunk = min(nsize - n->m_len, m->m_len - moff);

			bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);

			if (moff == m->m_len) {

	/* Check correct total mbuf length */
	KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
	    ("%s: bogus m_pkthdr.len", __func__));

	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
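
/*
 * Illustrative sketch, not from the original sources: when a writable copy
 * is needed, m_dup() deep-copies the packet including cluster data, unlike
 * the reference-sharing m_copypacket()/m_copym() above.
 * example_writable_copy() is a made-up name.
 */
static struct mbuf *
example_writable_copy(struct mbuf *m)
{
	return (m_dup(m, MB_DONTWAIT));		/* NULL if allocation fails */
}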
/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
m_cat(struct mbuf *m, struct mbuf *n)

		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */

		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    n->m_len);
		m->m_len += n->m_len;
m_adj(struct mbuf *mp, int req_len)

	if ((m = mp) == NULL)

		while (m != NULL && len > 0) {
			if (m->m_len <= len) {

		if (mp->m_flags & M_PKTHDR)
			m->m_pkthdr.len -= (req_len - len);

		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
			if (m->m_next == NULL)

		if (m->m_len >= len) {
			if (mp->m_flags & M_PKTHDR)
				mp->m_pkthdr.len -= len;

		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		if (m->m_flags & M_PKTHDR)
			m->m_pkthdr.len = count;
		for (; m; m = m->m_next) {
			if (m->m_len >= count) {

		(m = m->m_next)->m_len = 0;
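
/*
 * Illustrative sketch, not from the original sources: the conventional
 * m_adj() usage -- a positive count trims bytes from the head of the
 * chain, a negative count trims from the tail.  The 4-byte tail trim is
 * only an example value (e.g. a trailing checksum).
 */
static void
example_strip(struct mbuf *m, int hdrlen)
{
	m_adj(m, hdrlen);	/* drop hdrlen bytes from the front */
	m_adj(m, -4);		/* drop 4 bytes from the end */
}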
/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod will work for a structure
 * of size len).  Returns the resulting mbuf chain on success, frees it and
 * returns null on failure.  If there is room, it will add up to
 * max_protohdr-len extra bytes to the contiguous region in an attempt to
 * avoid being called next time.
 */
m_pullup(struct mbuf *n, int len)

	/*
	 * If first mbuf has no cluster, and has room for len bytes
	 * without shifting current data, pullup into it,
	 * otherwise allocate a new mbuf to prepend to the chain.
	 */
	if (!(n->m_flags & M_EXT) &&
	    n->m_data + len < &n->m_dat[MLEN] &&
		if (n->m_len >= len)

		if (n->m_flags & M_PKTHDR)
			m = m_gethdr(MB_DONTWAIT, n->m_type);
		else
			m = m_get(MB_DONTWAIT, n->m_type);

		if (n->m_flags & M_PKTHDR)
			M_MOVE_PKTHDR(m, n);

		space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
		count = min(min(max(len, max_protohdr), space), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (unsigned)count);
	} while (len > 0 && n);

	atomic_add_long_nonlocked(&mbstat[mycpu->gd_cpuid].m_mcfail, 1);
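
/*
 * Illustrative sketch, not from the original sources: the classic
 * m_pullup() idiom.  Before casting m_data to a header structure, make
 * sure at least hdrlen bytes are contiguous; on failure the chain has
 * already been freed, so only the local pointer must be dropped.
 * example_pullup_hdr() is a made-up name.
 */
static struct mbuf *
example_pullup_hdr(struct mbuf *m, int hdrlen)
{
	if (m->m_len < hdrlen) {
		m = m_pullup(m, hdrlen);
		if (m == NULL)
			return (NULL);	/* chain was freed by m_pullup() */
	}
	return (m);
}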
/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf.  Use the
 * M_WRITABLE() macro to check for this case.
 */
m_split(struct mbuf *m0, int len0, int wait)

	unsigned len = len0, remain;

	for (m = m0; m && len > m->m_len; m = m->m_next)

	remain = m->m_len - len;
	if (m0->m_flags & M_PKTHDR) {
		n = m_gethdr(wait, m0->m_type);

		n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
		n->m_pkthdr.len = m0->m_pkthdr.len - len0;
		m0->m_pkthdr.len = len0;
		if (m->m_flags & M_EXT)

		if (remain > MHLEN) {
			/* m can't be the lead packet */
			n->m_next = m_split(m, len, wait);
			if (n->m_next == NULL) {

			MH_ALIGN(n, remain);
	} else if (remain == 0) {

		n = m_get(wait, m->m_type);

	if (m->m_flags & M_EXT) {
		KKASSERT((n->m_flags & M_EXT) == 0);
		n->m_data = m->m_data + len;
		m->m_ext.ext_ref(m->m_ext.ext_arg);
		n->m_ext = m->m_ext;
		n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
	} else {
		bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
	}
	n->m_next = m->m_next;
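
/*
 * Illustrative sketch, not from the original sources: splitting the
 * payload off a packet after hdrlen bytes.  A NULL return means the split
 * failed and the original chain was left intact; the returned tail may
 * share cluster storage with the head, so check M_WRITABLE() before
 * modifying it (see the comment above).  example_split_payload() is a
 * made-up name.
 */
static struct mbuf *
example_split_payload(struct mbuf *m, int hdrlen)
{
	return (m_split(m, hdrlen, MB_DONTWAIT));
}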
/*
 * Routine to copy from device local memory into mbufs.
 * Note: "offset" is ill-defined and always called as 0, so ignore it.
 */
m_devget(char *buf, int len, int offset, struct ifnet *ifp,
    void (*copy)(volatile const void *from, volatile void *to, size_t length))

	struct mbuf *m, *mfirst = NULL, **mtail;

		m = m_getl(len, MB_DONTWAIT, MT_DATA, flags, &nsize);

		m->m_len = min(len, nsize);
		if (flags & M_PKTHDR) {
			if (len + max_linkhdr <= nsize)
				m->m_data += max_linkhdr;
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = len;

		copy(buf, m->m_data, (unsigned)m->m_len);
/*
 * Routine to pad mbuf to the specified length 'padto'.
 */
m_devpad(struct mbuf *m, int padto)

	struct mbuf *last = NULL;

	if (padto <= m->m_pkthdr.len)

	padlen = padto - m->m_pkthdr.len;

	/* if there's only the packet-header and we can pad there, use it. */
	if (m->m_pkthdr.len == m->m_len && M_TRAILINGSPACE(m) >= padlen) {

		/*
		 * Walk packet chain to find last mbuf.  We will either
		 * pad there, or append a new mbuf and pad it
		 */
		for (last = m; last->m_next != NULL; last = last->m_next)

		/* `last' now points to last in chain. */
		if (M_TRAILINGSPACE(last) < padlen) {
			/* Allocate new empty mbuf, pad it.  Compact later. */
			MGET(n, MB_DONTWAIT, MT_DATA);

	KKASSERT(M_TRAILINGSPACE(last) >= padlen);
	KKASSERT(M_WRITABLE(last));

	/* Now zero the pad area */
	bzero(mtod(last, char *) + last->m_len, padlen);
	last->m_len += padlen;
	m->m_pkthdr.len += padlen;
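
/*
 * Illustrative sketch, not from the original sources: padding a short
 * transmit frame before handing it to hardware.  The 60-byte minimum is
 * an example value (Ethernet minimum frame size without the CRC); error
 * handling of m_devpad() is omitted in this sketch.
 */
static void
example_pad_runt(struct mbuf *m)
{
	if (m->m_pkthdr.len < 60)
		m_devpad(m, 60);	/* zero-fills the frame up to 60 bytes */
}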
/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)

	struct mbuf *m = m0, *n;

	while (off > (mlen = m->m_len)) {
		if (m->m_next == NULL) {
			n = m_getclr(MB_DONTWAIT, m->m_type);

			n->m_len = min(MLEN, len + off);

		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);

		if (m->m_next == NULL) {
			n = m_get(MB_DONTWAIT, m->m_type);

			n->m_len = min(MLEN, len);

out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
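
/*
 * Illustrative sketch, not from the original sources: overwriting a few
 * bytes at a fixed offset in a chain with m_copyback(), which extends the
 * chain if it is too short.  The offset and patch contents are example
 * values only; example_patch_bytes() is a made-up name.
 */
static void
example_patch_bytes(struct mbuf *m)
{
	char patch[4] = { 0, 0, 0, 0 };

	m_copyback(m, 8, sizeof(patch), patch);
}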
m_print(const struct mbuf *m)

	const struct mbuf *m2;

	len = m->m_pkthdr.len;
		kprintf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
1699 * "Move" mbuf pkthdr from "from" to "to".
1700 * "from" must have M_PKTHDR set, and "to" must be empty.
1703 m_move_pkthdr(struct mbuf
*to
, struct mbuf
*from
)
1705 KASSERT((to
->m_flags
& M_PKTHDR
), ("m_move_pkthdr: not packet header"));
1707 to
->m_flags
|= from
->m_flags
& M_COPYFLAGS
;
1708 to
->m_pkthdr
= from
->m_pkthdr
; /* especially tags */
1709 SLIST_INIT(&from
->m_pkthdr
.tags
); /* purge tags from src */
/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)

	KASSERT((to->m_flags & M_PKTHDR), ("m_dup_pkthdr: not packet header"));

	to->m_flags = (from->m_flags & M_COPYFLAGS) |
		      (to->m_flags & ~M_COPYFLAGS);
	to->m_pkthdr = from->m_pkthdr;
	SLIST_INIT(&to->m_pkthdr.tags);
	return (m_tag_copy_chain(to, from, how));
/*
 * Defragment a mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a non-packet header is passed in, the original
 * mbuf (chain?) will be returned unharmed.
 *
 * m_defrag_nofree doesn't free the passed in mbuf.
 */
m_defrag(struct mbuf *m0, int how)

	if ((m_new = m_defrag_nofree(m0, how)) == NULL)

m_defrag_nofree(struct mbuf *m0, int how)

	struct mbuf *m_new = NULL, *m_final = NULL;
	int progress = 0, length, nsize;

	if (!(m0->m_flags & M_PKTHDR))

#ifdef MBUF_STRESS_TEST
	if (m_defragrandomfailures) {
		int temp = karc4random() & 0xff;

	m_final = m_getl(m0->m_pkthdr.len, how, MT_DATA, M_PKTHDR, &nsize);
	if (m_final == NULL)

	m_final->m_len = 0;	/* in case m0->m_pkthdr.len is zero */

	if (m_dup_pkthdr(m_final, m0, how) == 0)

	while (progress < m0->m_pkthdr.len) {
		length = m0->m_pkthdr.len - progress;
		if (length > MCLBYTES)

		if (m_new == NULL) {
			m_new = m_getl(length, how, MT_DATA, 0, &nsize);

		m_copydata(m0, progress, length, mtod(m_new, caddr_t));

		m_new->m_len = length;
		if (m_new != m_final)
			m_cat(m_final, m_new);

	if (m0->m_next == NULL)

	m_defragbytes += m_final->m_pkthdr.len;
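
/*
 * Illustrative sketch, not from the original sources: the usual driver
 * pattern around m_defrag() -- if a transmit chain has too many fragments
 * for the hardware, collapse it into the shortest possible chain.  On
 * failure the original chain is untouched and still owned by the caller.
 * example_collapse_for_tx() is a made-up name.
 */
static struct mbuf *
example_collapse_for_tx(struct mbuf *m)
{
	struct mbuf *defragged;

	defragged = m_defrag(m, MB_DONTWAIT);
	if (defragged == NULL)
		return (m);	/* caller keeps (and may drop) the original */
	return (defragged);
}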
/*
 * Move data from uio into mbufs.
 */
m_uiomove(struct uio *uio)

	struct mbuf *m;			/* current working mbuf */
	struct mbuf *head = NULL;	/* result mbuf chain */
	struct mbuf **mp = &head;
	int flags = M_PKTHDR;

	if (uio->uio_resid > INT_MAX)

		resid = (int)uio->uio_resid;
		m = m_getl(resid, MB_WAIT, MT_DATA, flags, &nsize);
			m->m_pkthdr.len = 0;
			/* Leave room for protocol headers. */

		m->m_len = imin(nsize, resid);
		error = uiomove(mtod(m, caddr_t), m->m_len, uio);

		head->m_pkthdr.len += m->m_len;
	} while (uio->uio_resid > 0);
m_last(struct mbuf *m)

/*
 * Return the number of bytes in an mbuf chain.
 * If lastm is not NULL, also return the last mbuf.
 */
m_lengthm(struct mbuf *m, struct mbuf **lastm)

	struct mbuf *prev = m;

/*
 * Like m_lengthm(), except also keep track of mbuf usage.
 */
m_countm(struct mbuf *m, struct mbuf **lastm, u_int *pmbcnt)

	u_int len = 0, mbcnt = 0;
	struct mbuf *prev = m;

		if (m->m_flags & M_EXT)
			mbcnt += m->m_ext.ext_size;