/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)uipc_mbuf.c	8.2 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
 */
68 #include "opt_param.h"
69 #include "opt_mbuf_stress_test.h"
70 #include <sys/param.h>
71 #include <sys/systm.h>
73 #include <sys/malloc.h>
75 #include <sys/kernel.h>
76 #include <sys/sysctl.h>
77 #include <sys/domain.h>
78 #include <sys/objcache.h>
80 #include <sys/protosw.h>
82 #include <sys/thread.h>
84 #include <sys/globaldata.h>
86 #include <sys/thread2.h>
87 #include <sys/spinlock2.h>
89 #include <machine/atomic.h>
90 #include <machine/limits.h>
93 #include <vm/vm_kern.h>
94 #include <vm/vm_extern.h>
97 #include <machine/cpu.h>
/*
 * mbuf cluster meta-data
 */
struct mbcluster {
	int	mcl_refs;
	void	*mcl_data;
};

/*
 * mbuf tracking for debugging purposes
 */
#ifdef MBUF_DEBUG

static MALLOC_DEFINE(M_MTRACK, "mtrack", "mtrack");

RB_HEAD(mbuf_rb_tree, mbtrack);
RB_PROTOTYPE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *);

struct mbtrack {
	RB_ENTRY(mbtrack) rb_node;
	int	trackid;
	struct mbuf *m;
};

static int
mbtrack_cmp(struct mbtrack *mb1, struct mbtrack *mb2)
{
	if (mb1->m < mb2->m)
		return (-1);
	if (mb1->m > mb2->m)
		return (1);
	return (0);
}

RB_GENERATE2(mbuf_rb_tree, mbtrack, rb_node, mbtrack_cmp, struct mbuf *, m);

struct mbuf_rb_tree	mbuf_track_root;
static struct spinlock	mbuf_track_spin =
	SPINLOCK_INITIALIZER(mbuf_track_spin, "mbuf_track_spin");
static void
mbuftrack(struct mbuf *m)
{
	struct mbtrack *mbt;

	mbt = kmalloc(sizeof(*mbt), M_MTRACK, M_INTWAIT|M_ZERO);
	spin_lock(&mbuf_track_spin);
	mbt->m = m;
	if (mbuf_rb_tree_RB_INSERT(&mbuf_track_root, mbt)) {
		spin_unlock(&mbuf_track_spin);
		panic("mbuftrack: mbuf %p already being tracked", m);
	}
	spin_unlock(&mbuf_track_spin);
}

static void
mbufuntrack(struct mbuf *m)
{
	struct mbtrack *mbt;

	spin_lock(&mbuf_track_spin);
	mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
	if (mbt == NULL) {
		spin_unlock(&mbuf_track_spin);
		panic("mbufuntrack: mbuf %p was not tracked", m);
	} else {
		mbuf_rb_tree_RB_REMOVE(&mbuf_track_root, mbt);
		spin_unlock(&mbuf_track_spin);
		kfree(mbt, M_MTRACK);
	}
}

void
mbuftrackid(struct mbuf *m, int trackid)
{
	struct mbtrack *mbt;
	struct mbuf *n;

	spin_lock(&mbuf_track_spin);
	while (m) {
		n = m->m_nextpkt;
		while (m) {
			mbt = mbuf_rb_tree_RB_LOOKUP(&mbuf_track_root, m);
			if (mbt == NULL) {
				spin_unlock(&mbuf_track_spin);
				panic("mbuftrackid: mbuf %p not tracked", m);
			}
			mbt->trackid = trackid;
			m = m->m_next;
		}
		m = n;
	}
	spin_unlock(&mbuf_track_spin);
}
static int
mbuftrack_callback(struct mbtrack *mbt, void *arg)
{
	struct sysctl_req *req = arg;
	char buf[64];
	int error;

	ksnprintf(buf, sizeof(buf), "mbuf %p track %d\n", mbt->m, mbt->trackid);

	spin_unlock(&mbuf_track_spin);
	error = SYSCTL_OUT(req, buf, strlen(buf));
	spin_lock(&mbuf_track_spin);
	if (error)
		return (-error);
	return (0);
}

static int
mbuftrack_show(SYSCTL_HANDLER_ARGS)
{
	int error;

	spin_lock(&mbuf_track_spin);
	error = mbuf_rb_tree_RB_SCAN(&mbuf_track_root, NULL,
				     mbuftrack_callback, req);
	spin_unlock(&mbuf_track_spin);
	return (error);
}
SYSCTL_PROC(_kern_ipc, OID_AUTO, showmbufs, CTLFLAG_RD|CTLTYPE_STRING,
	    0, 0, mbuftrack_show, "A", "Show all in-use mbufs");

#else	/* !MBUF_DEBUG */

#define mbuftrack(m)
#define mbufuntrack(m)
#define mbuftrackid(m, id)

#endif	/* MBUF_DEBUG */
static void mbinit(void *);
SYSINIT(mbuf, SI_BOOT2_MACHDEP, SI_ORDER_FIRST, mbinit, NULL);

struct mbtypes_stat {
	u_long	stats[MT_NTYPES];
} __cachealign;

static struct mbtypes_stat	mbtypes[SMP_MAXCPU];

static struct mbstat mbstat[SMP_MAXCPU] __cachealign;
int	max_linkhdr;
int	max_protohdr;
int	max_hdr;
int	max_datalen;
int	m_defragpackets;
int	m_defragbytes;
int	m_defraguseless;
int	m_defragfailure;
#ifdef MBUF_STRESS_TEST
int	m_defragrandomfailures;
#endif

struct objcache *mbuf_cache, *mbufphdr_cache;
struct objcache *mclmeta_cache, *mjclmeta_cache;
struct objcache *mbufcluster_cache, *mbufphdrcluster_cache;
struct objcache *mbufjcluster_cache, *mbufphdrjcluster_cache;

struct lock	mbupdate_lk = LOCK_INITIALIZER("mbupdate", 0, LK_CANRECURSE);

static int	nmbclusters;
static int	nmbjclusters;
static int	nmbufs;
static int	mjclph_cachefrac;
static int	mjcl_cachefrac;
static int	mclph_cachefrac;
static int	mcl_cachefrac;

static int	mbuf_wait = 32;
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
	   &max_linkhdr, 0, "Max size of a link-level header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
	   &max_protohdr, 0, "Max size of a protocol header");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0,
	   "Max size of link+protocol headers");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
	   &max_datalen, 0, "Max data payload size without headers");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
	   &mbuf_wait, 0, "Time in ticks to sleep after failed mbuf allocations");

static int do_mbstat(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern_ipc, KIPC_MBSTAT, mbstat, CTLTYPE_STRUCT|CTLFLAG_RD,
	    0, 0, do_mbstat, "S,mbstat", "mbuf usage statistics");

static int do_mbtypes(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern_ipc, OID_AUTO, mbtypes, CTLTYPE_ULONG|CTLFLAG_RD,
	    0, 0, do_mbtypes, "LU", "");
static int
do_mbstat(SYSCTL_HANDLER_ARGS)
{
	struct mbstat mbstat_total;
	struct mbstat *mbstat_totalp;
	int i;

	bzero(&mbstat_total, sizeof(mbstat_total));
	mbstat_totalp = &mbstat_total;

	for (i = 0; i < ncpus; i++) {
		mbstat_total.m_mbufs += mbstat[i].m_mbufs;
		mbstat_total.m_clusters += mbstat[i].m_clusters;
		mbstat_total.m_jclusters += mbstat[i].m_jclusters;
		mbstat_total.m_clfree += mbstat[i].m_clfree;
		mbstat_total.m_drops += mbstat[i].m_drops;
		mbstat_total.m_wait += mbstat[i].m_wait;
		mbstat_total.m_drain += mbstat[i].m_drain;
		mbstat_total.m_mcfail += mbstat[i].m_mcfail;
		mbstat_total.m_mpfail += mbstat[i].m_mpfail;
	}

	/*
	 * The following fields are not cumulative fields so just
	 * get their values once.
	 */
	mbstat_total.m_msize = mbstat[0].m_msize;
	mbstat_total.m_mclbytes = mbstat[0].m_mclbytes;
	mbstat_total.m_minclsize = mbstat[0].m_minclsize;
	mbstat_total.m_mlen = mbstat[0].m_mlen;
	mbstat_total.m_mhlen = mbstat[0].m_mhlen;

	return (sysctl_handle_opaque(oidp, mbstat_totalp,
				     sizeof(mbstat_total), req));
}
static int
do_mbtypes(SYSCTL_HANDLER_ARGS)
{
	u_long totals[MT_NTYPES];
	int i, j;

	for (i = 0; i < MT_NTYPES; i++)
		totals[i] = 0;

	for (i = 0; i < ncpus; i++) {
		for (j = 0; j < MT_NTYPES; j++)
			totals[j] += mbtypes[i].stats[j];
	}

	return (sysctl_handle_opaque(oidp, totals, sizeof(totals), req));
}
/*
 * The variables may be set as boot-time tunables or live.  Setting these
 * values too low can deadlock your network.  Network interfaces may also
 * adjust nmbclusters and/or nmbjclusters to account for preloading the
 * hardware rings.
 */
static int sysctl_nmbclusters(SYSCTL_HANDLER_ARGS);
static int sysctl_nmbjclusters(SYSCTL_HANDLER_ARGS);
static int sysctl_nmbufs(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLTYPE_INT | CTLFLAG_RW,
	    0, 0, sysctl_nmbclusters, "I",
	    "Maximum number of mbuf clusters available");
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbjclusters, CTLTYPE_INT | CTLFLAG_RW,
	    0, 0, sysctl_nmbjclusters, "I",
	    "Maximum number of mbuf jclusters available");
SYSCTL_PROC(_kern_ipc, OID_AUTO, nmbufs, CTLTYPE_INT | CTLFLAG_RW,
	    0, 0, sysctl_nmbufs, "I",
	    "Maximum number of mbufs available");

SYSCTL_INT(_kern_ipc, OID_AUTO, mjclph_cachefrac, CTLFLAG_RD,
	   &mjclph_cachefrac, 0,
	   "Fraction of cacheable mbuf jclusters w/ pkthdr");
SYSCTL_INT(_kern_ipc, OID_AUTO, mjcl_cachefrac, CTLFLAG_RD,
	   &mjcl_cachefrac, 0,
	   "Fraction of cacheable mbuf jclusters");
SYSCTL_INT(_kern_ipc, OID_AUTO, mclph_cachefrac, CTLFLAG_RD,
	   &mclph_cachefrac, 0,
	   "Fraction of cacheable mbuf clusters w/ pkthdr");
SYSCTL_INT(_kern_ipc, OID_AUTO, mcl_cachefrac, CTLFLAG_RD,
	   &mcl_cachefrac, 0, "Fraction of cacheable mbuf clusters");

SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
	   &m_defragpackets, 0, "Number of defragment packets");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
	   &m_defragbytes, 0, "Number of defragment bytes");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
	   &m_defraguseless, 0, "Number of useless defragment mbuf chain operations");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
	   &m_defragfailure, 0, "Number of failed defragment mbuf chain operations");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
	   &m_defragrandomfailures, 0, "");
#endif
static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");
static MALLOC_DEFINE(M_MBUFCL, "mbufcl", "mbufcl");
static MALLOC_DEFINE(M_MCLMETA, "mclmeta", "mclmeta");

static void m_reclaim(void);
static void m_mclref(void *arg);
static void m_mclfree(void *arg);
static void m_mjclfree(void *arg);

static void mbupdatelimits(void);

/*
 * Generally scale default mbufs to maxproc.
 *
 * NOTE: Default NMBUFS must take into account a possible DOS attack
 *	 using fd passing on unix domain sockets.
 */
#ifndef NMBCLUSTERS
#define NMBCLUSTERS	(512 + maxproc * 4)
#endif
#ifndef BASE_CACHEFRAC
#define BASE_CACHEFRAC	16
#endif
#ifndef MJCLPH_CACHEFRAC
#define MJCLPH_CACHEFRAC (BASE_CACHEFRAC * 2)
#endif
#ifndef MJCL_CACHEFRAC
#define MJCL_CACHEFRAC	(BASE_CACHEFRAC * 2)
#endif
#ifndef MCLPH_CACHEFRAC
#define MCLPH_CACHEFRAC	(BASE_CACHEFRAC * 2)
#endif
#ifndef MCL_CACHEFRAC
#define MCL_CACHEFRAC	(BASE_CACHEFRAC * 2)
#endif
#ifndef NMBJCLUSTERS
#define NMBJCLUSTERS	(NMBCLUSTERS / 4)
#endif
#ifndef NMBUFS
#define NMBUFS		(nmbclusters / 2 + maxfiles)
#endif

#define NMBCLUSTERS_MIN		(NMBCLUSTERS / 2)
#define NMBJCLUSTERS_MIN	(NMBJCLUSTERS / 2)
#define NMBUFS_MIN		(NMBUFS / 2)
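/*
 * Rough worked example of the defaults above (a sketch, not measured
 * output): assuming maxproc = 1000 and maxfiles = 10000, NMBCLUSTERS is
 * 512 + 4 * 1000 = 4512, NMBJCLUSTERS is 4512 / 4 = 1128, and NMBUFS is
 * 4512 / 2 + 10000 = 12256.  The *_MIN values are half of each, i.e.
 * 2256, 564 and 6128.  The cachefrac macros default to 32
 * (BASE_CACHEFRAC * 2), so roughly 1/32 of each limit is kept in the
 * per-cpu object caches.
 */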
/*
 * Perform sanity checks of tunables declared above.
 */
static void
tunable_mbinit(void *dummy)
{
	/*
	 * This has to be done before VM init.
	 */
	nmbclusters = NMBCLUSTERS;
	TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
	mjclph_cachefrac = MJCLPH_CACHEFRAC;
	TUNABLE_INT_FETCH("kern.ipc.mjclph_cachefrac", &mjclph_cachefrac);
	mjcl_cachefrac = MJCL_CACHEFRAC;
	TUNABLE_INT_FETCH("kern.ipc.mjcl_cachefrac", &mjcl_cachefrac);
	mclph_cachefrac = MCLPH_CACHEFRAC;
	TUNABLE_INT_FETCH("kern.ipc.mclph_cachefrac", &mclph_cachefrac);
	mcl_cachefrac = MCL_CACHEFRAC;
	TUNABLE_INT_FETCH("kern.ipc.mcl_cachefrac", &mcl_cachefrac);

	/*
	 * WARNING! each mcl cache feeds two mbuf caches, so the minimum
	 *	    cachefrac is 2.  For safety, use 3.
	 */
	if (mjclph_cachefrac < 3)
		mjclph_cachefrac = 3;
	if (mjcl_cachefrac < 3)
		mjcl_cachefrac = 3;
	if (mclph_cachefrac < 3)
		mclph_cachefrac = 3;
	if (mcl_cachefrac < 3)
		mcl_cachefrac = 3;

	nmbjclusters = NMBJCLUSTERS;
	TUNABLE_INT_FETCH("kern.ipc.nmbjclusters", &nmbjclusters);

	nmbufs = NMBUFS;
	TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);

	/* Sanity checks */
	if (nmbufs < nmbclusters * 2)
		nmbufs = nmbclusters * 2;
}
SYSINIT(tunable_mbinit, SI_BOOT1_TUNABLES, SI_ORDER_ANY,
	tunable_mbinit, NULL);
480 mbinclimit(int *limit
, int inc
, int minlim
)
484 lockmgr(&mbupdate_lk
, LK_EXCLUSIVE
);
486 new_limit
= *limit
+ inc
;
487 if (new_limit
< minlim
)
490 if (*limit
!= new_limit
) {
495 lockmgr(&mbupdate_lk
, LK_RELEASE
);
499 mbsetlimit(int *limit
, int new_limit
, int minlim
)
501 if (new_limit
< minlim
)
504 lockmgr(&mbupdate_lk
, LK_EXCLUSIVE
);
505 mbinclimit(limit
, new_limit
- *limit
, minlim
);
506 lockmgr(&mbupdate_lk
, LK_RELEASE
);
511 sysctl_mblimit(SYSCTL_HANDLER_ARGS
, int *limit
, int minlim
)
516 error
= sysctl_handle_int(oidp
, &value
, 0, req
);
517 if (error
|| req
->newptr
== NULL
)
520 return mbsetlimit(limit
, value
, minlim
);
524 * Sysctl support to update nmbclusters, nmbjclusters, and nmbufs.
527 sysctl_nmbclusters(SYSCTL_HANDLER_ARGS
)
529 return sysctl_mblimit(oidp
, arg1
, arg2
, req
, &nmbclusters
,
534 sysctl_nmbjclusters(SYSCTL_HANDLER_ARGS
)
536 return sysctl_mblimit(oidp
, arg1
, arg2
, req
, &nmbjclusters
,
541 sysctl_nmbufs(SYSCTL_HANDLER_ARGS
)
543 return sysctl_mblimit(oidp
, arg1
, arg2
, req
, &nmbufs
, NMBUFS_MIN
);
547 mcl_inclimit(int inc
)
549 mbinclimit(&nmbclusters
, inc
, NMBCLUSTERS_MIN
);
553 mjcl_inclimit(int inc
)
555 mbinclimit(&nmbjclusters
, inc
, NMBJCLUSTERS_MIN
);
561 mbinclimit(&nmbufs
, inc
, NMBUFS_MIN
);
564 /* "number of clusters of pages" */
/*
 * The mbuf object cache only guarantees that m_next and m_nextpkt are
 * NULL and that m_data points to the beginning of the data area.  In
 * particular, m_len and m_pkthdr.len are uninitialized.  It is the
 * responsibility of the caller to initialize those fields before use.
 */
static __inline boolean_t
mbuf_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_data = m->m_dat;
	m->m_flags = 0;

	return (TRUE);
}

/*
 * Initialize the mbuf and the packet header fields.
 */
static boolean_t
mbufphdr_ctor(void *obj, void *private, int ocflags)
{
	struct mbuf *m = obj;

	m->m_next = NULL;
	m->m_nextpkt = NULL;
	m->m_data = m->m_pktdat;
	m->m_flags = M_PKTHDR | M_PHCACHE;

	m->m_pkthdr.rcvif = NULL;	/* eliminate XXX JH */
	SLIST_INIT(&m->m_pkthdr.tags);
	m->m_pkthdr.csum_flags = 0;	/* eliminate XXX JH */
	m->m_pkthdr.fw_flags = 0;	/* eliminate XXX JH */

	return (TRUE);
}
610 * A mbcluster object consists of 2K (MCLBYTES) cluster and a refcount.
613 mclmeta_ctor(void *obj
, void *private, int ocflags
)
615 struct mbcluster
*cl
= obj
;
618 if (ocflags
& M_NOWAIT
)
619 buf
= kmalloc(MCLBYTES
, M_MBUFCL
, M_NOWAIT
| M_ZERO
);
621 buf
= kmalloc(MCLBYTES
, M_MBUFCL
, M_INTWAIT
| M_ZERO
);
630 mjclmeta_ctor(void *obj
, void *private, int ocflags
)
632 struct mbcluster
*cl
= obj
;
635 if (ocflags
& M_NOWAIT
)
636 buf
= kmalloc(MJUMPAGESIZE
, M_MBUFCL
, M_NOWAIT
| M_ZERO
);
638 buf
= kmalloc(MJUMPAGESIZE
, M_MBUFCL
, M_INTWAIT
| M_ZERO
);
647 mclmeta_dtor(void *obj
, void *private)
649 struct mbcluster
*mcl
= obj
;
651 KKASSERT(mcl
->mcl_refs
== 0);
652 kfree(mcl
->mcl_data
, M_MBUFCL
);
static void
linkjcluster(struct mbuf *m, struct mbcluster *cl, uint size)
{
	/*
	 * Add the cluster to the mbuf.  The caller will detect that the
	 * mbuf now has an attached cluster.
	 */
	m->m_ext.ext_arg = cl;
	m->m_ext.ext_buf = cl->mcl_data;
	m->m_ext.ext_ref = m_mclref;
	if (size != MCLBYTES)
		m->m_ext.ext_free = m_mjclfree;
	else
		m->m_ext.ext_free = m_mclfree;
	m->m_ext.ext_size = size;
	atomic_add_int(&cl->mcl_refs, 1);

	m->m_data = m->m_ext.ext_buf;
	m->m_flags |= M_EXT | M_EXT_CLUSTER;
}

static void
linkcluster(struct mbuf *m, struct mbcluster *cl)
{
	linkjcluster(m, cl, MCLBYTES);
}
683 mbufphdrcluster_ctor(void *obj
, void *private, int ocflags
)
685 struct mbuf
*m
= obj
;
686 struct mbcluster
*cl
;
688 mbufphdr_ctor(obj
, private, ocflags
);
689 cl
= objcache_get(mclmeta_cache
, ocflags
);
691 ++mbstat
[mycpu
->gd_cpuid
].m_drops
;
694 m
->m_flags
|= M_CLCACHE
;
700 mbufphdrjcluster_ctor(void *obj
, void *private, int ocflags
)
702 struct mbuf
*m
= obj
;
703 struct mbcluster
*cl
;
705 mbufphdr_ctor(obj
, private, ocflags
);
706 cl
= objcache_get(mjclmeta_cache
, ocflags
);
708 ++mbstat
[mycpu
->gd_cpuid
].m_drops
;
711 m
->m_flags
|= M_CLCACHE
;
712 linkjcluster(m
, cl
, MJUMPAGESIZE
);
717 mbufcluster_ctor(void *obj
, void *private, int ocflags
)
719 struct mbuf
*m
= obj
;
720 struct mbcluster
*cl
;
722 mbuf_ctor(obj
, private, ocflags
);
723 cl
= objcache_get(mclmeta_cache
, ocflags
);
725 ++mbstat
[mycpu
->gd_cpuid
].m_drops
;
728 m
->m_flags
|= M_CLCACHE
;
734 mbufjcluster_ctor(void *obj
, void *private, int ocflags
)
736 struct mbuf
*m
= obj
;
737 struct mbcluster
*cl
;
739 mbuf_ctor(obj
, private, ocflags
);
740 cl
= objcache_get(mjclmeta_cache
, ocflags
);
742 ++mbstat
[mycpu
->gd_cpuid
].m_drops
;
745 m
->m_flags
|= M_CLCACHE
;
746 linkjcluster(m
, cl
, MJUMPAGESIZE
);
751 * Used for both the cluster and cluster PHDR caches.
753 * The mbuf may have lost its cluster due to sharing, deal
754 * with the situation by checking M_EXT.
757 mbufcluster_dtor(void *obj
, void *private)
759 struct mbuf
*m
= obj
;
760 struct mbcluster
*mcl
;
762 if (m
->m_flags
& M_EXT
) {
763 KKASSERT((m
->m_flags
& M_EXT_CLUSTER
) != 0);
764 mcl
= m
->m_ext
.ext_arg
;
765 KKASSERT(mcl
->mcl_refs
== 1);
767 if (m
->m_flags
& M_EXT
&& m
->m_ext
.ext_size
!= MCLBYTES
)
768 objcache_put(mjclmeta_cache
, mcl
);
770 objcache_put(mclmeta_cache
, mcl
);
struct objcache_malloc_args mbuf_malloc_args = { MSIZE, M_MBUF };
struct objcache_malloc_args mclmeta_malloc_args =
	{ sizeof(struct mbcluster), M_MCLMETA };
782 int mb_limit
, cl_limit
, ncl_limit
, jcl_limit
;
787 * Initialize statistics
789 for (i
= 0; i
< ncpus
; i
++) {
790 mbstat
[i
].m_msize
= MSIZE
;
791 mbstat
[i
].m_mclbytes
= MCLBYTES
;
792 mbstat
[i
].m_mjumpagesize
= MJUMPAGESIZE
;
793 mbstat
[i
].m_minclsize
= MINCLSIZE
;
794 mbstat
[i
].m_mlen
= MLEN
;
795 mbstat
[i
].m_mhlen
= MHLEN
;
799 * Create object caches and save cluster limits, which will
800 * be used to adjust backing kmalloc pools' limit later.
803 mb_limit
= cl_limit
= 0;
806 mbuf_cache
= objcache_create("mbuf",
807 limit
, nmbufs
/ BASE_CACHEFRAC
,
808 mbuf_ctor
, NULL
, NULL
,
809 objcache_malloc_alloc
, objcache_malloc_free
, &mbuf_malloc_args
);
813 mbufphdr_cache
= objcache_create("mbuf pkt hdr",
814 limit
, nmbufs
/ BASE_CACHEFRAC
,
815 mbufphdr_ctor
, NULL
, NULL
,
816 objcache_malloc_alloc
, objcache_malloc_free
, &mbuf_malloc_args
);
819 ncl_limit
= nmbclusters
;
820 mclmeta_cache
= objcache_create("cluster mbuf",
821 ncl_limit
, nmbclusters
/ BASE_CACHEFRAC
,
822 mclmeta_ctor
, mclmeta_dtor
, NULL
,
823 objcache_malloc_alloc
, objcache_malloc_free
, &mclmeta_malloc_args
);
824 cl_limit
+= ncl_limit
;
826 jcl_limit
= nmbjclusters
;
827 mjclmeta_cache
= objcache_create("jcluster mbuf",
828 jcl_limit
, nmbjclusters
/ BASE_CACHEFRAC
,
829 mjclmeta_ctor
, mclmeta_dtor
, NULL
,
830 objcache_malloc_alloc
, objcache_malloc_free
, &mclmeta_malloc_args
);
831 cl_limit
+= jcl_limit
;
834 mbufcluster_cache
= objcache_create("mbuf + cluster",
835 limit
, nmbclusters
/ mcl_cachefrac
,
836 mbufcluster_ctor
, mbufcluster_dtor
, NULL
,
837 objcache_malloc_alloc
, objcache_malloc_free
, &mbuf_malloc_args
);
841 mbufphdrcluster_cache
= objcache_create("mbuf pkt hdr + cluster",
842 limit
, nmbclusters
/ mclph_cachefrac
,
843 mbufphdrcluster_ctor
, mbufcluster_dtor
, NULL
,
844 objcache_malloc_alloc
, objcache_malloc_free
, &mbuf_malloc_args
);
847 limit
= nmbjclusters
;
848 mbufjcluster_cache
= objcache_create("mbuf + jcluster",
849 limit
, nmbjclusters
/ mjcl_cachefrac
,
850 mbufjcluster_ctor
, mbufcluster_dtor
, NULL
,
851 objcache_malloc_alloc
, objcache_malloc_free
, &mbuf_malloc_args
);
854 limit
= nmbjclusters
;
855 mbufphdrjcluster_cache
= objcache_create("mbuf pkt hdr + jcluster",
856 limit
, nmbjclusters
/ mjclph_cachefrac
,
857 mbufphdrjcluster_ctor
, mbufcluster_dtor
, NULL
,
858 objcache_malloc_alloc
, objcache_malloc_free
, &mbuf_malloc_args
);
862 * Adjust backing kmalloc pools' limit
864 * NOTE: We raise the limit by another 1/8 to take the effect
865 * of loosememuse into account.
867 cl_limit
+= cl_limit
/ 8;
868 kmalloc_raise_limit(mclmeta_malloc_args
.mtype
,
869 mclmeta_malloc_args
.objsize
* (size_t)cl_limit
);
870 kmalloc_raise_limit(M_MBUFCL
,
871 (MCLBYTES
* (size_t)ncl_limit
) +
872 (MJUMPAGESIZE
* (size_t)jcl_limit
));
874 mb_limit
+= mb_limit
/ 8;
875 kmalloc_raise_limit(mbuf_malloc_args
.mtype
,
876 mbuf_malloc_args
.objsize
* (size_t)mb_limit
);
880 * Adjust mbuf limits after changes have been made
882 * Caller must hold mbupdate_lk
887 int mb_limit
, cl_limit
, ncl_limit
, jcl_limit
;
890 KASSERT(lockstatus(&mbupdate_lk
, curthread
) != 0,
891 ("mbupdate_lk is not held"));
894 * Figure out adjustments to object caches after nmbufs, nmbclusters,
895 * or nmbjclusters has been modified.
897 mb_limit
= cl_limit
= 0;
900 objcache_set_cluster_limit(mbuf_cache
, limit
);
904 objcache_set_cluster_limit(mbufphdr_cache
, limit
);
907 ncl_limit
= nmbclusters
;
908 objcache_set_cluster_limit(mclmeta_cache
, ncl_limit
);
909 cl_limit
+= ncl_limit
;
911 jcl_limit
= nmbjclusters
;
912 objcache_set_cluster_limit(mjclmeta_cache
, jcl_limit
);
913 cl_limit
+= jcl_limit
;
916 objcache_set_cluster_limit(mbufcluster_cache
, limit
);
920 objcache_set_cluster_limit(mbufphdrcluster_cache
, limit
);
923 limit
= nmbjclusters
;
924 objcache_set_cluster_limit(mbufjcluster_cache
, limit
);
927 limit
= nmbjclusters
;
928 objcache_set_cluster_limit(mbufphdrjcluster_cache
, limit
);
932 * Adjust backing kmalloc pools' limit
934 * NOTE: We raise the limit by another 1/8 to take the effect
935 * of loosememuse into account.
937 cl_limit
+= cl_limit
/ 8;
938 kmalloc_raise_limit(mclmeta_malloc_args
.mtype
,
939 mclmeta_malloc_args
.objsize
* (size_t)cl_limit
);
940 kmalloc_raise_limit(M_MBUFCL
,
941 (MCLBYTES
* (size_t)ncl_limit
) +
942 (MJUMPAGESIZE
* (size_t)jcl_limit
));
943 mb_limit
+= mb_limit
/ 8;
944 kmalloc_raise_limit(mbuf_malloc_args
.mtype
,
945 mbuf_malloc_args
.objsize
* (size_t)mb_limit
);
/*
 * Return the number of references to this mbuf's data.  0 is returned
 * if the mbuf is not M_EXT, a reference count is returned if it is
 * M_EXT | M_EXT_CLUSTER, and 99 is returned if it is a special M_EXT.
 */
int
m_sharecount(struct mbuf *m)
{
	switch (m->m_flags & (M_EXT | M_EXT_CLUSTER)) {
	case 0:
		return (0);
	case M_EXT:
		return (99);
	case M_EXT | M_EXT_CLUSTER:
		return (((struct mbcluster *)m->m_ext.ext_arg)->mcl_refs);
	}
	/* NOTREACHED */
	return (0);		/* to shut up compiler */
}

/*
 * change mbuf to new type
 */
void
m_chtype(struct mbuf *m, int type)
{
	struct globaldata *gd = mycpu;

	++mbtypes[gd->gd_cpuid].stats[type];
	--mbtypes[gd->gd_cpuid].stats[m->m_type];
	m->m_type = type;
}
987 kprintf("Debug: m_reclaim() called\n");
989 SLIST_FOREACH(dp
, &domains
, dom_next
) {
990 for (pr
= dp
->dom_protosw
; pr
< dp
->dom_protoswNPROTOSW
; pr
++) {
995 ++mbstat
[mycpu
->gd_cpuid
].m_drain
;
static void
updatestats(struct mbuf *m, int type)
{
	struct globaldata *gd = mycpu;

	m->m_type = type;
	mbuftrack(m);
	KASSERT(m->m_next == NULL, ("mbuf %p: bad m_next in get", m));
	KASSERT(m->m_nextpkt == NULL, ("mbuf %p: bad m_nextpkt in get", m));

	++mbtypes[gd->gd_cpuid].stats[type];
	++mbstat[gd->gd_cpuid].m_mbufs;
}

/*
 * Allocate an mbuf.
 */
struct mbuf *
m_get(int how, int type)
{
	struct mbuf *m;
	int ntries = 0;
	int ocf = MB_OCFLAG(how);

retryonce:

	m = objcache_get(mbuf_cache, ocf);

	if (m == NULL) {
		if ((ocf & M_WAITOK) && ntries++ == 0) {
			struct objcache *reclaimlist[] = {
				mbufphdr_cache,
				mbufcluster_cache,
				mbufphdrcluster_cache,
				mbufjcluster_cache,
				mbufphdrjcluster_cache
			};
			const int nreclaims = NELEM(reclaimlist);

			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
				m_reclaim();
			goto retryonce;
		}
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (NULL);
	}
	KASSERT(m->m_data == m->m_dat, ("mbuf %p: bad m_data in get", m));
	m->m_len = 0;

	updatestats(m, type);
	return (m);
}

struct mbuf *
m_gethdr(int how, int type)
{
	struct mbuf *m;
	int ocf = MB_OCFLAG(how);
	int ntries = 0;

retryonce:

	m = objcache_get(mbufphdr_cache, ocf);

	if (m == NULL) {
		if ((ocf & M_WAITOK) && ntries++ == 0) {
			struct objcache *reclaimlist[] = {
				mbuf_cache,
				mbufcluster_cache, mbufphdrcluster_cache,
				mbufjcluster_cache, mbufphdrjcluster_cache
			};
			const int nreclaims = NELEM(reclaimlist);

			if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
				m_reclaim();
			goto retryonce;
		}
		++mbstat[mycpu->gd_cpuid].m_drops;
		return (NULL);
	}
	KASSERT(m->m_data == m->m_pktdat, ("mbuf %p: bad m_data in get", m));
	m->m_len = 0;
	m->m_pkthdr.len = 0;

	updatestats(m, type);
	return (m);
}
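/*
 * Usage sketch (illustrative only; 'struct hypothetical_hdr' and
 * 'build_frame' are made-up names): as noted above, the object cache
 * does not give the mbuf a meaningful length, so a typical sender sets
 * m_len and m_pkthdr.len right after allocation:
 *
 *	struct mbuf *m;
 *
 *	m = m_gethdr(M_WAITOK, MT_DATA);
 *	if (m == NULL)
 *		return (ENOBUFS);
 *	m->m_len = sizeof(struct hypothetical_hdr);
 *	m->m_pkthdr.len = m->m_len;
 *	build_frame(mtod(m, struct hypothetical_hdr *));
 */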
/*
 * Get a mbuf (not a mbuf cluster!) and zero it.
 */
struct mbuf *
m_getclr(int how, int type)
{
	struct mbuf *m;

	m = m_get(how, type);
	if (m != NULL)
		bzero(m->m_data, MLEN);
	return (m);
}
1108 static struct mbuf
*
1109 m_getcl_cache(int how
, short type
, int flags
, struct objcache
*mbclc
,
1110 struct objcache
*mbphclc
, u_long
*cl_stats
)
1112 struct mbuf
*m
= NULL
;
1113 int ocflags
= MB_OCFLAG(how
);
1118 if (flags
& M_PKTHDR
)
1119 m
= objcache_get(mbphclc
, ocflags
);
1121 m
= objcache_get(mbclc
, ocflags
);
1124 if ((ocflags
& M_WAITOK
) && ntries
++ == 0) {
1125 struct objcache
*reclaimlist
[1];
1127 if (flags
& M_PKTHDR
)
1128 reclaimlist
[0] = mbclc
;
1130 reclaimlist
[0] = mbphclc
;
1131 if (!objcache_reclaimlist(reclaimlist
, 1, ocflags
))
1135 ++mbstat
[mycpu
->gd_cpuid
].m_drops
;
1140 KASSERT(m
->m_data
== m
->m_ext
.ext_buf
,
1141 ("mbuf %p: bad m_data in get", m
));
1145 m
->m_pkthdr
.len
= 0; /* just do it unconditonally */
1149 ++mbtypes
[mycpu
->gd_cpuid
].stats
[type
];
1155 m_getjcl(int how
, short type
, int flags
, size_t size
)
1157 struct objcache
*mbclc
, *mbphclc
;
1162 mbclc
= mbufcluster_cache
;
1163 mbphclc
= mbufphdrcluster_cache
;
1164 cl_stats
= &mbstat
[mycpu
->gd_cpuid
].m_clusters
;
1168 mbclc
= mbufjcluster_cache
;
1169 mbphclc
= mbufphdrjcluster_cache
;
1170 cl_stats
= &mbstat
[mycpu
->gd_cpuid
].m_jclusters
;
1173 return m_getcl_cache(how
, type
, flags
, mbclc
, mbphclc
, cl_stats
);
/*
 * Returns an mbuf with an attached cluster.
 * Because many network drivers use this kind of buffers a lot, it is
 * convenient to keep a small pool of free buffers of this kind.
 * Even a small size such as 10 gives about 10% improvement in the
 * forwarding rate in a bridge or router.
 */
struct mbuf *
m_getcl(int how, short type, int flags)
{
	return m_getcl_cache(how, type, flags,
			     mbufcluster_cache, mbufphdrcluster_cache,
			     &mbstat[mycpu->gd_cpuid].m_clusters);
}
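/*
 * Sketch of the usual driver pattern (illustrative; the rx ring and its
 * refill routine are hypothetical): receive rings are commonly refilled
 * with mbuf+cluster pairs from this cache:
 *
 *	struct mbuf *m;
 *
 *	m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
 *	if (m == NULL)
 *		return (ENOBUFS);	/- retry on the next refill pass -/
 *	m->m_len = m->m_pkthdr.len = MCLBYTES;
 *	/- load m into the hypothetical rx ring slot, then sync the ring -/
 */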
1192 * Allocate chain of requested length.
1195 m_getc(int len
, int how
, int type
)
1197 struct mbuf
*n
, *nfirst
= NULL
, **ntail
= &nfirst
;
1201 n
= m_getl(len
, how
, type
, 0, &nsize
);
1217 * Allocate len-worth of mbufs and/or mbuf clusters (whatever fits best)
1218 * and return a pointer to the head of the allocated chain. If m0 is
1219 * non-null, then we assume that it is a single mbuf or an mbuf chain to
1220 * which we want len bytes worth of mbufs and/or clusters attached, and so
1221 * if we succeed in allocating it, we will just return a pointer to m0.
1223 * If we happen to fail at any point during the allocation, we will free
1224 * up everything we have already allocated and return NULL.
1226 * Deprecated. Use m_getc() and m_cat() instead.
1229 m_getm(struct mbuf
*m0
, int len
, int type
, int how
)
1231 struct mbuf
*nfirst
;
1233 nfirst
= m_getc(len
, how
, type
);
1236 m_last(m0
)->m_next
= nfirst
;
1244 * Adds a cluster to a normal mbuf, M_EXT is set on success.
1245 * Deprecated. Use m_getcl() instead.
1248 m_mclget(struct mbuf
*m
, int how
)
1250 struct mbcluster
*mcl
;
1252 KKASSERT((m
->m_flags
& M_EXT
) == 0);
1253 mcl
= objcache_get(mclmeta_cache
, MB_OCFLAG(how
));
1255 linkcluster(m
, mcl
);
1256 ++mbstat
[mycpu
->gd_cpuid
].m_clusters
;
1258 ++mbstat
[mycpu
->gd_cpuid
].m_drops
;
1263 * Updates to mbcluster must be MPSAFE. Only an entity which already has
1264 * a reference to the cluster can ref it, so we are in no danger of
1265 * racing an add with a subtract. But the operation must still be atomic
1266 * since multiple entities may have a reference on the cluster.
1268 * m_mclfree() is almost the same but it must contend with two entities
1269 * freeing the cluster at the same time.
1274 struct mbcluster
*mcl
= arg
;
1276 atomic_add_int(&mcl
->mcl_refs
, 1);
1280 * When dereferencing a cluster we have to deal with a N->0 race, where
1281 * N entities free their references simultaniously. To do this we use
1282 * atomic_fetchadd_int().
1285 m_mclfree(void *arg
)
1287 struct mbcluster
*mcl
= arg
;
1289 if (atomic_fetchadd_int(&mcl
->mcl_refs
, -1) == 1) {
1290 --mbstat
[mycpu
->gd_cpuid
].m_clusters
;
1291 objcache_put(mclmeta_cache
, mcl
);
1296 m_mjclfree(void *arg
)
1298 struct mbcluster
*mcl
= arg
;
1300 if (atomic_fetchadd_int(&mcl
->mcl_refs
, -1) == 1) {
1301 --mbstat
[mycpu
->gd_cpuid
].m_jclusters
;
1302 objcache_put(mjclmeta_cache
, mcl
);
1307 * Free a single mbuf and any associated external storage. The successor,
1308 * if any, is returned.
1310 * We do need to check non-first mbuf for m_aux, since some of existing
1311 * code does not call M_PREPEND properly.
1312 * (example: call to bpf_mtap from drivers)
1318 _m_free(struct mbuf
*m
, const char *func
)
1323 m_free(struct mbuf
*m
)
1328 struct globaldata
*gd
= mycpu
;
1330 KASSERT(m
->m_type
!= MT_FREE
, ("freeing free mbuf %p", m
));
1331 KASSERT(M_TRAILINGSPACE(m
) >= 0, ("overflowed mbuf %p", m
));
1332 --mbtypes
[gd
->gd_cpuid
].stats
[m
->m_type
];
1337 * Make sure the mbuf is in constructed state before returning it
1343 m
->m_hdr
.mh_lastfunc
= func
;
1346 KKASSERT(m
->m_nextpkt
== NULL
);
1348 if (m
->m_nextpkt
!= NULL
) {
1349 static int afewtimes
= 10;
1351 if (afewtimes
-- > 0) {
1352 kprintf("mfree: m->m_nextpkt != NULL\n");
1353 print_backtrace(-1);
1355 m
->m_nextpkt
= NULL
;
1358 if (m
->m_flags
& M_PKTHDR
) {
1359 m_tag_delete_chain(m
); /* eliminate XXX JH */
1362 m
->m_flags
&= (M_EXT
| M_EXT_CLUSTER
| M_CLCACHE
| M_PHCACHE
);
1365 * Clean the M_PKTHDR state so we can return the mbuf to its original
1366 * cache. This is based on the PHCACHE flag which tells us whether
1367 * the mbuf was originally allocated out of a packet-header cache
1368 * or a non-packet-header cache.
1370 if (m
->m_flags
& M_PHCACHE
) {
1371 m
->m_flags
|= M_PKTHDR
;
1372 m
->m_pkthdr
.rcvif
= NULL
; /* eliminate XXX JH */
1373 m
->m_pkthdr
.csum_flags
= 0; /* eliminate XXX JH */
1374 m
->m_pkthdr
.fw_flags
= 0; /* eliminate XXX JH */
1375 SLIST_INIT(&m
->m_pkthdr
.tags
);
1379 * Handle remaining flags combinations. M_CLCACHE tells us whether
1380 * the mbuf was originally allocated from a cluster cache or not,
1381 * and is totally separate from whether the mbuf is currently
1382 * associated with a cluster.
1384 switch(m
->m_flags
& (M_CLCACHE
| M_EXT
| M_EXT_CLUSTER
)) {
1385 case M_CLCACHE
| M_EXT
| M_EXT_CLUSTER
:
1387 * mbuf+cluster cache case. The mbuf was allocated from the
1388 * combined mbuf_cluster cache and can be returned to the
1389 * cache if the cluster hasn't been shared.
1391 if (m_sharecount(m
) == 1) {
1393 * The cluster has not been shared, we can just
1394 * reset the data pointer and return the mbuf
1395 * to the cluster cache. Note that the reference
1396 * count is left intact (it is still associated with
1399 m
->m_data
= m
->m_ext
.ext_buf
;
1400 if (m
->m_flags
& M_EXT
&& m
->m_ext
.ext_size
!= MCLBYTES
) {
1401 if (m
->m_flags
& M_PHCACHE
)
1402 objcache_put(mbufphdrjcluster_cache
, m
);
1404 objcache_put(mbufjcluster_cache
, m
);
1405 --mbstat
[mycpu
->gd_cpuid
].m_jclusters
;
1407 if (m
->m_flags
& M_PHCACHE
)
1408 objcache_put(mbufphdrcluster_cache
, m
);
1410 objcache_put(mbufcluster_cache
, m
);
1411 --mbstat
[mycpu
->gd_cpuid
].m_clusters
;
1415 * Hell. Someone else has a ref on this cluster,
1416 * we have to disconnect it which means we can't
1417 * put it back into the mbufcluster_cache, we
1418 * have to destroy the mbuf.
1420 * Other mbuf references to the cluster will typically
1421 * be M_EXT | M_EXT_CLUSTER but without M_CLCACHE.
1423 * XXX we could try to connect another cluster to
1426 m
->m_ext
.ext_free(m
->m_ext
.ext_arg
);
1427 m
->m_flags
&= ~(M_EXT
| M_EXT_CLUSTER
);
1428 if (m
->m_ext
.ext_size
== MCLBYTES
) {
1429 if (m
->m_flags
& M_PHCACHE
)
1430 objcache_dtor(mbufphdrcluster_cache
, m
);
1432 objcache_dtor(mbufcluster_cache
, m
);
1434 if (m
->m_flags
& M_PHCACHE
)
1435 objcache_dtor(mbufphdrjcluster_cache
, m
);
1437 objcache_dtor(mbufjcluster_cache
, m
);
1441 case M_EXT
| M_EXT_CLUSTER
:
1444 * Normal cluster association case, disconnect the cluster from
1445 * the mbuf. The cluster may or may not be custom.
1447 m
->m_ext
.ext_free(m
->m_ext
.ext_arg
);
1448 m
->m_flags
&= ~(M_EXT
| M_EXT_CLUSTER
);
1452 * return the mbuf to the mbuf cache.
1454 if (m
->m_flags
& M_PHCACHE
) {
1455 m
->m_data
= m
->m_pktdat
;
1456 objcache_put(mbufphdr_cache
, m
);
1458 m
->m_data
= m
->m_dat
;
1459 objcache_put(mbuf_cache
, m
);
1461 --mbstat
[mycpu
->gd_cpuid
].m_mbufs
;
1465 panic("bad mbuf flags %p %08x", m
, m
->m_flags
);
1474 _m_freem(struct mbuf
*m
, const char *func
)
1477 m
= _m_free(m
, func
);
1483 m_freem(struct mbuf
*m
)
void
m_extadd(struct mbuf *m, caddr_t buf, u_int size, void (*reff)(void *),
	 void (*freef)(void *), void *arg)
{
	m->m_ext.ext_arg = arg;
	m->m_ext.ext_buf = buf;
	m->m_ext.ext_ref = reff;
	m->m_ext.ext_free = freef;
	m->m_ext.ext_size = size;

	m->m_flags |= M_EXT;
}
1506 * mbuf utility routines
1510 * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain and
1514 m_prepend(struct mbuf
*m
, int len
, int how
)
1518 if (m
->m_flags
& M_PKTHDR
)
1519 mn
= m_gethdr(how
, m
->m_type
);
1521 mn
= m_get(how
, m
->m_type
);
1526 if (m
->m_flags
& M_PKTHDR
)
1527 M_MOVE_PKTHDR(mn
, m
);
1537 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
1538 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
1539 * The wait parameter is a choice of M_WAITOK/M_NOWAIT from caller.
1540 * Note that the copy is read-only, because clusters are not copied,
1541 * only their reference counts are incremented.
1544 m_copym(const struct mbuf
*m
, int off0
, int len
, int wait
)
1546 struct mbuf
*n
, **np
;
1551 KASSERT(off
>= 0, ("m_copym, negative off %d", off
));
1552 KASSERT(len
>= 0, ("m_copym, negative len %d", len
));
1553 if (off
== 0 && (m
->m_flags
& M_PKTHDR
))
1556 KASSERT(m
!= NULL
, ("m_copym, offset > size of mbuf chain"));
1566 KASSERT(len
== M_COPYALL
,
1567 ("m_copym, length > size of mbuf chain"));
1571 * Because we are sharing any cluster attachment below,
1572 * be sure to get an mbuf that does not have a cluster
1573 * associated with it.
1576 n
= m_gethdr(wait
, m
->m_type
);
1578 n
= m_get(wait
, m
->m_type
);
1583 if (!m_dup_pkthdr(n
, m
, wait
))
1585 if (len
== M_COPYALL
)
1586 n
->m_pkthdr
.len
-= off0
;
1588 n
->m_pkthdr
.len
= len
;
1591 n
->m_len
= min(len
, m
->m_len
- off
);
1592 if (m
->m_flags
& M_EXT
) {
1593 KKASSERT((n
->m_flags
& M_EXT
) == 0);
1594 n
->m_data
= m
->m_data
+ off
;
1595 m
->m_ext
.ext_ref(m
->m_ext
.ext_arg
);
1596 n
->m_ext
= m
->m_ext
;
1597 n
->m_flags
|= m
->m_flags
& (M_EXT
| M_EXT_CLUSTER
);
1599 bcopy(mtod(m
, caddr_t
)+off
, mtod(n
, caddr_t
),
1600 (unsigned)n
->m_len
);
1602 if (len
!= M_COPYALL
)
1609 ++mbstat
[mycpu
->gd_cpuid
].m_mcfail
;
1613 ++mbstat
[mycpu
->gd_cpuid
].m_mcfail
;
1618 * Copy an entire packet, including header (which must be present).
1619 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
1620 * Note that the copy is read-only, because clusters are not copied,
1621 * only their reference counts are incremented.
1622 * Preserve alignment of the first mbuf so if the creator has left
1623 * some room at the beginning (e.g. for inserting protocol headers)
1624 * the copies also have the room available.
1627 m_copypacket(struct mbuf
*m
, int how
)
1629 struct mbuf
*top
, *n
, *o
;
1631 n
= m_gethdr(how
, m
->m_type
);
1636 if (!m_dup_pkthdr(n
, m
, how
))
1638 n
->m_len
= m
->m_len
;
1639 if (m
->m_flags
& M_EXT
) {
1640 KKASSERT((n
->m_flags
& M_EXT
) == 0);
1641 n
->m_data
= m
->m_data
;
1642 m
->m_ext
.ext_ref(m
->m_ext
.ext_arg
);
1643 n
->m_ext
= m
->m_ext
;
1644 n
->m_flags
|= m
->m_flags
& (M_EXT
| M_EXT_CLUSTER
);
1646 n
->m_data
= n
->m_pktdat
+ (m
->m_data
- m
->m_pktdat
);
1647 bcopy(mtod(m
, char *), mtod(n
, char *), n
->m_len
);
1652 o
= m_get(how
, m
->m_type
);
1659 n
->m_len
= m
->m_len
;
1660 if (m
->m_flags
& M_EXT
) {
1661 KKASSERT((n
->m_flags
& M_EXT
) == 0);
1662 n
->m_data
= m
->m_data
;
1663 m
->m_ext
.ext_ref(m
->m_ext
.ext_arg
);
1664 n
->m_ext
= m
->m_ext
;
1665 n
->m_flags
|= m
->m_flags
& (M_EXT
| M_EXT_CLUSTER
);
1667 bcopy(mtod(m
, char *), mtod(n
, char *), n
->m_len
);
1675 ++mbstat
[mycpu
->gd_cpuid
].m_mcfail
;
/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
	unsigned count;

	KASSERT(off >= 0, ("m_copydata, negative off %d", off));
	KASSERT(len >= 0, ("m_copydata, negative len %d", len));
	while (off > 0) {
		KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
		count = min(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;
		m = m->m_next;
	}
}
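/*
 * Example (sketch): protocol input paths often use this to linearize a
 * small header out of a possibly fragmented chain into a local buffer
 * before inspecting it; 'struct hypothetical_hdr' stands in for a real
 * protocol header and m is assumed to be the packet header mbuf:
 *
 *	struct hypothetical_hdr hdr;
 *
 *	if (m->m_pkthdr.len < sizeof(hdr))
 *		goto drop;
 *	m_copydata(m, 0, sizeof(hdr), (caddr_t)&hdr);
 */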
1709 * Copy a packet header mbuf chain into a completely new chain, including
1710 * copying any mbuf clusters. Use this instead of m_copypacket() when
1711 * you need a writable copy of an mbuf chain.
1714 m_dup(struct mbuf
*m
, int how
)
1716 struct mbuf
**p
, *top
= NULL
;
1717 int remain
, moff
, nsize
;
1722 KASSERT((m
->m_flags
& M_PKTHDR
) != 0, ("%s: !PKTHDR", __func__
));
1724 /* While there's more data, get a new mbuf, tack it on, and fill it */
1725 remain
= m
->m_pkthdr
.len
;
1728 while (remain
> 0 || top
== NULL
) { /* allow m->m_pkthdr.len == 0 */
1731 /* Get the next new mbuf */
1732 n
= m_getl(remain
, how
, m
->m_type
, top
== NULL
? M_PKTHDR
: 0,
1737 if (!m_dup_pkthdr(n
, m
, how
))
1740 /* Link it into the new chain */
1744 /* Copy data from original mbuf(s) into new mbuf */
1746 while (n
->m_len
< nsize
&& m
!= NULL
) {
1747 int chunk
= min(nsize
- n
->m_len
, m
->m_len
- moff
);
1749 bcopy(m
->m_data
+ moff
, n
->m_data
+ n
->m_len
, chunk
);
1753 if (moff
== m
->m_len
) {
1759 /* Check correct total mbuf length */
1760 KASSERT((remain
> 0 && m
!= NULL
) || (remain
== 0 && m
== NULL
),
1761 ("%s: bogus m_pkthdr.len", __func__
));
1768 ++mbstat
[mycpu
->gd_cpuid
].m_mcfail
;
1773 * Copy the non-packet mbuf data chain into a new set of mbufs, including
1774 * copying any mbuf clusters. This is typically used to realign a data
1775 * chain by nfs_realign().
1777 * The original chain is left intact. how should be M_WAITOK or M_NOWAIT
1778 * and NULL can be returned if M_NOWAIT is passed.
1780 * Be careful to use cluster mbufs, a large mbuf chain converted to non
1781 * cluster mbufs can exhaust our supply of mbufs.
1784 m_dup_data(struct mbuf
*m
, int how
)
1786 struct mbuf
**p
, *n
, *top
= NULL
;
1787 int mlen
, moff
, chunk
, gsize
, nsize
;
1796 * Optimize the mbuf allocation but do not get too carried away.
1798 if (m
->m_next
|| m
->m_len
> MLEN
)
1799 if (m
->m_flags
& M_EXT
&& m
->m_ext
.ext_size
== MCLBYTES
)
1802 gsize
= MJUMPAGESIZE
;
1812 * Scan the mbuf chain until nothing is left, the new mbuf chain
1813 * will be allocated on the fly as needed.
1820 KKASSERT(m
->m_type
== MT_DATA
);
1822 n
= m_getl(gsize
, how
, MT_DATA
, 0, &nsize
);
1829 chunk
= imin(mlen
, nsize
);
1830 bcopy(m
->m_data
+ moff
, n
->m_data
+ n
->m_len
, chunk
);
1845 ++mbstat
[mycpu
->gd_cpuid
].m_mcfail
;
/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
	m = m_last(m);
	while (n) {
		if (m->m_flags & M_EXT ||
		    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		      (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}
1874 m_adj(struct mbuf
*mp
, int req_len
)
1880 if ((m
= mp
) == NULL
)
1886 while (m
!= NULL
&& len
> 0) {
1887 if (m
->m_len
<= len
) {
1898 if (mp
->m_flags
& M_PKTHDR
)
1899 m
->m_pkthdr
.len
-= (req_len
- len
);
1902 * Trim from tail. Scan the mbuf chain,
1903 * calculating its length and finding the last mbuf.
1904 * If the adjustment only affects this mbuf, then just
1905 * adjust and return. Otherwise, rescan and truncate
1906 * after the remaining size.
1912 if (m
->m_next
== NULL
)
1916 if (m
->m_len
>= len
) {
1918 if (mp
->m_flags
& M_PKTHDR
)
1919 mp
->m_pkthdr
.len
-= len
;
1926 * Correct length for chain is "count".
1927 * Find the mbuf with last data, adjust its length,
1928 * and toss data from remaining mbufs on chain.
1931 if (m
->m_flags
& M_PKTHDR
)
1932 m
->m_pkthdr
.len
= count
;
1933 for (; m
; m
= m
->m_next
) {
1934 if (m
->m_len
>= count
) {
1941 (m
= m
->m_next
) ->m_len
= 0;
/*
 * Set the m_data pointer of a newly-allocated mbuf
 * to place an object of the specified size at the
 * end of the mbuf, longword aligned.
 */
void
m_align(struct mbuf *m, int len)
{
	int adjust;

	if (m->m_flags & M_EXT)
		adjust = m->m_ext.ext_size - len;
	else if (m->m_flags & M_PKTHDR)
		adjust = MHLEN - len;
	else
		adjust = MLEN - len;
	m->m_data += adjust &~ (sizeof(long)-1);
}
1965 * Create a writable copy of the mbuf chain. While doing this
1966 * we compact the chain with a goal of producing a chain with
1967 * at most two mbufs. The second mbuf in this chain is likely
1968 * to be a cluster. The primary purpose of this work is to create
1969 * a writable packet for encryption, compression, etc. The
1970 * secondary goal is to linearize the data so the data can be
1971 * passed to crypto hardware in the most efficient manner possible.
1974 m_unshare(struct mbuf
*m0
, int how
)
1976 struct mbuf
*m
, *mprev
;
1977 struct mbuf
*n
, *mfirst
, *mlast
;
1981 for (m
= m0
; m
!= NULL
; m
= mprev
->m_next
) {
1983 * Regular mbufs are ignored unless there's a cluster
1984 * in front of it that we can use to coalesce. We do
1985 * the latter mainly so later clusters can be coalesced
1986 * also w/o having to handle them specially (i.e. convert
1987 * mbuf+cluster -> cluster). This optimization is heavily
1988 * influenced by the assumption that we're running over
1989 * Ethernet where MCLBYTES is large enough that the max
1990 * packet size will permit lots of coalescing into a
1991 * single cluster. This in turn permits efficient
1992 * crypto operations, especially when using hardware.
1994 if ((m
->m_flags
& M_EXT
) == 0) {
1995 if (mprev
&& (mprev
->m_flags
& M_EXT
) &&
1996 m
->m_len
<= M_TRAILINGSPACE(mprev
)) {
1997 /* XXX: this ignores mbuf types */
1998 memcpy(mtod(mprev
, caddr_t
) + mprev
->m_len
,
1999 mtod(m
, caddr_t
), m
->m_len
);
2000 mprev
->m_len
+= m
->m_len
;
2001 mprev
->m_next
= m
->m_next
; /* unlink from chain */
2002 m_free(m
); /* reclaim mbuf */
2009 * Writable mbufs are left alone (for now).
2011 if (M_WRITABLE(m
)) {
2017 * Not writable, replace with a copy or coalesce with
2018 * the previous mbuf if possible (since we have to copy
2019 * it anyway, we try to reduce the number of mbufs and
2020 * clusters so that future work is easier).
2022 KASSERT(m
->m_flags
& M_EXT
, ("m_flags 0x%x", m
->m_flags
));
2023 /* NB: we only coalesce into a cluster or larger */
2024 if (mprev
!= NULL
&& (mprev
->m_flags
& M_EXT
) &&
2025 m
->m_len
<= M_TRAILINGSPACE(mprev
)) {
2026 /* XXX: this ignores mbuf types */
2027 memcpy(mtod(mprev
, caddr_t
) + mprev
->m_len
,
2028 mtod(m
, caddr_t
), m
->m_len
);
2029 mprev
->m_len
+= m
->m_len
;
2030 mprev
->m_next
= m
->m_next
; /* unlink from chain */
2031 m_free(m
); /* reclaim mbuf */
2036 * Allocate new space to hold the copy...
2038 /* XXX why can M_PKTHDR be set past the first mbuf? */
2039 if (mprev
== NULL
&& (m
->m_flags
& M_PKTHDR
)) {
2041 * NB: if a packet header is present we must
2042 * allocate the mbuf separately from any cluster
2043 * because M_MOVE_PKTHDR will smash the data
2044 * pointer and drop the M_EXT marker.
2046 MGETHDR(n
, how
, m
->m_type
);
2051 M_MOVE_PKTHDR(n
, m
);
2053 if ((n
->m_flags
& M_EXT
) == 0) {
2059 n
= m_getcl(how
, m
->m_type
, m
->m_flags
);
2066 * ... and copy the data. We deal with jumbo mbufs
2067 * (i.e. m_len > MCLBYTES) by splitting them into
2068 * clusters. We could just malloc a buffer and make
2069 * it external but too many device drivers don't know
2070 * how to break up the non-contiguous memory when
2078 int cc
= min(len
, MCLBYTES
);
2079 memcpy(mtod(n
, caddr_t
), mtod(m
, caddr_t
) + off
, cc
);
2090 n
= m_getcl(how
, m
->m_type
, m
->m_flags
);
2097 n
->m_next
= m
->m_next
;
2099 m0
= mfirst
; /* new head of chain */
2101 mprev
->m_next
= mfirst
; /* replace old mbuf */
2102 m_free(m
); /* release old mbuf */
2109 * Rearrange an mbuf chain so that len bytes are contiguous
2110 * and in the data area of an mbuf (so that mtod will work for a structure
2111 * of size len). Returns the resulting mbuf chain on success, frees it and
2112 * returns null on failure. If there is room, it will add up to
2113 * max_protohdr-len extra bytes to the contiguous region in an attempt to
2114 * avoid being called next time.
2117 m_pullup(struct mbuf
*n
, int len
)
2124 * If first mbuf has no cluster, and has room for len bytes
2125 * without shifting current data, pullup into it,
2126 * otherwise allocate a new mbuf to prepend to the chain.
2128 if (!(n
->m_flags
& M_EXT
) &&
2129 n
->m_data
+ len
< &n
->m_dat
[MLEN
] &&
2131 if (n
->m_len
>= len
)
2139 if (n
->m_flags
& M_PKTHDR
)
2140 m
= m_gethdr(M_NOWAIT
, n
->m_type
);
2142 m
= m_get(M_NOWAIT
, n
->m_type
);
2146 if (n
->m_flags
& M_PKTHDR
)
2147 M_MOVE_PKTHDR(m
, n
);
2149 space
= &m
->m_dat
[MLEN
] - (m
->m_data
+ m
->m_len
);
2151 count
= min(min(max(len
, max_protohdr
), space
), n
->m_len
);
2152 bcopy(mtod(n
, caddr_t
), mtod(m
, caddr_t
) + m
->m_len
,
2162 } while (len
> 0 && n
);
2171 ++mbstat
[mycpu
->gd_cpuid
].m_mcfail
;
2176 * Partition an mbuf chain in two pieces, returning the tail --
2177 * all but the first len0 bytes. In case of failure, it returns NULL and
2178 * attempts to restore the chain to its original state.
2180 * Note that the resulting mbufs might be read-only, because the new
2181 * mbuf can end up sharing an mbuf cluster with the original mbuf if
2182 * the "breaking point" happens to lie within a cluster mbuf. Use the
2183 * M_WRITABLE() macro to check for this case.
2186 m_split(struct mbuf
*m0
, int len0
, int wait
)
2189 unsigned len
= len0
, remain
;
2191 for (m
= m0
; m
&& len
> m
->m_len
; m
= m
->m_next
)
2195 remain
= m
->m_len
- len
;
2196 if (m0
->m_flags
& M_PKTHDR
) {
2197 n
= m_gethdr(wait
, m0
->m_type
);
2200 n
->m_pkthdr
.rcvif
= m0
->m_pkthdr
.rcvif
;
2201 n
->m_pkthdr
.len
= m0
->m_pkthdr
.len
- len0
;
2202 m0
->m_pkthdr
.len
= len0
;
2203 if (m
->m_flags
& M_EXT
)
2205 if (remain
> MHLEN
) {
2206 /* m can't be the lead packet */
2208 n
->m_next
= m_split(m
, len
, wait
);
2209 if (n
->m_next
== NULL
) {
2217 MH_ALIGN(n
, remain
);
2218 } else if (remain
== 0) {
2223 n
= m_get(wait
, m
->m_type
);
2229 if (m
->m_flags
& M_EXT
) {
2230 KKASSERT((n
->m_flags
& M_EXT
) == 0);
2231 n
->m_data
= m
->m_data
+ len
;
2232 m
->m_ext
.ext_ref(m
->m_ext
.ext_arg
);
2233 n
->m_ext
= m
->m_ext
;
2234 n
->m_flags
|= m
->m_flags
& (M_EXT
| M_EXT_CLUSTER
);
2236 bcopy(mtod(m
, caddr_t
) + len
, mtod(n
, caddr_t
), remain
);
2240 n
->m_next
= m
->m_next
;
2246 * Routine to copy from device local memory into mbufs.
2247 * Note: "offset" is ill-defined and always called as 0, so ignore it.
2250 m_devget(char *buf
, int len
, int offset
, struct ifnet
*ifp
)
2252 struct mbuf
*m
, *mfirst
= NULL
, **mtail
;
2259 m
= m_getl(len
, M_NOWAIT
, MT_DATA
, flags
, &nsize
);
2264 m
->m_len
= min(len
, nsize
);
2266 if (flags
& M_PKTHDR
) {
2267 if (len
+ max_linkhdr
<= nsize
)
2268 m
->m_data
+= max_linkhdr
;
2269 m
->m_pkthdr
.rcvif
= ifp
;
2270 m
->m_pkthdr
.len
= len
;
2274 bcopy(buf
, m
->m_data
, (unsigned)m
->m_len
);
2285 * Routine to pad mbuf to the specified length 'padto'.
2288 m_devpad(struct mbuf
*m
, int padto
)
2290 struct mbuf
*last
= NULL
;
2293 if (padto
<= m
->m_pkthdr
.len
)
2296 padlen
= padto
- m
->m_pkthdr
.len
;
2298 /* if there's only the packet-header and we can pad there, use it. */
2299 if (m
->m_pkthdr
.len
== m
->m_len
&& M_TRAILINGSPACE(m
) >= padlen
) {
2303 * Walk packet chain to find last mbuf. We will either
2304 * pad there, or append a new mbuf and pad it
2306 for (last
= m
; last
->m_next
!= NULL
; last
= last
->m_next
)
2309 /* `last' now points to last in chain. */
2310 if (M_TRAILINGSPACE(last
) < padlen
) {
2313 /* Allocate new empty mbuf, pad it. Compact later. */
2314 MGET(n
, M_NOWAIT
, MT_DATA
);
2322 KKASSERT(M_TRAILINGSPACE(last
) >= padlen
);
2323 KKASSERT(M_WRITABLE(last
));
2325 /* Now zero the pad area */
2326 bzero(mtod(last
, char *) + last
->m_len
, padlen
);
2327 last
->m_len
+= padlen
;
2328 m
->m_pkthdr
.len
+= padlen
;
2333 * Copy data from a buffer back into the indicated mbuf chain,
2334 * starting "off" bytes from the beginning, extending the mbuf
2335 * chain if necessary.
2338 m_copyback(struct mbuf
*m0
, int off
, int len
, caddr_t cp
)
2341 struct mbuf
*m
= m0
, *n
;
2346 while (off
> (mlen
= m
->m_len
)) {
2349 if (m
->m_next
== NULL
) {
2350 n
= m_getclr(M_NOWAIT
, m
->m_type
);
2353 n
->m_len
= min(MLEN
, len
+ off
);
2359 mlen
= min (m
->m_len
- off
, len
);
2360 bcopy(cp
, off
+ mtod(m
, caddr_t
), (unsigned)mlen
);
2368 if (m
->m_next
== NULL
) {
2369 n
= m_get(M_NOWAIT
, m
->m_type
);
2372 n
->m_len
= min(MLEN
, len
);
2377 out
: if (((m
= m0
)->m_flags
& M_PKTHDR
) && (m
->m_pkthdr
.len
< totlen
))
2378 m
->m_pkthdr
.len
= totlen
;
2382 * Append the specified data to the indicated mbuf chain,
2383 * Extend the mbuf chain if the new data does not fit in
2386 * Return 1 if able to complete the job; otherwise 0.
2389 m_append(struct mbuf
*m0
, int len
, c_caddr_t cp
)
2392 int remainder
, space
;
2394 for (m
= m0
; m
->m_next
!= NULL
; m
= m
->m_next
)
2397 space
= M_TRAILINGSPACE(m
);
2400 * Copy into available space.
2402 if (space
> remainder
)
2404 bcopy(cp
, mtod(m
, caddr_t
) + m
->m_len
, space
);
2406 cp
+= space
, remainder
-= space
;
2408 while (remainder
> 0) {
2410 * Allocate a new mbuf; could check space
2411 * and allocate a cluster instead.
2413 n
= m_get(M_NOWAIT
, m
->m_type
);
2416 n
->m_len
= min(MLEN
, remainder
);
2417 bcopy(cp
, mtod(n
, caddr_t
), n
->m_len
);
2418 cp
+= n
->m_len
, remainder
-= n
->m_len
;
2422 if (m0
->m_flags
& M_PKTHDR
)
2423 m0
->m_pkthdr
.len
+= len
- remainder
;
2424 return (remainder
== 0);
2428 * Apply function f to the data in an mbuf chain starting "off" bytes from
2429 * the beginning, continuing for "len" bytes.
2432 m_apply(struct mbuf
*m
, int off
, int len
,
2433 int (*f
)(void *, void *, u_int
), void *arg
)
2438 KASSERT(off
>= 0, ("m_apply, negative off %d", off
));
2439 KASSERT(len
>= 0, ("m_apply, negative len %d", len
));
2441 KASSERT(m
!= NULL
, ("m_apply, offset > size of mbuf chain"));
2448 KASSERT(m
!= NULL
, ("m_apply, offset > size of mbuf chain"));
2449 count
= min(m
->m_len
- off
, len
);
2450 rval
= (*f
)(arg
, mtod(m
, caddr_t
) + off
, count
);
2461 * Return a pointer to mbuf/offset of location in mbuf chain.
2464 m_getptr(struct mbuf
*m
, int loc
, int *off
)
2468 /* Normal end of search. */
2469 if (m
->m_len
> loc
) {
2474 if (m
->m_next
== NULL
) {
2476 /* Point at the end of valid data. */
2489 m_print(const struct mbuf
*m
)
2492 const struct mbuf
*m2
;
2495 len
= m
->m_pkthdr
.len
;
2497 hexstr
= kmalloc(HEX_NCPYLEN(len
), M_TEMP
, M_ZERO
| M_WAITOK
);
2499 kprintf("%p %s\n", m2
, hexncpy(m2
->m_data
, m2
->m_len
, hexstr
,
2500 HEX_NCPYLEN(m2
->m_len
), "-"));
2504 kfree(hexstr
, M_TEMP
);
2509 * "Move" mbuf pkthdr from "from" to "to".
2510 * "from" must have M_PKTHDR set, and "to" must be empty.
2513 m_move_pkthdr(struct mbuf
*to
, struct mbuf
*from
)
2515 KASSERT((to
->m_flags
& M_PKTHDR
), ("m_move_pkthdr: not packet header"));
2517 to
->m_flags
|= from
->m_flags
& M_COPYFLAGS
;
2518 to
->m_pkthdr
= from
->m_pkthdr
; /* especially tags */
2519 SLIST_INIT(&from
->m_pkthdr
.tags
); /* purge tags from src */
2523 * Duplicate "from"'s mbuf pkthdr in "to".
2524 * "from" must have M_PKTHDR set, and "to" must be empty.
2525 * In particular, this does a deep copy of the packet tags.
2528 m_dup_pkthdr(struct mbuf
*to
, const struct mbuf
*from
, int how
)
2530 KASSERT((to
->m_flags
& M_PKTHDR
), ("m_dup_pkthdr: not packet header"));
2532 to
->m_flags
= (from
->m_flags
& M_COPYFLAGS
) |
2533 (to
->m_flags
& ~M_COPYFLAGS
);
2534 to
->m_pkthdr
= from
->m_pkthdr
;
2535 SLIST_INIT(&to
->m_pkthdr
.tags
);
2536 return (m_tag_copy_chain(to
, from
, how
));
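/*
 * Sketch of the common calling pattern, as used by the copy routines in
 * this file: allocate a fresh header mbuf, then duplicate the source
 * packet header into it, bailing out if the tag copy fails:
 *
 *	n = m_gethdr(how, m->m_type);
 *	if (n == NULL)
 *		return (NULL);
 *	if (!m_dup_pkthdr(n, m, how)) {
 *		m_free(n);
 *		return (NULL);
 *	}
 */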
2540 * Defragment a mbuf chain, returning the shortest possible
2541 * chain of mbufs and clusters. If allocation fails and
2542 * this cannot be completed, NULL will be returned, but
2543 * the passed in chain will be unchanged. Upon success,
2544 * the original chain will be freed, and the new chain
2547 * If a non-packet header is passed in, the original
2548 * mbuf (chain?) will be returned unharmed.
2550 * m_defrag_nofree doesn't free the passed in mbuf.
2553 m_defrag(struct mbuf
*m0
, int how
)
2557 if ((m_new
= m_defrag_nofree(m0
, how
)) == NULL
)
2565 m_defrag_nofree(struct mbuf
*m0
, int how
)
2567 struct mbuf
*m_new
= NULL
, *m_final
= NULL
;
2568 int progress
= 0, length
, nsize
;
2570 if (!(m0
->m_flags
& M_PKTHDR
))
2573 #ifdef MBUF_STRESS_TEST
2574 if (m_defragrandomfailures
) {
2575 int temp
= karc4random() & 0xff;
2581 m_final
= m_getl(m0
->m_pkthdr
.len
, how
, MT_DATA
, M_PKTHDR
, &nsize
);
2582 if (m_final
== NULL
)
2584 m_final
->m_len
= 0; /* in case m0->m_pkthdr.len is zero */
2586 if (m_dup_pkthdr(m_final
, m0
, how
) == 0)
2591 while (progress
< m0
->m_pkthdr
.len
) {
2592 length
= m0
->m_pkthdr
.len
- progress
;
2593 if (length
> MCLBYTES
)
2596 if (m_new
== NULL
) {
2597 m_new
= m_getl(length
, how
, MT_DATA
, 0, &nsize
);
2602 m_copydata(m0
, progress
, length
, mtod(m_new
, caddr_t
));
2604 m_new
->m_len
= length
;
2605 if (m_new
!= m_final
)
2606 m_cat(m_final
, m_new
);
2609 if (m0
->m_next
== NULL
)
2612 m_defragbytes
+= m_final
->m_pkthdr
.len
;
2623 * Move data from uio into mbufs.
2626 m_uiomove(struct uio
*uio
)
2628 struct mbuf
*m
; /* current working mbuf */
2629 struct mbuf
*head
= NULL
; /* result mbuf chain */
2630 struct mbuf
**mp
= &head
;
2631 int flags
= M_PKTHDR
;
2637 if (uio
->uio_resid
> INT_MAX
)
2640 resid
= (int)uio
->uio_resid
;
2641 m
= m_getl(resid
, M_WAITOK
, MT_DATA
, flags
, &nsize
);
2643 m
->m_pkthdr
.len
= 0;
2644 /* Leave room for protocol headers. */
2649 m
->m_len
= imin(nsize
, resid
);
2650 error
= uiomove(mtod(m
, caddr_t
), m
->m_len
, uio
);
2657 head
->m_pkthdr
.len
+= m
->m_len
;
2658 } while (uio
->uio_resid
> 0);
2668 m_last(struct mbuf
*m
)
/*
 * Return the number of bytes in an mbuf chain.
 * If lastm is not NULL, also return the last mbuf.
 */
u_int
m_lengthm(struct mbuf *m, struct mbuf **lastm)
{
	u_int len = 0;
	struct mbuf *prev = m;

	while (m) {
		len += m->m_len;
		prev = m;
		m = m->m_next;
	}
	if (lastm != NULL)
		*lastm = prev;
	return (len);
}

/*
 * Like m_lengthm(), except also keep track of mbuf usage.
 */
u_int
m_countm(struct mbuf *m, struct mbuf **lastm, u_int *pmbcnt)
{
	u_int len = 0, mbcnt = 0;
	struct mbuf *prev = m;

	while (m) {
		len += m->m_len;
		mbcnt += MSIZE;
		if (m->m_flags & M_EXT)
			mbcnt += m->m_ext.ext_size;
		prev = m;
		m = m->m_next;
	}
	if (lastm != NULL)
		*lastm = prev;
	*pmbcnt = mbcnt;
	return (len);
}