/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 2004 Jeffrey M. Hsu.  All rights reserved.
 *
 * License terms: all terms for the DragonFly license above plus the following:
 *
 * 4. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *
 *      This product includes software developed by Jeffrey M. Hsu
 *      for the DragonFly Project.
 *
 *    This requirement may be waived with permission from Jeffrey Hsu.
 *    This requirement will sunset and may be removed on July 8 2005,
 *    after which the standard DragonFly license (as shown above) will
 *    apply.
 */

/*
 * Copyright (c) 1982, 1986, 1988, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)uipc_mbuf.c 8.2 (Berkeley) 1/4/94
 * $FreeBSD: src/sys/kern/uipc_mbuf.c,v 1.51.2.24 2003/04/15 06:59:29 silby Exp $
 * $DragonFly: src/sys/kern/uipc_mbuf.c,v 1.55 2006/01/21 19:05:42 dillon Exp $
 */
#include "opt_param.h"
#include "opt_mbuf_stress_test.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
#include <sys/objcache.h>
#include <sys/protosw.h>
#include <sys/thread.h>
#include <sys/globaldata.h>
#include <sys/serialize.h>
#include <sys/thread2.h>

#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
/*
 * mbuf cluster meta-data
 */
struct mbcluster {
        int32_t mcl_refs;
        void    *mcl_data;
        struct lwkt_serialize mcl_serializer;
};
static void mbinit(void *);
SYSINIT(mbuf, SI_SUB_MBUF, SI_ORDER_FIRST, mbinit, NULL)

static u_long mbtypes[MT_NTYPES];

struct mbstat mbstat;
#ifdef MBUF_STRESS_TEST
int m_defragrandomfailures;
#endif
struct objcache *mbuf_cache, *mbufphdr_cache;
struct objcache *mclmeta_cache;
struct objcache *mbufcluster_cache, *mbufphdrcluster_cache;
SYSCTL_INT(_kern_ipc, KIPC_MAX_LINKHDR, max_linkhdr, CTLFLAG_RW,
        &max_linkhdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_PROTOHDR, max_protohdr, CTLFLAG_RW,
        &max_protohdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_HDR, max_hdr, CTLFLAG_RW, &max_hdr, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_MAX_DATALEN, max_datalen, CTLFLAG_RW,
        &max_datalen, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, mbuf_wait, CTLFLAG_RW,
        &mbuf_wait, 0, "");
SYSCTL_STRUCT(_kern_ipc, KIPC_MBSTAT, mbstat, CTLFLAG_RW, &mbstat, mbstat, "");
SYSCTL_OPAQUE(_kern_ipc, OID_AUTO, mbtypes, CTLFLAG_RD, mbtypes,
        sizeof(mbtypes), "LU", "");
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RW,
        &nmbclusters, 0, "Maximum number of mbuf clusters available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RW, &nmbufs, 0,
        "Maximum number of mbufs available");

SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragpackets, CTLFLAG_RD,
        &m_defragpackets, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragbytes, CTLFLAG_RD,
        &m_defragbytes, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defraguseless, CTLFLAG_RD,
        &m_defraguseless, 0, "");
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragfailure, CTLFLAG_RD,
        &m_defragfailure, 0, "");
#ifdef MBUF_STRESS_TEST
SYSCTL_INT(_kern_ipc, OID_AUTO, m_defragrandomfailures, CTLFLAG_RW,
        &m_defragrandomfailures, 0, "");
#endif
static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");
static MALLOC_DEFINE(M_MBUFCL, "mbufcl", "mbufcl");
static MALLOC_DEFINE(M_MCLMETA, "mclmeta", "mclmeta");

static void m_reclaim(void);
static void m_mclref(void *arg);
static void m_mclfree(void *arg);
#define NMBCLUSTERS     (512 + maxusers * 16)
#define NMBUFS          (nmbclusters * 2)
/*
 * Perform sanity checks of tunables declared above.
 */
static void
tunable_mbinit(void *dummy)
{
        /*
         * This has to be done before VM init.
         */
        nmbclusters = NMBCLUSTERS;
        TUNABLE_INT_FETCH("kern.ipc.nmbclusters", &nmbclusters);
        nmbufs = NMBUFS;
        TUNABLE_INT_FETCH("kern.ipc.nmbufs", &nmbufs);

        /* Sanity checks */
        if (nmbufs < nmbclusters * 2)
                nmbufs = nmbclusters * 2;
}
SYSINIT(tunable_mbinit, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_mbinit, NULL);
/* "number of clusters of pages" */
/*
 * The mbuf object cache only guarantees that m_next and m_nextpkt are
 * NULL and that m_data points to the beginning of the data area.  In
 * particular, m_len and m_pkthdr.len are uninitialized.  It is the
 * responsibility of the caller to initialize those fields before use.
 */
static boolean_t __inline
mbuf_ctor(void *obj, void *private, int ocflags)
{
        struct mbuf *m = obj;

        m->m_next = NULL;
        m->m_nextpkt = NULL;
        m->m_data = m->m_dat;
        m->m_flags = 0;

        return (TRUE);
}
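
/*
 * Illustrative sketch (not part of the original file): because the
 * object cache leaves m_len uninitialized, a typical caller of the raw
 * allocator initializes it explicitly before touching the data area:
 *
 *      struct mbuf *m;
 *
 *      m = m_get(MB_DONTWAIT, MT_DATA);
 *      if (m != NULL)
 *              m->m_len = 0;           (caller's responsibility)
 */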
/*
 * Initialize the mbuf and the packet header fields.
 */
static boolean_t __inline
mbufphdr_ctor(void *obj, void *private, int ocflags)
{
        struct mbuf *m = obj;

        m->m_next = NULL;
        m->m_nextpkt = NULL;
        m->m_data = m->m_pktdat;
        m->m_flags = M_PKTHDR | M_PHCACHE;

        m->m_pkthdr.rcvif = NULL;       /* eliminate XXX JH */
        SLIST_INIT(&m->m_pkthdr.tags);
        m->m_pkthdr.csum_flags = 0;     /* eliminate XXX JH */
        m->m_pkthdr.fw_flags = 0;       /* eliminate XXX JH */

        return (TRUE);
}
/*
 * An mbcluster object consists of a 2K (MCLBYTES) cluster and a refcount.
 */
static boolean_t __inline
mclmeta_ctor(void *obj, void *private, int ocflags)
{
        struct mbcluster *cl = obj;
        void *buf;

        if (ocflags & M_NOWAIT)
                buf = malloc(MCLBYTES, M_MBUFCL, M_NOWAIT | M_ZERO);
        else
                buf = malloc(MCLBYTES, M_MBUFCL, M_INTWAIT | M_ZERO);
        if (buf == NULL)
                return (FALSE);
        cl->mcl_refs = 0;
        cl->mcl_data = buf;
        lwkt_serialize_init(&cl->mcl_serializer);
        return (TRUE);
}
static void
mclmeta_dtor(void *obj, void *private)
{
        struct mbcluster *mcl = obj;

        KKASSERT(mcl->mcl_refs == 0);
        free(mcl->mcl_data, M_MBUFCL);
}
static void
linkcluster(struct mbuf *m, struct mbcluster *cl)
{
        /*
         * Add the cluster to the mbuf.  The caller will detect that the
         * mbuf now has an attached cluster.
         */
        m->m_ext.ext_arg = cl;
        m->m_ext.ext_buf = cl->mcl_data;
        m->m_ext.ext_ref = m_mclref;
        m->m_ext.ext_free = m_mclfree;
        m->m_ext.ext_size = MCLBYTES;
        atomic_add_int(&cl->mcl_refs, 1);

        m->m_data = m->m_ext.ext_buf;
        m->m_flags |= M_EXT | M_EXT_CLUSTER;
}
static boolean_t __inline
mbufphdrcluster_ctor(void *obj, void *private, int ocflags)
{
        struct mbuf *m = obj;
        struct mbcluster *cl;

        mbufphdr_ctor(obj, private, ocflags);
        cl = objcache_get(mclmeta_cache, ocflags);
        if (cl == NULL)
                return (FALSE);
        m->m_flags |= M_CLCACHE;
        linkcluster(m, cl);
        return (TRUE);
}
static boolean_t __inline
mbufcluster_ctor(void *obj, void *private, int ocflags)
{
        struct mbuf *m = obj;
        struct mbcluster *cl;

        mbuf_ctor(obj, private, ocflags);
        cl = objcache_get(mclmeta_cache, ocflags);
        if (cl == NULL)
                return (FALSE);
        m->m_flags |= M_CLCACHE;
        linkcluster(m, cl);
        return (TRUE);
}
/*
 * Used for both the cluster and cluster PHDR caches.
 *
 * The mbuf may have lost its cluster due to sharing, deal
 * with the situation by checking M_EXT.
 */
static void
mbufcluster_dtor(void *obj, void *private)
{
        struct mbuf *m = obj;
        struct mbcluster *mcl;

        if (m->m_flags & M_EXT) {
                KKASSERT((m->m_flags & M_EXT_CLUSTER) != 0);
                mcl = m->m_ext.ext_arg;
                KKASSERT(mcl->mcl_refs == 1);
                mcl->mcl_refs = 0;
                objcache_put(mclmeta_cache, mcl);
        }
}
struct objcache_malloc_args mbuf_malloc_args = { MSIZE, M_MBUF };
struct objcache_malloc_args mclmeta_malloc_args =
        { sizeof(struct mbcluster), M_MCLMETA };
static void
mbinit(void *dummy)
{
        mbstat.m_msize = MSIZE;
        mbstat.m_mclbytes = MCLBYTES;
        mbstat.m_minclsize = MINCLSIZE;
        mbstat.m_mlen = MLEN;
        mbstat.m_mhlen = MHLEN;

        mbuf_cache = objcache_create("mbuf", nmbufs, 0,
            mbuf_ctor, null_dtor, NULL,
            objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
        mbufphdr_cache = objcache_create("mbuf pkt hdr", nmbufs, 64,
            mbufphdr_ctor, null_dtor, NULL,
            objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
        mclmeta_cache = objcache_create("cluster mbuf", nmbclusters, 0,
            mclmeta_ctor, mclmeta_dtor, NULL,
            objcache_malloc_alloc, objcache_malloc_free, &mclmeta_malloc_args);
        mbufcluster_cache = objcache_create("mbuf + cluster", nmbclusters, 0,
            mbufcluster_ctor, mbufcluster_dtor, NULL,
            objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
        mbufphdrcluster_cache = objcache_create("mbuf pkt hdr + cluster",
            nmbclusters, 64, mbufphdrcluster_ctor, mbufcluster_dtor, NULL,
            objcache_malloc_alloc, objcache_malloc_free, &mbuf_malloc_args);
}
/*
 * Return the number of references to this mbuf's data.  0 is returned
 * if the mbuf is not M_EXT, a reference count is returned if it is
 * M_EXT | M_EXT_CLUSTER, and 99 is returned if it is a special M_EXT.
 */
int
m_sharecount(struct mbuf *m)
{
        switch (m->m_flags & (M_EXT | M_EXT_CLUSTER)) {
        case 0:
                return (0);
        case M_EXT:
                return (99);
        case M_EXT | M_EXT_CLUSTER:
                return (((struct mbcluster *)m->m_ext.ext_arg)->mcl_refs);
        }
        /* NOTREACHED */
        return (0);             /* to shut up compiler */
}
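
/*
 * Usage sketch (not from the original source): m_sharecount() is what
 * makes writability checks possible.  Code that wants to modify
 * cluster-backed data in place can verify sole ownership first:
 *
 *      if ((m->m_flags & M_EXT) == 0 || m_sharecount(m) == 1) {
 *              (modifying mtod(m, ...) in place is safe here)
 *      } else {
 *              (the data is shared; copy first, e.g. with m_dup())
 *      }
 */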
/*
 * change mbuf to new type
 */
void
m_chtype(struct mbuf *m, int type)
{
        ++mbtypes[type];
        --mbtypes[m->m_type];
        m->m_type = type;
}
static void
m_reclaim(void)
{
        struct domain *dp;
        struct protosw *pr;

        SLIST_FOREACH(dp, &domains, dom_next) {
                for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
                        if (pr->pr_drain)
                                (*pr->pr_drain)();
                }
        }
        ++mbstat.m_drain;
}
static void
updatestats(struct mbuf *m, int type)
{
        m->m_type = type;
        ++mbtypes[type];
        ++mbstat.m_mbufs;
}
struct mbuf *
m_get(int how, int type)
{
        struct mbuf *m;
        int ntries = 0;
        int ocf = MBTOM(how);

retryonce:

        m = objcache_get(mbuf_cache, ocf);

        if (m == NULL) {
                if ((how & MB_TRYWAIT) && ntries++ == 0) {
                        struct objcache *reclaimlist[] = {
                                mbufphdr_cache,
                                mbufcluster_cache, mbufphdrcluster_cache
                        };
                        const int nreclaims = __arysize(reclaimlist);

                        if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
                                m_reclaim();
                        goto retryonce;
                }
                return (NULL);
        }

        updatestats(m, type);
        return (m);
}
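
/*
 * Usage sketch (not from the original source): MB_TRYWAIT lets the
 * allocator make one reclaim pass over the other caches before
 * failing, so a process-context caller typically looks like:
 *
 *      struct mbuf *m;
 *
 *      m = m_get(MB_TRYWAIT, MT_DATA);
 *      if (m == NULL)
 *              return (ENOBUFS);       (hypothetical error path)
 *      m->m_len = 0;
 */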
struct mbuf *
m_gethdr(int how, int type)
{
        struct mbuf *m;
        int ocf = MBTOM(how);
        int ntries = 0;

retryonce:

        m = objcache_get(mbufphdr_cache, ocf);

        if (m == NULL) {
                if ((how & MB_TRYWAIT) && ntries++ == 0) {
                        struct objcache *reclaimlist[] = {
                                mbuf_cache,
                                mbufcluster_cache, mbufphdrcluster_cache
                        };
                        const int nreclaims = __arysize(reclaimlist);

                        if (!objcache_reclaimlist(reclaimlist, nreclaims, ocf))
                                m_reclaim();
                        goto retryonce;
                }
                return (NULL);
        }

        updatestats(m, type);
        return (m);
}
/*
 * Get a mbuf (not a mbuf cluster!) and zero it.
 */
struct mbuf *
m_getclr(int how, int type)
{
        struct mbuf *m;

        m = m_get(how, type);
        if (m != NULL)
                bzero(m->m_data, MLEN);
        return (m);
}
/*
 * Returns an mbuf with an attached cluster.
 * Because many network drivers use this kind of buffers a lot, it is
 * convenient to keep a small pool of free buffers of this kind.
 * Even a small size such as 10 gives about 10% improvement in the
 * forwarding rate in a bridge or router.
 */
struct mbuf *
m_getcl(int how, short type, int flags)
{
        struct mbuf *m;
        int ocflags = MBTOM(how);
        int ntries = 0;

retryonce:

        if (flags & M_PKTHDR)
                m = objcache_get(mbufphdrcluster_cache, ocflags);
        else
                m = objcache_get(mbufcluster_cache, ocflags);

        if (m == NULL) {
                if ((how & MB_TRYWAIT) && ntries++ == 0) {
                        struct objcache *reclaimlist[1];

                        if (flags & M_PKTHDR)
                                reclaimlist[0] = mbufcluster_cache;
                        else
                                reclaimlist[0] = mbufphdrcluster_cache;
                        if (!objcache_reclaimlist(reclaimlist, 1, ocflags))
                                m_reclaim();
                        goto retryonce;
                }
                return (NULL);
        }

        m->m_type = type;
        ++mbstat.m_clusters;
        return (m);
}
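
/*
 * Usage sketch (not from the original source): a driver receive path
 * that wants a packet header plus a full cluster in a single
 * allocation:
 *
 *      m = m_getcl(MB_DONTWAIT, MT_DATA, M_PKTHDR);
 *      if (m == NULL)
 *              return;                 (drop the frame)
 *      m->m_len = m->m_pkthdr.len = MCLBYTES;
 */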
/*
 * Allocate chain of requested length.
 */
struct mbuf *
m_getc(int len, int how, int type)
{
        struct mbuf *n, *nfirst = NULL, **ntail = &nfirst;
        int nsize;

        while (len > 0) {
                n = m_getl(len, how, type, 0, &nsize);
                if (n == NULL)
                        goto failed;
                n->m_len = 0;
                *ntail = n;
                ntail = &n->m_next;
                len -= nsize;
        }
        return (nfirst);

failed:
        m_freem(nfirst);
        return (NULL);
}
/*
 * Allocate len-worth of mbufs and/or mbuf clusters (whatever fits best)
 * and return a pointer to the head of the allocated chain.  If m0 is
 * non-null, then we assume that it is a single mbuf or an mbuf chain to
 * which we want len bytes worth of mbufs and/or clusters attached, and so
 * if we succeed in allocating it, we will just return a pointer to m0.
 *
 * If we happen to fail at any point during the allocation, we will free
 * up everything we have already allocated and return NULL.
 *
 * Deprecated.  Use m_getc() and m_cat() instead.
 */
struct mbuf *
m_getm(struct mbuf *m0, int len, int type, int how)
{
        struct mbuf *nfirst;

        nfirst = m_getc(len, how, type);

        if (m0 != NULL) {
                m_last(m0)->m_next = nfirst;
                return (m0);
        }

        return (nfirst);
}
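
/*
 * Replacement sketch for the deprecated interface (not from the
 * original source): build the extension chain with m_getc(), fill it,
 * and only then merge it onto an existing chain with m_cat():
 *
 *      n = m_getc(len, how, type);
 *      if (n == NULL)
 *              return (NULL);
 *      (... copy len bytes of data into n ...)
 *      m_cat(m0, n);
 */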
/*
 * Adds a cluster to a normal mbuf, M_EXT is set on success.
 * Deprecated.  Use m_getcl() instead.
 */
void
m_mclget(struct mbuf *m, int how)
{
        struct mbcluster *mcl;

        KKASSERT((m->m_flags & M_EXT) == 0);
        mcl = objcache_get(mclmeta_cache, MBTOM(how));
        if (mcl != NULL) {
                linkcluster(m, mcl);
                ++mbstat.m_clusters;
                /* leave the m_mbufs count intact for original mbuf */
        }
}
/*
 * Updates to mbcluster must be MPSAFE.  Only an entity which already has
 * a reference to the cluster can ref it, so we are in no danger of
 * racing an add with a subtract.  But the operation must still be atomic
 * since multiple entities may have a reference on the cluster.
 *
 * m_mclfree() is almost the same but it must contend with two entities
 * freeing the cluster at the same time.  If there is only one reference
 * count we are the only entity referencing the cluster and no further
 * locking is required.  Otherwise we must protect against a race to 0
 * with the serializer.
 */
static void
m_mclref(void *arg)
{
        struct mbcluster *mcl = arg;

        atomic_add_int(&mcl->mcl_refs, 1);
}

static void
m_mclfree(void *arg)
{
        struct mbcluster *mcl = arg;

        if (mcl->mcl_refs == 1) {
                mcl->mcl_refs = 0;
                objcache_put(mclmeta_cache, mcl);
        } else {
                lwkt_serialize_enter(&mcl->mcl_serializer);
                if (mcl->mcl_refs > 1) {
                        atomic_subtract_int(&mcl->mcl_refs, 1);
                        lwkt_serialize_exit(&mcl->mcl_serializer);
                } else {
                        lwkt_serialize_exit(&mcl->mcl_serializer);
                        KKASSERT(mcl->mcl_refs == 1);
                        mcl->mcl_refs = 0;
                        objcache_put(mclmeta_cache, mcl);
                }
        }
}
extern void db_print_backtrace(void);
/*
 * Free a single mbuf and any associated external storage.  The successor,
 * if any, is returned.
 *
 * We do need to check non-first mbuf for m_aux, since some of existing
 * code does not call M_PREPEND properly.
 * (example: call to bpf_mtap from drivers)
 */
struct mbuf *
m_free(struct mbuf *m)
{
        struct mbuf *n;

        KASSERT(m->m_type != MT_FREE, ("freeing free mbuf %p", m));
        --mbtypes[m->m_type];

        n = m->m_next;

        /*
         * Make sure the mbuf is in constructed state before returning it
         * to the objcache.
         */
        m->m_next = NULL;
#ifdef notyet
        KKASSERT(m->m_nextpkt == NULL);
#else
        if (m->m_nextpkt != NULL) {
                static int afewtimes = 10;

                if (afewtimes-- > 0) {
                        printf("mfree: m->m_nextpkt != NULL\n");
                        db_print_backtrace();
                }
                m->m_nextpkt = NULL;
        }
#endif
        if (m->m_flags & M_PKTHDR) {
                m_tag_delete_chain(m);          /* eliminate XXX JH */
        }

        m->m_flags &= (M_EXT | M_EXT_CLUSTER | M_CLCACHE | M_PHCACHE);

        /*
         * Clean the M_PKTHDR state so we can return the mbuf to its original
         * cache.  This is based on the PHCACHE flag which tells us whether
         * the mbuf was originally allocated out of a packet-header cache
         * or a non-packet-header cache.
         */
        if (m->m_flags & M_PHCACHE) {
                m->m_flags |= M_PKTHDR;
                m->m_pkthdr.rcvif = NULL;       /* eliminate XXX JH */
                m->m_pkthdr.csum_flags = 0;     /* eliminate XXX JH */
                m->m_pkthdr.fw_flags = 0;       /* eliminate XXX JH */
                SLIST_INIT(&m->m_pkthdr.tags);
        }

        /*
         * Handle remaining flags combinations.  M_CLCACHE tells us whether
         * the mbuf was originally allocated from a cluster cache or not,
         * and is totally separate from whether the mbuf is currently
         * associated with a cluster.
         */
        switch (m->m_flags & (M_CLCACHE | M_EXT | M_EXT_CLUSTER)) {
        case M_CLCACHE | M_EXT | M_EXT_CLUSTER:
                /*
                 * mbuf+cluster cache case.  The mbuf was allocated from the
                 * combined mbuf_cluster cache and can be returned to the
                 * cache if the cluster hasn't been shared.
                 */
                if (m_sharecount(m) == 1) {
                        /*
                         * The cluster has not been shared, we can just
                         * reset the data pointer and return the mbuf
                         * to the cluster cache.  Note that the reference
                         * count is left intact (it is still associated with
                         * an mbuf).
                         */
                        m->m_data = m->m_ext.ext_buf;
                        if (m->m_flags & M_PHCACHE)
                                objcache_put(mbufphdrcluster_cache, m);
                        else
                                objcache_put(mbufcluster_cache, m);
                } else {
                        /*
                         * Hell.  Someone else has a ref on this cluster,
                         * we have to disconnect it which means we can't
                         * put it back into the mbufcluster_cache, we
                         * have to destroy the mbuf.
                         *
                         * Other mbuf references to the cluster will typically
                         * be M_EXT | M_EXT_CLUSTER but without M_CLCACHE.
                         *
                         * XXX we could try to connect another cluster to
                         * it.
                         */
                        m->m_ext.ext_free(m->m_ext.ext_arg);
                        m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
                        if (m->m_flags & M_PHCACHE)
                                objcache_dtor(mbufphdrcluster_cache, m);
                        else
                                objcache_dtor(mbufcluster_cache, m);
                }
                break;
        case M_EXT | M_EXT_CLUSTER:
                /*
                 * Normal cluster associated with an mbuf that was allocated
                 * from the normal mbuf pool rather than the cluster pool.
                 * The cluster has to be independently disassociated from the
                 * mbuf.
                 */
                if (m_sharecount(m) == 1)
                        --mbstat.m_clusters;
                /* FALLTHROUGH */
        case M_EXT:
                /*
                 * Normal cluster association case, disconnect the cluster from
                 * the mbuf.  The cluster may or may not be custom.
                 */
                m->m_ext.ext_free(m->m_ext.ext_arg);
                m->m_flags &= ~(M_EXT | M_EXT_CLUSTER);
                /* FALLTHROUGH */
        case 0:
                /*
                 * return the mbuf to the mbuf cache.
                 */
                if (m->m_flags & M_PHCACHE) {
                        m->m_data = m->m_pktdat;
                        objcache_put(mbufphdr_cache, m);
                } else {
                        m->m_data = m->m_dat;
                        objcache_put(mbuf_cache, m);
                }
                break;
        default:
                panic("bad mbuf flags %p %08x\n", m, m->m_flags);
                break;
        }
        return (n);
}
void
m_freem(struct mbuf *m)
{
        while (m)
                m = m_free(m);
}
/*
 * mbuf utility routines
 */
/*
 * Lesser-used path for M_PREPEND: allocate new mbuf to prepend to chain and
 * copy junk along.
 */
struct mbuf *
m_prepend(struct mbuf *m, int len, int how)
{
        struct mbuf *mn;

        if (m->m_flags & M_PKTHDR)
                mn = m_gethdr(how, m->m_type);
        else
                mn = m_get(how, m->m_type);
        if (mn == NULL) {
                m_freem(m);
                return (NULL);
        }
        if (m->m_flags & M_PKTHDR)
                M_MOVE_PKTHDR(mn, m);
        mn->m_next = m;
        m = mn;
        if (len < MHLEN)
                MH_ALIGN(m, len);
        m->m_len = len;
        return (m);
}
/*
 * Make a copy of an mbuf chain starting "off0" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * The wait parameter is a choice of MB_WAIT/MB_DONTWAIT from caller.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 */
struct mbuf *
m_copym(const struct mbuf *m, int off0, int len, int wait)
{
        struct mbuf *n, **np;
        int off = off0;
        struct mbuf *top;
        int copyhdr = 0;

        KASSERT(off >= 0, ("m_copym, negative off %d", off));
        KASSERT(len >= 0, ("m_copym, negative len %d", len));
        if (off == 0 && m->m_flags & M_PKTHDR)
                copyhdr = 1;
        while (off > 0) {
                KASSERT(m != NULL, ("m_copym, offset > size of mbuf chain"));
                if (off < m->m_len)
                        break;
                off -= m->m_len;
                m = m->m_next;
        }
        np = &top;
        top = NULL;
        while (len > 0) {
                if (m == NULL) {
                        KASSERT(len == M_COPYALL,
                            ("m_copym, length > size of mbuf chain"));
                        break;
                }
                /*
                 * Because we are sharing any cluster attachment below,
                 * be sure to get an mbuf that does not have a cluster
                 * associated with it.
                 */
                if (copyhdr)
                        n = m_gethdr(wait, m->m_type);
                else
                        n = m_get(wait, m->m_type);
                *np = n;
                if (n == NULL)
                        goto nospace;
                if (copyhdr) {
                        if (!m_dup_pkthdr(n, m, wait))
                                goto nospace;
                        if (len == M_COPYALL)
                                n->m_pkthdr.len -= off0;
                        else
                                n->m_pkthdr.len = len;
                        copyhdr = 0;
                }
                n->m_len = min(len, m->m_len - off);
                if (m->m_flags & M_EXT) {
                        KKASSERT((n->m_flags & M_EXT) == 0);
                        n->m_data = m->m_data + off;
                        m->m_ext.ext_ref(m->m_ext.ext_arg);
                        n->m_ext = m->m_ext;
                        n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
                } else {
                        bcopy(mtod(m, caddr_t) + off, mtod(n, caddr_t),
                            (unsigned)n->m_len);
                }
                if (len != M_COPYALL)
                        len -= n->m_len;
                off = 0;
                m = m->m_next;
                np = &n->m_next;
        }
        return (top);

nospace:
        m_freem(top);
        return (NULL);
}
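
/*
 * Usage sketch (not from the original source): a tap-style consumer
 * copies the whole packet cheaply; the result shares any clusters and
 * must be treated as read-only unless M_WRITABLE() says otherwise:
 *
 *      n = m_copym(m, 0, M_COPYALL, MB_DONTWAIT);
 *      if (n != NULL && !M_WRITABLE(n)) {
 *              (only inspect n, do not modify it)
 *      }
 */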
/*
 * Copy an entire packet, including header (which must be present).
 * An optimization of the common case `m_copym(m, 0, M_COPYALL, how)'.
 * Note that the copy is read-only, because clusters are not copied,
 * only their reference counts are incremented.
 * Preserve alignment of the first mbuf so if the creator has left
 * some room at the beginning (e.g. for inserting protocol headers)
 * the copies also have the room available.
 */
struct mbuf *
m_copypacket(struct mbuf *m, int how)
{
        struct mbuf *top, *n, *o;

        n = m_gethdr(how, m->m_type);
        top = n;
        if (n == NULL)
                goto nospace;

        if (!m_dup_pkthdr(n, m, how))
                goto nospace;
        n->m_len = m->m_len;
        if (m->m_flags & M_EXT) {
                KKASSERT((n->m_flags & M_EXT) == 0);
                n->m_data = m->m_data;
                m->m_ext.ext_ref(m->m_ext.ext_arg);
                n->m_ext = m->m_ext;
                n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
        } else {
                n->m_data = n->m_pktdat + (m->m_data - m->m_pktdat);
                bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
        }

        m = m->m_next;
        while (m) {
                o = m_get(how, m->m_type);
                if (o == NULL)
                        goto nospace;

                n->m_next = o;
                n = n->m_next;

                n->m_len = m->m_len;
                if (m->m_flags & M_EXT) {
                        KKASSERT((n->m_flags & M_EXT) == 0);
                        n->m_data = m->m_data;
                        m->m_ext.ext_ref(m->m_ext.ext_arg);
                        n->m_ext = m->m_ext;
                        n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
                } else {
                        bcopy(mtod(m, char *), mtod(n, char *), n->m_len);
                }

                m = m->m_next;
        }
        return (top);

nospace:
        m_freem(top);
        return (NULL);
}
/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
void
m_copydata(const struct mbuf *m, int off, int len, caddr_t cp)
{
        unsigned count;

        KASSERT(off >= 0, ("m_copydata, negative off %d", off));
        KASSERT(len >= 0, ("m_copydata, negative len %d", len));
        while (off > 0) {
                KASSERT(m != NULL, ("m_copydata, offset > size of mbuf chain"));
                if (off < m->m_len)
                        break;
                off -= m->m_len;
                m = m->m_next;
        }
        while (len > 0) {
                KASSERT(m != NULL, ("m_copydata, length > size of mbuf chain"));
                count = min(m->m_len - off, len);
                bcopy(mtod(m, caddr_t) + off, cp, count);
                len -= count;
                cp += count;
                off = 0;
                m = m->m_next;
        }
}
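
/*
 * Usage sketch (not from the original source): linearize a protocol
 * header into a local buffer no matter how the chain is fragmented
 * ("hdroff" is a hypothetical offset):
 *
 *      struct tcphdr th;
 *
 *      m_copydata(m, hdroff, sizeof(th), (caddr_t)&th);
 */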
/*
 * Copy a packet header mbuf chain into a completely new chain, including
 * copying any mbuf clusters.  Use this instead of m_copypacket() when
 * you need a writable copy of an mbuf chain.
 */
struct mbuf *
m_dup(struct mbuf *m, int how)
{
        struct mbuf **p, *top = NULL;
        int remain, moff, nsize;

        /* Sanity check */
        if (m == NULL)
                return (NULL);
        KASSERT((m->m_flags & M_PKTHDR) != 0, ("%s: !PKTHDR", __func__));

        /* While there's more data, get a new mbuf, tack it on, and fill it */
        remain = m->m_pkthdr.len;
        moff = 0;
        p = &top;
        while (remain > 0 || top == NULL) {     /* allow m->m_pkthdr.len == 0 */
                struct mbuf *n;

                /* Get the next new mbuf */
                n = m_getl(remain, how, m->m_type, top == NULL ? M_PKTHDR : 0,
                    &nsize);
                if (n == NULL)
                        goto nospace;
                if (top == NULL)
                        if (!m_dup_pkthdr(n, m, how))
                                goto nospace0;

                /* Link it into the new chain */
                *p = n;
                p = &n->m_next;

                /* Copy data from original mbuf(s) into new mbuf */
                n->m_len = 0;
                while (n->m_len < nsize && m != NULL) {
                        int chunk = min(nsize - n->m_len, m->m_len - moff);

                        bcopy(m->m_data + moff, n->m_data + n->m_len, chunk);
                        moff += chunk;
                        n->m_len += chunk;
                        remain -= chunk;
                        if (moff == m->m_len) {
                                m = m->m_next;
                                moff = 0;
                        }
                }

                /* Check correct total mbuf length */
                KASSERT((remain > 0 && m != NULL) || (remain == 0 && m == NULL),
                    ("%s: bogus m_pkthdr.len", __func__));
        }
        return (top);

nospace:
        m_freem(top);
nospace0:
        return (NULL);
}
/*
 * Concatenate mbuf chain n to m.
 * Both chains must be of the same type (e.g. MT_DATA).
 * Any m_pkthdr is not updated.
 */
void
m_cat(struct mbuf *m, struct mbuf *n)
{
        m = m_last(m);
        while (n) {
                if (m->m_flags & M_EXT ||
                    m->m_data + m->m_len + n->m_len >= &m->m_dat[MLEN]) {
                        /* just join the two chains */
                        m->m_next = n;
                        return;
                }
                /* splat the data from one into the other */
                bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
                    (u_int)n->m_len);
                m->m_len += n->m_len;
                n = m_free(n);
        }
}
void
m_adj(struct mbuf *mp, int req_len)
{
        int len = req_len;
        struct mbuf *m;
        int count;

        if ((m = mp) == NULL)
                return;
        if (len >= 0) {
                /*
                 * Trim from head.
                 */
                while (m != NULL && len > 0) {
                        if (m->m_len <= len) {
                                len -= m->m_len;
                                m->m_len = 0;
                                m = m->m_next;
                        } else {
                                m->m_len -= len;
                                m->m_data += len;
                                len = 0;
                        }
                }
                m = mp;
                if (mp->m_flags & M_PKTHDR)
                        m->m_pkthdr.len -= (req_len - len);
        } else {
                /*
                 * Trim from tail.  Scan the mbuf chain,
                 * calculating its length and finding the last mbuf.
                 * If the adjustment only affects this mbuf, then just
                 * adjust and return.  Otherwise, rescan and truncate
                 * after the remaining size.
                 */
                len = -len;
                count = 0;
                for (;;) {
                        count += m->m_len;
                        if (m->m_next == (struct mbuf *)0)
                                break;
                        m = m->m_next;
                }
                if (m->m_len >= len) {
                        m->m_len -= len;
                        if (mp->m_flags & M_PKTHDR)
                                mp->m_pkthdr.len -= len;
                        return;
                }
                count -= len;
                if (count < 0)
                        count = 0;
                /*
                 * Correct length for chain is "count".
                 * Find the mbuf with last data, adjust its length,
                 * and toss data from remaining mbufs on chain.
                 */
                m = mp;
                if (m->m_flags & M_PKTHDR)
                        m->m_pkthdr.len = count;
                for (; m; m = m->m_next) {
                        if (m->m_len >= count) {
                                m->m_len = count;
                                break;
                        }
                        count -= m->m_len;
                }
                while (m->m_next)
                        (m = m->m_next)->m_len = 0;
        }
}
/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod will work for a structure
 * of size len).  Returns the resulting mbuf chain on success, frees it and
 * returns null on failure.  If there is room, it will add up to
 * max_protohdr-len extra bytes to the contiguous region in an attempt to
 * avoid being called next time.
 */
struct mbuf *
m_pullup(struct mbuf *n, int len)
{
        struct mbuf *m;
        int count;
        int space;

        /*
         * If first mbuf has no cluster, and has room for len bytes
         * without shifting current data, pullup into it,
         * otherwise allocate a new mbuf to prepend to the chain.
         */
        if (!(n->m_flags & M_EXT) &&
            n->m_data + len < &n->m_dat[MLEN] &&
            n->m_next) {
                if (n->m_len >= len)
                        return (n);
                m = n;
                n = n->m_next;
                len -= m->m_len;
        } else {
                if (len > MHLEN)
                        goto bad;
                if (n->m_flags & M_PKTHDR)
                        m = m_gethdr(MB_DONTWAIT, n->m_type);
                else
                        m = m_get(MB_DONTWAIT, n->m_type);
                if (m == NULL)
                        goto bad;
                m->m_len = 0;
                if (n->m_flags & M_PKTHDR)
                        M_MOVE_PKTHDR(m, n);
        }
        space = &m->m_dat[MLEN] - (m->m_data + m->m_len);
        do {
                count = min(min(max(len, max_protohdr), space), n->m_len);
                bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
                    (unsigned)count);
                len -= count;
                m->m_len += count;
                n->m_len -= count;
                space -= count;
                if (n->m_len)
                        n->m_data += count;
                else
                        n = m_free(n);
        } while (len > 0 && n);
        if (len > 0) {
                m_free(m);
                goto bad;
        }
        m->m_next = n;
        return (m);

bad:
        m_freem(n);
        return (NULL);
}
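
/*
 * Usage sketch (not from the original source): the classic caller is a
 * protocol input routine making its header contiguous before casting:
 *
 *      if (m->m_len < sizeof(struct ip) &&
 *          (m = m_pullup(m, sizeof(struct ip))) == NULL)
 *              return;                 (chain was already freed)
 *      ip = mtod(m, struct ip *);
 */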
/*
 * Partition an mbuf chain in two pieces, returning the tail --
 * all but the first len0 bytes.  In case of failure, it returns NULL and
 * attempts to restore the chain to its original state.
 *
 * Note that the resulting mbufs might be read-only, because the new
 * mbuf can end up sharing an mbuf cluster with the original mbuf if
 * the "breaking point" happens to lie within a cluster mbuf.  Use the
 * M_WRITABLE() macro to check for this case.
 */
struct mbuf *
m_split(struct mbuf *m0, int len0, int wait)
{
        struct mbuf *m, *n;
        unsigned len = len0, remain;

        for (m = m0; m && len > m->m_len; m = m->m_next)
                len -= m->m_len;
        if (m == NULL)
                return (NULL);
        remain = m->m_len - len;
        if (m0->m_flags & M_PKTHDR) {
                n = m_gethdr(wait, m0->m_type);
                if (n == NULL)
                        return (NULL);
                n->m_pkthdr.rcvif = m0->m_pkthdr.rcvif;
                n->m_pkthdr.len = m0->m_pkthdr.len - len0;
                m0->m_pkthdr.len = len0;
                if (m->m_flags & M_EXT)
                        goto extpacket;
                if (remain > MHLEN) {
                        /* m can't be the lead packet */
                        MH_ALIGN(n, 0);
                        n->m_next = m_split(m, len, wait);
                        if (n->m_next == NULL) {
                                m_free(n);
                                return (NULL);
                        } else {
                                n->m_len = 0;
                                return (n);
                        }
                } else
                        MH_ALIGN(n, remain);
        } else if (remain == 0) {
                n = m->m_next;
                m->m_next = NULL;
                return (n);
        } else {
                n = m_get(wait, m->m_type);
                if (n == NULL)
                        return (NULL);
                M_ALIGN(n, remain);
        }
extpacket:
        if (m->m_flags & M_EXT) {
                KKASSERT((n->m_flags & M_EXT) == 0);
                n->m_data = m->m_data + len;
                m->m_ext.ext_ref(m->m_ext.ext_arg);
                n->m_ext = m->m_ext;
                n->m_flags |= m->m_flags & (M_EXT | M_EXT_CLUSTER);
        } else {
                bcopy(mtod(m, caddr_t) + len, mtod(n, caddr_t), remain);
        }
        n->m_len = remain;
        m->m_len = len;
        n->m_next = m->m_next;
        m->m_next = NULL;
        return (n);
}
/*
 * Routine to copy from device local memory into mbufs.
 * Note: "offset" is ill-defined and always called as 0, so ignore it.
 */
struct mbuf *
m_devget(char *buf, int len, int offset, struct ifnet *ifp,
    void (*copy)(volatile const void *from, volatile void *to, size_t length))
{
        struct mbuf *m, *mfirst = NULL, **mtail;
        int nsize, flags;

        mtail = &mfirst;
        flags = M_PKTHDR;

        while (len > 0) {
                m = m_getl(len, MB_DONTWAIT, MT_DATA, flags, &nsize);
                if (m == NULL) {
                        m_freem(mfirst);
                        return (NULL);
                }
                m->m_len = min(len, nsize);

                if (flags & M_PKTHDR) {
                        if (len + max_linkhdr <= nsize)
                                m->m_data += max_linkhdr;
                        m->m_pkthdr.rcvif = ifp;
                        m->m_pkthdr.len = len;
                        flags = 0;
                }

                copy(buf, m->m_data, (unsigned)m->m_len);
                buf += m->m_len;
                len -= m->m_len;
                *mtail = m;
                mtail = &m->m_next;
        }

        return (mfirst);
}
/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
void
m_copyback(struct mbuf *m0, int off, int len, caddr_t cp)
{
        int mlen;
        struct mbuf *m = m0, *n;
        int totlen = 0;

        if (m0 == NULL)
                return;
        while (off > (mlen = m->m_len)) {
                off -= mlen;
                totlen += mlen;
                if (m->m_next == NULL) {
                        n = m_getclr(MB_DONTWAIT, m->m_type);
                        if (n == NULL)
                                goto out;
                        n->m_len = min(MLEN, len + off);
                        m->m_next = n;
                }
                m = m->m_next;
        }
        while (len > 0) {
                mlen = min(m->m_len - off, len);
                bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
                cp += mlen;
                len -= mlen;
                mlen += off;
                off = 0;
                totlen += mlen;
                if (len == 0)
                        break;
                if (m->m_next == NULL) {
                        n = m_get(MB_DONTWAIT, m->m_type);
                        if (n == NULL)
                                break;
                        n->m_len = min(MLEN, len);
                        m->m_next = n;
                }
                m = m->m_next;
        }
out:    if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
                m->m_pkthdr.len = totlen;
}
void
m_print(const struct mbuf *m)
{
        int len;
        const struct mbuf *m2;

        len = m->m_pkthdr.len;
        m2 = m;
        while (len) {
                printf("%p %*D\n", m2, m2->m_len, (u_char *)m2->m_data, "-");
                len -= m2->m_len;
                m2 = m2->m_next;
        }
}
/*
 * "Move" mbuf pkthdr from "from" to "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 */
void
m_move_pkthdr(struct mbuf *to, struct mbuf *from)
{
        KASSERT((to->m_flags & M_PKTHDR), ("m_move_pkthdr: not packet header"));

        to->m_flags |= from->m_flags & M_COPYFLAGS;
        to->m_pkthdr = from->m_pkthdr;          /* especially tags */
        SLIST_INIT(&from->m_pkthdr.tags);       /* purge tags from src */
}
/*
 * Duplicate "from"'s mbuf pkthdr in "to".
 * "from" must have M_PKTHDR set, and "to" must be empty.
 * In particular, this does a deep copy of the packet tags.
 */
int
m_dup_pkthdr(struct mbuf *to, const struct mbuf *from, int how)
{
        KASSERT((to->m_flags & M_PKTHDR), ("m_dup_pkthdr: not packet header"));

        to->m_flags = (from->m_flags & M_COPYFLAGS) |
                      (to->m_flags & ~M_COPYFLAGS);
        to->m_pkthdr = from->m_pkthdr;
        SLIST_INIT(&to->m_pkthdr.tags);
        return (m_tag_copy_chain(to, from, how));
}
/*
 * Defragment a mbuf chain, returning the shortest possible
 * chain of mbufs and clusters.  If allocation fails and
 * this cannot be completed, NULL will be returned, but
 * the passed in chain will be unchanged.  Upon success,
 * the original chain will be freed, and the new chain
 * will be returned.
 *
 * If a non-packet header is passed in, the original
 * mbuf (chain?) will be returned unharmed.
 *
 * m_defrag_nofree doesn't free the passed in mbuf.
 */
struct mbuf *
m_defrag(struct mbuf *m0, int how)
{
        struct mbuf *m_new;

        if ((m_new = m_defrag_nofree(m0, how)) == NULL)
                return (NULL);
        if (m_new != m0)
                m_freem(m0);
        return (m_new);
}

struct mbuf *
m_defrag_nofree(struct mbuf *m0, int how)
{
        struct mbuf *m_new = NULL, *m_final = NULL;
        int progress = 0, length, nsize;

        if (!(m0->m_flags & M_PKTHDR))
                return (m0);

#ifdef MBUF_STRESS_TEST
        if (m_defragrandomfailures) {
                int temp = arc4random() & 0xff;
                if (temp == 0xba)
                        goto nospace;
        }
#endif

        m_final = m_getl(m0->m_pkthdr.len, how, MT_DATA, M_PKTHDR, &nsize);
        if (m_final == NULL)
                goto nospace;
        m_final->m_len = 0;     /* in case m0->m_pkthdr.len is zero */

        if (m_dup_pkthdr(m_final, m0, how) == NULL)
                goto nospace;

        m_new = m_final;

        while (progress < m0->m_pkthdr.len) {
                length = m0->m_pkthdr.len - progress;
                if (length > MCLBYTES)
                        length = MCLBYTES;

                if (m_new == NULL) {
                        m_new = m_getl(length, how, MT_DATA, 0, &nsize);
                        if (m_new == NULL)
                                goto nospace;
                }

                m_copydata(m0, progress, length, mtod(m_new, caddr_t));
                progress += length;
                m_new->m_len = length;
                if (m_new != m_final)
                        m_cat(m_final, m_new);
                m_new = NULL;
        }
        if (m0->m_next == NULL)
                m_defraguseless++;
        m_defragpackets++;
        m_defragbytes += m_final->m_pkthdr.len;
        return (m_final);

nospace:
        m_defragfailure++;
        if (m_new)
                m_free(m_new);
        m_freem(m_final);
        return (NULL);
}
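
/*
 * Usage sketch (not from the original source): a transmit path whose
 * DMA engine handles only a limited number of segments can fall back
 * to m_defrag() when a mapping attempt reports too many fragments:
 *
 *      m_new = m_defrag(m, MB_DONTWAIT);
 *      if (m_new == NULL) {
 *              m_freem(m);             (defrag failed, drop)
 *      } else {
 *              m = m_new;              (original chain already freed)
 *      }
 */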
/*
 * Move data from uio into mbufs.
 */
struct mbuf *
m_uiomove(struct uio *uio)
{
        struct mbuf *m;                 /* current working mbuf */
        struct mbuf *head = NULL;       /* result mbuf chain */
        struct mbuf **mp = &head;
        int resid = uio->uio_resid, nsize, flags = M_PKTHDR, error;

        do {
                m = m_getl(resid, MB_WAIT, MT_DATA, flags, &nsize);
                if (flags) {
                        m->m_pkthdr.len = 0;
                        /* Leave room for protocol headers. */
                        if (resid < MHLEN)
                                MH_ALIGN(m, resid);
                        flags = 0;
                }
                m->m_len = min(nsize, resid);
                error = uiomove(mtod(m, caddr_t), m->m_len, uio);
                if (error) {
                        m_free(m);
                        goto failed;
                }
                *mp = m;
                mp = &m->m_next;
                head->m_pkthdr.len += m->m_len;
                resid -= m->m_len;
        } while (resid > 0);

        return (head);

failed:
        m_freem(head);
        return (NULL);
}
struct mbuf *
m_last(struct mbuf *m)
{
        while (m->m_next)
                m = m->m_next;
        return (m);
}
/*
 * Return the number of bytes in an mbuf chain.
 * If lastm is not NULL, also return the last mbuf.
 */
u_int
m_lengthm(struct mbuf *m, struct mbuf **lastm)
{
        u_int len = 0;
        struct mbuf *prev = m;

        while (m) {
                len += m->m_len;
                prev = m;
                m = m->m_next;
        }
        if (lastm != NULL)
                *lastm = prev;
        return (len);
}
/*
 * Like m_lengthm(), except also keep track of mbuf usage.
 */
u_int
m_countm(struct mbuf *m, struct mbuf **lastm, u_int *pmbcnt)
{
        u_int len = 0, mbcnt = 0;
        struct mbuf *prev = m;

        while (m) {
                len += m->m_len;
                mbcnt += MSIZE;
                if (m->m_flags & M_EXT)
                        mbcnt += m->m_ext.ext_size;
                prev = m;
                m = m->m_next;
        }
        if (lastm != NULL)
                *lastm = prev;
        *pmbcnt = mbcnt;
        return (len);
}
;