/*-
 * Copyright (c) 2007 Seccuris Inc.
 * All rights reserved.
 *
 * This software was developed by Robert N. M. Watson under contract to
 * Seccuris Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_bpf.h"

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sf_buf.h>
#include <sys/socket.h>
#include <sys/uio.h>

#include <machine/atomic.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpf_zerocopy.h>
#include <net/bpfdesc.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
/*
 * Zero-copy buffer scheme for BPF: user space "donates" two buffers, which
 * are mapped into the kernel address space using sf_bufs and used directly
 * by BPF.  Memory is wired since page faults cannot be tolerated in the
 * contexts where the buffers are copied to (locks held, interrupt context,
 * etc.).  Access to the shared memory buffers is synchronized using a header
 * on each buffer, allowing the number of system calls to go to zero as BPF
 * reaches saturation (buffers filled as fast as they can be drained by the
 * user process).  Full details of the protocol for communicating between the
 * user process and BPF may be found in bpf(4).
 */
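/*
 * Illustrative sketch (not code from this file): seen from user space, the
 * ownership handshake is driven by the generation numbers in the shared
 * bpf_zbuf_header from <net/bpf.h>.  The helper names below are made up for
 * the example; the field names and atomics mirror the ones used later in
 * this file.
 *
 *	#include <machine/atomic.h>
 *	#include <net/bpf.h>
 *
 *	static int
 *	zbuf_owned_by_user(struct bpf_zbuf_header *bzh)
 *	{
 *		// The kernel bumps bzh_kernel_gen (with release semantics)
 *		// when it assigns the buffer to user space.
 *		return (bzh->bzh_user_gen !=
 *		    atomic_load_acq_int(&bzh->bzh_kernel_gen));
 *	}
 *
 *	static void
 *	zbuf_ack(struct bpf_zbuf_header *bzh)
 *	{
 *		// Acknowledge by making the user generation number equal to
 *		// the kernel's; bpf_zerocopy_canfreebuf() tests for this.
 *		atomic_store_rel_int(&bzh->bzh_user_gen,
 *		    bzh->bzh_kernel_gen);
 *	}
 */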
/*
 * Maximum number of pages per buffer.  Since all BPF devices use two, the
 * maximum per device is 2*BPF_MAX_PAGES.  Resource limits on the number of
 * sf_bufs may be an issue, so do not set this too high.  On older systems,
 * kernel address space limits may also be an issue.
 */
#define	BPF_MAX_PAGES	512
/*
 * struct zbuf describes a memory buffer loaned by a user process to the
 * kernel.  We represent this as a series of pages managed using an array of
 * sf_bufs.  Even though the memory is contiguous in user space, it may not
 * be mapped contiguously in the kernel (i.e., a set of physically
 * non-contiguous pages in the direct map region), so we must implement
 * scatter-gather copying.  One significant mitigating factor is that on
 * systems with a direct memory map, we can avoid TLB misses.
 *
 * At the front of the shared memory region is a bpf_zbuf_header, which
 * contains shared control data to allow user space and the kernel to
 * synchronize; this is included in zb_size, but not bpf_bufsize, so that
 * BPF knows that the space is not available.
 */
struct zbuf {
	vm_offset_t		 zb_uaddr;	/* User address at time of setup. */
	size_t			 zb_size;	/* Size of buffer, incl. header. */
	u_int			 zb_numpages;	/* Number of pages. */
	int			 zb_flags;	/* Flags on zbuf. */
	struct sf_buf		**zb_pages;	/* Pages themselves. */
	struct bpf_zbuf_header	*zb_header;	/* Shared header. */
};
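/*
 * Note (a sketch of the arithmetic, mirroring the assignment at the end of
 * this file): the capacity BPF reports for a zbuf is the loaned region minus
 * the shared header, i.e.
 *
 *	bd_bufsize = zb_size - sizeof(struct bpf_zbuf_header);
 */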
/*
 * When a buffer has been assigned to userspace, flag it as such, as the
 * buffer may remain in the store position as a result of the user process
 * not yet having acknowledged the buffer in the held position.
 */
#define	ZBUF_FLAG_ASSIGNED	0x00000001	/* Set when owned by user. */
/*
 * Release a page we've previously wired.
 */
static void
zbuf_page_free(vm_page_t pp)
{

	vm_page_lock(pp);
	vm_page_unwire(pp, PQ_INACTIVE);
	if (pp->wire_count == 0 && pp->object == NULL)
		vm_page_free(pp);
	vm_page_unlock(pp);
}
/*
 * Free an sf_buf with attached page.
 */
static void
zbuf_sfbuf_free(struct sf_buf *sf)
{
	vm_page_t pp;

	pp = sf_buf_page(sf);
	sf_buf_free(sf);
	zbuf_page_free(pp);
}
/*
 * Free a zbuf, including its page array, sf_bufs, and pages.  Allow
 * partially allocated zbufs to be freed so that this may be used even
 * during a zbuf setup failure.
 */
static void
zbuf_free(struct zbuf *zb)
{
	int i;

	for (i = 0; i < zb->zb_numpages; i++) {
		if (zb->zb_pages[i] != NULL)
			zbuf_sfbuf_free(zb->zb_pages[i]);
	}
	free(zb->zb_pages, M_BPF);
	free(zb, M_BPF);
}
/*
 * Given a user pointer to a page of user memory, return an sf_buf for the
 * page.  Because we may be requesting quite a few sf_bufs, prefer failure to
 * deadlock and use SFB_NOWAIT.
 */
static struct sf_buf *
zbuf_sfbuf_get(struct vm_map *map, vm_offset_t uaddr)
{
	struct sf_buf *sf;
	vm_page_t pp;

	if (vm_fault_quick_hold_pages(map, uaddr, PAGE_SIZE, VM_PROT_READ |
	    VM_PROT_WRITE, &pp, 1) < 0)
		return (NULL);
	vm_page_lock(pp);
	vm_page_wire(pp);
	vm_page_unhold(pp);
	vm_page_unlock(pp);
	sf = sf_buf_alloc(pp, SFB_NOWAIT);
	if (sf == NULL) {
		zbuf_page_free(pp);
		return (NULL);
	}
	return (sf);
}
/*
 * Create a zbuf describing a range of user address space memory.  Validate
 * page alignment, size requirements, etc.
 */
static int
zbuf_setup(struct thread *td, vm_offset_t uaddr, size_t len,
    struct zbuf **zbp)
{
	struct zbuf *zb;
	struct vm_map *map;
	int error, i;

	*zbp = NULL;

	/*
	 * User address must be page-aligned.
	 */
	if (uaddr & PAGE_MASK)
		return (EINVAL);

	/*
	 * Length must be an integer number of full pages.
	 */
	if (len & PAGE_MASK)
		return (EINVAL);

	/*
	 * Length must not exceed per-buffer resource limit.
	 */
	if ((len / PAGE_SIZE) > BPF_MAX_PAGES)
		return (EINVAL);

	/*
	 * Allocate the buffer and set up each page with its own sf_buf.
	 */
	error = 0;
	zb = malloc(sizeof(*zb), M_BPF, M_ZERO | M_WAITOK);
	zb->zb_uaddr = uaddr;
	zb->zb_size = len;
	zb->zb_numpages = len / PAGE_SIZE;
	zb->zb_pages = malloc(sizeof(struct sf_buf *) *
	    zb->zb_numpages, M_BPF, M_ZERO | M_WAITOK);
	map = &td->td_proc->p_vmspace->vm_map;
	for (i = 0; i < zb->zb_numpages; i++) {
		zb->zb_pages[i] = zbuf_sfbuf_get(map,
		    uaddr + (i * PAGE_SIZE));
		if (zb->zb_pages[i] == NULL) {
			error = EFAULT;
			goto error;
		}
	}
	zb->zb_header =
	    (struct bpf_zbuf_header *)sf_buf_kva(zb->zb_pages[0]);
	bzero(zb->zb_header, sizeof(*zb->zb_header));
	*zbp = zb;
	return (0);

error:
	zbuf_free(zb);
	return (error);
}
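/*
 * Illustrative sketch (not code from this file): a user process would
 * normally satisfy the zbuf_setup() constraints by allocating the buffers
 * with mmap(2), which returns page-aligned memory; the 32-page size here is
 * an arbitrary example value.
 *
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	size_t buflen = 32 * getpagesize();
 *	void *bufa = mmap(NULL, buflen, PROT_READ | PROT_WRITE,
 *	    MAP_ANON, -1, 0);
 *	void *bufb = mmap(NULL, buflen, PROT_READ | PROT_WRITE,
 *	    MAP_ANON, -1, 0);
 *
 * Both alignment checks then pass, provided buflen does not exceed
 * BPF_MAX_PAGES * PAGE_SIZE.
 */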
/*
 * Copy bytes from a source into the specified zbuf.  The caller is
 * responsible for performing bounds checking, etc.
 */
void
bpf_zerocopy_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset,
    void *src, u_int len)
{
	u_int count, page, poffset;
	u_char *src_bytes;
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_append_bytes: not in zbuf mode"));
	KASSERT(buf != NULL, ("bpf_zerocopy_append_bytes: NULL buf"));

	src_bytes = (u_char *)src;
	zb = (struct zbuf *)buf;

	KASSERT((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0,
	    ("bpf_zerocopy_append_bytes: ZBUF_FLAG_ASSIGNED"));

	/*
	 * Scatter-gather copy to user pages mapped into kernel address space
	 * using sf_bufs: copy up to a page at a time.
	 */
	offset += sizeof(struct bpf_zbuf_header);
	page = offset / PAGE_SIZE;
	poffset = offset % PAGE_SIZE;
	while (len > 0) {
		KASSERT(page < zb->zb_numpages, ("bpf_zerocopy_append_bytes:"
		    " page overflow (%d p %d np)\n", page, zb->zb_numpages));

		count = min(len, PAGE_SIZE - poffset);
		bcopy(src_bytes, ((u_char *)sf_buf_kva(zb->zb_pages[page])) +
		    poffset, count);
		poffset += count;
		if (poffset == PAGE_SIZE) {
			poffset = 0;
			page++;
		}
		KASSERT(poffset < PAGE_SIZE,
		    ("bpf_zerocopy_append_bytes: page offset overflow (%d)",
		    poffset));
		len -= count;
		src_bytes += count;
	}
}
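/*
 * Worked example for the loop above (assuming 4 KB pages and a 32-byte
 * bpf_zbuf_header): appending 200 bytes at caller offset 4000 yields an
 * absolute offset of 4032, i.e., page 0, poffset 4032.  The first pass
 * copies min(200, 4096 - 4032) = 64 bytes to finish page 0; poffset wraps,
 * and a second pass copies the remaining 136 bytes to the start of page 1.
 * A contiguous source therefore costs one bcopy() per page it touches.
 */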
/*
 * Copy bytes from an mbuf chain to the specified zbuf: copying will be
 * scatter-gather both from mbufs, which may be fragmented over memory, and
 * to pages, which may not be contiguously mapped in kernel address space.
 * As with bpf_zerocopy_append_bytes(), the caller is responsible for
 * checking that this will not exceed the buffer limit.
 */
void
bpf_zerocopy_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset,
    void *src, u_int len)
{
	u_int count, moffset, page, poffset;
	const struct mbuf *m;
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_append_mbuf: not in zbuf mode"));
	KASSERT(buf != NULL, ("bpf_zerocopy_append_mbuf: NULL buf"));

	m = (struct mbuf *)src;
	zb = (struct zbuf *)buf;

	KASSERT((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0,
	    ("bpf_zerocopy_append_mbuf: ZBUF_FLAG_ASSIGNED"));

	/*
	 * Scatter-gather both from an mbuf chain and to a user page set
	 * mapped into kernel address space using sf_bufs.  If we're lucky,
	 * each mbuf requires one copy operation, but if page alignment and
	 * mbuf alignment work out less well, we'll be doing two copies per
	 * mbuf.
	 */
	offset += sizeof(struct bpf_zbuf_header);
	page = offset / PAGE_SIZE;
	poffset = offset % PAGE_SIZE;
	moffset = 0;
	while (len > 0) {
		KASSERT(page < zb->zb_numpages,
		    ("bpf_zerocopy_append_mbuf: page overflow (%d p %d "
		    "np)\n", page, zb->zb_numpages));
		KASSERT(m != NULL,
		    ("bpf_zerocopy_append_mbuf: end of mbuf chain"));

		count = min(m->m_len - moffset, len);
		count = min(count, PAGE_SIZE - poffset);
		bcopy(mtod(m, u_char *) + moffset,
		    ((u_char *)sf_buf_kva(zb->zb_pages[page])) + poffset,
		    count);
		poffset += count;
		if (poffset == PAGE_SIZE) {
			poffset = 0;
			page++;
		}
		KASSERT(poffset < PAGE_SIZE,
		    ("bpf_zerocopy_append_mbuf: page offset overflow (%d)",
		    poffset));
		moffset += count;
		if (moffset == m->m_len) {
			m = m->m_next;
			moffset = 0;
		}
		len -= count;
	}
}
/*
 * Notification from the BPF framework that a buffer in the store position is
 * rejecting packets and may be considered full.  We mark the buffer as
 * immutable and assign it to userspace so that it is immediately available
 * for the user process to access.
 */
void
bpf_zerocopy_buffull(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_buffull: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_sbuf;
	KASSERT(zb != NULL, ("bpf_zerocopy_buffull: zb == NULL"));

	if ((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0) {
		zb->zb_flags |= ZBUF_FLAG_ASSIGNED;
		zb->zb_header->bzh_kernel_len = d->bd_slen;
		atomic_add_rel_int(&zb->zb_header->bzh_kernel_gen, 1);
	}
}
/*
 * Notification from the BPF framework that a buffer has moved into the held
 * slot on a descriptor.  Zero-copy BPF will update the shared page to let
 * the user process know, and flag the buffer as assigned if it hasn't
 * already been marked assigned due to filling while it was in the store
 * position.
 *
 * Note: identical logic as in bpf_zerocopy_buffull(), except that we operate
 * on bd_hbuf and bd_hlen.
 */
void
bpf_zerocopy_bufheld(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_bufheld: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_hbuf;
	KASSERT(zb != NULL, ("bpf_zerocopy_bufheld: zb == NULL"));

	if ((zb->zb_flags & ZBUF_FLAG_ASSIGNED) == 0) {
		zb->zb_flags |= ZBUF_FLAG_ASSIGNED;
		zb->zb_header->bzh_kernel_len = d->bd_hlen;
		atomic_add_rel_int(&zb->zb_header->bzh_kernel_gen, 1);
	}
}
/*
 * Notification from the BPF framework that a buffer has been rotated from
 * the held position to the free position.  This happens when the user
 * process acknowledges the held buffer.
 */
void
bpf_zerocopy_buf_reclaimed(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_buf_reclaimed: not in zbuf mode"));

	KASSERT(d->bd_fbuf != NULL,
	    ("bpf_zerocopy_buf_reclaimed: NULL free buf"));
	zb = (struct zbuf *)d->bd_fbuf;
	zb->zb_flags &= ~ZBUF_FLAG_ASSIGNED;
}
/*
 * Query from the BPF framework regarding whether the buffer currently in the
 * held position can be moved to the free position, which can be indicated by
 * the user process making its generation number equal to the kernel
 * generation number.
 */
int
bpf_zerocopy_canfreebuf(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_canfreebuf: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_hbuf;
	if (zb == NULL)
		return (0);

	if (zb->zb_header->bzh_kernel_gen ==
	    atomic_load_acq_int(&zb->zb_header->bzh_user_gen))
		return (1);
	return (0);
}
/*
 * Query from the BPF framework as to whether or not the buffer currently in
 * the store position can actually be written to.  This may return false if
 * the store buffer is assigned to userspace before the hold buffer is
 * acknowledged.
 */
int
bpf_zerocopy_canwritebuf(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_canwritebuf: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_sbuf;
	KASSERT(zb != NULL, ("bpf_zerocopy_canwritebuf: bd_sbuf NULL"));

	if (zb->zb_flags & ZBUF_FLAG_ASSIGNED)
		return (0);
	return (1);
}
/*
 * Free zero-copy buffers at request of descriptor.
 */
void
bpf_zerocopy_free(struct bpf_d *d)
{
	struct zbuf *zb;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_free: not in zbuf mode"));

	zb = (struct zbuf *)d->bd_sbuf;
	if (zb != NULL)
		zbuf_free(zb);
	zb = (struct zbuf *)d->bd_hbuf;
	if (zb != NULL)
		zbuf_free(zb);
	zb = (struct zbuf *)d->bd_fbuf;
	if (zb != NULL)
		zbuf_free(zb);
}
/*
 * Ioctl to return the maximum buffer size.
 */
int
bpf_zerocopy_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
{

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_ioctl_getzmax: not in zbuf mode"));

	*i = BPF_MAX_PAGES * PAGE_SIZE;
	return (0);
}
/*
 * Ioctl to force rotation of the two buffers, if there's any data available.
 * This can be used by user space to implement timeouts when waiting for a
 * buffer to fill.
 */
int
bpf_zerocopy_ioctl_rotzbuf(struct thread *td, struct bpf_d *d,
    struct bpf_zbuf *bz)
{
	struct zbuf *bzh;

	bzero(bz, sizeof(*bz));
	BPFD_LOCK(d);
	if (d->bd_hbuf == NULL && d->bd_slen != 0) {
		ROTATE_BUFFERS(d);
		bzh = (struct zbuf *)d->bd_hbuf;
		bz->bz_bufa = (void *)bzh->zb_uaddr;
		bz->bz_buflen = d->bd_hlen;
	}
	BPFD_UNLOCK(d);
	return (0);
}
/*
 * Ioctl to configure zero-copy buffers -- may be done only once.
 */
int
bpf_zerocopy_ioctl_setzbuf(struct thread *td, struct bpf_d *d,
    struct bpf_zbuf *bz)
{
	struct zbuf *zba, *zbb;
	int error;

	KASSERT(d->bd_bufmode == BPF_BUFMODE_ZBUF,
	    ("bpf_zerocopy_ioctl_setzbuf: not in zbuf mode"));

	/*
	 * Must set both buffers.  Cannot clear them.
	 */
	if (bz->bz_bufa == NULL || bz->bz_bufb == NULL)
		return (EINVAL);

	/*
	 * Buffers must have a size greater than 0.  Alignment and other size
	 * validity checking is done in zbuf_setup().
	 */
	if (bz->bz_buflen == 0)
		return (EINVAL);

	/*
	 * Allocate new buffers.
	 */
	error = zbuf_setup(td, (vm_offset_t)bz->bz_bufa, bz->bz_buflen,
	    &zba);
	if (error)
		return (error);
	error = zbuf_setup(td, (vm_offset_t)bz->bz_bufb, bz->bz_buflen,
	    &zbb);
	if (error) {
		zbuf_free(zba);
		return (error);
	}

	/*
	 * We only allow buffers to be installed once, so atomically check
	 * that no buffers are currently installed and install the new
	 * buffers.
	 */
	BPFD_LOCK(d);
	if (d->bd_hbuf != NULL || d->bd_sbuf != NULL || d->bd_fbuf != NULL ||
	    d->bd_bif != NULL) {
		BPFD_UNLOCK(d);
		zbuf_free(zba);
		zbuf_free(zbb);
		return (EINVAL);
	}

	/*
	 * Point BPF descriptor at buffers; initialize sbuf as zba so that
	 * it is always filled first in the sequence, per bpf(4).
	 */
	d->bd_fbuf = (caddr_t)zbb;
	d->bd_sbuf = (caddr_t)zba;
	d->bd_slen = 0;
	d->bd_hlen = 0;

	/*
	 * We expose only the space left in the buffer after the size of the
	 * shared management region.
	 */
	d->bd_bufsize = bz->bz_buflen - sizeof(struct bpf_zbuf_header);
	BPFD_UNLOCK(d);
	return (0);
}
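/*
 * Illustrative sketch (not code from this file): configuring a descriptor
 * for zero-copy operation from user space, per bpf(4).  Error handling is
 * omitted; bufa, bufb, and buflen come from the mmap(2) sketch above.
 *
 *	#include <sys/ioctl.h>
 *	#include <net/bpf.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("/dev/bpf", O_RDWR);
 *	u_int mode = BPF_BUFMODE_ZBUF;
 *	size_t zmax;
 *	struct bpf_zbuf bz;
 *
 *	ioctl(fd, BIOCSETBUFMODE, &mode);	// enter zero-copy mode
 *	ioctl(fd, BIOCGETZMAX, &zmax);		// BPF_MAX_PAGES * PAGE_SIZE
 *	bz.bz_bufa = bufa;
 *	bz.bz_bufb = bufb;
 *	bz.bz_buflen = buflen;			// must be <= zmax
 *	ioctl(fd, BIOCSETZBUF, &bz);		// allowed only once
 */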