/*
 * Copyright (C) 2013 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/module.h>
#include <sys/errno.h>
#include <sys/param.h>	/* defines used in kernel.h */
#include <sys/kernel.h>	/* types used in module initialization */
#include <sys/conf.h>	/* DEV_MODULE */
#include <sys/devfs.h>

#include <vm/vm.h>	/* vtophys */
#include <vm/pmap.h>	/* vtophys */
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_page2.h>
#include <vm/vm_pager.h>

#include <sys/malloc.h>
#include <sys/socket.h>	/* sockaddrs */
#include <sys/event.h>

#include <net/if_var.h>
#include <net/ifq_var.h>
#include <sys/bus.h>	/* bus_dmamap_* */

#include <net/netmap.h>
#include <net/netmap/netmap_kern.h>
#include <net/netmap/netmap_mem2.h>

/* ======================== FREEBSD-SPECIFIC ROUTINES ================== */

/*
 * Intercept the rx routine in the standard device driver.
 * Second argument is non-zero to intercept, 0 to restore.
 */
int
netmap_catch_rx(struct netmap_adapter *na, int intercept)
{
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct ifnet *ifp = na->ifp;

	if (intercept) {
		if (gna->save_if_input) {
			D("cannot intercept again");
			return EINVAL; /* already set */
		}
		gna->save_if_input = ifp->if_input;
		ifp->if_input = generic_rx_handler;
	} else {
		if (!gna->save_if_input) {
			D("cannot restore");
			return EINVAL; /* not saved */
		}
		ifp->if_input = gna->save_if_input;
		gna->save_if_input = NULL;
	}

	return 0;
}

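/*
 * Note on the flow above: while interception is enabled, the driver's
 * input path delivers mbufs to generic_rx_handler() (installed in
 * ifp->if_input) instead of the regular network stack; restoring simply
 * puts the saved input routine back in place.
 */
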
/*
 * Intercept the packet steering routine in the tx path,
 * so that we can decide which queue is used for an mbuf.
 * Second argument is non-zero to intercept, 0 to restore.
 *
 * XXX see if FreeBSD has such a mechanism
 */
void
netmap_catch_packet_steering(struct netmap_generic_adapter *na, int enable)
{
}

/* Transmit routine used by generic_netmap_txsync(). Returns 0 on success
 * and non-zero on error (which may be packet drops or other errors).
 * addr and len identify the netmap buffer, m is the (preallocated)
 * mbuf to use for transmissions.
 *
 * We should add a reference to the mbuf so the m_freem() at the end
 * of the transmission does not consume resources.
 *
 * On FreeBSD, and on multiqueue cards, we can force the queue using
 *	if ((m->m_flags & M_FLOWID) != 0)
 *		i = m->m_pkthdr.flowid % adapter->num_queues;
 *	else
 *		i = curcpu % adapter->num_queues;
 */
int
generic_xmit_frame(struct ifnet *ifp, struct mbuf *m,
	void *addr, u_int len, u_int ring_nr)
{
	int ret;

	m->m_len = m->m_pkthdr.len = 0;

	// copy data to the mbuf
	m_copyback(m, 0, len, addr);

#if 0
	// inc refcount. We are alone, so we can skip the atomic
	atomic_fetchadd_int(m->m_ext.ref_cnt, 1);
	m->m_flags |= M_FLOWID;
#endif
	m->m_pkthdr.hash = ring_nr;	/* XXX probably not accurate */
	m->m_pkthdr.rcvif = ifp;	/* used for tx notification */
	ret = ifq_dispatch(ifp, m, NULL);
	return ret;
}

/*
 * The following two functions are empty until we have a generic
 * way to extract the info from the ifp.
 */
int
generic_find_num_desc(struct ifnet *ifp, unsigned int *tx, unsigned int *rx)
{
	D("called");
	return 0;
}

void
generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq)
{
	D("called");
	*txq = 1;
	*rxq = 1;
}

void netmap_mitigation_init(struct netmap_generic_adapter *na)
{
	ND("called");
}

void netmap_mitigation_start(struct netmap_generic_adapter *na)
{
	ND("called");
}

void netmap_mitigation_restart(struct netmap_generic_adapter *na)
{
	ND("called");
}

int netmap_mitigation_active(struct netmap_generic_adapter *na)
{
	ND("called");
	return 0;
}

void netmap_mitigation_cleanup(struct netmap_generic_adapter *na)
{
	ND("called");
}

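/*
 * The mitigation hooks above are placeholders: in netmap's generic
 * (emulated) adapter they are meant to mitigate (batch) rx notifications,
 * but no such mechanism is wired up on this platform yet, so they only
 * log when debugging is enabled.
 */
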
/*
 * In order to track whether pages are still mapped, we hook into
 * the standard cdev_pager and intercept the constructor and
 * destructor.
 */

struct netmap_vm_handle_t {
	struct cdev		*dev;
	struct netmap_priv_d	*priv;
};

static int
netmap_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
	vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	struct netmap_vm_handle_t *vmh = handle;

	D("handle %p size %jd prot %d foff %jd",
		handle, (intmax_t)size, prot, (intmax_t)foff);
	reference_dev(vmh->dev);	/* XXX hold the device while it is mapped */
	return 0;
}

static void
netmap_dev_pager_dtor(void *handle)
{
	struct netmap_vm_handle_t *vmh = handle;
	struct cdev *dev = vmh->dev;
	struct netmap_priv_d *priv = vmh->priv;

	D("handle %p", handle);
	netmap_dtor(priv);
	kfree(vmh, M_DEVBUF);
	release_dev(dev);	/* XXX drop the reference taken in the ctor */
}

MALLOC_DEFINE(M_FICT_PAGES, "", "");

static inline vm_page_t
vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
{
	vm_page_t m;

	m = kmalloc(sizeof(struct vm_page), M_FICT_PAGES,
	    M_WAITOK | M_ZERO);
	vm_page_initfake(m, paddr, memattr);
	return (m);
}

static inline void
vm_page_updatefake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
{
	KASSERT((m->flags & PG_FICTITIOUS) != 0,
	    ("vm_page_updatefake: bad page %p", m));
	m->phys_addr = paddr;
	pmap_page_set_memattr(m, memattr);
}

static int
netmap_dev_pager_fault(vm_object_t object, vm_ooffset_t offset,
	int prot, vm_page_t *mres)
{
	struct netmap_vm_handle_t *vmh = object->handle;
	struct netmap_priv_d *priv = vmh->priv;
	vm_paddr_t paddr;
	vm_pindex_t pidx;
	vm_memattr_t memattr;
	vm_page_t page;

	ND("object %p offset %jd prot %d mres %p",
		object, (intmax_t)offset, prot, mres);
	memattr = object->memattr;
	pidx = OFF_TO_IDX(offset);
	paddr = netmap_mem_ofstophys(priv->np_mref, offset);
	if (paddr == 0)
		return VM_PAGER_FAIL;

	if (((*mres)->flags & PG_FICTITIOUS) != 0) {
		/*
		 * If the passed in result page is a fake page, update it with
		 * the new physical address.
		 */
		page = *mres;
		vm_page_updatefake(page, paddr, memattr);
	} else {
		/*
		 * Replace the passed in reqpage page with our own fake page and
		 * free up all of the original pages.
		 */
#ifndef VM_OBJECT_WUNLOCK	/* FreeBSD < 10.x */
#define VM_OBJECT_WUNLOCK	VM_OBJECT_UNLOCK
#define VM_OBJECT_WLOCK		VM_OBJECT_LOCK
#endif /* VM_OBJECT_WUNLOCK */

		VM_OBJECT_WUNLOCK(object);
		page = vm_page_getfake(paddr, memattr);
		VM_OBJECT_WLOCK(object);
		vm_page_free(*mres);
		*mres = page;
		vm_page_insert(page, object, pidx);
	}
	page->valid = VM_PAGE_BITS_ALL;
	return (VM_PAGER_OK);
}

static struct cdev_pager_ops netmap_cdev_pager_ops = {
	.cdev_pg_ctor = netmap_dev_pager_ctor,
	.cdev_pg_dtor = netmap_dev_pager_dtor,
	.cdev_pg_fault = netmap_dev_pager_fault,
};

static int
netmap_mmap_single(struct dev_mmap_single_args *ap)
{
	int error;
	struct cdev *cdev = ap->a_head.a_dev;
	vm_ooffset_t *foff = ap->a_offset;
	vm_object_t *objp = ap->a_object;
	vm_size_t objsize = ap->a_size;
	struct netmap_vm_handle_t *vmh;
	struct netmap_priv_d *priv;
	int prot = ap->a_nprot;
	vm_object_t obj;

	D("cdev %p foff %jd size %jd objp %p prot %d", cdev,
	    (intmax_t )*foff, (intmax_t )objsize, objp, prot);

	vmh = kmalloc(sizeof(struct netmap_vm_handle_t), M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (vmh == NULL)
		return ENOMEM;
	vmh->dev = cdev;

	NMG_LOCK();
	error = devfs_get_cdevpriv(ap->a_fp, (void**)&priv);
	if (error)
		goto err_unlock;
	vmh->priv = priv;
	priv->np_refcount++;
	NMG_UNLOCK();

	error = netmap_get_memory(priv);
	if (error)
		goto err_deref;

	obj = cdev_pager_allocate(vmh, OBJT_DEVICE,
	    &netmap_cdev_pager_ops, objsize, prot,
	    *foff, NULL);
	if (obj == NULL) {
		D("cdev_pager_allocate failed");
		error = EINVAL;
		goto err_deref;
	}

	*objp = obj;
	return 0;

err_deref:
	NMG_LOCK();
	priv->np_refcount--;
err_unlock:
	NMG_UNLOCK();
	kfree(vmh, M_DEVBUF);
	return error;
}

// XXX can we remove this ?
static int
netmap_close(struct dev_close_args *ap)
{
	if (netmap_verbose)
		D("dev %p fflag 0x%x devtype %d",
			ap->a_head.a_dev, ap->a_fflag, ap->a_devtype);
	return 0;
}

static int
netmap_open(struct dev_open_args *ap)
{
	struct netmap_priv_d *priv;
	int error;

	// XXX wait or nowait ?
	priv = kmalloc(sizeof(struct netmap_priv_d), M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (priv == NULL)
		return ENOMEM;

	error = devfs_set_cdevpriv(ap->a_fp, priv, netmap_dtor);
	if (error) {
		kfree(priv, M_DEVBUF);
		return error;
	}

	priv->np_refcount = 1;

	return 0;
}

struct dev_ops netmap_cdevsw = {
	{ "netmap", 0, 0 },
	.d_open = netmap_open,
	.d_mmap_single = netmap_mmap_single,
	.d_ioctl = netmap_ioctl,
	.d_kqfilter = netmap_kqfilter,
	.d_close = netmap_close,
};

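/*
 * For orientation only: a minimal userspace sequence that exercises the
 * entry points above (d_open, d_ioctl, d_mmap_single) would look roughly
 * like the following; "em0" is just a placeholder interface name.
 *
 *	int fd = open("/dev/netmap", O_RDWR);
 *	struct nmreq req;
 *	bzero(&req, sizeof(req));
 *	req.nr_version = NETMAP_API;
 *	strlcpy(req.nr_name, "em0", sizeof(req.nr_name));
 *	ioctl(fd, NIOCREGIF, &req);		// served by netmap_ioctl()
 *	void *mem = mmap(NULL, req.nr_memsize, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);			// served by netmap_mmap_single()
 */
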
/*
 * Kernel entry point.
 *
 * Initialize/finalize the module and return.
 *
 * Return 0 on success, errno on failure.
 */
static int
netmap_loader(__unused struct module *module, int event, __unused void *arg)
{
	int error = 0;

	switch (event) {
	case MOD_LOAD:
		error = netmap_init();
		break;

	case MOD_UNLOAD:
		netmap_fini();
		break;

	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

DEV_MODULE(netmap, netmap_loader, NULL);