/*
 * Copyright (C) 2011-2013 Matteo Landi, Luigi Rizzo. All rights reserved.
 * Copyright (C) 2013 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * $FreeBSD: head/sys/dev/netmap/netmap_kern.h 238985 2012-08-02 11:59:43Z luigi $
 *
 * The header contains the definitions of constants and function
 * prototypes used only in kernelspace.
 */

#ifndef _NET_NETMAP_KERN_H_
#define _NET_NETMAP_KERN_H_
#define WITH_VALE	// comment out to disable VALE support

#define likely(x)	__builtin_expect((long)!!(x), 1L)
#define unlikely(x)	__builtin_expect((long)!!(x), 0L)
#define NM_LOCK_T	struct lock
#define NMG_LOCK_T	struct lock
#define NMG_LOCK_INIT()		lockinit(&netmap_global_lock, \
					"netmap global lock", 0, LK_CANRECURSE)
#define NMG_LOCK_DESTROY()	lockuninit(&netmap_global_lock)
#define NMG_LOCK()		lockmgr(&netmap_global_lock, LK_EXCLUSIVE)
#define NMG_UNLOCK()		lockmgr(&netmap_global_lock, LK_RELEASE)
#define NMG_LOCK_ASSERT()	KKASSERT(lockstatus(&netmap_global_lock, NULL) != 0)

#define NM_SELINFO_T	struct kqinfo
#define MBUF_LEN(m)	((m)->m_pkthdr.len)
#define MBUF_IFP(m)	((m)->m_pkthdr.rcvif)
#define NM_SEND_UP(ifp, m)	((ifp)->if_input(ifp, m, NULL, -1))

#define NM_ATOMIC_T	volatile int	// XXX ?
/* atomic operations */
#include <machine/atomic.h>
#define NM_ATOMIC_TEST_AND_SET(p)	(!atomic_cmpset_acq_int((p), 0, 1))
#define NM_ATOMIC_CLEAR(p)		atomic_store_rel_int((p), 0)

#define prefetch(x)	__builtin_prefetch(x)

#define mb()	cpu_mfence()
#define rmb()	cpu_lfence()
#define wmb()	cpu_sfence()
MALLOC_DECLARE(M_NETMAP);

// XXX linux structs, not used in FreeBSD
struct net_device_ops {
};
struct hrtimer {
};

#define IFCAP_NETMAP	0x8000	/* XXX move to <net/if.h> */
#define ND(format, ...)
#define D(format, ...)						\
	do {							\
		struct timeval __xxts;				\
		microtime(&__xxts);				\
		kprintf("%03d.%06d %s [%d] " format "\n",	\
		(int)__xxts.tv_sec % 1000, (int)__xxts.tv_usec,	\
		__FUNCTION__, __LINE__, ##__VA_ARGS__);		\
	} while (0)

/* rate limited, lps indicates how many per second */
#define RD(lps, format, ...)					\
	do {							\
		static int t0, __cnt;				\
		if (t0 != time_second) {			\
			t0 = time_second;			\
			__cnt = 0;				\
		}						\
		if (__cnt++ < lps)				\
			D(format, ##__VA_ARGS__);		\
	} while (0)
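
/*
 * Usage sketch (illustrative only, variable names are hypothetical):
 * D() logs unconditionally, RD() limits output to 'lps' lines per second.
 */
#if 0
	D("adapter %s reconfigured", ifp->if_xname);
	RD(5, "rx ring %d out of buffers, hwavail %d", ring_nr, kring->nr_hwavail);
#endif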
struct netmap_adapter;
struct nm_bdg_fwd;
struct nm_bridge;
struct netmap_priv_d;

const char *nm_dump_buf(char *p, int len, int lim, char *dst);

#include <net/netmap/netmap_mbq.h>

extern NMG_LOCK_T	netmap_global_lock;
/*
 * private, kernel view of a ring. Keeps track of the status of
 * a ring across system calls.
 *
 *	nr_hwcur	index of the next buffer to refill.
 *			It corresponds to ring->cur - ring->reserved
 *
 *	nr_hwavail	the number of slots "owned" by userspace.
 *			nr_hwavail =:= ring->avail + ring->reserved
 *
 * The indexes in the NIC and netmap rings are offset by nkr_hwofs slots.
 * This is so that, on a reset, buffers owned by userspace are not
 * modified by the kernel. In particular:
 * RX rings: the next empty buffer (hwcur + hwavail + hwofs) coincides with
 *	the next empty buffer as known by the hardware (next_to_check or so).
 * TX rings: hwcur + hwofs coincides with next_to_send
 *
 * Clients cannot issue concurrent syscalls on a ring. The system
 * detects this and reports an error using two flags,
 *	NKR_WBUSY and NKR_RBUSY
 * For received packets, slot->flags is set to nkr_slot_flags
 * so we can provide a proper initial value (e.g. set NS_FORWARD
 * when operating in 'transparent' mode).
 *
 * The following fields are used to implement lock-free copy of packets
 * from input to output ports in the VALE switch:
 *	nkr_hwlease	buffer after the last one being copied.
 *			A writer in nm_bdg_flush reserves N buffers
 *			from nr_hwlease, advances it, then does the
 *			copy outside the lock.
 *			In RX rings (used for VALE ports),
 *			nkr_hwcur + nkr_hwavail <= nkr_hwlease < nkr_hwcur+N-1
 *			In TX rings (used for NIC or host stack ports)
 *			nkr_hwcur <= nkr_hwlease < nkr_hwcur + nkr_hwavail
 *	nkr_leases	array of nkr_num_slots where writers can report
 *			completion of their block. NR_NOSLOT (~0) indicates
 *			that the writer has not finished yet
 *	nkr_lease_idx	index of the next free slot in nkr_leases, to be assigned
 *
 * The kring is manipulated by txsync/rxsync and the generic netmap functions.
 * q_lock is used to arbitrate access to the kring from within the netmap
 * code, and this and other protections guarantee that there is never
 * more than 1 concurrent call to txsync or rxsync. So we are free
 * to manipulate the kring from within txsync/rxsync without any extra
 * locks.
 */
struct netmap_kring {
	struct netmap_ring *ring;
	uint32_t nr_hwcur;
	uint32_t nr_hwavail;
	uint32_t nr_kflags;	/* private driver flags */
	int32_t nr_hwreserved;
#define NKR_PENDINTR	0x1	// Pending interrupt.
	uint32_t nkr_num_slots;
	int32_t nkr_hwofs;	/* offset between NIC and netmap ring */

	uint16_t nkr_slot_flags;	/* initial value for flags */
	struct netmap_adapter *na;
	struct nm_bdg_fwd *nkr_ft;
	uint32_t *nkr_leases;
#define NR_NOSLOT	((uint32_t)~0)
	uint32_t nkr_hwlease;
	uint32_t nkr_lease_idx;

	NM_SELINFO_T si;	/* poll/select wait queue */
	NM_LOCK_T q_lock;	/* protects kring and ring. */
	NM_ATOMIC_T nr_busy;	/* prevent concurrent syscalls */

	volatile int nkr_stopped;

	/* support for adapters without native netmap support.
	 * On tx rings we preallocate an array of tx buffers
	 * (same size as the netmap ring), on rx rings we
	 * store incoming packets in a queue.
	 * XXX who writes to the rx queue ?
	 */
	struct mbuf **tx_pool;
	u_int nr_ntc;		/* Emulation of a next-to-clean RX ring pointer. */
	struct mbq rx_queue;	/* A queue for intercepted rx mbufs. */

} __attribute__((__aligned__(64)));
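
/*
 * Illustrative sketch (not part of the API): as described above, the kernel
 * view maps to the user view as  nr_hwcur == ring->cur - ring->reserved  and
 * nr_hwavail == ring->avail + ring->reserved  (modulo ring size), so the
 * user-visible avail can be derived from the kring fields as follows.
 */
#if 0
static inline uint32_t
nm_user_avail(struct netmap_kring *kr)
{
	struct netmap_ring *r = kr->ring;

	return kr->nr_hwavail - r->reserved;	/* == ring->avail */
}
#endif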
/* return the next index, with wraparound */
static inline uint32_t
nm_next(uint32_t i, uint32_t lim)
{
	return unlikely (i == lim) ? 0 : i + 1;
}
/*
 * Here is the layout for the Rx and Tx rings.

       RxRING                            TxRING

      +-----------------+            +-----------------+
      |                 |            |                 |
      |XXX free slot XXX|            |XXX free slot XXX|
      +-----------------+            +-----------------+
      |                 |<-hwcur     |                 |<-hwcur
      | reserved    h   |            | (ready          |
      +-----------  w  -+            |  to be          |
 cur->|             a   |            |  sent)       h  |
      |             v   |            +----------    w  |
      |             a   |       cur->| (being        a |
      |             i   |            |  prepared)    v |
      |             l   |            |               a |
      +-----------------+            + a ----------  i +
      |                 | ...        | v             l |<-hwlease
      | (being          | ...        | a               | ...
      |  prepared)      | ...        | i               | ...
      +-----------------+ ...        | l               | ...
      |                 |<-hwlease   +-----------------+
      |                 |            |                 |
      |                 |            |                 |
      |                 |            |                 |
      |                 |            |                 |
      +-----------------+            +-----------------+
 *
 * The cur/avail (user view) and hwcur/hwavail (kernel view)
 * are used in the normal operation of the card.
 *
 * When a ring is the output of a switch port (Rx ring for
 * a VALE port, Tx ring for the host stack or NIC), slots
 * are reserved in blocks through 'hwlease' which points
 * to the next unused slot.
 * On an Rx ring, hwlease is always after hwavail,
 * and completions cause avail to advance.
 * On a Tx ring, hwlease is always between cur and hwavail,
 * and completions cause cur to advance.
 *
 * nm_kr_space() returns the maximum number of slots that
 * can be assigned.
 * nm_kr_lease() reserves the required number of buffers,
 *    advances nkr_hwlease and also returns an entry in
 *    a circular array where completions should be reported.
 */
enum txrx { NR_RX = 0, NR_TX = 1 };
256 * The "struct netmap_adapter" extends the "struct adapter"
257 * (or equivalent) device descriptor.
258 * It contains all base fields needed to support netmap operation.
259 * There are in fact different types of netmap adapters
260 * (native, generic, VALE switch...) so a netmap_adapter is
261 * just the first field in the derived type.
263 struct netmap_adapter
{
	/*
	 * On linux we do not have a good way to tell if an interface
	 * is netmap-capable. So we use the following trick:
	 * NA(ifp) points here, and the first entry (which hopefully
	 * always exists and is at least 32 bits) contains a magic
	 * value which we can use to detect that the interface is good.
	 */
	uint32_t magic;
	uint32_t na_flags;	/* future place for IFCAP_NETMAP */
#define NAF_SKIP_INTR	1	/* use the regular interrupt handler.
				 * useful during initialization
				 */
#define NAF_SW_ONLY	2	/* forward packets only to sw adapter */
#define NAF_BDG_MAYSLEEP 4	/* the bridge is allowed to sleep when
				 * forwarding packets coming from this
				 * interface
				 */
#define NAF_MEM_OWNER	8	/* the adapter is responsible for the
				 * deallocation of the memory allocator
				 */
#define NAF_NATIVE_ON	16	/* the adapter is native and the attached
				 * interface is in netmap mode
				 */
	int active_fds; /* number of user-space descriptors using this
			 interface, which is equal to the number of
			 struct netmap_if objs in the mapped region. */

	u_int num_rx_rings; /* number of adapter receive rings */
	u_int num_tx_rings; /* number of adapter transmit rings */

	u_int num_tx_desc; /* number of descriptors in each queue */
	u_int num_rx_desc;
	/* tx_rings and rx_rings are private but allocated
	 * as a contiguous chunk of memory. Each array has
	 * N+1 entries, for the adapter queues and for the host queue.
	 */
	struct netmap_kring *tx_rings; /* array of TX rings. */
	struct netmap_kring *rx_rings; /* array of RX rings. */
	void *tailroom;		       /* space below the rings array */
				       /* (used for leases) */

	NM_SELINFO_T tx_si, rx_si;	/* global wait queues */
	/* copy of if_qflush and if_transmit pointers, to intercept
	 * packets from the network stack when netmap is active.
	 */
	int (*if_transmit)(struct ifnet *, struct mbuf *);

	/* references to the ifnet and device routines, used by
	 * the generic netmap functions.
	 */
	struct ifnet *ifp; /* adapter is ifp->if_softc */
	/* private cleanup */
	void (*nm_dtor)(struct netmap_adapter *);

	int (*nm_register)(struct netmap_adapter *, int onoff);

	int (*nm_txsync)(struct netmap_adapter *, u_int ring, int flags);
	int (*nm_rxsync)(struct netmap_adapter *, u_int ring, int flags);
#define NAF_FORCE_READ    1
#define NAF_FORCE_RECLAIM 2
	/* return configuration information */
	int (*nm_config)(struct netmap_adapter *,
		u_int *txr, u_int *txd, u_int *rxr, u_int *rxd);
	int (*nm_krings_create)(struct netmap_adapter *);
	void (*nm_krings_delete)(struct netmap_adapter *);
	int (*nm_notify)(struct netmap_adapter *,
		u_int ring, enum txrx, int flags);
#define NAF_GLOBAL_NOTIFY 4
#define NAF_DISABLE_NOTIFY 8
	/* standard refcount to control the lifetime of the adapter
	 * (it should be equal to the lifetime of the corresponding ifp)
	 */
	int na_refcount;

	/* memory allocator (opaque)
	 * We also cache a pointer to the lut_entry for translating
	 * buffer addresses, and the total number of buffers.
	 */
	struct netmap_mem_d *nm_mem;
	struct lut_entry *na_lut;
	uint32_t na_lut_objtotal;	/* max buffer index */

	/* used internally. If non-null, the interface cannot be bound
	 * from userspace
	 */
	void *na_private;
};

/*
 * If the NIC is owned by the kernel
 * (i.e., bridge), neither another bridge nor user can use it;
 * if the NIC is owned by a user, only users can share it.
 * Evaluation must be done under NMG_LOCK().
 */
#define NETMAP_OWNED_BY_KERN(na)	(na->na_private)
#define NETMAP_OWNED_BY_ANY(na) \
	(NETMAP_OWNED_BY_KERN(na) || (na->active_fds > 0))
/*
 * derived netmap adapters for various types of ports
 */
struct netmap_vp_adapter {	/* VALE software port */
	struct netmap_adapter up;

	/*
	 * bdg_port is the port number used in the bridge;
	 * na_bdg points to the bridge this NA is attached to.
	 */
	int bdg_port;
	struct nm_bridge *na_bdg;
};
struct netmap_hw_adapter {	/* physical device */
	struct netmap_adapter up;

	struct net_device_ops nm_ndo;	// XXX linux only
};

struct netmap_generic_adapter {	/* non-native device */
	struct netmap_hw_adapter up;

	/* Pointer to a previously used netmap adapter. */
	struct netmap_adapter *prev;

	/* generic netmap adapters support:
	 * a net_device_ops struct overrides ndo_select_queue(),
	 * save_if_input saves the if_input hook (FreeBSD),
	 * mit_timer and mit_pending implement rx interrupt mitigation,
	 */
	struct net_device_ops generic_ndo;
	void (*save_if_input)(struct ifnet *, struct mbuf *,
			const struct pktinfo *, int);

	struct hrtimer mit_timer;
	int mit_pending;
};
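
/*
 * Since a netmap_adapter is always the first field of the derived types
 * above, code can move between the base and the derived view with a plain
 * cast, e.g. (illustrative sketch only):
 */
#if 0
	struct netmap_vp_adapter *vpna = (struct netmap_vp_adapter *)na;
	struct netmap_adapter *base = &vpna->up;
#endif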
/* bridge wrapper for non VALE ports. It is used to connect real devices to the bridge.
 *
 * The real device must already have its own netmap adapter (hwna). The
 * bridge wrapper and the hwna adapter share the same set of netmap rings and
 * buffers, but they have two separate sets of krings descriptors, with tx/rx
 * meanings swapped:
 *
 *           bwrap     krings      rings       krings      hwna
 *         +------+   +------+    +-----+     +------+   +------+
 *         |tx_rings->|      |\   /|     |----|      |<-tx_rings|
 *         |      |   +------+ \ / +-----+    +------+   |      |
 *         |      |             X                        |      |
 *         |      |            / \                       |      |
 *         |      |   +------+/   \+-----+    +------+   |      |
 *         |rx_rings->|      |     |     |----|      |<-rx_rings|
 *         |      |   +------+     +-----+    +------+   |      |
 *         +------+                                      +------+
 *
 * - packets coming from the bridge go to the bwrap rx rings, which are also the
 *   hwna tx rings.  The bwrap notify callback will then complete the hwna tx
 *   (see netmap_bwrap_notify).
 * - packets coming from the outside go to the hwna rx rings, which are also the
 *   bwrap tx rings.  The (overwritten) hwna notify method will then complete
 *   the bridge tx (see netmap_bwrap_intr_notify).
 *
 * The bridge wrapper may optionally connect the hwna 'host' rings to the
 * bridge. This is done by using a second port in the bridge and connecting it
 * to the 'host' netmap_vp_adapter contained in the netmap_bwrap_adapter.
 * The bwrap host adapter cross-links the hwna host rings in the same way as shown above.
 *
 * - packets coming from the bridge and directed to the host stack are handled by the
 *   bwrap host notify callback (see netmap_bwrap_host_notify)
 * - packets coming from the host stack are still handled by the overwritten
 *   hwna notify callback (netmap_bwrap_intr_notify), but are diverted to the
 *   host adapter depending on the ring number.
 */
struct netmap_bwrap_adapter {
	struct netmap_vp_adapter up;
	struct netmap_vp_adapter host;	/* for host rings */
	struct netmap_adapter *hwna;	/* the underlying device */

	/* backup of the hwna notify callback */
	int (*save_notify)(struct netmap_adapter *,
			u_int ring, enum txrx, int flags);
	/* When we attach a physical interface to the bridge, we
	 * allow the controlling process to terminate, so we need
	 * a place to store the netmap_priv_d data structure.
	 * This is only done when physical interfaces are attached to a bridge.
	 */
	struct netmap_priv_d *na_kpriv;
};
#ifdef WITH_VALE

/*
 * Available space in the ring. Only used in VALE code
 */
static inline uint32_t
nm_kr_space(struct netmap_kring *k, int is_rx)
{
	int space;

	if (is_rx) {
		int busy = k->nkr_hwlease - k->nr_hwcur + k->nr_hwreserved;
		if (busy < 0)
			busy += k->nkr_num_slots;
		space = k->nkr_num_slots - 1 - busy;
	} else {
		space = k->nr_hwcur + k->nr_hwavail - k->nkr_hwlease;
		if (space < 0)
			space += k->nkr_num_slots;
	}
#if 0
	// sanity check
	if (k->nkr_hwlease >= k->nkr_num_slots ||
		k->nr_hwcur >= k->nkr_num_slots ||
		k->nr_hwavail >= k->nkr_num_slots ||
		busy < 0 ||
		busy >= k->nkr_num_slots) {
		D("invalid kring, cur %d avail %d lease %d lease_idx %d lim %d",
			k->nr_hwcur, k->nr_hwavail, k->nkr_hwlease,
			k->nkr_lease_idx, k->nkr_num_slots);
	}
#endif
	return space;
}
/* make a lease on the kring for N positions. return the
 * lease index
 */
static inline uint32_t
nm_kr_lease(struct netmap_kring *k, u_int n, int is_rx)
{
	uint32_t lim = k->nkr_num_slots - 1;
	uint32_t lease_idx = k->nkr_lease_idx;

	k->nkr_leases[lease_idx] = NR_NOSLOT;
	k->nkr_lease_idx = nm_next(lease_idx, lim);

	if (n > nm_kr_space(k, is_rx)) {
		D("invalid request for %d slots", n);
		panic("x");
	}
	/* XXX verify that there are n slots */
	k->nkr_hwlease += n;
	if (k->nkr_hwlease > lim)
		k->nkr_hwlease -= lim + 1;

	if (k->nkr_hwlease >= k->nkr_num_slots ||
		k->nr_hwcur >= k->nkr_num_slots ||
		k->nr_hwavail >= k->nkr_num_slots ||
		k->nkr_lease_idx >= k->nkr_num_slots) {
		D("invalid kring %s, cur %d avail %d lease %d lease_idx %d lim %d",
			k->na->ifp->if_xname,
			k->nr_hwcur, k->nr_hwavail, k->nkr_hwlease,
			k->nkr_lease_idx, k->nkr_num_slots);
	}
	return lease_idx;
}
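
/*
 * Rough shape of the lease protocol described above (illustrative only,
 * heavily simplified; see nm_bdg_flush() for the real bookkeeping).
 * A writer reserves a block of slots under the kring lock, copies packets
 * into those slots outside the lock, then records completion in nkr_leases[].
 */
#if 0
	uint32_t first, lease_idx;

	lockmgr(&kring->q_lock, LK_EXCLUSIVE);
	first = kring->nkr_hwlease;		/* start of our block */
	lease_idx = nm_kr_lease(kring, n, 1);	/* advances nkr_hwlease by n */
	lockmgr(&kring->q_lock, LK_RELEASE);

	/* ... copy n packets into slots first .. first+n-1 (mod ring size),
	 * without holding the lock ... */

	/* record completion of our block; any value other than NR_NOSLOT
	 * signals that the copy is finished. */
	kring->nkr_leases[lease_idx] = first + n;
#endif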
#endif /* WITH_VALE */
/* return update position */
static inline uint32_t
nm_kr_rxpos(struct netmap_kring *k)
{
	uint32_t pos = k->nr_hwcur + k->nr_hwavail;
	if (pos >= k->nkr_num_slots)
		pos -= k->nkr_num_slots;
#if 0
	// sanity check
	if (pos >= k->nkr_num_slots ||
		k->nkr_hwlease >= k->nkr_num_slots ||
		k->nr_hwcur >= k->nkr_num_slots ||
		k->nr_hwavail >= k->nkr_num_slots ||
		k->nkr_lease_idx >= k->nkr_num_slots) {
		D("invalid kring, cur %d avail %d lease %d lease_idx %d lim %d",
			k->nr_hwcur, k->nr_hwavail, k->nkr_hwlease,
			k->nkr_lease_idx, k->nkr_num_slots);
	}
#endif
	return pos;
}
/*
 * protect against multiple threads using the same ring.
 * also check that the ring has not been stopped.
 * We only care for 0 or !=0 as a return code.
 */
#define NM_KR_BUSY	1
#define NM_KR_STOPPED	2

static __inline void nm_kr_put(struct netmap_kring *kr)
{
	NM_ATOMIC_CLEAR(&kr->nr_busy);
}

static __inline int nm_kr_tryget(struct netmap_kring *kr)
{
	/* check a first time without taking the lock
	 * to avoid starvation for nm_kr_get()
	 */
	if (unlikely(kr->nkr_stopped)) {
		ND("ring %p stopped (%d)", kr, kr->nkr_stopped);
		return NM_KR_STOPPED;
	}
	if (unlikely(NM_ATOMIC_TEST_AND_SET(&kr->nr_busy)))
		return NM_KR_BUSY;
	/* check a second time with lock held */
	if (unlikely(kr->nkr_stopped)) {
		ND("ring %p stopped (%d)", kr, kr->nkr_stopped);
		nm_kr_put(kr);
		return NM_KR_STOPPED;
	}
	return 0;
}
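
/*
 * Typical usage (illustrative sketch only): a syscall path grabs the ring,
 * bails out if it is stopped or already in use, and releases it when done.
 */
#if 0
	if (nm_kr_tryget(kring))
		return EBUSY;	/* stopped or busy; only 0/!=0 matters */
	/* ... txsync/rxsync work on the ring ... */
	nm_kr_put(kring);
#endif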
/*
 * The following are support routines used by individual drivers to
 * support netmap operation.
 *
 * netmap_attach() initializes a struct netmap_adapter, allocating the
 *	struct netmap_ring's and the struct selinfo.
 *
 * netmap_detach() frees the memory allocated by netmap_attach().
 *
 * netmap_transmit() replaces the if_transmit routine of the interface,
 *	and is used to intercept packets coming from the stack.
 *
 * netmap_load_map/netmap_reload_map are helper routines to set/reset
 *	the dmamap for a packet buffer
 *
 * netmap_reset() is a helper routine to be called in the driver
 *	when reinitializing a ring.
 */
int netmap_attach(struct netmap_adapter *);
int netmap_attach_common(struct netmap_adapter *);
void netmap_detach_common(struct netmap_adapter *na);
void netmap_detach(struct ifnet *);
int netmap_transmit(struct ifnet *, struct mbuf *);
struct netmap_slot *netmap_reset(struct netmap_adapter *na,
	enum txrx tx, u_int n, u_int new_cur);
int netmap_ring_reinit(struct netmap_kring *);
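
/*
 * Sketch of how a driver would use netmap_attach() (illustrative only;
 * the foo_* driver names and softc fields are hypothetical). The driver
 * fills a netmap_adapter with its ring sizes and callbacks, then registers it.
 */
#if 0
static int
foo_netmap_attach(struct foo_softc *sc)
{
	struct netmap_adapter na;

	bzero(&na, sizeof(na));
	na.ifp = sc->ifp;
	na.num_tx_desc = sc->num_tx_desc;
	na.num_rx_desc = sc->num_rx_desc;
	na.num_tx_rings = na.num_rx_rings = 1;
	na.nm_register = foo_netmap_reg;
	na.nm_txsync = foo_netmap_txsync;
	na.nm_rxsync = foo_netmap_rxsync;
	return netmap_attach(&na);
}
#endif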
/*
 * Support routines to be used with the VALE switch
 */
int netmap_update_config(struct netmap_adapter *na);
int netmap_krings_create(struct netmap_adapter *na, u_int ntx, u_int nrx, u_int tailroom);
void netmap_krings_delete(struct netmap_adapter *na);

struct netmap_if *
netmap_do_regif(struct netmap_priv_d *priv, struct netmap_adapter *na,
	uint16_t ringid, int *err);

u_int nm_bound_var(u_int *v, u_int dflt, u_int lo, u_int hi, const char *msg);
int netmap_get_na(struct nmreq *nmr, struct netmap_adapter **na, int create);
int netmap_get_hw_na(struct ifnet *ifp, struct netmap_adapter **na);
/*
 * The following bridge-related interfaces are used by other kernel modules.
 * In the version that only supports unicast or broadcast, the lookup
 * function can return 0 .. NM_BDG_MAXPORTS-1 for regular ports,
 * NM_BDG_MAXPORTS for broadcast, NM_BDG_MAXPORTS+1 for unknown.
 * XXX in practice "unknown" might be handled the same as broadcast.
 */
typedef u_int (*bdg_lookup_fn_t)(char *buf, u_int len,
		uint8_t *ring_nr, struct netmap_vp_adapter *);
u_int netmap_bdg_learning(char *, u_int, uint8_t *,
		struct netmap_vp_adapter *);

#define NM_BDG_MAXPORTS		254	/* up to 254 */
#define NM_BDG_BROADCAST	NM_BDG_MAXPORTS
#define NM_BDG_NOPORT		(NM_BDG_MAXPORTS+1)

#define NM_NAME			"vale"	/* prefix for bridge port name */

#ifdef WITH_VALE
/* these are redefined in case of no VALE support */
int netmap_get_bdg_na(struct nmreq *nmr, struct netmap_adapter **na, int create);
void netmap_init_bridges(void);
int netmap_bdg_ctl(struct nmreq *nmr, bdg_lookup_fn_t func);

#else /* !WITH_VALE */
#define netmap_get_bdg_na(_1, _2, _3)	0
#define netmap_init_bridges(_1)
#define netmap_bdg_ctl(_1, _2)	EINVAL
#endif /* !WITH_VALE */
/* Various prototypes */
struct dev_kqfilter_args;	/* XXX this shouldn't be here */
int netmap_kqfilter(struct dev_kqfilter_args *ap);

int netmap_init(void);
void netmap_fini(void);
int netmap_get_memory(struct netmap_priv_d* p);
void netmap_dtor(void *data);
int netmap_dtor_locked(struct netmap_priv_d *priv);

struct dev_ioctl_args;	/* XXX this shouldn't be here */
int netmap_ioctl(struct dev_ioctl_args *ap);
/* netmap_adapter creation/destruction */

#define NM_IFPNAME(ifp) ((ifp) ? (ifp)->if_xname : "zombie")

#define NM_DEBUG_PUTGET 1

#ifdef NM_DEBUG_PUTGET

#define NM_DBG(f) __##f

void __netmap_adapter_get(struct netmap_adapter *na);

#define netmap_adapter_get(na)				\
	do {						\
		struct netmap_adapter *__na = na;	\
		D("getting %p:%s (%d)", __na, NM_IFPNAME(__na->ifp), __na->na_refcount);	\
		__netmap_adapter_get(__na);		\
	} while (0)

int __netmap_adapter_put(struct netmap_adapter *na);

#define netmap_adapter_put(na)				\
	do {						\
		struct netmap_adapter *__na = na;	\
		D("putting %p:%s (%d)", __na, NM_IFPNAME(__na->ifp), __na->na_refcount);	\
		__netmap_adapter_put(__na);		\
	} while (0)

#else /* !NM_DEBUG_PUTGET */

#define NM_DBG(f)	f
void netmap_adapter_get(struct netmap_adapter *na);
int netmap_adapter_put(struct netmap_adapter *na);

#endif /* !NM_DEBUG_PUTGET */
extern u_int netmap_buf_size;
#define NETMAP_BUF_SIZE	netmap_buf_size	// XXX remove
extern int netmap_mitigate;
extern int netmap_no_pendintr;
extern u_int netmap_total_buffers;
extern char *netmap_buffer_base;
extern int netmap_verbose;	// XXX debugging
enum {                                  /* verbose flags */
	NM_VERB_ON = 1,                 /* generic verbose */
	NM_VERB_HOST = 0x2,             /* verbose host stack */
	NM_VERB_RXSYNC = 0x10,          /* verbose on rxsync/txsync */
	NM_VERB_TXSYNC = 0x20,
	NM_VERB_RXINTR = 0x100,         /* verbose on rx/tx intr (driver) */
	NM_VERB_TXINTR = 0x200,
	NM_VERB_NIC_RXSYNC = 0x1000,    /* verbose on rx/tx intr (driver) */
	NM_VERB_NIC_TXSYNC = 0x2000,
};

extern int netmap_txsync_retry;
extern int netmap_generic_mit;
extern int netmap_generic_ringsize;
/*
 * NA returns a pointer to the struct netmap adapter from the ifp,
 * WNA is used to write it.
 */
#define WNA(_ifp)	(_ifp)->if_unused7	/* XXX better name ;) */

#define NA(_ifp)	((struct netmap_adapter *)WNA(_ifp))

/*
 * Macros to determine if an interface is netmap capable or netmap enabled.
 * See the magic field in struct netmap_adapter.
 *
 * on FreeBSD just use if_capabilities and if_capenable.
 */
#define NETMAP_CAPABLE(ifp)	(NA(ifp) &&		\
	(ifp)->if_capabilities & IFCAP_NETMAP )

#define NETMAP_SET_CAPABLE(ifp)				\
	(ifp)->if_capabilities |= IFCAP_NETMAP
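
/*
 * Illustrative check (not part of the API): an interface has netmap
 * support if NETMAP_CAPABLE() is true, and is currently in netmap mode
 * when IFCAP_NETMAP is also set in if_capenable.
 */
#if 0
	if (NETMAP_CAPABLE(ifp) && (ifp->if_capenable & IFCAP_NETMAP)) {
		/* interface is in netmap mode */
	}
#endif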
/* Callback invoked by the dma machinery after a successful dmamap_load */
static void netmap_dmamap_cb(__unused void *arg,
    __unused bus_dma_segment_t * segs, __unused int nseg, __unused int error)
{
}

/* bus_dmamap_load wrapper: call aforementioned function if map != NULL.
 * XXX can we do it without a callback ?
 */
static inline void
netmap_load_map(bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
{
	if (map)
		bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE,
		    netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT);
}

/* update the map when a buffer changes. */
static inline void
netmap_reload_map(bus_dma_tag_t tag, bus_dmamap_t map, void *buf)
{
	if (map) {
		bus_dmamap_unload(tag, map);
		bus_dmamap_load(tag, map, buf, NETMAP_BUF_SIZE,
		    netmap_dmamap_cb, NULL, BUS_DMA_NOWAIT);
	}
}
/*
 * functions to map NIC to KRING indexes (n2k) and vice versa (k2n)
 */
static inline int
netmap_idx_n2k(struct netmap_kring *kr, int idx)
{
	int n = kr->nkr_num_slots;
	idx += kr->nkr_hwofs;
	if (idx < 0)
		return idx + n;
	else if (idx < n)
		return idx;
	else
		return idx - n;
}


static inline int
netmap_idx_k2n(struct netmap_kring *kr, int idx)
{
	int n = kr->nkr_num_slots;
	idx -= kr->nkr_hwofs;
	if (idx < 0)
		return idx + n;
	else if (idx < n)
		return idx;
	else
		return idx - n;
}
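
/*
 * Illustrative use of the translation above (variable names are
 * hypothetical): a driver txsync/rxsync routine converts a kring index
 * into the matching NIC descriptor index with netmap_idx_k2n().
 */
#if 0
	u_int j = kring->nr_hwcur;		/* netmap (kring) index */
	u_int nic_i = netmap_idx_k2n(kring, j);	/* NIC ring index */
#endif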
/* Entries of the look-up table. */
struct lut_entry {
	void *vaddr;		/* virtual address. */
	vm_paddr_t paddr;	/* physical address. */
};

struct netmap_obj_pool;
extern struct lut_entry *netmap_buffer_lut;
#define NMB_VA(i)	(netmap_buffer_lut[i].vaddr)
#define NMB_PA(i)	(netmap_buffer_lut[i].paddr)
/*
 * NMB returns the virtual address of a buffer (buffer 0 on bad index)
 * PNMB also fills the physical address
 */
static inline void *
NMB(struct netmap_slot *slot)
{
	uint32_t i = slot->buf_idx;
	return (unlikely(i >= netmap_total_buffers)) ? NMB_VA(0) : NMB_VA(i);
}

static inline void *
PNMB(struct netmap_slot *slot, uint64_t *pp)
{
	uint32_t i = slot->buf_idx;
	void *ret = (i >= netmap_total_buffers) ? NMB_VA(0) : NMB_VA(i);

	*pp = (i >= netmap_total_buffers) ? NMB_PA(0) : NMB_PA(i);
	return ret;
}

/* Generic version of NMB, which uses device-specific memory. */
static inline void *
BDG_NMB(struct netmap_adapter *na, struct netmap_slot *slot)
{
	struct lut_entry *lut = na->na_lut;
	uint32_t i = slot->buf_idx;
	return (unlikely(i >= na->na_lut_objtotal)) ?
		lut[0].vaddr : lut[i].vaddr;
}
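
/*
 * Illustrative use (variable names are hypothetical): a driver obtains the
 * virtual and physical address of the buffer attached to a slot.
 */
#if 0
	struct netmap_slot *slot = &ring->slot[j];
	uint64_t paddr;
	void *addr = PNMB(slot, &paddr);	/* addr == NMB(slot) */
#endif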
/* default functions to handle rx/tx interrupts */
int netmap_rx_irq(struct ifnet *, u_int, u_int *);
#define netmap_tx_irq(_n, _q) netmap_rx_irq(_n, _q, NULL)
int netmap_common_irq(struct ifnet *, u_int, u_int *work_done);

void netmap_txsync_to_host(struct netmap_adapter *na);
void netmap_disable_all_rings(struct ifnet *);
void netmap_enable_all_rings(struct ifnet *);
void netmap_disable_ring(struct netmap_kring *kr);
/* Structure associated to each thread which registered an interface.
 *
 * The first 4 fields of this structure are written by NIOCREGIF and
 * read by poll() and NIOC?XSYNC.
 * There is low contention among writers (actually, a correct user program
 * should have no contention among writers) and among writers and readers,
 * so we use a single global lock to protect the structure initialization.
 * Since initialization involves the allocation of memory, we reuse the memory
 * allocator lock.
 * Read access to the structure is lock free. Readers must check that
 * np_nifp is not NULL before using the other fields.
 * If np_nifp is NULL initialization has not been performed, so they should
 * return an error to userlevel.
 *
 * The ref_done field is used to regulate access to the refcount in the
 * memory allocator. The refcount must be incremented at most once for
 * each open("/dev/netmap"). The increment is performed by the first
 * function that calls netmap_get_memory() (currently called by
 * mmap(), NIOCGINFO and NIOCREGIF).
 * If the refcount is incremented, it is then decremented when the
 * private structure is destroyed.
 */
struct netmap_priv_d {
	struct netmap_if * volatile np_nifp;	/* netmap if descriptor. */

	struct netmap_adapter	*np_na;
	int		np_ringid;	/* from the ioctl */
	u_int		np_qfirst, np_qlast;	/* range of rings to scan */
	uint16_t	np_txpoll;

	struct netmap_mem_d	*np_mref;	/* use with NMG_LOCK held */
	/* np_refcount is only used on FreeBSD */
	int		np_refcount;	/* use with NMG_LOCK held */
};
/*
 * generic netmap emulation for devices that do not have
 * native netmap support.
 * XXX generic_netmap_register() is only exported to implement
 *	nma_is_generic().
 */
int generic_netmap_register(struct netmap_adapter *na, int enable);
int generic_netmap_attach(struct ifnet *ifp);

int netmap_catch_rx(struct netmap_adapter *na, int intercept);
void generic_rx_handler(struct ifnet *ifp, struct mbuf *m,
		const struct pktinfo *, int);
void netmap_catch_packet_steering(struct netmap_generic_adapter *na, int enable);
int generic_xmit_frame(struct ifnet *ifp, struct mbuf *m, void *addr, u_int len, u_int ring_nr);
int generic_find_num_desc(struct ifnet *ifp, u_int *tx, u_int *rx);
void generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq);

static inline int
nma_is_generic(struct netmap_adapter *na)
{
	return na->nm_register == generic_netmap_register;
}
/*
 * netmap_mitigation API. This is used by the generic adapter
 * to reduce the number of interrupt requests/selwakeup
 * to clients on incoming packets.
 */
void netmap_mitigation_init(struct netmap_generic_adapter *na);
void netmap_mitigation_start(struct netmap_generic_adapter *na);
void netmap_mitigation_restart(struct netmap_generic_adapter *na);
int netmap_mitigation_active(struct netmap_generic_adapter *na);
void netmap_mitigation_cleanup(struct netmap_generic_adapter *na);
// int generic_timer_handler(struct hrtimer *t);

#endif /* _NET_NETMAP_KERN_H_ */