/* sys/net/netmap/netmap_generic.c */
/*
 * Copyright (C) 2013 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * This module implements netmap support on top of standard,
 * unmodified device drivers.
 *
 * A NIOCREGIF request is handled here if the device does not
 * have native support. TX and RX rings are emulated as follows:
 *
 * NIOCREGIF
 *	We preallocate a block of TX mbufs (roughly as many as
 *	tx descriptors; the number is not critical) to speed up
 *	operation during transmissions. The refcount on most of
 *	these buffers is artificially bumped up so we can recycle
 *	them more easily. Also, the destructor is intercepted
 *	so we use it as an interrupt notification to wake up
 *	processes blocked on a poll().
 *
 *	For each receive ring we allocate one "struct mbq"
 *	(an mbuf tailq plus a spinlock). We intercept packets
 *	(through if_input) on the receive path and put them
 *	in the mbq from which netmap receive routines can grab them.
 *
 * TX:
 *	in the generic_txsync() routine, netmap buffers are copied
 *	(or linked, in the future) to the preallocated mbufs
 *	and pushed to the transmit queue. Some of these mbufs
 *	(those with NS_REPORT, or otherwise every half ring)
 *	have the refcount=1, others have refcount=2.
 *	When the destructor is invoked, we take that as
 *	a notification that all mbufs up to that one in
 *	the specific ring have been completed, and generate
 *	the equivalent of a transmit interrupt.
 *
 * RX:
 *	intercepted mbufs are queued per receive ring by the handler
 *	above; generic_netmap_rxsync() later copies their payload into
 *	the netmap receive ring and frees them.
 */
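
/*
 * Illustrative sketch only (not part of the driver, never compiled): a
 * userspace model of the "destructor as completion notification" idea
 * described above. The toy_buf type and all names below are invented for
 * the example.
 */
#if 0
#include <stdio.h>

struct toy_buf {
	int refcnt;
	void (*dtor)(struct toy_buf *);	/* stands in for m_ext.ext_free */
};

static void
toy_tx_complete(struct toy_buf *b)
{
	/* In the driver this is where the emulated TX interrupt is raised. */
	printf("buffer %p completed, wake up poll()ers\n", (void *)b);
}

static void
toy_free(struct toy_buf *b)
{
	if (--b->refcnt == 0 && b->dtor != NULL)
		b->dtor(b);	/* last reference gone: fire the notification */
}

int
main(void)
{
	struct toy_buf b = { .refcnt = 2, .dtor = toy_tx_complete };

	toy_free(&b);	/* netmap's extra reference: nothing happens yet */
	toy_free(&b);	/* the "NIC" drops its reference: destructor fires */
	return 0;
}
#endif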
#include <sys/cdefs.h> /* prerequisite */
__FBSDID("$FreeBSD: head/sys/dev/netmap/netmap.c 257666 2013-11-05 01:06:22Z luigi $");

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/lock.h>   /* PROT_EXEC */
#include <sys/socket.h> /* sockaddrs */
#include <sys/event.h>
#include <net/if.h>
#include <net/if_var.h>
#include <sys/bus.h>        /* bus_dmamap_* in netmap_kern.h */

// XXX temporary - D() defined here
#include <net/netmap.h>
#include <net/netmap/netmap_kern.h>
#include <net/netmap/netmap_mem2.h>

#define rtnl_lock()	D("rtnl_lock called");
#define rtnl_unlock()	D("rtnl_unlock called");
#define MBUF_TXQ(m)	((m)->m_pkthdr.hash)
#define smp_mb()
/*
 * mbuf wrappers
 */

/*
 * we allocate an EXT_PACKET
 */
#define netmap_get_mbuf(len) m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR)

/* mbuf destructor, also need to change the type to EXT_EXTREF,
 * add an M_NOFREE flag, and then clear the flag and
 * chain into uma_zfree(zone_pack, mf)
 * (or reinstall the buffer ?)
 */
#define SET_MBUF_DESTRUCTOR(m, fn)	do {		\
	(m)->m_ext.ext_free = (void *)fn;		\
	/* (m)->m_ext.ext_type = EXT_EXTREF; */		\
} while (0)

#define GET_MBUF_REFCNT(m)	((m)->m_ext.ref_cnt ? *(m)->m_ext.ref_cnt : -1)
/* ======================== usage stats =========================== */

#ifdef RATE
#define IFRATE(x) x
struct rate_stats {
	unsigned long txpkt;
	unsigned long txsync;
	unsigned long txirq;
	unsigned long rxpkt;
	unsigned long rxirq;
	unsigned long rxsync;
};

struct rate_context {
	unsigned refcount;
	struct timer_list timer;
	struct rate_stats new;
	struct rate_stats old;
};

#define RATE_PRINTK(_NAME_) \
	printk( #_NAME_ " = %lu Hz\n", (cur._NAME_ - ctx->old._NAME_)/RATE_PERIOD);
#define RATE_PERIOD  2
static void rate_callback(unsigned long arg)
{
	struct rate_context * ctx = (struct rate_context *)arg;
	struct rate_stats cur = ctx->new;
	int r;

	RATE_PRINTK(txpkt);
	RATE_PRINTK(txsync);
	RATE_PRINTK(txirq);
	RATE_PRINTK(rxpkt);
	RATE_PRINTK(rxsync);
	RATE_PRINTK(rxirq);
	printk("\n");

	ctx->old = cur;
	r = mod_timer(&ctx->timer, jiffies +
			msecs_to_jiffies(RATE_PERIOD * 1000));
	if (unlikely(r))
		D("[v1000] Error: mod_timer()");
}

static struct rate_context rate_ctx;

#else /* !RATE */
#define IFRATE(x)
#endif /* !RATE */
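
/*
 * Illustrative sketch only (never compiled): the RATE instrumentation above
 * just samples monotonically increasing counters every RATE_PERIOD seconds
 * and prints the difference divided by the period. Names are invented for
 * the example.
 */
#if 0
#include <stdio.h>

struct toy_stats { unsigned long txpkt; };

static void
toy_rate_tick(struct toy_stats *cur, struct toy_stats *old, unsigned period_s)
{
	/* events per second over the last sampling interval */
	printf("txpkt = %lu Hz\n", (cur->txpkt - old->txpkt) / period_s);
	*old = *cur;	/* remember this sample for the next tick */
}
#endif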
/* =============== GENERIC NETMAP ADAPTER SUPPORT ================= */
#define GENERIC_BUF_SIZE	netmap_buf_size    /* Size of the mbufs in the Tx pool. */

/*
 * Wrapper used by the generic adapter layer to notify
 * the poller threads. Unlike netmap_rx_irq(), we check
 * only IFCAP_NETMAP instead of NAF_NATIVE_ON to enable the irq.
 */
static int
netmap_generic_irq(struct ifnet *ifp, u_int q, u_int *work_done)
{
	if (unlikely(!(ifp->if_capenable & IFCAP_NETMAP)))
		return 0;

	return netmap_common_irq(ifp, q, work_done);
}
/* Enable/disable netmap mode for a generic network interface. */
int
generic_netmap_register(struct netmap_adapter *na, int enable)
{
	struct ifnet *ifp = na->ifp;
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct mbuf *m;
	int error;
	int i, r;

#ifdef REG_RESET
	error = ifp->netdev_ops->ndo_stop(ifp);
	if (error) {
		return error;
	}
#endif /* REG_RESET */

	if (enable) { /* Enable netmap mode. */
		/* Initialize the rx queue, as generic_rx_handler() can
		 * be called as soon as netmap_catch_rx() returns.
		 */
		for (r=0; r<na->num_rx_rings; r++) {
			mbq_safe_init(&na->rx_rings[r].rx_queue);
			na->rx_rings[r].nr_ntc = 0;
		}

		/* Init the mitigation timer. */
		netmap_mitigation_init(gna);

		/*
		 * Preallocate packet buffers for the tx rings.
		 */
		for (r=0; r<na->num_tx_rings; r++) {
			na->tx_rings[r].nr_ntc = 0;
			na->tx_rings[r].tx_pool = kmalloc(na->num_tx_desc * sizeof(struct mbuf *),
					M_DEVBUF, M_NOWAIT | M_ZERO);
			if (!na->tx_rings[r].tx_pool) {
				D("tx_pool allocation failed");
				error = ENOMEM;
				goto free_tx_pool;
			}
			for (i=0; i<na->num_tx_desc; i++) {
				m = netmap_get_mbuf(GENERIC_BUF_SIZE);
				if (!m) {
					D("tx_pool[%d] allocation failed", i);
					error = ENOMEM;
					goto free_mbufs;
				}
				na->tx_rings[r].tx_pool[i] = m;
			}
		}
		rtnl_lock();
		/* Prepare to intercept incoming traffic. */
		error = netmap_catch_rx(na, 1);
		if (error) {
			D("netdev_rx_handler_register() failed");
			goto register_handler;
		}
		ifp->if_capenable |= IFCAP_NETMAP;

		/* Make netmap control the packet steering. */
		netmap_catch_packet_steering(gna, 1);

		rtnl_unlock();

#ifdef RATE
		if (rate_ctx.refcount == 0) {
			D("setup_timer()");
			memset(&rate_ctx, 0, sizeof(rate_ctx));
			setup_timer(&rate_ctx.timer, &rate_callback, (unsigned long)&rate_ctx);
			if (mod_timer(&rate_ctx.timer, jiffies + msecs_to_jiffies(1500))) {
				D("Error: mod_timer()");
			}
		}
		rate_ctx.refcount++;
#endif /* RATE */

	} else { /* Disable netmap mode. */
		rtnl_lock();

		ifp->if_capenable &= ~IFCAP_NETMAP;

		/* Release packet steering control. */
		netmap_catch_packet_steering(gna, 0);

		/* Do not intercept packets on the rx path. */
		netmap_catch_rx(na, 0);

		rtnl_unlock();

		/* Free the mbufs going to the netmap rings */
		for (r=0; r<na->num_rx_rings; r++) {
			mbq_safe_purge(&na->rx_rings[r].rx_queue);
			mbq_safe_destroy(&na->rx_rings[r].rx_queue);
		}

		netmap_mitigation_cleanup(gna);

		for (r=0; r<na->num_tx_rings; r++) {
			for (i=0; i<na->num_tx_desc; i++) {
				m_freem(na->tx_rings[r].tx_pool[i]);
			}
			kfree(na->tx_rings[r].tx_pool, M_DEVBUF);
		}

#ifdef RATE
		if (--rate_ctx.refcount == 0) {
			D("del_timer()");
			del_timer(&rate_ctx.timer);
		}
#endif
	}

#ifdef REG_RESET
	error = ifp->netdev_ops->ndo_open(ifp);
	if (error) {
		goto alloc_tx_pool;
	}
#endif

	return 0;

register_handler:
	rtnl_unlock();
free_tx_pool:
	r--;
	i = na->num_tx_desc; /* Useless, but just to stay safe. */
free_mbufs:
	i--;
	for (; r>=0; r--) {
		for (; i>=0; i--) {
			m_freem(na->tx_rings[r].tx_pool[i]);
		}
		kfree(na->tx_rings[r].tx_pool, M_DEVBUF);
		i = na->num_tx_desc - 1;
	}

	return error;
}
/*
 * Callback invoked when the device driver frees an mbuf used
 * by netmap to transmit a packet. This usually happens when
 * the NIC notifies the driver that transmission is completed.
 */
static void
generic_mbuf_destructor(struct mbuf *m)
{
	if (netmap_verbose)
		D("Tx irq (%p) queue %d", m, MBUF_TXQ(m));
	netmap_generic_irq(MBUF_IFP(m), MBUF_TXQ(m), NULL);
#if 0
	m->m_ext.ext_type = EXT_PACKET;
#endif
	m->m_ext.ext_free = NULL;
#if 0
	if (*(m->m_ext.ref_cnt) == 0)
		*(m->m_ext.ref_cnt) = 1;
	uma_zfree(zone_pack, m);
#endif
	IFRATE(rate_ctx.new.txirq++);
}
/* Record completed transmissions and update hwavail.
 *
 * nr_ntc is the oldest tx buffer not yet completed
 * (same as nr_hwavail + nr_hwcur + 1),
 * nr_hwcur is the first unsent buffer.
 * When cleaning, we try to recover buffers between nr_ntc and nr_hwcur.
 */
static int
generic_netmap_tx_clean(struct netmap_kring *kring)
{
	u_int num_slots = kring->nkr_num_slots;
	u_int ntc = kring->nr_ntc;
	u_int hwcur = kring->nr_hwcur;
	u_int n = 0;
	struct mbuf **tx_pool = kring->tx_pool;

	while (ntc != hwcur) { /* buffers not completed */
		struct mbuf *m = tx_pool[ntc];

		if (unlikely(m == NULL)) {
			/* try to replenish the entry */
			tx_pool[ntc] = m = netmap_get_mbuf(GENERIC_BUF_SIZE);
			if (unlikely(m == NULL)) {
				D("mbuf allocation failed, XXX error");
				// XXX how do we proceed ? break ?
				return -ENOMEM;
			}
#if 0
		} else if (GET_MBUF_REFCNT(m) != 1) {
			break; /* This mbuf is still busy: its refcnt is 2. */
#endif
		}
		if (unlikely(++ntc == num_slots)) {
			ntc = 0;
		}
		n++;
	}
	kring->nr_ntc = ntc;
	kring->nr_hwavail += n;
	ND("tx completed [%d] -> hwavail %d", n, kring->nr_hwavail);

	return n;
}
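
/*
 * Illustrative sketch only (never compiled): the loop above walks the ring
 * from nr_ntc to nr_hwcur, so the number of reclaimed slots is simply the
 * circular distance between the two indices. Helper names are invented for
 * the example.
 */
#if 0
#include <assert.h>

/* circular distance from 'from' to 'to' on a ring of 'num_slots' slots */
static unsigned
ring_distance(unsigned from, unsigned to, unsigned num_slots)
{
	return (to >= from) ? to - from : to + num_slots - from;
}

int
main(void)
{
	assert(ring_distance(3, 7, 8) == 4);	/* no wrap */
	assert(ring_distance(7, 3, 8) == 4);	/* wraps past slot 0 */
	return 0;
}
#endif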
/*
 * We have pending packets in the driver between nr_ntc and hwcur.
 * Compute a position in the middle, to be used to generate
 * a notification.
 */
static inline u_int
generic_tx_event_middle(struct netmap_kring *kring, u_int hwcur)
{
	u_int n = kring->nkr_num_slots;
	u_int ntc = kring->nr_ntc;
	u_int e;

	if (hwcur >= ntc) {
		e = (hwcur + ntc) / 2;
	} else { /* wrap around */
		e = (hwcur + n + ntc) / 2;
		if (e >= n) {
			e -= n;
		}
	}

	if (unlikely(e >= n)) {
		D("This cannot happen");
		e = 0;
	}

	return e;
}
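
/*
 * Illustrative sketch only (never compiled): the midpoint computation above
 * in plain C, so the wrap-around case can be checked in isolation. Names are
 * invented for the example.
 */
#if 0
#include <assert.h>

static unsigned
ring_middle(unsigned ntc, unsigned hwcur, unsigned num_slots)
{
	unsigned e;

	if (hwcur >= ntc) {
		e = (hwcur + ntc) / 2;
	} else {				/* interval wraps past slot 0 */
		e = (hwcur + num_slots + ntc) / 2;
		if (e >= num_slots)
			e -= num_slots;
	}
	return e;
}

int
main(void)
{
	assert(ring_middle(2, 10, 16) == 6);	/* plain average */
	assert(ring_middle(14, 2, 16) == 0);	/* (2 + 16 + 14) / 2 - 16 */
	return 0;
}
#endif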
/*
 * We have pending packets in the driver between nr_ntc and hwcur.
 * Schedule a notification approximately in the middle of the two.
 * There is a race but this is only called within txsync which does
 * a double check.
 */
static void
generic_set_tx_event(struct netmap_kring *kring, u_int hwcur)
{
	struct mbuf *m;
	u_int e;

	if (kring->nr_ntc == hwcur) {
		return;
	}
	e = generic_tx_event_middle(kring, hwcur);

	m = kring->tx_pool[e];
	if (m == NULL) {
		/* This can happen if there is already an event on the netmap
		   slot 'e': There is nothing to do. */
		return;
	}
	ND("Event at %d mbuf %p refcnt %d", e, m, GET_MBUF_REFCNT(m));
	kring->tx_pool[e] = NULL;
	SET_MBUF_DESTRUCTOR(m, generic_mbuf_destructor);

	// XXX wmb() ?
	/* Decrement the refcount and free it if we have the last one. */
	m_freem(m);
	smp_mb();
}
/*
 * generic_netmap_txsync() transforms netmap buffers into mbufs
 * and passes them to the standard device driver
 * (ndo_start_xmit() or ifp->if_transmit()).
 * On Linux this is not done directly, but using dev_queue_xmit(),
 * since it implements the TX flow control (and takes some locks).
 */
static int
generic_netmap_txsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
	struct ifnet *ifp = na->ifp;
	struct netmap_kring *kring = &na->tx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	u_int j, k, num_slots = kring->nkr_num_slots;
	int new_slots, ntx;

	IFRATE(rate_ctx.new.txsync++);

	// TODO: handle the case of mbuf allocation failure
	/* first, reclaim completed buffers */
	generic_netmap_tx_clean(kring);

	/* Take a copy of ring->cur now, and never read it again. */
	k = ring->cur;
	if (unlikely(k >= num_slots)) {
		return netmap_ring_reinit(kring);
	}

	rmb();
	j = kring->nr_hwcur;
	/*
	 * 'new_slots' counts how many new slots have been added:
	 * everything from hwcur to cur, excluding reserved ones, if any.
	 * nr_hwreserved starts from hwcur and counts how many slots were
	 * not sent to the NIC from the previous round.
	 */
	new_slots = k - j - kring->nr_hwreserved;
	if (new_slots < 0) {
		new_slots += num_slots;
	}
	ntx = 0;
	if (j != k) {
		/* Process new packets to send:
		 * j is the current index in the netmap ring.
		 */
		while (j != k) {
			struct netmap_slot *slot = &ring->slot[j]; /* Current slot in the netmap ring */
			void *addr = NMB(slot);
			u_int len = slot->len;
			struct mbuf *m;
			int tx_ret;

			if (unlikely(addr == netmap_buffer_base || len > NETMAP_BUF_SIZE)) {
				return netmap_ring_reinit(kring);
			}
			/* Take an mbuf from the tx pool and copy in the user packet. */
			m = kring->tx_pool[j];
			if (unlikely(!m)) {
				RD(5, "This should never happen");
				kring->tx_pool[j] = m = netmap_get_mbuf(GENERIC_BUF_SIZE);
				if (unlikely(m == NULL)) {
					D("mbuf allocation failed");
					break;
				}
			}
			/* XXX we should ask notifications when NS_REPORT is set,
			 * or roughly every half frame. We can optimize this
			 * by lazily requesting notifications only when a
			 * transmission fails. Probably the best way is to
			 * break on failures and set notifications when
			 * ring->avail == 0 || j != k
			 */
			tx_ret = generic_xmit_frame(ifp, m, addr, len, ring_nr);
			if (unlikely(tx_ret)) {
				RD(5, "start_xmit failed: err %d [%u,%u,%u,%u]",
						tx_ret, kring->nr_ntc, j, k, kring->nr_hwavail);
				/*
				 * No room for this mbuf in the device driver.
				 * Request a notification FOR A PREVIOUS MBUF,
				 * then call generic_netmap_tx_clean(kring) to do the
				 * double check and see if we can free more buffers.
				 * If there is space continue, else break;
				 * NOTE: the double check is necessary if the problem
				 * occurs in the txsync call after selrecord().
				 * Also, we need some way to tell the caller that not
				 * all buffers were queued onto the device (this was
				 * not a problem with native netmap driver where space
				 * is preallocated). The bridge has a similar problem
				 * and we solve it there by dropping the excess packets.
				 */
				generic_set_tx_event(kring, j);
				if (generic_netmap_tx_clean(kring)) { /* space now available */
					continue;
				} else {
					break;
				}
			}
			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			if (unlikely(++j == num_slots))
				j = 0;
			ntx++;
		}

		/* Update hwcur to the next slot to transmit. */
		kring->nr_hwcur = j;

		/*
		 * Report all new slots as unavailable, even those not sent.
		 * We account for them with hwreserved, so that
		 * nr_hwreserved =:= cur - nr_hwcur
		 */
		kring->nr_hwavail -= new_slots;
		kring->nr_hwreserved = k - j;
		if (kring->nr_hwreserved < 0) {
			kring->nr_hwreserved += num_slots;
		}

		IFRATE(rate_ctx.new.txpkt += ntx);

		if (!kring->nr_hwavail) {
			/* No more available slots? Set a notification event
			 * on a netmap slot that will be cleaned in the future.
			 * No doublecheck is performed, since txsync() will be
			 * called twice by netmap_poll().
			 */
			generic_set_tx_event(kring, j);
		}
		ND("tx #%d, hwavail = %d", ntx, kring->nr_hwavail);
	}

	/* Synchronize the user's view to the kernel view. */
	ring->avail = kring->nr_hwavail;
	ring->reserved = kring->nr_hwreserved;

	return 0;
}
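
/*
 * Illustrative sketch only (never compiled): roughly how a userspace
 * producer fills the TX ring that this txsync consumes, using the 2013-era
 * netmap_user.h API. Registration (NIOCREGIF, mmap) and error handling are
 * omitted; 'fd', 'nifp', 'frame' and 'frame_len' are assumptions made for
 * the example.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <net/netmap.h>
#include <net/netmap_user.h>

static void
tx_burst(int fd, struct netmap_if *nifp, const char *frame, unsigned frame_len)
{
	struct netmap_ring *ring = NETMAP_TXRING(nifp, 0);

	while (ring->avail > 0) {
		struct netmap_slot *slot = &ring->slot[ring->cur];
		char *dst = NETMAP_BUF(ring, slot->buf_idx);

		memcpy(dst, frame, frame_len);	/* copied again into an mbuf by txsync */
		slot->len = frame_len;
		ring->cur = NETMAP_RING_NEXT(ring, ring->cur);
		ring->avail--;
	}
	ioctl(fd, NIOCTXSYNC, NULL);	/* ends up in generic_netmap_txsync() */
}
#endif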
/*
 * This handler is registered (through netmap_catch_rx())
 * within the attached network interface
 * in the RX subsystem, so that every mbuf passed up by
 * the driver can be intercepted before it reaches the network stack.
 * Intercepted packets are put in a queue where the
 * generic_netmap_rxsync() callback can extract them.
 */
void
generic_rx_handler(struct ifnet *ifp, struct mbuf *m,
		   const struct pktinfo *pi, int cpuid)
{
	struct netmap_adapter *na = NA(ifp);
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	u_int work_done;
	u_int rr = 0; // receive ring number

	ND("called");
	/* limit the size of the queue */
	if (unlikely(mbq_len(&na->rx_rings[rr].rx_queue) > 1024)) {
		m_freem(m);
	} else {
		mbq_safe_enqueue(&na->rx_rings[rr].rx_queue, m);
	}

	if (netmap_generic_mit < 32768) {
		/* no rx mitigation, pass notification up */
		netmap_generic_irq(na->ifp, rr, &work_done);
		IFRATE(rate_ctx.new.rxirq++);
	} else {
		/* same as send combining, filter notification if there is a
		 * pending timer, otherwise pass it up and start a timer.
		 */
		if (likely(netmap_mitigation_active(gna))) {
			/* Record that there is some pending work. */
			gna->mit_pending = 1;
		} else {
			netmap_generic_irq(na->ifp, rr, &work_done);
			IFRATE(rate_ctx.new.rxirq++);
			netmap_mitigation_start(gna);
		}
	}
}
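
/*
 * Illustrative sketch only (never compiled): the interrupt mitigation used
 * above, reduced to a small state machine. While a (virtual) timer is
 * pending, notifications are only recorded; a coalesced notification is
 * delivered when the timer fires. All names are invented for the example,
 * and the real timer callback also re-arms itself while work keeps arriving.
 */
#if 0
#include <stdio.h>

struct toy_mit {
	int timer_active;
	int pending;
};

static void
toy_rx_notify(struct toy_mit *mit)
{
	if (mit->timer_active) {
		mit->pending = 1;		/* coalesce: just remember it */
	} else {
		printf("wake up userspace\n");	/* immediate notification */
		mit->timer_active = 1;		/* start the mitigation timer */
	}
}

static void
toy_timer_fires(struct toy_mit *mit)
{
	mit->timer_active = 0;
	if (mit->pending) {
		mit->pending = 0;
		printf("deliver coalesced notification\n");
	}
}
#endif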
/*
 * generic_netmap_rxsync() extracts mbufs from the queue filled by
 * generic_rx_handler() and puts their content in the netmap
 * receive ring.
 * Access must be protected because the rx handler is asynchronous.
 */
static int
generic_netmap_rxsync(struct netmap_adapter *na, u_int ring_nr, int flags)
{
	struct netmap_kring *kring = &na->rx_rings[ring_nr];
	struct netmap_ring *ring = kring->ring;
	u_int j, n, lim = kring->nkr_num_slots - 1;
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
	u_int k, resvd = ring->reserved;

	if (ring->cur > lim)
		return netmap_ring_reinit(kring);

	/* Import newly received packets into the netmap ring. */
	if (netmap_no_pendintr || force_update) {
		uint16_t slot_flags = kring->nkr_slot_flags;
		struct mbuf *m;

		n = 0;
		j = kring->nr_ntc; /* first empty slot in the receive ring */
		/* extract buffers from the rx queue, stop at most one
		 * slot before nr_hwcur (index k)
		 */
		k = (kring->nr_hwcur) ? kring->nr_hwcur-1 : lim;
		while (j != k) {
			int len;
			void *addr = NMB(&ring->slot[j]);

			if (addr == netmap_buffer_base) { /* Bad buffer */
				return netmap_ring_reinit(kring);
			}
			/*
			 * Call the locked version of the function.
			 * XXX Ideally we could grab a batch of mbufs at once,
			 * by changing rx_queue into a ring.
			 */
			m = mbq_safe_dequeue(&kring->rx_queue);
			if (!m)
				break;
			len = MBUF_LEN(m);
			m_copydata(m, 0, len, addr);
			ring->slot[j].len = len;
			ring->slot[j].flags = slot_flags;
			m_freem(m);
			if (unlikely(j++ == lim))
				j = 0;
			n++;
		}
		if (n) {
			kring->nr_ntc = j;
			kring->nr_hwavail += n;
			IFRATE(rate_ctx.new.rxpkt += n);
		}
		kring->nr_kflags &= ~NKR_PENDINTR;
	}

	// XXX should we invert the order ?
	/* Skip past packets that userspace has released */
	j = kring->nr_hwcur;
	k = ring->cur;
	if (resvd > 0) {
		if (resvd + ring->avail >= lim + 1) {
			D("XXX invalid reserve/avail %d %d", resvd, ring->avail);
			ring->reserved = resvd = 0; // XXX panic...
		}
		k = (k >= resvd) ? k - resvd : k + lim + 1 - resvd;
	}
	if (j != k) {
		/* Userspace has released some packets. */
		for (n = 0; j != k; n++) {
			struct netmap_slot *slot = &ring->slot[j];

			slot->flags &= ~NS_BUF_CHANGED;
			if (unlikely(j++ == lim))
				j = 0;
		}
		kring->nr_hwavail -= n;
		kring->nr_hwcur = k;
	}
	/* Tell userspace that there are new packets. */
	ring->avail = kring->nr_hwavail - resvd;
	IFRATE(rate_ctx.new.rxsync++);

	return 0;
}
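
/*
 * Illustrative sketch only (never compiled): roughly how a userspace
 * consumer drains the receive ring that this rxsync fills, using the
 * 2013-era netmap_user.h API (cur/avail semantics). Setup (NIOCREGIF, mmap)
 * and error handling are omitted; 'fd' and 'nifp' are assumed to exist.
 */
#if 0
#include <poll.h>
#include <net/netmap.h>
#include <net/netmap_user.h>

static void
rx_loop(int fd, struct netmap_if *nifp)
{
	struct netmap_ring *ring = NETMAP_RXRING(nifp, 0);
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	for (;;) {
		poll(&pfd, 1, -1);		/* wait for the emulated rx irq */
		while (ring->avail > 0) {
			struct netmap_slot *slot = &ring->slot[ring->cur];
			char *payload = NETMAP_BUF(ring, slot->buf_idx);

			/* consume slot->len bytes at 'payload' here */
			(void)payload;
			ring->cur = NETMAP_RING_NEXT(ring, ring->cur);
			ring->avail--;
		}
	}
}
#endif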
static void
generic_netmap_dtor(struct netmap_adapter *na)
{
	struct ifnet *ifp = na->ifp;
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter*)na;
	struct netmap_adapter *prev_na = gna->prev;

	if (prev_na != NULL) {
		D("Released generic NA %p", gna);
#if 0
		if_rele(na->ifp);
#endif
		netmap_adapter_put(prev_na);
	}
	if (ifp != NULL) {
		WNA(ifp) = prev_na;
		D("Restored native NA %p", prev_na);
		na->ifp = NULL;
	}
}
/*
 * generic_netmap_attach() makes it possible to use netmap on
 * a device without native netmap support.
 * This is less performant than native support but potentially
 * faster than raw sockets or similar schemes.
 *
 * In this "emulated" mode, netmap rings do not necessarily
 * have the same size as those in the NIC. We use a default
 * value and possibly override it if the OS has ways to fetch the
 * actual configuration.
 */
int
generic_netmap_attach(struct ifnet *ifp)
{
	struct netmap_adapter *na;
	struct netmap_generic_adapter *gna;
	int retval;
	u_int num_tx_desc, num_rx_desc;

	num_tx_desc = num_rx_desc = netmap_generic_ringsize; /* starting point */

	generic_find_num_desc(ifp, &num_tx_desc, &num_rx_desc);
	ND("Netmap ring size: TX = %d, RX = %d", num_tx_desc, num_rx_desc);

	gna = kmalloc(sizeof(*gna), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (gna == NULL) {
		D("no memory on attach, give up");
		return ENOMEM;
	}
	na = (struct netmap_adapter *)gna;
	na->ifp = ifp;
	na->num_tx_desc = num_tx_desc;
	na->num_rx_desc = num_rx_desc;
	na->nm_register = &generic_netmap_register;
	na->nm_txsync = &generic_netmap_txsync;
	na->nm_rxsync = &generic_netmap_rxsync;
	na->nm_dtor = &generic_netmap_dtor;
	/* when using generic, IFCAP_NETMAP is set so we force
	 * NAF_SKIP_INTR to use the regular interrupt handler
	 */
	na->na_flags = NAF_SKIP_INTR;

	ND("[GNA] num_tx_queues(%d), real_num_tx_queues(%d), len(%lu)",
			ifp->num_tx_queues, ifp->real_num_tx_queues,
			ifp->tx_queue_len);
	ND("[GNA] num_rx_queues(%d), real_num_rx_queues(%d)",
			ifp->num_rx_queues, ifp->real_num_rx_queues);

	generic_find_num_queues(ifp, &na->num_tx_rings, &na->num_rx_rings);

	retval = netmap_attach_common(na);
	if (retval) {
		kfree(gna, M_DEVBUF);
	}

	return retval;
}