/*
 * Copyright (C) 2011-2014 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Functions and macros to manipulate netmap structures and packets
 * in userspace. See netmap(4) for more information.
 *
 * The address of the struct netmap_if, say nifp, is computed from the
 * value returned from ioctl(.., NIOCREGIF, ...) and the mmap region:
 *	ioctl(fd, NIOCREGIF, &req);
 *	mem = mmap(0, ... );
 *	nifp = NETMAP_IF(mem, req.nr_offset);
 *		(so simple, we could just do it manually)
 *
 * From there:
 *	struct netmap_ring *NETMAP_TXRING(nifp, index)
 *	struct netmap_ring *NETMAP_RXRING(nifp, index)
 *		we can access ring->cur, ring->head, ring->tail, etc.
 *
 *	ring->slot[i] gives us the i-th slot (we can access
 *		directly len, flags, buf_idx)
 *
 *	char *buf = NETMAP_BUF(ring, x) returns a pointer to
 *		the buffer numbered x
 *
 * All ring indexes (head, cur, tail) should always move forward.
 * To compute the next index in a circular ring you can use
 *	i = nm_ring_next(ring, i);
 *
 * To ease porting apps from pcap to netmap we supply a few functions
 * that can be called to open, close, read and write on netmap in a way
 * similar to libpcap. Note that the read/write functions depend on
 * an ioctl()/select()/poll() being issued to refill rings or push
 * packets out.
 *
 * In order to use these, include #define NETMAP_WITH_LIBS
 * in the source file that invokes these functions.
 */
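/*
 * Example (illustrative sketch only, not part of the API): the bare-metal
 * sequence described above, done by hand. The interface name "eth0" and
 * the error handling are placeholders.
 *
 *	struct nmreq req;
 *	int fd = open("/dev/netmap", O_RDWR);
 *
 *	memset(&req, 0, sizeof(req));
 *	req.nr_version = NETMAP_API;
 *	strncpy(req.nr_name, "eth0", sizeof(req.nr_name) - 1);
 *	req.nr_flags = NR_REG_ALL_NIC;
 *	if (ioctl(fd, NIOCREGIF, &req) < 0)
 *		return -1;	// placeholder error handling
 *	void *mem = mmap(0, req.nr_memsize, PROT_WRITE | PROT_READ,
 *		MAP_SHARED, fd, 0);
 *	struct netmap_if *nifp = NETMAP_IF(mem, req.nr_offset);
 *	struct netmap_ring *rxring = NETMAP_RXRING(nifp, 0);
 */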
#ifndef _NET_NETMAP_USER_H_
#define _NET_NETMAP_USER_H_

#include <stdint.h>		/* uint32_t, uint64_t */
#include <sys/socket.h>		/* apple needs sockaddr */
#include <net/if.h>		/* IFNAMSIZ */

#ifndef likely
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)
#endif /* likely and unlikely */

#include <net/netmap.h>
/* helper macro: compute an address at a given offset inside the mmapped region */
#define _NETMAP_OFFSET(type, ptr, offset) \
	((type)(void *)((char *)(ptr) + (offset)))

#define NETMAP_IF(_base, _ofs)	_NETMAP_OFFSET(struct netmap_if *, _base, _ofs)

#define NETMAP_TXRING(nifp, index) _NETMAP_OFFSET(struct netmap_ring *, \
	nifp, (nifp)->ring_ofs[index] )

#define NETMAP_RXRING(nifp, index) _NETMAP_OFFSET(struct netmap_ring *, \
	nifp, (nifp)->ring_ofs[index + (nifp)->ni_tx_rings + 1] )

#define NETMAP_BUF(ring, index)				\
	((char *)(ring) + (ring)->buf_ofs + ((index)*(ring)->nr_buf_size))

#define NETMAP_BUF_IDX(ring, buf)			\
	( ((char *)(buf) - ((char *)(ring) + (ring)->buf_ofs) ) / \
		(ring)->nr_buf_size )
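/*
 * Example (sketch): addressing slots and buffers through the macros above.
 * "nifp" is assumed to point to an already mapped struct netmap_if.
 *
 *	struct netmap_ring *rxring = NETMAP_RXRING(nifp, 0);
 *	uint32_t cur = rxring->cur;
 *	struct netmap_slot *slot = &rxring->slot[cur];
 *	char *payload = NETMAP_BUF(rxring, slot->buf_idx);
 *	uint16_t pktlen = slot->len;
 *
 *	// and back from a buffer pointer to its index:
 *	uint32_t idx = NETMAP_BUF_IDX(rxring, payload);
 */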
static inline uint32_t
nm_ring_next(struct netmap_ring *r, uint32_t i)
{
	return ( unlikely(i + 1 == r->num_slots) ? 0 : i + 1);
}
/*
 * Return 1 if we have pending transmissions in the tx ring.
 * When everything is complete ring->head = ring->tail + 1 (modulo ring size)
 */
static inline int
nm_tx_pending(struct netmap_ring *r)
{
	return nm_ring_next(r, r->tail) != r->head;
}
static inline uint32_t
nm_ring_space(struct netmap_ring *ring)
{
	int ret = ring->tail - ring->cur;
	if (ret < 0)
		ret += ring->num_slots;
	return ret;
}

/*
 * True when there are no slots available to userspace (cur has caught up
 * with tail). Needed by nm_inject()/nm_dispatch()/nm_nextpkt() below.
 */
#define nm_ring_empty(r)	((r)->cur == (r)->tail)
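/*
 * Example (sketch): filling a tx ring with the helpers above. The frame
 * source (src, len) and the "more frames" test are placeholders.
 *
 *	struct netmap_ring *txring = NETMAP_TXRING(nifp, 0);
 *	uint32_t cur = txring->cur;
 *	uint32_t avail = nm_ring_space(txring);
 *
 *	while (avail-- > 0 && have_more_frames()) {
 *		struct netmap_slot *slot = &txring->slot[cur];
 *		memcpy(NETMAP_BUF(txring, slot->buf_idx), src, len);
 *		slot->len = len;
 *		cur = nm_ring_next(txring, cur);
 *	}
 *	txring->head = txring->cur = cur;
 *	// a subsequent ioctl(fd, NIOCTXSYNC, NULL) or poll() pushes them out
 */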
#ifdef NETMAP_WITH_LIBS
/*
 * Support for simple I/O libraries.
 * Include other system headers required for compiling this.
 */

#ifndef HAVE_NETMAP_WITH_LIBS
#define HAVE_NETMAP_WITH_LIBS

#include <stdio.h>	/* fprintf, snprintf */
#include <stdlib.h>	/* calloc, free, strtol */
#include <strings.h>	/* index, bzero */
#include <sys/time.h>
#include <sys/mman.h>
#include <string.h>	/* memset */
#include <sys/ioctl.h>
#include <sys/errno.h>	/* EINVAL */
#include <fcntl.h>	/* O_RDWR */
#include <unistd.h>	/* close() */
#ifndef ND /* debug macros */
#define ND(_fmt, ...) do {} while(0)
#define D(_fmt, ...)						\
	do {							\
		struct timeval _t0;				\
		gettimeofday(&_t0, NULL);			\
		fprintf(stderr, "%03d.%06d %s [%d] " _fmt "\n",	\
		    (int)(_t0.tv_sec % 1000), (int)_t0.tv_usec,	\
		    __FUNCTION__, __LINE__, ##__VA_ARGS__);	\
	} while (0)

/* Rate limited version of "D", lps indicates how many per second */
#define RD(lps, format, ...)					\
	do {							\
		static int __t0, __cnt;				\
		struct timeval __xxts;				\
		gettimeofday(&__xxts, NULL);			\
		if (__t0 != __xxts.tv_sec) {			\
			__t0 = __xxts.tv_sec;			\
			__cnt = 0;				\
		}						\
		if (__cnt++ < lps) {				\
			D(format, ##__VA_ARGS__);		\
		}						\
	} while (0)
#endif
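/*
 * Example (sketch): D() logs unconditionally, RD() rate-limits to "lps"
 * messages per second, e.g. inside a busy receive loop ("ndrop" is a
 * placeholder counter):
 *
 *	RD(5, "dropped %d packets so far", ndrop);	// at most 5 lines/s
 */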
struct nm_pkthdr { /* same as pcap_pkthdr */
	struct timeval	ts;
	uint32_t	caplen;	/* length of the portion present */
	uint32_t	len;	/* length of this packet (off wire) */
};

struct nm_stat { /* same as pcap_stat */
	u_int	ps_recv;
	u_int	ps_drop;
	u_int	ps_ifdrop;
};

#define NM_ERRBUF_SIZE	512

struct nm_desc {
	struct nm_desc *self; /* point to self if netmap. */
	int fd;
	void *mem;
	uint32_t memsize;
	int done_mmap;	/* set if mem is the result of mmap */
	struct netmap_if * const nifp;
	uint16_t first_tx_ring, last_tx_ring, cur_tx_ring;
	uint16_t first_rx_ring, last_rx_ring, cur_rx_ring;
	struct nmreq req;	/* also contains the nr_name = ifname */
	struct nm_pkthdr hdr;

	/*
	 * The memory contains netmap_if, rings and then buffers.
	 * Given a pointer (e.g. to nm_inject) we can compare with
	 * mem/buf_start/buf_end to tell if it is a buffer or
	 * some other descriptor in our region.
	 * We also store a pointer to some ring as it helps in the
	 * translation from buffer indexes to addresses.
	 */
	struct netmap_ring * const some_ring;
	void * const buf_start;
	void * const buf_end;

	/* parameters from pcap_open_live */
	int snaplen;
	int promisc;
	int to_ms;
	char *errbuf;

	/* save flags so we can restore them on close */
	uint32_t if_flags;
	uint32_t if_reqcap;
	uint32_t if_curcap;

	struct nm_stat st;
	char msg[NM_ERRBUF_SIZE];
};
/*
 * when the descriptor is open correctly, d->self == d
 * Eventually we should also use some magic number.
 */
#define P2NMD(p)		((struct nm_desc *)(p))
#define IS_NETMAP_DESC(d)	((d) && P2NMD(d)->self == P2NMD(d))
#define NETMAP_FD(d)		(P2NMD(d)->fd)
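/*
 * Example (sketch): the file descriptor of an open descriptor can be used
 * directly with poll()/select(); "d" is assumed to come from nm_open()
 * and <poll.h> to be included by the application.
 *
 *	struct pollfd pfd = { .fd = NETMAP_FD(d), .events = POLLIN };
 *	if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLIN))
 *		;	// rx rings refilled, see nm_dispatch()/nm_nextpkt()
 */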
/*
 * this is a slightly optimized copy routine which rounds
 * to multiple of 64 bytes and is often faster than dealing
 * with other odd sizes. We assume there is enough room
 * in the source and destination buffers.
 *
 * XXX only for multiples of 64 bytes, non overlapped.
 */
static inline void
nm_pkt_copy(const void *_src, void *_dst, int l)
{
	const uint64_t *src = (const uint64_t *)_src;
	uint64_t *dst = (uint64_t *)_dst;

	if (unlikely(l >= 1024)) {
		memcpy(dst, src, l);
		return;
	}
	/* copy 64 bytes (8 x 64-bit words) per iteration */
	for (; likely(l > 0); l -= 64) {
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
		*dst++ = *src++;
	}
}
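/*
 * Example (sketch): copying a frame into a netmap buffer; "ring", "frame"
 * and "frame_len" are placeholders. Both source and destination must have
 * room up to the next 64-byte multiple, as the comment above requires.
 *
 *	struct netmap_slot *slot = &ring->slot[ring->cur];
 *	nm_pkt_copy(frame, NETMAP_BUF(ring, slot->buf_idx), frame_len);
 *	slot->len = frame_len;
 */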
/*
 * The callback, invoked on each received packet. Same as libpcap
 */
typedef void (*nm_cb_t)(u_char *, const struct nm_pkthdr *, const u_char *d);
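/*
 * Example (sketch): a callback suitable for nm_dispatch(). The counter
 * passed through the opaque argument is just for illustration.
 *
 *	static void
 *	rx_handler(u_char *arg, const struct nm_pkthdr *h, const u_char *data)
 *	{
 *		(*(unsigned long *)arg)++;	// count packets
 *		D("got %u bytes at %p", h->len, data);
 *	}
 */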
/*
 *--- the pcap-like API ---
 *
 * nm_open() opens a file descriptor, binds to a port and maps memory.
 *
 * ifname	(netmap:foo or vale:foo) is the port name
 *		a suffix can indicate the following:
 *		^		bind the host (sw) ring pair
 *		*		bind host and NIC ring pairs (transparent)
 *		-NN		bind individual NIC ring pair
 *		{NN		bind master side of pipe NN
 *		}NN		bind slave side of pipe NN
 *		a suffix starting with / and the following flags,
 *		in any order:
 *		x		exclusive access
 *		z		zero copy monitor
 *		t		monitor tx side
 *		r		monitor rx side
 *
 * req		provides the initial values of nmreq before parsing ifname.
 *		Remember that the ifname parsing will override the ring
 *		number in nr_ringid, and part of nr_flags;
 * flags	special functions, normally 0
 *		indicates which fields of *arg are significant
 * arg		special functions, normally NULL
 *		if passed a netmap_desc with mem != NULL,
 *		use that memory instead of mmap.
 */
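/*
 * Examples of port names accepted by nm_open() (sketch, interface and
 * switch names are placeholders):
 *	"netmap:eth0"		all hw ring pairs of eth0
 *	"netmap:eth0^"		the host (sw) ring pair only
 *	"netmap:eth0-2"		only hw ring pair 2
 *	"netmap:eth0/r"		request rx monitor mode on eth0
 *	"vale0:1"		port 1 of the VALE switch vale0
 *
 *	struct nm_desc *d = nm_open("netmap:eth0", NULL, 0, NULL);
 */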
static struct nm_desc *nm_open(const char *ifname, const struct nmreq *req,
	uint64_t flags, const struct nm_desc *arg);
/*
 * nm_open can import some fields from the parent descriptor.
 * These flags control which ones.
 * Also in flags you can specify NETMAP_NO_TX_POLL and NETMAP_DO_RX_POLL,
 * which set the initial value for these flags.
 * Note that the 16 low bits of the flags are reserved for data
 * that may go into the nmreq.
 */
enum {
	NM_OPEN_NO_MMAP =	0x040000, /* reuse mmap from parent */
	NM_OPEN_IFNAME =	0x080000, /* nr_name, nr_ringid, nr_flags */
	NM_OPEN_ARG1 =		0x100000,
	NM_OPEN_ARG2 =		0x200000,
	NM_OPEN_ARG3 =		0x400000,
	NM_OPEN_RING_CFG =	0x800000, /* tx|rx rings|slots */
};
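/*
 * Example (sketch): opening a second port that shares the memory already
 * mapped by a parent descriptor (names are placeholders):
 *
 *	struct nm_desc *nic  = nm_open("netmap:eth0", NULL, 0, NULL);
 *	struct nm_desc *host = nm_open("netmap:eth0^", NULL,
 *		NM_OPEN_NO_MMAP, nic);
 */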
/*
 * nm_close() closes and restores the port to its previous state
 */
static int nm_close(struct nm_desc *);

/*
 * nm_inject() is the same as pcap_inject()
 * nm_dispatch() is the same as pcap_dispatch()
 * nm_nextpkt() is the same as pcap_next()
 */
static int nm_inject(struct nm_desc *, const void *, size_t);
static int nm_dispatch(struct nm_desc *, int, nm_cb_t, u_char *);
static u_char *nm_nextpkt(struct nm_desc *, struct nm_pkthdr *);
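/*
 * Example (sketch): a minimal pcap-style receive loop built on the
 * functions declared above. rx_handler is the callback sketched earlier,
 * "eth0" and the error handling are placeholders, and <poll.h> is assumed
 * to be included by the application.
 *
 *	unsigned long count = 0;
 *	struct nm_desc *d = nm_open("netmap:eth0", NULL, 0, NULL);
 *	struct pollfd pfd = { .fd = NETMAP_FD(d), .events = POLLIN };
 *
 *	for (;;) {
 *		if (poll(&pfd, 1, 1000) <= 0)
 *			continue;
 *		nm_dispatch(d, -1, rx_handler, (u_char *)&count);
 *	}
 *	nm_close(d);
 */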
/*
 * Try to open, return descriptor if successful, NULL otherwise.
 * An invalid netmap name will return errno = 0;
 * You can pass a pointer to a pre-filled nm_desc to add special
 * parameters. Flags is used as follows
 * NM_OPEN_NO_MMAP	use the memory from arg, only
 *			if the nr_arg2 (memory block) matches.
 * NM_OPEN_ARG1		use req.nr_arg1 from arg
 * NM_OPEN_ARG2		use req.nr_arg2 from arg
 * NM_OPEN_RING_CFG	use ring config from arg
 */
static struct nm_desc *
nm_open(const char *ifname, const struct nmreq *req,
	uint64_t new_flags, const struct nm_desc *arg)
{
	struct nm_desc *d = NULL;
	const struct nm_desc *parent = arg;
	u_int namelen;
	uint32_t nr_ringid = 0, nr_flags, nr_reg;
	const char *port = NULL;
#define MAXERRMSG 80
	char errmsg[MAXERRMSG] = "";
	enum { P_START, P_RNGSFXOK, P_GETNUM, P_FLAGS, P_FLAGSOK } p_state;
	long num;
	if (strncmp(ifname, "netmap:", 7) && strncmp(ifname, "vale", 4)) {
		errno = 0; /* name not recognised, not an error */
		return NULL;
	}
	if (ifname[0] == 'n')
		ifname += 7;	/* skip the "netmap:" prefix */
	/* scan for a separator */
	for (port = ifname; *port && !index("-*^{}/", *port); port++)
		;
	namelen = port - ifname;
	if (namelen >= sizeof(d->req.nr_name)) {
		snprintf(errmsg, MAXERRMSG, "name too long");
		goto fail;
	}
	p_state = P_START;
	nr_flags = NR_REG_ALL_NIC; /* default for no suffix */
	while (*port) {
		switch (p_state) {
		case P_START:
			switch (*port) {
			case '^': /* only SW ring */
				nr_flags = NR_REG_SW;
				p_state = P_RNGSFXOK;
				break;
			case '*': /* NIC and SW */
				nr_flags = NR_REG_NIC_SW;
				p_state = P_RNGSFXOK;
				break;
			case '-': /* one NIC ring pair */
				nr_flags = NR_REG_ONE_NIC;
				p_state = P_GETNUM;
				break;
			case '{': /* pipe (master endpoint) */
				nr_flags = NR_REG_PIPE_MASTER;
				p_state = P_GETNUM;
				break;
			case '}': /* pipe (slave endpoint) */
				nr_flags = NR_REG_PIPE_SLAVE;
				p_state = P_GETNUM;
				break;
			case '/': /* start of flags */
				p_state = P_FLAGS;
				break;
			default:
				snprintf(errmsg, MAXERRMSG, "unknown modifier: '%c'", *port);
				goto fail;
			}
			port++;
			break;
		case P_RNGSFXOK:
			switch (*port) {
			case '/':
				p_state = P_FLAGS;
				break;
			default:
				snprintf(errmsg, MAXERRMSG, "unexpected character: '%c'", *port);
				goto fail;
			}
			port++;
			break;
		case P_GETNUM:
			num = strtol(port, (char **)&port, 10);
			if (num < 0 || num >= NETMAP_RING_MASK) {
				snprintf(errmsg, MAXERRMSG, "'%ld' out of range [0, %d)",
					num, NETMAP_RING_MASK);
				goto fail;
			}
			nr_ringid = num & NETMAP_RING_MASK;
			p_state = P_RNGSFXOK;
			break;
		case P_FLAGS:
		case P_FLAGSOK:
			switch (*port) {
			case 'x': /* exclusive access */
				nr_flags |= NR_EXCLUSIVE;
				break;
			case 'z': /* zero copy monitor */
				nr_flags |= NR_ZCOPY_MON;
				break;
			case 't': /* monitor tx side */
				nr_flags |= NR_MONITOR_TX;
				break;
			case 'r': /* monitor rx side */
				nr_flags |= NR_MONITOR_RX;
				break;
			default:
				snprintf(errmsg, MAXERRMSG, "unrecognized flag: '%c'", *port);
				goto fail;
			}
			port++;
			p_state = P_FLAGSOK;
			break;
		}
	}
	if (p_state != P_START && p_state != P_RNGSFXOK && p_state != P_FLAGSOK) {
		snprintf(errmsg, MAXERRMSG, "unexpected end of port name");
		goto fail;
	}
	ND("flags: %s %s %s %s",
		(nr_flags & NR_EXCLUSIVE) ? "EXCLUSIVE" : "",
		(nr_flags & NR_ZCOPY_MON) ? "ZCOPY_MON" : "",
		(nr_flags & NR_MONITOR_TX) ? "MONITOR_TX" : "",
		(nr_flags & NR_MONITOR_RX) ? "MONITOR_RX" : "");
	d = (struct nm_desc *)calloc(1, sizeof(*d));
	if (d == NULL) {
		snprintf(errmsg, MAXERRMSG, "nm_desc alloc failure");
		errno = ENOMEM;
		return NULL;
	}
	d->self = d;	/* set this early so nm_close() works */
	d->fd = open("/dev/netmap", O_RDWR);
	if (d->fd < 0) {
		snprintf(errmsg, MAXERRMSG, "cannot open /dev/netmap: %s", strerror(errno));
		goto fail;
	}

	if (req)
		d->req = *req;	/* start from the caller-provided defaults */
	d->req.nr_version = NETMAP_API;
	d->req.nr_ringid &= ~NETMAP_RING_MASK;

	/* these fields are overridden by ifname and flags processing */
	d->req.nr_ringid |= nr_ringid;
	d->req.nr_flags = nr_flags;
	memcpy(d->req.nr_name, ifname, namelen);
	d->req.nr_name[namelen] = '\0';
	/* optionally import info from parent */
	if (IS_NETMAP_DESC(parent) && new_flags) {
		if (new_flags & NM_OPEN_ARG1)
			D("overriding ARG1 %d", parent->req.nr_arg1);
		d->req.nr_arg1 = new_flags & NM_OPEN_ARG1 ?
			parent->req.nr_arg1 : 4;
		if (new_flags & NM_OPEN_ARG2)
			D("overriding ARG2 %d", parent->req.nr_arg2);
		d->req.nr_arg2 = new_flags & NM_OPEN_ARG2 ?
			parent->req.nr_arg2 : 0;
		if (new_flags & NM_OPEN_ARG3)
			D("overriding ARG3 %d", parent->req.nr_arg3);
		d->req.nr_arg3 = new_flags & NM_OPEN_ARG3 ?
			parent->req.nr_arg3 : 0;
		if (new_flags & NM_OPEN_RING_CFG) {
			D("overriding RING_CFG");
			d->req.nr_tx_slots = parent->req.nr_tx_slots;
			d->req.nr_rx_slots = parent->req.nr_rx_slots;
			d->req.nr_tx_rings = parent->req.nr_tx_rings;
			d->req.nr_rx_rings = parent->req.nr_rx_rings;
		}
		if (new_flags & NM_OPEN_IFNAME) {
			D("overriding ifname %s ringid 0x%x flags 0x%x",
				parent->req.nr_name, parent->req.nr_ringid,
				parent->req.nr_flags);
			memcpy(d->req.nr_name, parent->req.nr_name,
				sizeof(d->req.nr_name));
			d->req.nr_ringid = parent->req.nr_ringid;
			d->req.nr_flags = parent->req.nr_flags;
		}
	}
	/* add the *XPOLL flags */
	d->req.nr_ringid |= new_flags & (NETMAP_NO_TX_POLL | NETMAP_DO_RX_POLL);

	if (ioctl(d->fd, NIOCREGIF, &d->req)) {
		snprintf(errmsg, MAXERRMSG, "NIOCREGIF failed: %s", strerror(errno));
		goto fail;
	}

	if (IS_NETMAP_DESC(parent) && parent->mem &&
	    parent->req.nr_arg2 == d->req.nr_arg2) {
		/* do not mmap, inherit from parent */
		d->memsize = parent->memsize;
		d->mem = parent->mem;
	} else {
		/* XXX TODO: check if memsize is too large (or there is overflow) */
		d->memsize = d->req.nr_memsize;
		d->mem = mmap(0, d->memsize, PROT_WRITE | PROT_READ, MAP_SHARED,
				d->fd, 0);
		if (d->mem == MAP_FAILED) {
			snprintf(errmsg, MAXERRMSG, "mmap failed: %s", strerror(errno));
			goto fail;
		}
		d->done_mmap = 1;
	}
	{
		struct netmap_if *nifp = NETMAP_IF(d->mem, d->req.nr_offset);
		struct netmap_ring *r = NETMAP_RXRING(nifp, 0); /* any ring will do */

		/* these fields are const in the descriptor, write through casts */
		*(struct netmap_if **)(uintptr_t)&(d->nifp) = nifp;
		*(struct netmap_ring **)(uintptr_t)&d->some_ring = r;
		*(void **)(uintptr_t)&d->buf_start = NETMAP_BUF(r, 0);
		*(void **)(uintptr_t)&d->buf_end =
			(char *)d->mem + d->memsize;
	}
	nr_reg = d->req.nr_flags & NR_REG_MASK;

	if (nr_reg == NR_REG_SW) { /* host stack */
		d->first_tx_ring = d->last_tx_ring = d->req.nr_tx_rings;
		d->first_rx_ring = d->last_rx_ring = d->req.nr_rx_rings;
	} else if (nr_reg == NR_REG_ALL_NIC) { /* only nic */
		d->first_tx_ring = 0;
		d->first_rx_ring = 0;
		d->last_tx_ring = d->req.nr_tx_rings - 1;
		d->last_rx_ring = d->req.nr_rx_rings - 1;
	} else if (nr_reg == NR_REG_NIC_SW) {
		d->first_tx_ring = 0;
		d->first_rx_ring = 0;
		d->last_tx_ring = d->req.nr_tx_rings;
		d->last_rx_ring = d->req.nr_rx_rings;
	} else if (nr_reg == NR_REG_ONE_NIC) {
		/* XXX check validity */
		d->first_tx_ring = d->last_tx_ring =
		d->first_rx_ring = d->last_rx_ring = d->req.nr_ringid & NETMAP_RING_MASK;
	} else { /* pipes */
		d->first_tx_ring = d->last_tx_ring = 0;
		d->first_rx_ring = d->last_rx_ring = 0;
	}
#ifdef DEBUG_NETMAP_USER
    { /* debugging code */
	int i;

	D("%s tx %d .. %d %d rx %d .. %d %d", ifname,
		d->first_tx_ring, d->last_tx_ring, d->req.nr_tx_rings,
		d->first_rx_ring, d->last_rx_ring, d->req.nr_rx_rings);
	for (i = 0; i <= d->req.nr_tx_rings; i++) {
		struct netmap_ring *r = NETMAP_TXRING(d->nifp, i);
		D("TX%d %p h %d c %d t %d", i, r, r->head, r->cur, r->tail);
	}
	for (i = 0; i <= d->req.nr_rx_rings; i++) {
		struct netmap_ring *r = NETMAP_RXRING(d->nifp, i);
		D("RX%d %p h %d c %d t %d", i, r, r->head, r->cur, r->tail);
	}
    }
#endif /* debugging */

	d->cur_tx_ring = d->first_tx_ring;
	d->cur_rx_ring = d->first_rx_ring;
	return d;

fail:
	nm_close(d);
	D("%s %s", errmsg, ifname);
	if (errno == 0)
		errno = EINVAL;
	return NULL;
}
static int
nm_close(struct nm_desc *d)
{
	/*
	 * ugly trick to avoid unused warnings
	 */
	static void *__xxzt[] __attribute__ ((unused))  =
		{ (void *)nm_open, (void *)nm_inject,
		  (void *)nm_dispatch, (void *)nm_nextpkt } ;

	if (d == NULL || d->self != d)
		return EINVAL;
	if (d->done_mmap && d->mem)
		munmap(d->mem, d->memsize);
	if (d->fd != -1)
		close(d->fd);
	bzero(d, sizeof(*d));
	free(d);
	return 0;
}
/*
 * Same prototype as pcap_inject(), only need to cast.
 */
static int
nm_inject(struct nm_desc *d, const void *buf, size_t size)
{
	u_int c, n = d->last_tx_ring - d->first_tx_ring + 1;

	for (c = 0; c < n; c++) {
		/* compute current ring to use */
		struct netmap_ring *ring;
		uint32_t i, idx;
		uint32_t ri = d->cur_tx_ring + c;

		if (ri > d->last_tx_ring)
			ri = d->first_tx_ring;
		ring = NETMAP_TXRING(d->nifp, ri);
		if (nm_ring_empty(ring)) {
			continue;	/* no room here, try the next ring */
		}
		i = ring->cur;
		idx = ring->slot[i].buf_idx;
		ring->slot[i].len = size;
		nm_pkt_copy(buf, NETMAP_BUF(ring, idx), size);
		d->cur_tx_ring = ri;
		ring->head = ring->cur = nm_ring_next(ring, i);
		return size;
	}
	return 0; /* fail */
}
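/*
 * Example (sketch): transmitting a single frame; "pkt"/"pktlen" are
 * placeholders. A poll() or NIOCTXSYNC is still needed to flush the ring.
 *
 *	if (nm_inject(d, pkt, pktlen) == 0)
 *		RD(1, "tx rings full, frame dropped");
 *	ioctl(NETMAP_FD(d), NIOCTXSYNC, NULL);
 */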
/*
 * Same prototype as pcap_dispatch(), only need to cast.
 */
static int
nm_dispatch(struct nm_desc *d, int cnt, nm_cb_t cb, u_char *arg)
{
	int n = d->last_rx_ring - d->first_rx_ring + 1;
	int c, got = 0, ri = d->cur_rx_ring;

	/* cnt == -1 means infinite, but rings have a finite amount
	 * of buffers and the int is large enough that we never wrap,
	 * so we can omit checking for -1
	 */
	for (c = 0; c < n && cnt != got; c++) {
		/* compute current ring to use */
		struct netmap_ring *ring;

		ri = d->cur_rx_ring + c;
		if (ri > d->last_rx_ring)
			ri = d->first_rx_ring;
		ring = NETMAP_RXRING(d->nifp, ri);
		for ( ; !nm_ring_empty(ring) && cnt != got; got++) {
			u_int i = ring->cur;
			u_int idx = ring->slot[i].buf_idx;
			u_char *buf = (u_char *)NETMAP_BUF(ring, idx);

			// __builtin_prefetch(buf);
			d->hdr.len = d->hdr.caplen = ring->slot[i].len;
			d->hdr.ts = ring->ts;
			cb(arg, &d->hdr, buf);
			ring->head = ring->cur = nm_ring_next(ring, i);
		}
	}
	d->cur_rx_ring = ri;
	return got;
}
static u_char *
nm_nextpkt(struct nm_desc *d, struct nm_pkthdr *hdr)
{
	int ri = d->cur_rx_ring;

	do {
		/* compute current ring to use */
		struct netmap_ring *ring = NETMAP_RXRING(d->nifp, ri);
		if (!nm_ring_empty(ring)) {
			u_int i = ring->cur;
			u_int idx = ring->slot[i].buf_idx;
			u_char *buf = (u_char *)NETMAP_BUF(ring, idx);

			// __builtin_prefetch(buf);
			hdr->ts = ring->ts;
			hdr->len = hdr->caplen = ring->slot[i].len;
			ring->cur = nm_ring_next(ring, i);
			/* we could postpone advancing head if we want
			 * to hold the buffer. This can be supported in
			 * the future.
			 */
			ring->head = ring->cur;
			d->cur_rx_ring = ri;
			return buf;
		}
		ri++;
		if (ri > d->last_rx_ring)
			ri = d->first_rx_ring;
	} while (ri != d->cur_rx_ring);

	return NULL; /* nothing found */
}
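/*
 * Example (sketch): pull-style receive with nm_nextpkt(); "d" comes from
 * nm_open() and process() is a hypothetical consumer.
 *
 *	struct nm_pkthdr h;
 *	u_char *data;
 *
 *	while ((data = nm_nextpkt(d, &h)) != NULL)
 *		process(data, h.len);
 */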
#endif /* !HAVE_NETMAP_WITH_LIBS */

#endif /* NETMAP_WITH_LIBS */

#endif /* _NET_NETMAP_USER_H_ */