/*
 * netsniff-ng - the packet sniffing beast
 * Copyright 2011 - 2013 Daniel Borkmann.
 * Subject to the GPL, version 2.
 */
20 static struct iovec iov
[1024] __cacheline_aligned
;
21 static off_t iov_off_rd
= 0, iov_slot
= 0;
/*
 * pcap_sg_write(): buffer one pcap record (on-disk header + packet payload)
 * into the current scatter/gather slot; when every iov slot is in use, flush
 * the whole array to @fd with a single writev().
 *
 * NOTE(review): this chunk is a mangled paste — each logical statement is
 * split across physical lines, the leading numbers are leftover original
 * line numbers (not code), and several original lines are elided here
 * (opening brace, the error check guarding panic(), the slot reset/advance,
 * and the final return). Do not treat this fragment as complete.
 */
23 static ssize_t
pcap_sg_write(int fd
, pcap_pkthdr_t
*phdr
, enum pcap_type type
,
24 const uint8_t *packet
, size_t len
)
/* hdrsize: length of the on-disk pcap record header for this pcap variant. */
26 ssize_t ret
, hdrsize
= pcap_get_hdr_length(phdr
, type
);
/* All slots full -> flush the entire iov array to the file in one writev(). */
28 if (unlikely(iov_slot
== array_size(iov
))) {
29 ret
= writev(fd
, iov
, array_size(iov
));
/* presumably guarded by a (ret < 0) check on an elided line — TODO confirm */
31 panic("Writev I/O error: %s!\n", strerror(errno
));
/* Copy the raw pcap record header into the current slot's buffer... */
36 fmemcpy(iov
[iov_slot
].iov_base
, &phdr
->raw
, hdrsize
);
37 iov
[iov_slot
].iov_len
= hdrsize
;
/* ...then append the packet payload directly behind the header. */
39 fmemcpy(iov
[iov_slot
].iov_base
+ iov
[iov_slot
].iov_len
, packet
, len
);
/* ret = total bytes now buffered in this slot (header + payload). */
40 ret
= (iov
[iov_slot
].iov_len
+= len
);
/*
 * __pcap_sg_inter_iov_hdr_read(): slow path for reading a pcap record header
 * that straddles two iov slots — copy the tail of the current slot, refill
 * the whole array via readv() when wrapping off the last slot, then copy the
 * remaining header bytes from the next slot.
 *
 * NOTE(review): mangled paste with elided lines (declarations of ret/offset/
 * remainder, the slot advance and iov_off_rd reset, error-return bodies,
 * closing brace). The leading numbers are leftover line numbers, not code.
 */
46 static ssize_t
__pcap_sg_inter_iov_hdr_read(int fd
, pcap_pkthdr_t
*phdr
, enum pcap_type type
,
47 uint8_t *packet
, size_t len
, size_t hdrsize
)
/* offset: header bytes still available in the current slot. */
53 offset
= iov
[iov_slot
].iov_len
- iov_off_rd
;
/* remainder: header bytes that must come from the next slot. */
54 remainder
= hdrsize
- offset
;
/* Sanity: the two pieces must together form exactly one header. */
58 bug_on(offset
+ remainder
!= hdrsize
);
/* First piece: tail of the current slot into the raw header buffer. */
60 fmemcpy(&phdr
->raw
, iov
[iov_slot
].iov_base
+ iov_off_rd
, offset
);
/* Wrapped past the last slot -> refill the whole iov array from @fd. */
64 if (iov_slot
== array_size(iov
)) {
66 ret
= readv(fd
, iov
, array_size(iov
));
/* readv() failure/EOF; the handling branch is on an elided line. */
67 if (unlikely(ret
<= 0))
/* Second piece: remaining header bytes from the (new) current slot. */
71 fmemcpy(&phdr
->raw
+ offset
, iov
[iov_slot
].iov_base
+ iov_off_rd
, remainder
);
72 iov_off_rd
+= remainder
;
/*
 * __pcap_sg_inter_iov_data_read(): slow path for reading a packet payload of
 * @hdrlen bytes that straddles two iov slots — mirror image of the header
 * variant above, but copying into @packet instead of the pcap header.
 *
 * NOTE(review): mangled paste with elided lines (local declarations, slot
 * advance / iov_off_rd reset, error-return bodies, closing brace). The
 * leading numbers are leftover line numbers, not code.
 */
77 static ssize_t
__pcap_sg_inter_iov_data_read(int fd
, uint8_t *packet
, size_t len
, size_t hdrlen
)
/* offset: payload bytes still available in the current slot. */
83 offset
= iov
[iov_slot
].iov_len
- iov_off_rd
;
/* remainder: payload bytes that must come from the next slot. */
84 remainder
= hdrlen
- offset
;
/* Sanity: both pieces together must be exactly the payload length. */
88 bug_on(offset
+ remainder
!= hdrlen
);
/* First piece: tail of the current slot into the caller's buffer. */
90 fmemcpy(packet
, iov
[iov_slot
].iov_base
+ iov_off_rd
, offset
);
/* Wrapped past the last slot -> refill the whole iov array from @fd. */
94 if (iov_slot
== array_size(iov
)) {
96 ret
= readv(fd
, iov
, array_size(iov
));
/* readv() failure/EOF; the handling branch is on an elided line. */
97 if (unlikely(ret
<= 0))
/* Second piece: remaining payload bytes from the (new) current slot. */
101 fmemcpy(packet
+ offset
, iov
[iov_slot
].iov_base
+ iov_off_rd
, remainder
);
102 iov_off_rd
+= remainder
;
/*
 * pcap_sg_read(): read one pcap record from the buffered iov array — first
 * the record header (fast path when it lies wholly inside the current slot,
 * otherwise the inter-iov helper), then the payload the same way. Returns
 * header length plus payload length on success.
 *
 * NOTE(review): mangled paste — the leading numbers are leftover line
 * numbers, and several lines are elided (opening brace, `ret` declaration,
 * else/error branches, the trailing arguments of the helper call on the
 * original line 118, closing braces).
 */
107 static ssize_t
pcap_sg_read(int fd
, pcap_pkthdr_t
*phdr
, enum pcap_type type
,
108 uint8_t *packet
, size_t len
)
/* hdrsize: on-disk header length; hdrlen: payload length, parsed below. */
111 size_t hdrsize
= pcap_get_hdr_length(phdr
, type
), hdrlen
;
/* Fast path: the whole record header fits inside the current slot. */
113 if (likely(iov
[iov_slot
].iov_len
- iov_off_rd
>= hdrsize
)) {
114 fmemcpy(&phdr
->raw
, iov
[iov_slot
].iov_base
+ iov_off_rd
, hdrsize
);
115 iov_off_rd
+= hdrsize
;
/* Slow path: header straddles slots (remaining args on an elided line). */
117 ret
= __pcap_sg_inter_iov_hdr_read(fd
, phdr
, type
, packet
,
119 if (unlikely(ret
< 0))
/* Payload length as recorded in the header just read. */
123 hdrlen
= pcap_get_length(phdr
, type
);
/* Reject empty records and records larger than the caller's buffer. */
124 if (unlikely(hdrlen
== 0 || hdrlen
> len
))
/* Fast path: the whole payload fits inside the current slot. */
127 if (likely(iov
[iov_slot
].iov_len
- iov_off_rd
>= hdrlen
)) {
128 fmemcpy(packet
, iov
[iov_slot
].iov_base
+ iov_off_rd
, hdrlen
);
129 iov_off_rd
+= hdrlen
;
/* Slow path: payload straddles slots. */
131 ret
= __pcap_sg_inter_iov_data_read(fd
, packet
, len
, hdrlen
);
132 if (unlikely(ret
< 0))
/* Total bytes consumed for this record: header + payload. */
136 return hdrsize
+ hdrlen
;
/*
 * pcap_sg_fsync(): flush the partially filled iov array — only the first
 * iov_slot entries — to @fd with one writev().
 *
 * NOTE(review): mangled paste; the (ret < 0) guard before panic(), the slot
 * reset and any fsync/fdatasync call, and the braces are on elided lines.
 */
139 static void pcap_sg_fsync(int fd
)
141 ssize_t ret
= writev(fd
, iov
, iov_slot
);
/* presumably guarded by a (ret < 0) check on an elided line — TODO confirm */
143 panic("Writev I/O error: %s!\n", strerror(errno
));
/*
 * pcap_sg_init_once(): one-time initialization hook for the sg backend,
 * wired into pcap_sg_ops below. NOTE(review): only the signature survives
 * in this paste — the entire body is elided; cannot document what it does.
 */
149 static void pcap_sg_init_once(void)
/*
 * pcap_sg_prepare_access(): allocate one zeroed, 64-byte-aligned buffer of
 * @len bytes per iov slot (larger buffers in jumbo mode), and in read mode
 * prime the array with an initial readv() from @fd.
 *
 * NOTE(review): mangled paste — local declarations (ret, i, len), error
 * handling after readv(), the return statement, and closing braces are on
 * elided lines. The 64k/12k figures assume PAGE_SIZE == 4k — TODO confirm.
 */
154 static int pcap_sg_prepare_access(int fd
, enum pcap_mode mode
, bool jumbo
)
/* Per-slot buffer size: 16 pages for jumbo frames, 3 pages otherwise. */
160 len
= jumbo
? (PAGE_SIZE
* 16) /* 64k max */ :
161 (PAGE_SIZE
* 3) /* 12k max */;
/* Allocate a zeroed, cacheline-aligned buffer for every slot. */
163 for (i
= 0; i
< array_size(iov
); ++i
) {
164 iov
[i
].iov_base
= xzmalloc_aligned(len
, 64);
165 iov
[i
].iov_len
= len
;
/* Read mode: pre-fill the whole array from the file descriptor. */
168 if (mode
== PCAP_MODE_RD
) {
169 ret
= readv(fd
, iov
, array_size(iov
));
/*
 * pcap_sg_prepare_close(): release every per-slot buffer allocated by
 * pcap_sg_prepare_access(). @fd and @mode are unused in the visible lines.
 *
 * NOTE(review): mangled paste — local declaration of i, braces, and any
 * trailing statements are on elided lines.
 */
180 static void pcap_sg_prepare_close(int fd
, enum pcap_mode mode
)
184 for (i
= 0; i
< array_size(iov
); ++i
)
185 xfree(iov
[i
].iov_base
);
/*
 * pcap_sg_ops: file-operations vtable exporting the scatter/gather backend.
 * Pull/push of the pcap FILE header are delegated to the generic
 * implementations; everything per-record uses the sg routines above.
 * NOTE(review): the closing "};" lies beyond this paste's visible end.
 */
188 const struct pcap_file_ops pcap_sg_ops
= {
189 .init_once_pcap
= pcap_sg_init_once
,
190 .pull_fhdr_pcap
= pcap_generic_pull_fhdr
,
191 .push_fhdr_pcap
= pcap_generic_push_fhdr
,
192 .prepare_access_pcap
= pcap_sg_prepare_access
,
193 .prepare_close_pcap
= pcap_sg_prepare_close
,
194 .read_pcap
= pcap_sg_read
,
195 .write_pcap
= pcap_sg_write
,
196 .fsync_pcap
= pcap_sg_fsync
,