/*
 * netsniff-ng - the packet sniffing beast
 * By Daniel Borkmann <daniel@netsniff-ng.org>
 * Copyright 2011 Daniel Borkmann.
 * Subject to the GPL, version 2.
 */
#define PAGE_SIZE        (getpagesize())
#define PAGE_MASK        (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)

#define ALLSIZ           (PAGE_SIZE * 3)
#define ALLSIZ_2K        (PAGE_SIZE * 3)    /* 12K max */
#define ALLSIZ_JUMBO     (PAGE_SIZE * 16)   /* 64K max */
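
/*
 * IOVSIZ (the number of iovec slots batched per readv()/writev() call) is
 * used throughout this file but its definition is not part of this excerpt;
 * the fallback value below is an assumption for illustration only. With a
 * 4 KiB page size, PAGE_ALIGN(100) rounds up to 4096, ALLSIZ_2K is 12 KiB
 * and ALLSIZ_JUMBO is 64 KiB, so one packet record always fits into a slot.
 */
#ifndef IOVSIZ
# define IOVSIZ          1000
#endif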
static struct iovec iov[IOVSIZ];
static unsigned long c = 0;
static struct spinlock lock;
static ssize_t iov_used;
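
/*
 * Shared buffering state: on the write path each iov[] slot holds exactly one
 * pcap record (header plus packet data) and c indexes the slot currently being
 * filled; on the read path the slots hold raw file contents and iov_used
 * counts the bytes already consumed from iov[c]. All of this is protected by
 * lock.
 */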
static int pcap_sg_pull_file_header(int fd, uint32_t *linktype)
{
        ssize_t ret;
        struct pcap_filehdr hdr;

        ret = read(fd, &hdr, sizeof(hdr));
        if (unlikely(ret != sizeof(hdr)))
                return -EIO;

        pcap_validate_header(&hdr);

        *linktype = hdr.linktype;

        return 0;
}
static int pcap_sg_push_file_header(int fd)
{
        ssize_t ret;
        struct pcap_filehdr hdr;

        fmemset(&hdr, 0, sizeof(hdr));
        pcap_prepare_header(&hdr, LINKTYPE_EN10MB, 0,
                            PCAP_DEFAULT_SNAPSHOT_LEN);

        ret = write_or_die(fd, &hdr, sizeof(hdr));
        if (unlikely(ret != sizeof(hdr))) {
                whine("Failed to write pkt file header!\n");
                return -EIO;
        }

        return 0;
}
static int pcap_sg_prepare_writing_pcap(int fd)
{
        return 0;
}
static ssize_t pcap_sg_write_pcap_pkt(int fd, struct pcap_pkthdr *hdr,
                                      uint8_t *packet, size_t len)
{
        ssize_t ret;

        spinlock_lock(&lock);

        /* All slots filled: flush the whole batch with one syscall */
        if (unlikely(c == IOVSIZ)) {
                ret = writev(fd, iov, IOVSIZ);
                if (unlikely(ret < 0))
                        panic("writev I/O error!\n");

                c = 0;
        }

        /* Copy pcap record header and packet data into the current slot */
        iov[c].iov_len = 0;
        fmemcpy(iov[c].iov_base, hdr, sizeof(*hdr));

        iov[c].iov_len += sizeof(*hdr);
        fmemcpy(iov[c].iov_base + iov[c].iov_len, packet, len);

        iov[c].iov_len += len;

        ret = iov[c].iov_len;
        c++;

        spinlock_unlock(&lock);
        return ret;
}
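
/*
 * Note that pcap_sg_write_pcap_pkt() only issues a writev() once all IOVSIZ
 * slots are filled; a partially filled batch stays in memory until
 * pcap_sg_fsync_pcap() below writes out the first c entries.
 */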
static int pcap_sg_prepare_reading_pcap(int fd)
{
        spinlock_lock(&lock);

        /* Prefill all iovec buffers with the first chunk of the file */
        if (readv(fd, iov, IOVSIZ) <= 0) {
                spinlock_unlock(&lock);
                return -EIO;
        }

        iov_used = 0;
        c = 0;

        spinlock_unlock(&lock);

        return 0;
}
static ssize_t pcap_sg_read_pcap_pkt(int fd, struct pcap_pkthdr *hdr,
                                     uint8_t *packet, size_t len)
{
        ssize_t ret = 0;
        size_t offset, remainder;

        /* In contrast to writing, reading gets really ugly ... */
        spinlock_lock(&lock);

        if (likely(iov[c].iov_len - iov_used >= sizeof(*hdr))) {
                fmemcpy(hdr, iov[c].iov_base + iov_used, sizeof(*hdr));
                iov_used += sizeof(*hdr);
        } else {
                /* The record header straddles two iovec buffers */
                offset = iov[c].iov_len - iov_used;
                remainder = sizeof(*hdr) - offset;

                bug_on(offset + remainder != sizeof(*hdr));

                fmemcpy(hdr, iov[c].iov_base + iov_used, offset);

                iov_used = 0;
                c++;
                if (c == IOVSIZ) {
                        /* We need to refetch! */
                        c = 0;
                        if (readv(fd, iov, IOVSIZ) <= 0) {
                                ret = -EIO;
                                goto out_err;
                        }
                }

                /* Now we copy the remainder and go on with business ... */
                fmemcpy((uint8_t *) hdr + offset,
                        iov[c].iov_base + iov_used, remainder);
                iov_used += remainder;
        }

        /* header read completed */

        if (unlikely(hdr->caplen == 0 || hdr->caplen > len)) {
                ret = -EINVAL; /* Bogus packet */
                goto out_err;
        }

        /* now we read data ... */

        if (likely(iov[c].iov_len - iov_used >= hdr->caplen)) {
                fmemcpy(packet, iov[c].iov_base + iov_used, hdr->caplen);
                iov_used += hdr->caplen;
        } else {
                /* The packet data straddles two iovec buffers */
                offset = iov[c].iov_len - iov_used;
                remainder = hdr->caplen - offset;

                bug_on(offset + remainder != hdr->caplen);

                fmemcpy(packet, iov[c].iov_base + iov_used, offset);

                iov_used = 0;
                c++;
                if (c == IOVSIZ) {
                        /* We need to refetch! */
                        c = 0;
                        if (readv(fd, iov, IOVSIZ) <= 0) {
                                ret = -EIO;
                                goto out_err;
                        }
                }

                /* Now we copy the remainder and go on with business ... */
                fmemcpy(packet + offset, iov[c].iov_base + iov_used, remainder);
                iov_used += remainder;
        }

        spinlock_unlock(&lock);

        return sizeof(*hdr) + hdr->caplen;

out_err:
        spinlock_unlock(&lock);
        return ret;
}
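
/*
 * Reading is uglier than writing because readv() fills the iovec buffers back
 * to back with raw file contents: a pcap record header or its payload may
 * start in one buffer and continue in the next one, or even in the next
 * readv() batch, hence the offset/remainder handling above.
 */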
static void pcap_sg_fsync_pcap(int fd)
{
        ssize_t ret;

        spinlock_lock(&lock);

        /* Write out whatever is still buffered and reset the batch */
        ret = writev(fd, iov, c);
        if (unlikely(ret < 0))
                panic("writev I/O error!\n");

        c = 0;

        fdatasync(fd);

        spinlock_unlock(&lock);
}
struct pcap_file_ops pcap_sg_ops __read_mostly = {
        .name = "scatter-gather",
        .pull_file_header = pcap_sg_pull_file_header,
        .push_file_header = pcap_sg_push_file_header,
        .write_pcap_pkt = pcap_sg_write_pcap_pkt,
        .prepare_reading_pcap = pcap_sg_prepare_reading_pcap,
        .prepare_writing_pcap = pcap_sg_prepare_writing_pcap,
        .read_pcap_pkt = pcap_sg_read_pcap_pkt,
        .fsync_pcap = pcap_sg_fsync_pcap,
};
int init_pcap_sg(int jumbo_support)
{
        unsigned long i;
        size_t allocsz;

        fmemset(iov, 0, sizeof(iov));

        /* Size each slot so that one packet record always fits into it */
        if (jumbo_support)
                allocsz = ALLSIZ_JUMBO;
        else
                allocsz = ALLSIZ_2K;

        for (i = 0; i < IOVSIZ; ++i) {
                iov[i].iov_base = xzmalloc_aligned(allocsz, 64);
                iov[i].iov_len = allocsz;
        }

        spinlock_init(&lock);

        return pcap_ops_group_register(&pcap_sg_ops, PCAP_OPS_SG);
}
void cleanup_pcap_sg(void)
{
        unsigned long i;

        spinlock_destroy(&lock);

        for (i = 0; i < IOVSIZ; ++i)
                xfree(iov[i].iov_base);

        pcap_ops_group_unregister(PCAP_OPS_SG);
}
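
#if 0
/*
 * Minimal usage sketch, not part of the original source: netsniff-ng normally
 * dispatches through the registered ops group, but writing a capture with
 * this backend boils down to the following sequence.
 */
static void pcap_sg_example(int fd, struct pcap_pkthdr *phdr,
                            uint8_t *pkt, size_t len)
{
        init_pcap_sg(0);                        /* 0: no jumbo-frame buffers */

        pcap_sg_ops.push_file_header(fd);
        pcap_sg_ops.prepare_writing_pcap(fd);
        pcap_sg_ops.write_pcap_pkt(fd, phdr, pkt, len);
        pcap_sg_ops.fsync_pcap(fd);             /* flush the partial batch */

        cleanup_pcap_sg();
}
#endif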