/*
 * The USB Monitor, inspired by Dave Harding's USBMon.
 *
 * This is a binary format reader.
 *
 * Copyright (C) 2006 Paolo Abeni (paolo.abeni@email.it)
 * Copyright (C) 2006,2007 Pete Zaitcev (zaitcev@redhat.com)
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/cdev.h>
#include <linux/usb.h>
#include <linux/poll.h>
#include <linux/compat.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
/*
 * Defined by USB 2.0 clause 9.3, table 9.2.
 */
#define SETUP_LEN  8
#define MON_IOC_MAGIC 0x92

#define MON_IOCQ_URB_LEN _IO(MON_IOC_MAGIC, 1)
/* #2 used to be MON_IOCX_URB, removed before it got into Linus tree */
#define MON_IOCG_STATS _IOR(MON_IOC_MAGIC, 3, struct mon_bin_stats)
#define MON_IOCT_RING_SIZE _IO(MON_IOC_MAGIC, 4)
#define MON_IOCQ_RING_SIZE _IO(MON_IOC_MAGIC, 5)
#define MON_IOCX_GET _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get)
#define MON_IOCX_MFETCH _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch)
#define MON_IOCH_MFLUSH _IO(MON_IOC_MAGIC, 8)
/* #9 was MON_IOCT_SETAPI */
#define MON_IOCX_GETX _IOW(MON_IOC_MAGIC, 10, struct mon_bin_get)

#ifdef CONFIG_COMPAT
#define MON_IOCX_GET32 _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get32)
#define MON_IOCX_MFETCH32 _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch32)
#define MON_IOCX_GETX32 _IOW(MON_IOC_MAGIC, 10, struct mon_bin_get32)
#endif
/*
 * Some architectures have enormous basic pages (16KB for ia64, 64KB for ppc).
 * But it's all right. Just use a simple way to make sure the chunk is never
 * smaller than a page.
 *
 * N.B. An application does not know our chunk size.
 *
 * Woops, get_zeroed_page() returns a single page. I guess we're stuck with
 * page-sized chunks for the time being.
 */
#define CHUNK_SIZE   PAGE_SIZE
#define CHUNK_ALIGN(x)   (((x)+CHUNK_SIZE-1) & ~(CHUNK_SIZE-1))
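/*
 * For example, with 4 KB pages CHUNK_ALIGN(5000) rounds up to 8192,
 * i.e. two chunks.
 */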
/*
 * The magic limit was calculated so that it allows the monitoring
 * application to pick data once in two ticks. This way, another application,
 * which presumably drives the bus, gets to hog CPU, yet we collect our data.
 * If HZ is 100, a 480 mbit/s bus drives 614 KB every jiffy. USB has an
 * enormous overhead built into the bus protocol, so we need about 1000 KB.
 *
 * This is still too much for most cases, where we just snoop a few
 * descriptor fetches for enumeration. So, the default is a "reasonable"
 * amount for systems with HZ=250 and incomplete bus saturation.
 *
 * XXX What about multi-megabyte URBs which take minutes to transfer?
 */
#define BUFF_MAX  CHUNK_ALIGN(1200*1024)
#define BUFF_DFL  CHUNK_ALIGN(300*1024)
#define BUFF_MIN  CHUNK_ALIGN(8*1024)
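/*
 * Back-of-the-envelope check of the default: a saturated 480 mbit/s bus
 * moves roughly 60 MB/s, or about 240 KB per tick at HZ=250, so the 300 KB
 * default gives a reader a bit over one tick to drain the ring even in the
 * worst case.
 */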
/*
 * The per-event API header (2 per URB).
 *
 * This structure is seen in userland as defined by the documentation.
 */
struct mon_bin_hdr {
        u64 id;                  /* URB ID - from submission to callback */
        unsigned char type;      /* Same as in text API; extensible. */
        unsigned char xfer_type; /* ISO, Intr, Control, Bulk */
        unsigned char epnum;     /* Endpoint number and transfer direction */
        unsigned char devnum;    /* Device address */
        unsigned short busnum;   /* Bus number */
        char flag_setup;
        char flag_data;
        s64 ts_sec;              /* gettimeofday */
        s32 ts_usec;             /* gettimeofday */
        int status;
        unsigned int len_urb;    /* Length of data (submitted or actual) */
        unsigned int len_cap;    /* Delivered length */
        union {
                unsigned char setup[SETUP_LEN]; /* Only for Control S-type */
                struct iso_rec {
                        int error_count;
                        int numdesc;
                } iso;
        } s;
        int interval;
        int start_frame;
        unsigned int xfer_flags;
        unsigned int ndesc;      /* Actual number of ISO descriptors */
};
/*
 * ISO vector, packed into the head of data stream.
 * This has to take 16 bytes to make sure that the end of buffer
 * wrap is not happening in the middle of a descriptor.
 */
struct mon_bin_isodesc {
        int          iso_status;
        unsigned int iso_off;
        unsigned int iso_len;
        u32 _pad;
};
/* per file statistic */
struct mon_bin_stats {
        u32 queued;
        u32 dropped;
};

struct mon_bin_get {
        struct mon_bin_hdr __user *hdr; /* Can be 48 bytes or 64. */
        void __user *data;
        size_t alloc;           /* Length of data (can be zero) */
};

struct mon_bin_mfetch {
        u32 __user *offvec;     /* Vector of events fetched */
        u32 nfetch;             /* Number of events to fetch (out: fetched) */
        u32 nflush;             /* Number of events to flush */
};

#ifdef CONFIG_COMPAT
struct mon_bin_get32 {
        u32 hdr32;
        u32 data32;
        u32 alloc32;
};

struct mon_bin_mfetch32 {
        u32 offvec32;
        u32 nfetch32;
        u32 nflush32;
};
#endif
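/*
 * Illustrative sketch (not part of this driver): how a userspace reader
 * might pull one event with MON_IOCX_GETX.  There is no exported uapi
 * header for these structures, so the program is assumed to mirror the
 * ioctl numbers and struct layouts defined above; the device node name
 * follows the "usbmon%d" nodes created by mon_bin_add() below.
 *
 *      #include <stdio.h>
 *      #include <fcntl.h>
 *      #include <sys/ioctl.h>
 *
 *      struct mon_bin_hdr hdr;
 *      char data[4096];
 *      struct mon_bin_get get = {
 *              .hdr = &hdr, .data = data, .alloc = sizeof(data),
 *      };
 *      int fd = open("/dev/usbmon0", O_RDONLY);
 *
 *      if (ioctl(fd, MON_IOCX_GETX, &get) == 0)
 *              printf("bus %u dev %u type %c len %u\n",
 *                     hdr.busnum, hdr.devnum, hdr.type, hdr.len_cap);
 */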
/* Having these two values same prevents wrapping of the mon_bin_hdr */
#define PKT_ALIGN   64
#define PKT_SIZE    64

#define PKT_SZ_API0 48  /* API 0 (2.6.20) size */
#define PKT_SZ_API1 64  /* API 1 size: extra fields */

#define ISODESC_MAX   128       /* Same number as usbfs allows, 2048 bytes. */

/* max number of USB bus supported */
#define MON_BIN_MAX_MINOR 128
/*
 * The buffer: map of used pages.
 */
struct mon_pgmap {
        struct page *pg;
        unsigned char *ptr;     /* XXX just use page_to_virt everywhere? */
};
/*
 * This gets associated with an open file struct.
 */
struct mon_reader_bin {
        /* The buffer: one per open. */
        spinlock_t b_lock;              /* Protect b_cnt, b_in */
        unsigned int b_size;            /* Current size of the buffer - bytes */
        unsigned int b_cnt;             /* Bytes used */
        unsigned int b_in, b_out;       /* Offsets into buffer - bytes */
        unsigned int b_read;            /* Amount of read data in curr. pkt. */
        struct mon_pgmap *b_vec;        /* The map array */
        wait_queue_head_t b_wait;       /* Wait for data here */

        struct mutex fetch_lock;        /* Protect b_read, b_out */
        int mmap_active;

        /* A list of these is needed for "bus 0". Some time later. */
        struct mon_reader r;

        /* Stats */
        unsigned int cnt_lost;
};
static inline struct mon_bin_hdr *MON_OFF2HDR(const struct mon_reader_bin *rp,
    unsigned int offset)
{
        return (struct mon_bin_hdr *)
            (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE);
}

#define MON_RING_EMPTY(rp)      ((rp)->b_cnt == 0)
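/*
 * A worked example of the offset arithmetic above: with 4 KB chunks,
 * ring offset 5000 lands in chunk 1 (5000 / 4096) at byte 904
 * (5000 % 4096) of that chunk's page.
 */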
static unsigned char xfer_to_pipe[4] = {
        PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
};

static struct class *mon_bin_class;
static dev_t mon_bin_dev0;
static struct cdev mon_bin_cdev;
static void mon_buff_area_fill(const struct mon_reader_bin *rp,
    unsigned int offset, unsigned int size);
static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp);
static int mon_alloc_buff(struct mon_pgmap *map, int npages);
static void mon_free_buff(struct mon_pgmap *map, int npages);
/*
 * This is a "chunked memcpy". It does not manipulate any counters.
 */
static unsigned int mon_copy_to_buff(const struct mon_reader_bin *this,
    unsigned int off, const unsigned char *from, unsigned int length)
{
        unsigned int step_len;
        unsigned char *buf;
        unsigned int in_page;

        while (length) {
                /*
                 * Determine step_len.
                 */
                step_len = length;
                in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
                if (in_page < step_len)
                        step_len = in_page;

                /*
                 * Copy data and advance pointers.
                 */
                buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
                memcpy(buf, from, step_len);
                if ((off += step_len) >= this->b_size)
                        off = 0;
                from += step_len;
                length -= step_len;
        }
        return off;
}
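/*
 * Example of the chunked walk above: copying 300 bytes starting at ring
 * offset 4000 with 4 KB chunks takes two iterations, 96 bytes to the tail
 * of chunk 0 and the remaining 204 bytes to the head of chunk 1 (or to
 * offset 0 if the end of the ring was reached).
 */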
/*
 * This is a little worse than the above because it's "chunked copy_to_user".
 * The return value is an error code, not an offset.
 */
static int copy_from_buf(const struct mon_reader_bin *this, unsigned int off,
    char __user *to, int length)
{
        unsigned int step_len;
        unsigned char *buf;
        unsigned int in_page;

        while (length) {
                /*
                 * Determine step_len.
                 */
                step_len = length;
                in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
                if (in_page < step_len)
                        step_len = in_page;

                /*
                 * Copy data and advance pointers.
                 */
                buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
                if (copy_to_user(to, buf, step_len))
                        return -EINVAL;
                if ((off += step_len) >= this->b_size)
                        off = 0;
                to += step_len;
                length -= step_len;
        }
        return 0;
}
/*
 * Allocate an (aligned) area in the buffer.
 * This is called under b_lock.
 * Returns ~0 on failure.
 */
static unsigned int mon_buff_area_alloc(struct mon_reader_bin *rp,
    unsigned int size)
{
        unsigned int offset;

        size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
        if (rp->b_cnt + size > rp->b_size)
                return ~0;
        offset = rp->b_in;
        rp->b_cnt += size;
        if ((rp->b_in += size) >= rp->b_size)
                rp->b_in -= rp->b_size;
        return offset;
}
/*
 * This is the same thing as mon_buff_area_alloc, only it does not allow
 * buffers to wrap. This is needed by applications which pass references
 * into mmap-ed buffers up their stacks (libpcap can do that).
 *
 * Currently, we always have the header stuck with the data, although
 * it is not strictly speaking necessary.
 *
 * When a buffer would wrap, we place a filler packet to mark the space.
 */
static unsigned int mon_buff_area_alloc_contiguous(struct mon_reader_bin *rp,
    unsigned int size)
{
        unsigned int offset;
        unsigned int fill_size;

        size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
        if (rp->b_cnt + size > rp->b_size)
                return ~0;
        if (rp->b_in + size > rp->b_size) {
                /*
                 * This would wrap. Find if we still have space after
                 * skipping to the end of the buffer. If we do, place
                 * a filler packet and allocate a new packet.
                 */
                fill_size = rp->b_size - rp->b_in;
                if (rp->b_cnt + size + fill_size > rp->b_size)
                        return ~0;
                mon_buff_area_fill(rp, rp->b_in, fill_size);

                offset = 0;
                rp->b_in = size;
                rp->b_cnt += size + fill_size;
        } else if (rp->b_in + size == rp->b_size) {
                offset = rp->b_in;
                rp->b_in = 0;
                rp->b_cnt += size;
        } else {
                offset = rp->b_in;
                rp->b_in += size;
                rp->b_cnt += size;
        }
        return offset;
}
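/*
 * Concrete example of the wrap case above: with a 300 KB ring, b_in at
 * 299 KB and a 2 KB record requested, fill_size is 1 KB; that tail is
 * stamped as a filler packet by mon_buff_area_fill() and the real record
 * is placed at offset 0, so an mmap-ed consumer never sees a record that
 * straddles the end of the buffer.
 */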
/*
 * Return a few (kilo-)bytes to the head of the buffer.
 * This is used if a data fetch fails.
 */
static void mon_buff_area_shrink(struct mon_reader_bin *rp, unsigned int size)
{

        /* size &= ~(PKT_ALIGN-1);  -- we're called with aligned size */
        rp->b_cnt -= size;
        if (rp->b_in < size)
                rp->b_in += rp->b_size;
        rp->b_in -= size;
}
/*
 * This has to be called under both b_lock and fetch_lock, because
 * it accesses both b_cnt and b_out.
 */
static void mon_buff_area_free(struct mon_reader_bin *rp, unsigned int size)
{

        size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
        rp->b_cnt -= size;
        if ((rp->b_out += size) >= rp->b_size)
                rp->b_out -= rp->b_size;
}
static void mon_buff_area_fill(const struct mon_reader_bin *rp,
    unsigned int offset, unsigned int size)
{
        struct mon_bin_hdr *ep;

        ep = MON_OFF2HDR(rp, offset);
        memset(ep, 0, PKT_SIZE);
        ep->type = '@';                 /* Mark it as a filler record */
        ep->len_cap = size - PKT_SIZE;
}
static inline char mon_bin_get_setup(unsigned char *setupb,
    const struct urb *urb, char ev_type)
{

        if (urb->setup_packet == NULL)
                return 'Z';
        memcpy(setupb, urb->setup_packet, SETUP_LEN);
        return 0;
}
static unsigned int mon_bin_get_data(const struct mon_reader_bin *rp,
    unsigned int offset, struct urb *urb, unsigned int length,
    char *flag)
{
        int i;
        struct scatterlist *sg;
        unsigned int this_len;

        *flag = 0;
        if (urb->num_sgs == 0) {
                if (urb->transfer_buffer == NULL) {
                        *flag = 'Z';
                        return length;
                }
                mon_copy_to_buff(rp, offset, urb->transfer_buffer, length);
                length = 0;

        } else {
                /* If IOMMU coalescing occurred, we cannot trust sg_page */
                if (urb->transfer_flags & URB_DMA_SG_COMBINED) {
                        *flag = 'D';
                        return length;
                }

                /* Copy up to the first non-addressable segment */
                for_each_sg(urb->sg, sg, urb->num_sgs, i) {
                        if (length == 0 || PageHighMem(sg_page(sg)))
                                break;
                        this_len = min_t(unsigned int, sg->length, length);
                        offset = mon_copy_to_buff(rp, offset, sg_virt(sg),
                                        this_len);
                        length -= this_len;
                }
                if (i == 0)
                        *flag = 'D';
        }

        return length;
}
static void mon_bin_get_isodesc(const struct mon_reader_bin *rp,
    unsigned int offset, struct urb *urb, char ev_type, unsigned int ndesc)
{
        struct mon_bin_isodesc *dp;
        struct usb_iso_packet_descriptor *fp;

        fp = urb->iso_frame_desc;
        while (ndesc-- != 0) {
                dp = (struct mon_bin_isodesc *)
                    (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE);
                dp->iso_status = fp->status;
                dp->iso_off = fp->offset;
                dp->iso_len = (ev_type == 'S') ? fp->length : fp->actual_length;
                dp->_pad = 0;
                if ((offset += sizeof(struct mon_bin_isodesc)) >= rp->b_size)
                        offset = 0;
                fp++;
        }
}
static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
    char ev_type, int status)
{
        const struct usb_endpoint_descriptor *epd = &urb->ep->desc;
        struct timeval ts;
        unsigned long flags;
        unsigned int urb_length;
        unsigned int offset;
        unsigned int length;
        unsigned int delta;
        unsigned int ndesc, lendesc;
        unsigned char dir;
        struct mon_bin_hdr *ep;
        char data_tag = 0;

        do_gettimeofday(&ts);

        spin_lock_irqsave(&rp->b_lock, flags);

        /*
         * Find the maximum allowable length, then allocate space.
         */
        if (usb_endpoint_xfer_isoc(epd)) {
                if (urb->number_of_packets < 0) {
                        ndesc = 0;
                } else if (urb->number_of_packets >= ISODESC_MAX) {
                        ndesc = ISODESC_MAX;
                } else {
                        ndesc = urb->number_of_packets;
                }
        } else {
                ndesc = 0;
        }
        lendesc = ndesc*sizeof(struct mon_bin_isodesc);

        urb_length = (ev_type == 'S') ?
            urb->transfer_buffer_length : urb->actual_length;
        length = urb_length;

        if (length >= rp->b_size/5)
                length = rp->b_size/5;

        if (usb_urb_dir_in(urb)) {
                if (ev_type == 'S') {
                        length = 0;
                        data_tag = '<';
                }
                /* Cannot rely on endpoint number in case of control ep.0 */
                dir = USB_DIR_IN;
        } else {
                if (ev_type == 'C') {
                        length = 0;
                        data_tag = '>';
                }
                dir = 0;
        }

        if (rp->mmap_active) {
                offset = mon_buff_area_alloc_contiguous(rp,
                                                length + PKT_SIZE + lendesc);
        } else {
                offset = mon_buff_area_alloc(rp, length + PKT_SIZE + lendesc);
        }
        if (offset == ~0) {
                rp->cnt_lost++;
                spin_unlock_irqrestore(&rp->b_lock, flags);
                return;
        }

        ep = MON_OFF2HDR(rp, offset);
        if ((offset += PKT_SIZE) >= rp->b_size) offset = 0;

        /*
         * Fill the allocated area.
         */
        memset(ep, 0, PKT_SIZE);
        ep->type = ev_type;
        ep->xfer_type = xfer_to_pipe[usb_endpoint_type(epd)];
        ep->epnum = dir | usb_endpoint_num(epd);
        ep->devnum = urb->dev->devnum;
        ep->busnum = urb->dev->bus->busnum;
        ep->id = (unsigned long) urb;
        ep->ts_sec = ts.tv_sec;
        ep->ts_usec = ts.tv_usec;
        ep->status = status;
        ep->len_urb = urb_length;
        ep->len_cap = length + lendesc;
        ep->xfer_flags = urb->transfer_flags;

        if (usb_endpoint_xfer_int(epd)) {
                ep->interval = urb->interval;
        } else if (usb_endpoint_xfer_isoc(epd)) {
                ep->interval = urb->interval;
                ep->start_frame = urb->start_frame;
                ep->s.iso.error_count = urb->error_count;
                ep->s.iso.numdesc = urb->number_of_packets;
        }

        if (usb_endpoint_xfer_control(epd) && ev_type == 'S') {
                ep->flag_setup = mon_bin_get_setup(ep->s.setup, urb, ev_type);
        } else {
                ep->flag_setup = '-';
        }

        if (ndesc != 0) {
                ep->ndesc = ndesc;
                mon_bin_get_isodesc(rp, offset, urb, ev_type, ndesc);
                if ((offset += lendesc) >= rp->b_size)
                        offset -= rp->b_size;
        }

        if (length != 0) {
                length = mon_bin_get_data(rp, offset, urb, length,
                                &data_tag);
                if (length > 0) {
                        delta = (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
                        ep->len_cap -= length;
                        delta -= (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
                        mon_buff_area_shrink(rp, delta);
                }
        } else {
                ep->flag_data = data_tag;
        }

        spin_unlock_irqrestore(&rp->b_lock, flags);

        wake_up(&rp->b_wait);
}
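/*
 * Layout reminder for the record built above: each event occupies PKT_SIZE
 * bytes of header, then lendesc bytes of ISO descriptors, then the captured
 * data, and the whole record is rounded up to PKT_ALIGN in the ring.  For
 * example, a bulk completion with 100 captured bytes takes 64 + 0 + 100 =
 * 164 bytes and therefore 192 bytes of ring space.
 */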
static void mon_bin_submit(void *data, struct urb *urb)
{
        struct mon_reader_bin *rp = data;
        mon_bin_event(rp, urb, 'S', -EINPROGRESS);
}

static void mon_bin_complete(void *data, struct urb *urb, int status)
{
        struct mon_reader_bin *rp = data;
        mon_bin_event(rp, urb, 'C', status);
}

static void mon_bin_error(void *data, struct urb *urb, int error)
{
        struct mon_reader_bin *rp = data;
        struct timeval ts;
        unsigned long flags;
        unsigned int offset;
        struct mon_bin_hdr *ep;

        do_gettimeofday(&ts);

        spin_lock_irqsave(&rp->b_lock, flags);

        offset = mon_buff_area_alloc(rp, PKT_SIZE);
        if (offset == ~0) {
                /* Not incrementing cnt_lost. Just because. */
                spin_unlock_irqrestore(&rp->b_lock, flags);
                return;
        }

        ep = MON_OFF2HDR(rp, offset);

        memset(ep, 0, PKT_SIZE);
        ep->type = 'E';
        ep->xfer_type = xfer_to_pipe[usb_endpoint_type(&urb->ep->desc)];
        ep->epnum = usb_urb_dir_in(urb) ? USB_DIR_IN : 0;
        ep->epnum |= usb_endpoint_num(&urb->ep->desc);
        ep->devnum = urb->dev->devnum;
        ep->busnum = urb->dev->bus->busnum;
        ep->id = (unsigned long) urb;
        ep->ts_sec = ts.tv_sec;
        ep->ts_usec = ts.tv_usec;
        ep->status = error;

        ep->flag_setup = '-';
        ep->flag_data = 'E';

        spin_unlock_irqrestore(&rp->b_lock, flags);

        wake_up(&rp->b_wait);
}
static int mon_bin_open(struct inode *inode, struct file *file)
{
        struct mon_bus *mbus;
        struct mon_reader_bin *rp;
        size_t size;
        int rc;

        mutex_lock(&mon_lock);
        if ((mbus = mon_bus_lookup(iminor(inode))) == NULL) {
                mutex_unlock(&mon_lock);
                return -ENODEV;
        }
        if (mbus != &mon_bus0 && mbus->u_bus == NULL) {
                printk(KERN_ERR TAG ": consistency error on open\n");
                mutex_unlock(&mon_lock);
                return -ENODEV;
        }

        rp = kzalloc(sizeof(struct mon_reader_bin), GFP_KERNEL);
        if (rp == NULL) {
                rc = -ENOMEM;
                goto err_alloc;
        }
        spin_lock_init(&rp->b_lock);
        init_waitqueue_head(&rp->b_wait);
        mutex_init(&rp->fetch_lock);
        rp->b_size = BUFF_DFL;

        size = sizeof(struct mon_pgmap) * (rp->b_size/CHUNK_SIZE);
        if ((rp->b_vec = kzalloc(size, GFP_KERNEL)) == NULL) {
                rc = -ENOMEM;
                goto err_allocvec;
        }

        if ((rc = mon_alloc_buff(rp->b_vec, rp->b_size/CHUNK_SIZE)) < 0)
                goto err_allocbuff;

        rp->r.m_bus = mbus;
        rp->r.r_data = rp;
        rp->r.rnf_submit = mon_bin_submit;
        rp->r.rnf_error = mon_bin_error;
        rp->r.rnf_complete = mon_bin_complete;

        mon_reader_add(mbus, &rp->r);

        file->private_data = rp;
        mutex_unlock(&mon_lock);
        return 0;

err_allocbuff:
        kfree(rp->b_vec);
err_allocvec:
        kfree(rp);
err_alloc:
        mutex_unlock(&mon_lock);
        return rc;
}
/*
 * Extract an event from buffer and copy it to user space.
 * Wait if there is no event ready.
 * Returns zero or error.
 */
static int mon_bin_get_event(struct file *file, struct mon_reader_bin *rp,
    struct mon_bin_hdr __user *hdr, unsigned int hdrbytes,
    void __user *data, unsigned int nbytes)
{
        unsigned long flags;
        struct mon_bin_hdr *ep;
        unsigned int step_len;
        unsigned int offset;
        int rc;

        mutex_lock(&rp->fetch_lock);

        if ((rc = mon_bin_wait_event(file, rp)) < 0) {
                mutex_unlock(&rp->fetch_lock);
                return rc;
        }

        ep = MON_OFF2HDR(rp, rp->b_out);

        if (copy_to_user(hdr, ep, hdrbytes)) {
                mutex_unlock(&rp->fetch_lock);
                return -EFAULT;
        }

        step_len = min(ep->len_cap, nbytes);
        if ((offset = rp->b_out + PKT_SIZE) >= rp->b_size) offset = 0;

        if (copy_from_buf(rp, offset, data, step_len)) {
                mutex_unlock(&rp->fetch_lock);
                return -EFAULT;
        }

        spin_lock_irqsave(&rp->b_lock, flags);
        mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
        spin_unlock_irqrestore(&rp->b_lock, flags);
        rp->b_read = 0;

        mutex_unlock(&rp->fetch_lock);
        return 0;
}
static int mon_bin_release(struct inode *inode, struct file *file)
{
        struct mon_reader_bin *rp = file->private_data;
        struct mon_bus *mbus = rp->r.m_bus;

        mutex_lock(&mon_lock);

        if (mbus->nreaders <= 0) {
                printk(KERN_ERR TAG ": consistency error on close\n");
                mutex_unlock(&mon_lock);
                return 0;
        }
        mon_reader_del(mbus, &rp->r);

        mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
        kfree(rp->b_vec);
        kfree(rp);

        mutex_unlock(&mon_lock);
        return 0;
}
static ssize_t mon_bin_read(struct file *file, char __user *buf,
    size_t nbytes, loff_t *ppos)
{
        struct mon_reader_bin *rp = file->private_data;
        unsigned int hdrbytes = PKT_SZ_API0;
        unsigned long flags;
        struct mon_bin_hdr *ep;
        unsigned int offset;
        size_t step_len;
        char *ptr;
        ssize_t done = 0;
        int rc;

        mutex_lock(&rp->fetch_lock);

        if ((rc = mon_bin_wait_event(file, rp)) < 0) {
                mutex_unlock(&rp->fetch_lock);
                return rc;
        }

        ep = MON_OFF2HDR(rp, rp->b_out);

        if (rp->b_read < hdrbytes) {
                step_len = min(nbytes, (size_t)(hdrbytes - rp->b_read));
                ptr = ((char *)ep) + rp->b_read;
                if (step_len && copy_to_user(buf, ptr, step_len)) {
                        mutex_unlock(&rp->fetch_lock);
                        return -EFAULT;
                }
                nbytes -= step_len;
                buf += step_len;
                rp->b_read += step_len;
                done += step_len;
        }

        if (rp->b_read >= hdrbytes) {
                step_len = ep->len_cap;
                step_len -= rp->b_read - hdrbytes;
                if (step_len > nbytes)
                        step_len = nbytes;
                offset = rp->b_out + PKT_SIZE;
                offset += rp->b_read - hdrbytes;
                if (offset >= rp->b_size)
                        offset -= rp->b_size;
                if (copy_from_buf(rp, offset, buf, step_len)) {
                        mutex_unlock(&rp->fetch_lock);
                        return -EFAULT;
                }
                nbytes -= step_len;
                buf += step_len;
                rp->b_read += step_len;
                done += step_len;
        }

        /*
         * Check if whole packet was read, and if so, jump to the next one.
         */
        if (rp->b_read >= hdrbytes + ep->len_cap) {
                spin_lock_irqsave(&rp->b_lock, flags);
                mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
                spin_unlock_irqrestore(&rp->b_lock, flags);
                rp->b_read = 0;
        }

        mutex_unlock(&rp->fetch_lock);
        return done;
}
/*
 * Remove at most nevents from chunked buffer.
 * Returns the number of removed events.
 */
static int mon_bin_flush(struct mon_reader_bin *rp, unsigned nevents)
{
        unsigned long flags;
        struct mon_bin_hdr *ep;
        unsigned int i;

        mutex_lock(&rp->fetch_lock);
        spin_lock_irqsave(&rp->b_lock, flags);
        for (i = 0; i < nevents; ++i) {
                if (MON_RING_EMPTY(rp))
                        break;

                ep = MON_OFF2HDR(rp, rp->b_out);
                mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
        }
        spin_unlock_irqrestore(&rp->b_lock, flags);
        rp->b_read = 0;
        mutex_unlock(&rp->fetch_lock);
        return i;
}
/*
 * Fetch at most max event offsets into the buffer and put them into vec.
 * The events are usually freed later with mon_bin_flush.
 * Return the effective number of events fetched.
 */
static int mon_bin_fetch(struct file *file, struct mon_reader_bin *rp,
    u32 __user *vec, unsigned int max)
{
        unsigned int cur_out;
        unsigned int bytes, avail;
        unsigned int size;
        unsigned int nevents;
        struct mon_bin_hdr *ep;
        unsigned long flags;
        int rc;

        mutex_lock(&rp->fetch_lock);

        if ((rc = mon_bin_wait_event(file, rp)) < 0) {
                mutex_unlock(&rp->fetch_lock);
                return rc;
        }

        spin_lock_irqsave(&rp->b_lock, flags);
        avail = rp->b_cnt;
        spin_unlock_irqrestore(&rp->b_lock, flags);

        cur_out = rp->b_out;
        nevents = 0;
        bytes = 0;
        while (bytes < avail) {
                if (nevents >= max)
                        break;

                ep = MON_OFF2HDR(rp, cur_out);
                if (put_user(cur_out, &vec[nevents])) {
                        mutex_unlock(&rp->fetch_lock);
                        return -EFAULT;
                }

                nevents++;
                size = ep->len_cap + PKT_SIZE;
                size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
                if ((cur_out += size) >= rp->b_size)
                        cur_out -= rp->b_size;
                bytes += size;
        }

        mutex_unlock(&rp->fetch_lock);
        return nevents;
}
/*
 * Count events. This is almost the same as the above mon_bin_fetch,
 * only we do not store offsets into user vector, and we have no limit.
 */
static int mon_bin_queued(struct mon_reader_bin *rp)
{
        unsigned int cur_out;
        unsigned int bytes, avail;
        unsigned int size;
        unsigned int nevents;
        struct mon_bin_hdr *ep;
        unsigned long flags;

        mutex_lock(&rp->fetch_lock);

        spin_lock_irqsave(&rp->b_lock, flags);
        avail = rp->b_cnt;
        spin_unlock_irqrestore(&rp->b_lock, flags);

        cur_out = rp->b_out;
        nevents = 0;
        bytes = 0;
        while (bytes < avail) {
                ep = MON_OFF2HDR(rp, cur_out);

                nevents++;
                size = ep->len_cap + PKT_SIZE;
                size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
                if ((cur_out += size) >= rp->b_size)
                        cur_out -= rp->b_size;
                bytes += size;
        }

        mutex_unlock(&rp->fetch_lock);
        return nevents;
}
static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct mon_reader_bin *rp = file->private_data;
        // struct mon_bus* mbus = rp->r.m_bus;
        int ret = 0;
        struct mon_bin_hdr *ep;
        unsigned long flags;

        switch (cmd) {

        case MON_IOCQ_URB_LEN:
                /*
                 * N.B. This only returns the size of data, without the header.
                 */
                spin_lock_irqsave(&rp->b_lock, flags);
                if (!MON_RING_EMPTY(rp)) {
                        ep = MON_OFF2HDR(rp, rp->b_out);
                        ret = ep->len_cap;
                }
                spin_unlock_irqrestore(&rp->b_lock, flags);
                break;

        case MON_IOCQ_RING_SIZE:
                ret = rp->b_size;
                break;

        case MON_IOCT_RING_SIZE:
                /*
                 * Changing the buffer size will flush its contents; the new
                 * buffer is allocated before releasing the old one to be sure
                 * the device will stay functional also in case of memory
                 * pressure.
                 */
                {
                int size;
                struct mon_pgmap *vec;

                if (arg < BUFF_MIN || arg > BUFF_MAX)
                        return -EINVAL;

                size = CHUNK_ALIGN(arg);
                if ((vec = kzalloc(sizeof(struct mon_pgmap) * (size/CHUNK_SIZE),
                    GFP_KERNEL)) == NULL) {
                        ret = -ENOMEM;
                        break;
                }

                ret = mon_alloc_buff(vec, size/CHUNK_SIZE);
                if (ret < 0) {
                        kfree(vec);
                        break;
                }

                mutex_lock(&rp->fetch_lock);
                spin_lock_irqsave(&rp->b_lock, flags);
                mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
                kfree(rp->b_vec);
                rp->b_vec  = vec;
                rp->b_size = size;
                rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0;
                rp->cnt_lost = 0;
                spin_unlock_irqrestore(&rp->b_lock, flags);
                mutex_unlock(&rp->fetch_lock);
                }
                break;

        case MON_IOCH_MFLUSH:
                ret = mon_bin_flush(rp, arg);
                break;

        case MON_IOCX_GET:
        case MON_IOCX_GETX:
                {
                struct mon_bin_get getb;

                if (copy_from_user(&getb, (void __user *)arg,
                                            sizeof(struct mon_bin_get)))
                        return -EFAULT;

                if (getb.alloc > 0x10000000)    /* Want to cast to u32 */
                        return -EINVAL;
                ret = mon_bin_get_event(file, rp, getb.hdr,
                    (cmd == MON_IOCX_GET)? PKT_SZ_API0: PKT_SZ_API1,
                    getb.data, (unsigned int)getb.alloc);
                }
                break;

        case MON_IOCX_MFETCH:
                {
                struct mon_bin_mfetch mfetch;
                struct mon_bin_mfetch __user *uptr;

                uptr = (struct mon_bin_mfetch __user *)arg;

                if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
                        return -EFAULT;

                if (mfetch.nflush) {
                        ret = mon_bin_flush(rp, mfetch.nflush);
                        if (ret < 0)
                                return ret;
                        if (put_user(ret, &uptr->nflush))
                                return -EFAULT;
                }
                ret = mon_bin_fetch(file, rp, mfetch.offvec, mfetch.nfetch);
                if (ret < 0)
                        return ret;
                if (put_user(ret, &uptr->nfetch))
                        return -EFAULT;
                ret = 0;
                }
                break;

        case MON_IOCG_STATS: {
                struct mon_bin_stats __user *sp;
                unsigned int nevents;
                unsigned int ndropped;

                spin_lock_irqsave(&rp->b_lock, flags);
                ndropped = rp->cnt_lost;
                rp->cnt_lost = 0;
                spin_unlock_irqrestore(&rp->b_lock, flags);
                nevents = mon_bin_queued(rp);

                sp = (struct mon_bin_stats __user *)arg;
                /* Report the snapshot taken above; rp->cnt_lost was already
                 * reset and may be racing with the producer. */
                if (put_user(ndropped, &sp->dropped))
                        return -EFAULT;
                if (put_user(nevents, &sp->queued))
                        return -EFAULT;

                break;
        }

        default:
                return -ENOTTY;
        }

        return ret;
}
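/*
 * Illustrative sketch (not part of this driver): the mmap-based fast path
 * that MON_IOCX_MFETCH and MON_IOCH_MFLUSH implement for a userspace
 * consumer such as libpcap.  As above, the struct and ioctl definitions
 * are assumed to be mirrored by the program; fd is an open /dev/usbmonN
 * descriptor.
 *
 *      #include <stdint.h>
 *      #include <sys/ioctl.h>
 *      #include <sys/mman.h>
 *
 *      uint32_t offs[64];
 *      struct mon_bin_mfetch fetch = {
 *              .offvec = offs, .nfetch = 64, .nflush = 0,
 *      };
 *      int ring_size = ioctl(fd, MON_IOCQ_RING_SIZE, 0);
 *      char *ring = mmap(NULL, ring_size, PROT_READ, MAP_SHARED, fd, 0);
 *
 *      if (ioctl(fd, MON_IOCX_MFETCH, &fetch) == 0) {
 *              for (uint32_t i = 0; i < fetch.nfetch; i++) {
 *                      struct mon_bin_hdr *hdr = (void *)(ring + offs[i]);
 *                      // header is followed by ISO descriptors and data
 *              }
 *              ioctl(fd, MON_IOCH_MFLUSH, fetch.nfetch); // release events
 *      }
 */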
#ifdef CONFIG_COMPAT
static long mon_bin_compat_ioctl(struct file *file,
    unsigned int cmd, unsigned long arg)
{
        struct mon_reader_bin *rp = file->private_data;
        int ret;

        switch (cmd) {

        case MON_IOCX_GET32:
        case MON_IOCX_GETX32:
                {
                struct mon_bin_get32 getb;

                if (copy_from_user(&getb, (void __user *)arg,
                                            sizeof(struct mon_bin_get32)))
                        return -EFAULT;

                ret = mon_bin_get_event(file, rp, compat_ptr(getb.hdr32),
                    (cmd == MON_IOCX_GET32)? PKT_SZ_API0: PKT_SZ_API1,
                    compat_ptr(getb.data32), getb.alloc32);
                if (ret < 0)
                        return ret;
                }
                return 0;

        case MON_IOCX_MFETCH32:
                {
                struct mon_bin_mfetch32 mfetch;
                struct mon_bin_mfetch32 __user *uptr;

                uptr = (struct mon_bin_mfetch32 __user *) compat_ptr(arg);

                if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
                        return -EFAULT;

                if (mfetch.nflush32) {
                        ret = mon_bin_flush(rp, mfetch.nflush32);
                        if (ret < 0)
                                return ret;
                        if (put_user(ret, &uptr->nflush32))
                                return -EFAULT;
                }
                ret = mon_bin_fetch(file, rp, compat_ptr(mfetch.offvec32),
                    mfetch.nfetch32);
                if (ret < 0)
                        return ret;
                if (put_user(ret, &uptr->nfetch32))
                        return -EFAULT;
                }
                return 0;

        case MON_IOCG_STATS:
                return mon_bin_ioctl(file, cmd, (unsigned long) compat_ptr(arg));

        case MON_IOCQ_URB_LEN:
        case MON_IOCQ_RING_SIZE:
        case MON_IOCT_RING_SIZE:
        case MON_IOCH_MFLUSH:
                return mon_bin_ioctl(file, cmd, arg);

        default:
                ;
        }
        return -ENOTTY;
}
#endif /* CONFIG_COMPAT */
static unsigned int
mon_bin_poll(struct file *file, struct poll_table_struct *wait)
{
        struct mon_reader_bin *rp = file->private_data;
        unsigned int mask = 0;
        unsigned long flags;

        if (file->f_mode & FMODE_READ)
                poll_wait(file, &rp->b_wait, wait);

        spin_lock_irqsave(&rp->b_lock, flags);
        if (!MON_RING_EMPTY(rp))
                mask |= POLLIN | POLLRDNORM;    /* readable */
        spin_unlock_irqrestore(&rp->b_lock, flags);
        return mask;
}
/*
 * open and close: just keep track of how many times the device is
 * mapped, to use the proper memory allocation function.
 */
static void mon_bin_vma_open(struct vm_area_struct *vma)
{
        struct mon_reader_bin *rp = vma->vm_private_data;
        rp->mmap_active++;
}

static void mon_bin_vma_close(struct vm_area_struct *vma)
{
        struct mon_reader_bin *rp = vma->vm_private_data;
        rp->mmap_active--;
}
/*
 * Map ring pages to user space.
 */
static int mon_bin_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct mon_reader_bin *rp = vma->vm_private_data;
        unsigned long offset, chunk_idx;
        struct page *pageptr;

        offset = vmf->pgoff << PAGE_SHIFT;
        if (offset >= rp->b_size)
                return VM_FAULT_SIGBUS;
        chunk_idx = offset / CHUNK_SIZE;
        pageptr = rp->b_vec[chunk_idx].pg;
        get_page(pageptr);
        vmf->page = pageptr;
        return 0;
}

static const struct vm_operations_struct mon_bin_vm_ops = {
        .open =     mon_bin_vma_open,
        .close =    mon_bin_vma_close,
        .fault =    mon_bin_vma_fault,
};
static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma)
{
        /* don't do anything here: "fault" will set up page table entries */
        vma->vm_ops = &mon_bin_vm_ops;
        vma->vm_flags |= VM_RESERVED;
        vma->vm_private_data = filp->private_data;
        mon_bin_vma_open(vma);
        return 0;
}
static const struct file_operations mon_fops_binary = {
        .owner =        THIS_MODULE,
        .open =         mon_bin_open,
        .llseek =       no_llseek,
        .read =         mon_bin_read,
        /* .write =     mon_text_write, */
        .poll =         mon_bin_poll,
        .unlocked_ioctl = mon_bin_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl = mon_bin_compat_ioctl,
#endif
        .release =      mon_bin_release,
        .mmap =         mon_bin_mmap,
};
static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp)
{
        DECLARE_WAITQUEUE(waita, current);
        unsigned long flags;

        add_wait_queue(&rp->b_wait, &waita);
        set_current_state(TASK_INTERRUPTIBLE);

        spin_lock_irqsave(&rp->b_lock, flags);
        while (MON_RING_EMPTY(rp)) {
                spin_unlock_irqrestore(&rp->b_lock, flags);

                if (file->f_flags & O_NONBLOCK) {
                        set_current_state(TASK_RUNNING);
                        remove_wait_queue(&rp->b_wait, &waita);
                        return -EWOULDBLOCK; /* Same as EAGAIN in Linux */
                }
                schedule();
                if (signal_pending(current)) {
                        remove_wait_queue(&rp->b_wait, &waita);
                        return -EINTR;
                }
                set_current_state(TASK_INTERRUPTIBLE);

                spin_lock_irqsave(&rp->b_lock, flags);
        }
        spin_unlock_irqrestore(&rp->b_lock, flags);

        set_current_state(TASK_RUNNING);
        remove_wait_queue(&rp->b_wait, &waita);
        return 0;
}
static int mon_alloc_buff(struct mon_pgmap *map, int npages)
{
        int n;
        unsigned long vaddr;

        for (n = 0; n < npages; n++) {
                vaddr = get_zeroed_page(GFP_KERNEL);
                if (vaddr == 0) {
                        while (n-- != 0)
                                free_page((unsigned long) map[n].ptr);
                        return -ENOMEM;
                }
                map[n].ptr = (unsigned char *) vaddr;
                map[n].pg = virt_to_page((void *) vaddr);
        }
        return 0;
}

static void mon_free_buff(struct mon_pgmap *map, int npages)
{
        int n;

        for (n = 0; n < npages; n++)
                free_page((unsigned long) map[n].ptr);
}
int mon_bin_add(struct mon_bus *mbus, const struct usb_bus *ubus)
{
        struct device *dev;
        unsigned minor = ubus ? ubus->busnum : 0;

        if (minor >= MON_BIN_MAX_MINOR)
                return 0;

        dev = device_create(mon_bin_class, ubus ? ubus->controller : NULL,
                            MKDEV(MAJOR(mon_bin_dev0), minor), NULL,
                            "usbmon%d", minor);
        if (IS_ERR(dev))
                return 0;

        mbus->classdev = dev;
        return 1;
}

void mon_bin_del(struct mon_bus *mbus)
{
        device_destroy(mon_bin_class, mbus->classdev->devt);
}
int __init mon_bin_init(void)
{
        int rc;

        mon_bin_class = class_create(THIS_MODULE, "usbmon");
        if (IS_ERR(mon_bin_class)) {
                rc = PTR_ERR(mon_bin_class);
                goto err_class;
        }

        rc = alloc_chrdev_region(&mon_bin_dev0, 0, MON_BIN_MAX_MINOR, "usbmon");
        if (rc < 0)
                goto err_dev;

        cdev_init(&mon_bin_cdev, &mon_fops_binary);
        mon_bin_cdev.owner = THIS_MODULE;

        rc = cdev_add(&mon_bin_cdev, mon_bin_dev0, MON_BIN_MAX_MINOR);
        if (rc < 0)
                goto err_add;

        return 0;

err_add:
        unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
err_dev:
        class_destroy(mon_bin_class);
err_class:
        return rc;
}

void mon_bin_exit(void)
{
        cdev_del(&mon_bin_cdev);
        unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
        class_destroy(mon_bin_class);
}