/*
 * USB Skeleton driver - 2.2
 *
 * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, version 2.
 *
 * This driver is based on the 2.6.3 version of drivers/usb/usb-skeleton.c
 * but has been rewritten to be easier to read and use.
 *
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kref.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/mutex.h>

/* Define these values to match your devices */
#define USB_SKEL_VENDOR_ID	0xfff0
#define USB_SKEL_PRODUCT_ID	0xfff0

/* table of devices that work with this driver */
static const struct usb_device_id skel_table[] = {
	{ USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) },
	{ }					/* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, skel_table);

/* Get a minor range for your devices from the usb maintainer */
#define USB_SKEL_MINOR_BASE	192

/* our private defines. if this grows any larger, use your own .h file */
#define MAX_TRANSFER		(PAGE_SIZE - 512)
/* MAX_TRANSFER is chosen so that the VM is not stressed by
   allocations > PAGE_SIZE and the number of packets in a page
   is an integer. 512 is the largest possible packet on EHCI. */
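/*
 * Worked example (illustrative only, assuming the common 4 KiB PAGE_SIZE):
 * MAX_TRANSFER = 4096 - 512 = 3584 bytes, i.e. exactly 7 maximum-sized
 * 512-byte EHCI bulk packets, so a single transfer never requires an
 * allocation larger than one page. On architectures with a different
 * PAGE_SIZE the value scales accordingly.
 */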
#define WRITES_IN_FLIGHT	8
/* arbitrarily chosen */
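/*
 * Note (not in the original source): limit_sem is initialized to
 * WRITES_IN_FLIGHT, and each queued write holds up to MAX_TRANSFER bytes
 * of coherent DMA memory until its URB completes, so the defaults cap
 * outstanding write buffers at roughly 8 * 3584 bytes = 28 KiB per device
 * (assuming 4 KiB pages).
 */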

/* Structure to hold all of our device specific stuff */
struct usb_skel {
	struct usb_device	*udev;			/* the usb device for this device */
	struct usb_interface	*interface;		/* the interface for this device */
	struct semaphore	limit_sem;		/* limiting the number of writes in progress */
	struct usb_anchor	submitted;		/* in case we need to retract our submissions */
	struct urb		*bulk_in_urb;		/* the urb to read data with */
	unsigned char		*bulk_in_buffer;	/* the buffer to receive data */
	size_t			bulk_in_size;		/* the size of the receive buffer */
	size_t			bulk_in_filled;		/* number of bytes in the buffer */
	size_t			bulk_in_copied;		/* already copied to user space */
	__u8			bulk_in_endpointAddr;	/* the address of the bulk in endpoint */
	__u8			bulk_out_endpointAddr;	/* the address of the bulk out endpoint */
	int			errors;			/* the last request tanked */
	bool			ongoing_read;		/* a read is going on */
	bool			processed_urb;		/* set once the completed urb has been handled */
	spinlock_t		err_lock;		/* lock for errors */
	struct kref		kref;
	struct mutex		io_mutex;		/* synchronize I/O with disconnect */
	struct completion	bulk_in_completion;	/* to wait for an ongoing read */
};
#define to_skel_dev(d) container_of(d, struct usb_skel, kref)

static struct usb_driver skel_driver;
static void skel_draw_down(struct usb_skel *dev);

static void skel_delete(struct kref *kref)
{
	struct usb_skel *dev = to_skel_dev(kref);

	usb_free_urb(dev->bulk_in_urb);
	usb_put_dev(dev->udev);
	kfree(dev->bulk_in_buffer);
	kfree(dev);
}

static int skel_open(struct inode *inode, struct file *file)
{
	struct usb_skel *dev;
	struct usb_interface *interface;
	int subminor;
	int retval = 0;

	subminor = iminor(inode);

	interface = usb_find_interface(&skel_driver, subminor);
	if (!interface) {
		pr_err("%s - error, can't find device for minor %d\n",
			__func__, subminor);
		retval = -ENODEV;
		goto exit;
	}

	dev = usb_get_intfdata(interface);
	if (!dev) {
		retval = -ENODEV;
		goto exit;
	}

	retval = usb_autopm_get_interface(interface);
	if (retval)
		goto exit;

	/* increment our usage count for the device */
	kref_get(&dev->kref);

	/* save our object in the file's private structure */
	file->private_data = dev;

exit:
	return retval;
}

static int skel_release(struct inode *inode, struct file *file)
{
	struct usb_skel *dev;

	dev = file->private_data;
	if (dev == NULL)
		return -ENODEV;

	/* allow the device to be autosuspended */
	mutex_lock(&dev->io_mutex);
	if (dev->interface)
		usb_autopm_put_interface(dev->interface);
	mutex_unlock(&dev->io_mutex);

	/* decrement the count on our device */
	kref_put(&dev->kref, skel_delete);
	return 0;
}

static int skel_flush(struct file *file, fl_owner_t id)
{
	struct usb_skel *dev;
	int res;

	dev = file->private_data;
	if (dev == NULL)
		return -ENODEV;

	/* wait for io to stop */
	mutex_lock(&dev->io_mutex);
	skel_draw_down(dev);

	/* read out errors, leave subsequent opens a clean slate */
	spin_lock_irq(&dev->err_lock);
	res = dev->errors ? (dev->errors == -EPIPE ? -EPIPE : -EIO) : 0;
	dev->errors = 0;
	spin_unlock_irq(&dev->err_lock);

	mutex_unlock(&dev->io_mutex);

	return res;
}

static void skel_read_bulk_callback(struct urb *urb)
{
	struct usb_skel *dev;

	dev = urb->context;

	spin_lock(&dev->err_lock);
	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		    urb->status == -ECONNRESET ||
		    urb->status == -ESHUTDOWN))
			dev_err(&dev->interface->dev,
				"%s - nonzero read bulk status received: %d\n",
				__func__, urb->status);

		dev->errors = urb->status;
	} else {
		dev->bulk_in_filled = urb->actual_length;
	}
	dev->ongoing_read = 0;
	spin_unlock(&dev->err_lock);

	complete(&dev->bulk_in_completion);
}

static int skel_do_read_io(struct usb_skel *dev, size_t count)
{
	int rv;

	/* prepare a read */
	usb_fill_bulk_urb(dev->bulk_in_urb,
			dev->udev,
			usb_rcvbulkpipe(dev->udev,
				dev->bulk_in_endpointAddr),
			dev->bulk_in_buffer,
			min(dev->bulk_in_size, count),
			skel_read_bulk_callback,
			dev);
	/* tell everybody to leave the URB alone */
	spin_lock_irq(&dev->err_lock);
	dev->ongoing_read = 1;
	spin_unlock_irq(&dev->err_lock);

	/* do it */
	rv = usb_submit_urb(dev->bulk_in_urb, GFP_KERNEL);
	if (rv < 0) {
		dev_err(&dev->interface->dev,
			"%s - failed submitting read urb, error %d\n",
			__func__, rv);
		dev->bulk_in_filled = 0;
		rv = (rv == -ENOMEM) ? rv : -EIO;
		spin_lock_irq(&dev->err_lock);
		dev->ongoing_read = 0;
		spin_unlock_irq(&dev->err_lock);
	}

	return rv;
}

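/*
 * Read path overview (summary comment, not in the original source):
 * skel_read() serves data out of bulk_in_buffer, which a previous bulk-in
 * URB filled. Once the buffer is exhausted it calls skel_do_read_io() to
 * submit a new URB and either waits for the completion (blocking opens) or
 * returns -EAGAIN (O_NONBLOCK). Errors recorded by the completion handler
 * are reported to the caller exactly once.
 */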
static ssize_t skel_read(struct file *file, char *buffer, size_t count,
			 loff_t *ppos)
{
	struct usb_skel *dev;
	int rv;
	bool ongoing_io;

	dev = file->private_data;

	/* if we cannot read at all, return EOF */
	if (!dev->bulk_in_urb || !count)
		return 0;

	/* no concurrent readers */
	rv = mutex_lock_interruptible(&dev->io_mutex);
	if (rv < 0)
		return rv;

	if (!dev->interface) {		/* disconnect() was called */
		rv = -ENODEV;
		goto exit;
	}

	/* if IO is under way, we must not touch things */
retry:
	spin_lock_irq(&dev->err_lock);
	ongoing_io = dev->ongoing_read;
	spin_unlock_irq(&dev->err_lock);

	if (ongoing_io) {
		/* nonblocking IO shall not wait */
		if (file->f_flags & O_NONBLOCK) {
			rv = -EAGAIN;
			goto exit;
		}
		/*
		 * IO may take forever
		 * hence wait in an interruptible state
		 */
		rv = wait_for_completion_interruptible(&dev->bulk_in_completion);
		if (rv < 0)
			goto exit;
		/*
		 * by waiting we also semiprocessed the urb
		 * we must finish now
		 */
		dev->bulk_in_copied = 0;
		dev->processed_urb = 1;
	}

	if (!dev->processed_urb) {
		/*
		 * the URB hasn't been processed
		 * do it now
		 */
		wait_for_completion(&dev->bulk_in_completion);
		dev->bulk_in_copied = 0;
		dev->processed_urb = 1;
	}

	/* errors must be reported */
	rv = dev->errors;
	if (rv < 0) {
		/* any error is reported once */
		dev->errors = 0;
		/* to preserve notifications about reset */
		rv = (rv == -EPIPE) ? rv : -EIO;
		/* no data to deliver */
		dev->bulk_in_filled = 0;
		/* report it */
		goto exit;
	}

	/*
	 * if the buffer is filled we may satisfy the read
	 * else we need to start IO
	 */

	if (dev->bulk_in_filled) {
		/* we had read data */
		size_t available = dev->bulk_in_filled - dev->bulk_in_copied;
		size_t chunk = min(available, count);

		if (!available) {
			/*
			 * all data has been used
			 * actual IO needs to be done
			 */
			rv = skel_do_read_io(dev, count);
			if (rv < 0)
				goto exit;
			else
				goto retry;
		}
		/*
		 * data is available
		 * chunk tells us how much shall be copied
		 */

		if (copy_to_user(buffer,
				 dev->bulk_in_buffer + dev->bulk_in_copied,
				 chunk))
			rv = -EFAULT;
		else
			rv = chunk;

		dev->bulk_in_copied += chunk;

		/*
		 * if we are asked for more than we have,
		 * we start IO but don't wait
		 */
		if (available < count)
			skel_do_read_io(dev, count - chunk);
	} else {
		/* no data in the buffer */
		rv = skel_do_read_io(dev, count);
		if (rv < 0)
			goto exit;
		else if (!(file->f_flags & O_NONBLOCK))
			goto retry;
		rv = -EAGAIN;
	}
exit:
	mutex_unlock(&dev->io_mutex);
	return rv;
}

static void skel_write_bulk_callback(struct urb *urb)
{
	struct usb_skel *dev;

	dev = urb->context;

	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		    urb->status == -ECONNRESET ||
		    urb->status == -ESHUTDOWN))
			dev_err(&dev->interface->dev,
				"%s - nonzero write bulk status received: %d\n",
				__func__, urb->status);

		spin_lock(&dev->err_lock);
		dev->errors = urb->status;
		spin_unlock(&dev->err_lock);
	}

	/* free up our allocated buffer */
	usb_free_coherent(urb->dev, urb->transfer_buffer_length,
			  urb->transfer_buffer, urb->transfer_dma);
	up(&dev->limit_sem);
}

static ssize_t skel_write(struct file *file, const char *user_buffer,
			  size_t count, loff_t *ppos)
{
	struct usb_skel *dev;
	int retval = 0;
	struct urb *urb = NULL;
	char *buf = NULL;
	size_t writesize = min(count, (size_t)MAX_TRANSFER);

	dev = file->private_data;

	/* verify that we actually have some data to write */
	if (count == 0)
		goto exit;

	/*
	 * limit the number of URBs in flight to stop a user from using up all
	 * RAM
	 */
	if (!(file->f_flags & O_NONBLOCK)) {
		if (down_interruptible(&dev->limit_sem)) {
			retval = -ERESTARTSYS;
			goto exit;
		}
	} else {
		if (down_trylock(&dev->limit_sem)) {
			retval = -EAGAIN;
			goto exit;
		}
	}

	spin_lock_irq(&dev->err_lock);
	retval = dev->errors;
	if (retval < 0) {
		/* any error is reported once */
		dev->errors = 0;
		/* to preserve notifications about reset */
		retval = (retval == -EPIPE) ? retval : -EIO;
	}
	spin_unlock_irq(&dev->err_lock);
	if (retval < 0)
		goto error;

	/* create a urb, and a buffer for it, and copy the data to the urb */
	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb) {
		retval = -ENOMEM;
		goto error;
	}

	buf = usb_alloc_coherent(dev->udev, writesize, GFP_KERNEL,
				 &urb->transfer_dma);
	if (!buf) {
		retval = -ENOMEM;
		goto error;
	}

	if (copy_from_user(buf, user_buffer, writesize)) {
		retval = -EFAULT;
		goto error;
	}

	/* this lock makes sure we don't submit URBs to gone devices */
	mutex_lock(&dev->io_mutex);
	if (!dev->interface) {		/* disconnect() was called */
		mutex_unlock(&dev->io_mutex);
		retval = -ENODEV;
		goto error;
	}

	/* initialize the urb properly */
	usb_fill_bulk_urb(urb, dev->udev,
			  usb_sndbulkpipe(dev->udev, dev->bulk_out_endpointAddr),
			  buf, writesize, skel_write_bulk_callback, dev);
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	usb_anchor_urb(urb, &dev->submitted);

	/* send the data out the bulk port */
	retval = usb_submit_urb(urb, GFP_KERNEL);
	mutex_unlock(&dev->io_mutex);
	if (retval) {
		dev_err(&dev->interface->dev,
			"%s - failed submitting write urb, error %d\n",
			__func__, retval);
		goto error_unanchor;
	}

	/*
	 * release our reference to this urb, the USB core will eventually free
	 * it entirely
	 */
	usb_free_urb(urb);

	return writesize;

error_unanchor:
	usb_unanchor_urb(urb);
error:
	if (urb) {
		usb_free_coherent(dev->udev, writesize, buf, urb->transfer_dma);
		usb_free_urb(urb);
	}
	up(&dev->limit_sem);

exit:
	return retval;
}

static const struct file_operations skel_fops = {
	.owner =	THIS_MODULE,
	.read =		skel_read,
	.write =	skel_write,
	.open =		skel_open,
	.release =	skel_release,
	.flush =	skel_flush,
	.llseek =	noop_llseek,
};

/*
 * usb class driver info in order to get a minor number from the usb core,
 * and to have the device registered with the driver core
 */
static struct usb_class_driver skel_class = {
	.name =		"skel%d",
	.fops =		&skel_fops,
	.minor_base =	USB_SKEL_MINOR_BASE,
};

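/*
 * Usage sketch (not part of the driver, assumes typical udev naming): the
 * "skel%d" template usually yields a node such as /dev/skel0 once probe()
 * succeeds, so a minimal user-space test could look like:
 *
 *	int fd = open("/dev/skel0", O_RDWR);
 *	write(fd, "hello", 5);        // queued as one bulk-out URB
 *	read(fd, buf, sizeof(buf));   // blocks unless O_NONBLOCK is set
 *
 * The exact device path depends on the distribution's udev rules.
 */
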
static int skel_probe(struct usb_interface *interface,
		      const struct usb_device_id *id)
{
	struct usb_skel *dev;
	struct usb_host_interface *iface_desc;
	struct usb_endpoint_descriptor *endpoint;
	size_t buffer_size;
	int i;
	int retval = -ENOMEM;

	/* allocate memory for our device state and initialize it */
	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		dev_err(&interface->dev, "Out of memory\n");
		goto error;
	}
	kref_init(&dev->kref);
	sema_init(&dev->limit_sem, WRITES_IN_FLIGHT);
	mutex_init(&dev->io_mutex);
	spin_lock_init(&dev->err_lock);
	init_usb_anchor(&dev->submitted);
	init_completion(&dev->bulk_in_completion);

	dev->udev = usb_get_dev(interface_to_usbdev(interface));
	dev->interface = interface;

	/* set up the endpoint information */
	/* use only the first bulk-in and bulk-out endpoints */
	iface_desc = interface->cur_altsetting;
	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
		endpoint = &iface_desc->endpoint[i].desc;

		if (!dev->bulk_in_endpointAddr &&
		    usb_endpoint_is_bulk_in(endpoint)) {
			/* we found a bulk in endpoint */
			buffer_size = usb_endpoint_maxp(endpoint);
			dev->bulk_in_size = buffer_size;
			dev->bulk_in_endpointAddr = endpoint->bEndpointAddress;
			dev->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL);
			if (!dev->bulk_in_buffer) {
				dev_err(&interface->dev,
					"Could not allocate bulk_in_buffer\n");
				goto error;
			}
			dev->bulk_in_urb = usb_alloc_urb(0, GFP_KERNEL);
			if (!dev->bulk_in_urb) {
				dev_err(&interface->dev,
					"Could not allocate bulk_in_urb\n");
				goto error;
			}
		}

		if (!dev->bulk_out_endpointAddr &&
		    usb_endpoint_is_bulk_out(endpoint)) {
			/* we found a bulk out endpoint */
			dev->bulk_out_endpointAddr = endpoint->bEndpointAddress;
		}
	}
	if (!(dev->bulk_in_endpointAddr && dev->bulk_out_endpointAddr)) {
		dev_err(&interface->dev,
			"Could not find both bulk-in and bulk-out endpoints\n");
		goto error;
	}

	/* save our data pointer in this interface device */
	usb_set_intfdata(interface, dev);

	/* we can register the device now, as it is ready */
	retval = usb_register_dev(interface, &skel_class);
	if (retval) {
		/* something prevented us from registering this driver */
		dev_err(&interface->dev,
			"Not able to get a minor for this device.\n");
		usb_set_intfdata(interface, NULL);
		goto error;
	}

	/* let the user know what node this device is now attached to */
	dev_info(&interface->dev,
		 "USB Skeleton device now attached to USBSkel-%d",
		 interface->minor);
	return 0;

error:
	if (dev)
		/* this frees allocated memory */
		kref_put(&dev->kref, skel_delete);
	return retval;
}

static void skel_disconnect(struct usb_interface *interface)
{
	struct usb_skel *dev;
	int minor = interface->minor;

	dev = usb_get_intfdata(interface);
	usb_set_intfdata(interface, NULL);

	/* give back our minor */
	usb_deregister_dev(interface, &skel_class);

	/* prevent more I/O from starting */
	mutex_lock(&dev->io_mutex);
	dev->interface = NULL;
	mutex_unlock(&dev->io_mutex);

	usb_kill_anchored_urbs(&dev->submitted);

	/* decrement our usage count */
	kref_put(&dev->kref, skel_delete);

	dev_info(&interface->dev, "USB Skeleton #%d now disconnected", minor);
}

static void skel_draw_down(struct usb_skel *dev)
{
	int time;

	time = usb_wait_anchor_empty_timeout(&dev->submitted, 1000);
	if (!time)
		usb_kill_anchored_urbs(&dev->submitted);
	usb_kill_urb(dev->bulk_in_urb);
}

static int skel_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct usb_skel *dev = usb_get_intfdata(intf);

	if (!dev)
		return 0;
	skel_draw_down(dev);
	return 0;
}

static int skel_resume(struct usb_interface *intf)
{
	return 0;
}

static int skel_pre_reset(struct usb_interface *intf)
{
	struct usb_skel *dev = usb_get_intfdata(intf);

	mutex_lock(&dev->io_mutex);
	skel_draw_down(dev);

	return 0;
}

static int skel_post_reset(struct usb_interface *intf)
{
	struct usb_skel *dev = usb_get_intfdata(intf);

	/* we are sure no URBs are active - no locking needed */
	dev->errors = -EPIPE;
	mutex_unlock(&dev->io_mutex);

	return 0;
}

static struct usb_driver skel_driver = {
	.name =		"skeleton",
	.probe =	skel_probe,
	.disconnect =	skel_disconnect,
	.suspend =	skel_suspend,
	.resume =	skel_resume,
	.pre_reset =	skel_pre_reset,
	.post_reset =	skel_post_reset,
	.id_table =	skel_table,
	.supports_autosuspend = 1,
};

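/*
 * module_usb_driver() generates the module_init()/module_exit() boilerplate,
 * registering skel_driver with the USB core on load and deregistering it on
 * unload.
 */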
module_usb_driver(skel_driver);

MODULE_LICENSE("GPL");