2 * file_storage.c -- File-backed USB Storage Gadget, for USB development
4 * Copyright (C) 2003-2008 Alan Stern
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions, and the following disclaimer,
12 * without modification.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. The names of the above-listed copyright holders may not be used
17 * to endorse or promote products derived from this software without
18 * specific prior written permission.
20 * ALTERNATIVELY, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") as published by the Free Software
22 * Foundation, either version 2 of that License or (at your option) any
23 * later version.
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
26 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
27 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
29 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
30 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
31 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
32 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
44 * The FSG driver is fairly straightforward. There is a main kernel
45 * thread that handles most of the work. Interrupt routines field
46 * callbacks from the controller driver: bulk- and interrupt-request
47 * completion notifications, endpoint-0 events, and disconnect events.
48 * Completion events are passed to the main thread by wakeup calls. Many
49 * ep0 requests are handled at interrupt time, but SetInterface,
50 * SetConfiguration, and device reset requests are forwarded to the
51 * thread in the form of "exceptions" using SIGUSR1 signals (since they
52 * should interrupt any ongoing file I/O operations).
54 * The thread's main routine implements the standard command/data/status
55 * parts of a SCSI interaction. It and its subroutines are full of tests
56 * for pending signals/exceptions -- all this polling is necessary since
57 * the kernel has no setjmp/longjmp equivalents. (Maybe this is an
58 * indication that the driver really wants to be running in userspace.)
59 * An important point is that so long as the thread is alive it keeps an
60 * open reference to the backing file. This will prevent unmounting
61 * the backing file's underlying filesystem and could cause problems
62 * during system shutdown, for example. To prevent such problems, the
63 * thread catches INT, TERM, and KILL signals and converts them into
66 * In normal operation the main thread is started during the gadget's
67 * fsg_bind() callback and stopped during fsg_unbind(). But it can also
68 * exit when it receives a signal, and there's no point leaving the
69 * gadget running when the thread is dead. So just before the thread
70 * exits, it deregisters the gadget driver. This makes things a little
71 * tricky: The driver is deregistered at two places, and the exiting
72 * thread can indirectly call fsg_unbind() which in turn can tell the
73 * thread to exit. The first problem is resolved through the use of the
74 * REGISTERED atomic bitflag; the driver will only be deregistered once.
75 * The second problem is resolved by having fsg_unbind() check
76 * fsg->state; it won't try to stop the thread if the state is already
77 * FSG_STATE_TERMINATED.
79 * To provide maximum throughput, the driver uses a circular pipeline of
80 * buffer heads (struct fsg_buffhd). In principle the pipeline can be
81 * arbitrarily long; in practice the benefits don't justify having more
82 * than 2 stages (i.e., double buffering). But it helps to think of the
83 * pipeline as being a long one. Each buffer head contains a bulk-in and
84 * a bulk-out request pointer (since the buffer can be used for both
85 * output and input -- directions always are given from the host's
86 * point of view) as well as a pointer to the buffer and various state
89 * Use of the pipeline follows a simple protocol. There is a variable
90 * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
91 * At any time that buffer head may still be in use from an earlier
92 * request, so each buffer head has a state variable indicating whether
93 * it is EMPTY, FULL, or BUSY. Typical use involves waiting for the
94 * buffer head to be EMPTY, filling the buffer either by file I/O or by
95 * USB I/O (during which the buffer head is BUSY), and marking the buffer
96 * head FULL when the I/O is complete. Then the buffer will be emptied
97 * (again possibly by USB I/O, during which it is marked BUSY) and
98 * finally marked EMPTY again (possibly by a completion routine).
100 * A module parameter tells the driver to avoid stalling the bulk
101 * endpoints wherever the transport specification allows. This is
102 * necessary for some UDCs like the SuperH, which cannot reliably clear a
103 * halt on a bulk endpoint. However, under certain circumstances the
104 * Bulk-only specification requires a stall. In such cases the driver
105 * will halt the endpoint and set a flag indicating that it should clear
106 * the halt in software during the next device reset. Hopefully this
107 * will permit everything to work correctly. Furthermore, although the
108 * specification allows the bulk-out endpoint to halt when the host sends
109 * too much data, implementing this would cause an unavoidable race.
110 * The driver will always use the "no-stall" approach for OUT transfers.
112 * One subtle point concerns sending status-stage responses for ep0
113 * requests. Some of these requests, such as device reset, can involve
114 * interrupting an ongoing file I/O operation, which might take an
115 * arbitrarily long time. During that delay the host might give up on
116 * the original ep0 request and issue a new one. When that happens the
117 * driver should not notify the host about completion of the original
118 * request, as the host will no longer be waiting for it. So the driver
119 * assigns to each ep0 request a unique tag, and it keeps track of the
120 * tag value of the request associated with a long-running exception
121 * (device-reset, interface-change, or configuration-change). When the
122 * exception handler is finished, the status-stage response is submitted
123 * only if the current ep0 request tag is equal to the exception request
124 * tag. Thus only the most recently received ep0 request will get a
125 * status-stage response.
127 * Warning: This driver source file is too long. It ought to be split up
128 * into a header file plus about 3 separate .c files, to handle the details
129 * of the Gadget, USB Mass Storage, and SCSI protocols.
133 /* #define VERBOSE_DEBUG */
134 /* #define DUMP_MSGS */
137 #include <linux/blkdev.h>
138 #include <linux/completion.h>
139 #include <linux/dcache.h>
140 #include <linux/delay.h>
141 #include <linux/device.h>
142 #include <linux/fcntl.h>
143 #include <linux/file.h>
144 #include <linux/fs.h>
145 #include <linux/kref.h>
146 #include <linux/kthread.h>
147 #include <linux/limits.h>
148 #include <linux/rwsem.h>
149 #include <linux/slab.h>
150 #include <linux/spinlock.h>
151 #include <linux/string.h>
152 #include <linux/freezer.h>
153 #include <linux/utsname.h>
155 #include <linux/usb/ch9.h>
156 #include <linux/usb/gadget.h>
158 #include "gadget_chips.h"
163 * Kbuild is not very cooperative with respect to linking separately
164 * compiled library objects into one module. So for now we won't use
165 * separate compilation ... ensuring init/exit sections work to shrink
166 * the runtime footprint, and giving us at least some parts of what
167 * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
169 #include "usbstring.c"
171 #include "epautoconf.c"
173 /*-------------------------------------------------------------------------*/
175 #define DRIVER_DESC "File-backed Storage Gadget"
176 #define DRIVER_NAME "g_file_storage"
177 /* DRIVER_VERSION must be at least 6 characters long, as it is used
178 * to generate a fallback serial number. */
179 #define DRIVER_VERSION "20 November 2008"
181 static char fsg_string_manufacturer
[64];
182 static const char fsg_string_product
[] = DRIVER_DESC
;
183 static char fsg_string_serial
[13];
184 static const char fsg_string_config
[] = "Self-powered";
185 static const char fsg_string_interface
[] = "Mass Storage";
188 #include "storage_common.c"
191 MODULE_DESCRIPTION(DRIVER_DESC
);
192 MODULE_AUTHOR("Alan Stern");
193 MODULE_LICENSE("Dual BSD/GPL");
196 * This driver assumes self-powered hardware and has no way for users to
197 * trigger remote wakeup. It uses autoconfiguration to select endpoints
198 * and endpoint addresses.
202 /*-------------------------------------------------------------------------*/
205 /* Encapsulate the module parameter settings */
208 char *file
[FSG_MAX_LUNS
];
209 int ro
[FSG_MAX_LUNS
];
210 int nofua
[FSG_MAX_LUNS
];
211 unsigned int num_filenames
;
212 unsigned int num_ros
;
213 unsigned int num_nofuas
;
220 char *transport_parm
;
222 unsigned short vendor
;
223 unsigned short product
;
224 unsigned short release
;
229 char *transport_name
;
233 } mod_data
= { // Default values
234 .transport_parm
= "BBB",
235 .protocol_parm
= "SCSI",
239 .vendor
= FSG_VENDOR_ID
,
240 .product
= FSG_PRODUCT_ID
,
241 .release
= 0xffff, // Use controller chip type
246 module_param_array_named(file
, mod_data
.file
, charp
, &mod_data
.num_filenames
,
248 MODULE_PARM_DESC(file
, "names of backing files or devices");
250 module_param_array_named(ro
, mod_data
.ro
, bool, &mod_data
.num_ros
, S_IRUGO
);
251 MODULE_PARM_DESC(ro
, "true to force read-only");
253 module_param_array_named(nofua
, mod_data
.nofua
, bool, &mod_data
.num_nofuas
,
255 MODULE_PARM_DESC(nofua
, "true to ignore SCSI WRITE(10,12) FUA bit");
257 module_param_named(luns
, mod_data
.nluns
, uint
, S_IRUGO
);
258 MODULE_PARM_DESC(luns
, "number of LUNs");
260 module_param_named(removable
, mod_data
.removable
, bool, S_IRUGO
);
261 MODULE_PARM_DESC(removable
, "true to simulate removable media");
263 module_param_named(stall
, mod_data
.can_stall
, bool, S_IRUGO
);
264 MODULE_PARM_DESC(stall
, "false to prevent bulk stalls");
266 module_param_named(cdrom
, mod_data
.cdrom
, bool, S_IRUGO
);
267 MODULE_PARM_DESC(cdrom
, "true to emulate cdrom instead of disk");
269 module_param_named(serial
, mod_data
.serial
, charp
, S_IRUGO
);
270 MODULE_PARM_DESC(serial
, "USB serial number");
272 /* In the non-TEST version, only the module parameters listed above
274 #ifdef CONFIG_USB_FILE_STORAGE_TEST
276 module_param_named(transport
, mod_data
.transport_parm
, charp
, S_IRUGO
);
277 MODULE_PARM_DESC(transport
, "type of transport (BBB, CBI, or CB)");
279 module_param_named(protocol
, mod_data
.protocol_parm
, charp
, S_IRUGO
);
280 MODULE_PARM_DESC(protocol
, "type of protocol (RBC, 8020, QIC, UFI, "
283 module_param_named(vendor
, mod_data
.vendor
, ushort
, S_IRUGO
);
284 MODULE_PARM_DESC(vendor
, "USB Vendor ID");
286 module_param_named(product
, mod_data
.product
, ushort
, S_IRUGO
);
287 MODULE_PARM_DESC(product
, "USB Product ID");
289 module_param_named(release
, mod_data
.release
, ushort
, S_IRUGO
);
290 MODULE_PARM_DESC(release
, "USB release number");
292 module_param_named(buflen
, mod_data
.buflen
, uint
, S_IRUGO
);
293 MODULE_PARM_DESC(buflen
, "I/O buffer size");
295 #endif /* CONFIG_USB_FILE_STORAGE_TEST */
299 * These definitions will permit the compiler to avoid generating code for
300 * parts of the driver that aren't used in the non-TEST version. Even gcc
301 * can recognize when a test of a constant expression yields a dead code
305 #ifdef CONFIG_USB_FILE_STORAGE_TEST
307 #define transport_is_bbb() (mod_data.transport_type == USB_PR_BULK)
308 #define transport_is_cbi() (mod_data.transport_type == USB_PR_CBI)
309 #define protocol_is_scsi() (mod_data.protocol_type == USB_SC_SCSI)
313 #define transport_is_bbb() 1
314 #define transport_is_cbi() 0
315 #define protocol_is_scsi() 1
317 #endif /* CONFIG_USB_FILE_STORAGE_TEST */
320 /*-------------------------------------------------------------------------*/
324 /* lock protects: state, all the req_busy's, and cbbuf_cmnd */
326 struct usb_gadget
*gadget
;
328 /* filesem protects: backing files in use */
329 struct rw_semaphore filesem
;
331 /* reference counting: wait until all LUNs are released */
334 struct usb_ep
*ep0
; // Handy copy of gadget->ep0
335 struct usb_request
*ep0req
; // For control responses
336 unsigned int ep0_req_tag
;
337 const char *ep0req_name
;
339 struct usb_request
*intreq
; // For interrupt responses
341 struct fsg_buffhd
*intr_buffhd
;
343 unsigned int bulk_out_maxpacket
;
344 enum fsg_state state
; // For exception handling
345 unsigned int exception_req_tag
;
347 u8 config
, new_config
;
349 unsigned int running
: 1;
350 unsigned int bulk_in_enabled
: 1;
351 unsigned int bulk_out_enabled
: 1;
352 unsigned int intr_in_enabled
: 1;
353 unsigned int phase_error
: 1;
354 unsigned int short_packet_received
: 1;
355 unsigned int bad_lun_okay
: 1;
357 unsigned long atomic_bitflags
;
359 #define IGNORE_BULK_OUT 1
362 struct usb_ep
*bulk_in
;
363 struct usb_ep
*bulk_out
;
364 struct usb_ep
*intr_in
;
366 struct fsg_buffhd
*next_buffhd_to_fill
;
367 struct fsg_buffhd
*next_buffhd_to_drain
;
368 struct fsg_buffhd buffhds
[FSG_NUM_BUFFERS
];
370 int thread_wakeup_needed
;
371 struct completion thread_notifier
;
372 struct task_struct
*thread_task
;
375 u8 cmnd
[MAX_COMMAND_SIZE
];
376 enum data_direction data_dir
;
378 u32 data_size_from_cmnd
;
384 /* The CB protocol offers no way for a host to know when a command
385 * has completed. As a result the next command may arrive early,
386 * and we will still have to handle it. For that reason we need
387 * a buffer to store new commands when using CB (or CBI, which
388 * does not oblige a host to wait for command completion either). */
390 u8 cbbuf_cmnd
[MAX_COMMAND_SIZE
];
393 struct fsg_lun
*luns
;
394 struct fsg_lun
*curlun
;
397 typedef void (*fsg_routine_t
)(struct fsg_dev
*);
399 static int exception_in_progress(struct fsg_dev
*fsg
)
401 return (fsg
->state
> FSG_STATE_IDLE
);
404 /* Make bulk-out requests be divisible by the maxpacket size */
405 static void set_bulk_out_req_length(struct fsg_dev
*fsg
,
406 struct fsg_buffhd
*bh
, unsigned int length
)
410 bh
->bulk_out_intended_length
= length
;
411 rem
= length
% fsg
->bulk_out_maxpacket
;
413 length
+= fsg
->bulk_out_maxpacket
- rem
;
414 bh
->outreq
->length
= length
;
/* The single device instance, and the (forward-declared) gadget driver
 * structure registered on its behalf. */
static struct fsg_dev			*the_fsg;
static struct usb_gadget_driver		fsg_driver;
421 /*-------------------------------------------------------------------------*/
423 static int fsg_set_halt(struct fsg_dev
*fsg
, struct usb_ep
*ep
)
427 if (ep
== fsg
->bulk_in
)
429 else if (ep
== fsg
->bulk_out
)
433 DBG(fsg
, "%s set halt\n", name
);
434 return usb_ep_set_halt(ep
);
438 /*-------------------------------------------------------------------------*/
441 * DESCRIPTORS ... most are static, but strings and (full) configuration
442 * descriptors are built on demand. Also the (static) config and interface
443 * descriptors are adjusted during fsg_bind().
446 /* There is only one configuration. */
447 #define CONFIG_VALUE 1
449 static struct usb_device_descriptor
451 .bLength
= sizeof device_desc
,
452 .bDescriptorType
= USB_DT_DEVICE
,
454 .bcdUSB
= cpu_to_le16(0x0200),
455 .bDeviceClass
= USB_CLASS_PER_INTERFACE
,
457 /* The next three values can be overridden by module parameters */
458 .idVendor
= cpu_to_le16(FSG_VENDOR_ID
),
459 .idProduct
= cpu_to_le16(FSG_PRODUCT_ID
),
460 .bcdDevice
= cpu_to_le16(0xffff),
462 .iManufacturer
= FSG_STRING_MANUFACTURER
,
463 .iProduct
= FSG_STRING_PRODUCT
,
464 .iSerialNumber
= FSG_STRING_SERIAL
,
465 .bNumConfigurations
= 1,
468 static struct usb_config_descriptor
470 .bLength
= sizeof config_desc
,
471 .bDescriptorType
= USB_DT_CONFIG
,
473 /* wTotalLength computed by usb_gadget_config_buf() */
475 .bConfigurationValue
= CONFIG_VALUE
,
476 .iConfiguration
= FSG_STRING_CONFIG
,
477 .bmAttributes
= USB_CONFIG_ATT_ONE
| USB_CONFIG_ATT_SELFPOWER
,
478 .bMaxPower
= CONFIG_USB_GADGET_VBUS_DRAW
/ 2,
482 static struct usb_qualifier_descriptor
484 .bLength
= sizeof dev_qualifier
,
485 .bDescriptorType
= USB_DT_DEVICE_QUALIFIER
,
487 .bcdUSB
= cpu_to_le16(0x0200),
488 .bDeviceClass
= USB_CLASS_PER_INTERFACE
,
490 .bNumConfigurations
= 1,
496 * Config descriptors must agree with the code that sets configurations
497 * and with code managing interfaces and their altsettings. They must
498 * also handle different speeds and other-speed requests.
500 static int populate_config_buf(struct usb_gadget
*gadget
,
501 u8
*buf
, u8 type
, unsigned index
)
503 enum usb_device_speed speed
= gadget
->speed
;
505 const struct usb_descriptor_header
**function
;
510 if (gadget_is_dualspeed(gadget
) && type
== USB_DT_OTHER_SPEED_CONFIG
)
511 speed
= (USB_SPEED_FULL
+ USB_SPEED_HIGH
) - speed
;
512 function
= gadget_is_dualspeed(gadget
) && speed
== USB_SPEED_HIGH
513 ? (const struct usb_descriptor_header
**)fsg_hs_function
514 : (const struct usb_descriptor_header
**)fsg_fs_function
;
516 /* for now, don't advertise srp-only devices */
517 if (!gadget_is_otg(gadget
))
520 len
= usb_gadget_config_buf(&config_desc
, buf
, EP0_BUFSIZE
, function
);
521 ((struct usb_config_descriptor
*) buf
)->bDescriptorType
= type
;
526 /*-------------------------------------------------------------------------*/
528 /* These routines may be called in process context or in_irq */
530 /* Caller must hold fsg->lock */
531 static void wakeup_thread(struct fsg_dev
*fsg
)
533 /* Tell the main thread that something has happened */
534 fsg
->thread_wakeup_needed
= 1;
535 if (fsg
->thread_task
)
536 wake_up_process(fsg
->thread_task
);
540 static void raise_exception(struct fsg_dev
*fsg
, enum fsg_state new_state
)
544 /* Do nothing if a higher-priority exception is already in progress.
545 * If a lower-or-equal priority exception is in progress, preempt it
546 * and notify the main thread by sending it a signal. */
547 spin_lock_irqsave(&fsg
->lock
, flags
);
548 if (fsg
->state
<= new_state
) {
549 fsg
->exception_req_tag
= fsg
->ep0_req_tag
;
550 fsg
->state
= new_state
;
551 if (fsg
->thread_task
)
552 send_sig_info(SIGUSR1
, SEND_SIG_FORCED
,
555 spin_unlock_irqrestore(&fsg
->lock
, flags
);
559 /*-------------------------------------------------------------------------*/
561 /* The disconnect callback and ep0 routines. These always run in_irq,
562 * except that ep0_queue() is called in the main thread to acknowledge
563 * completion of various requests: set config, set interface, and
564 * Bulk-only device reset. */
566 static void fsg_disconnect(struct usb_gadget
*gadget
)
568 struct fsg_dev
*fsg
= get_gadget_data(gadget
);
570 DBG(fsg
, "disconnect or port reset\n");
571 raise_exception(fsg
, FSG_STATE_DISCONNECT
);
575 static int ep0_queue(struct fsg_dev
*fsg
)
579 rc
= usb_ep_queue(fsg
->ep0
, fsg
->ep0req
, GFP_ATOMIC
);
580 if (rc
!= 0 && rc
!= -ESHUTDOWN
) {
582 /* We can't do much more than wait for a reset */
583 WARNING(fsg
, "error in submission: %s --> %d\n",
589 static void ep0_complete(struct usb_ep
*ep
, struct usb_request
*req
)
591 struct fsg_dev
*fsg
= ep
->driver_data
;
594 dump_msg(fsg
, fsg
->ep0req_name
, req
->buf
, req
->actual
);
595 if (req
->status
|| req
->actual
!= req
->length
)
596 DBG(fsg
, "%s --> %d, %u/%u\n", __func__
,
597 req
->status
, req
->actual
, req
->length
);
598 if (req
->status
== -ECONNRESET
) // Request was cancelled
599 usb_ep_fifo_flush(ep
);
601 if (req
->status
== 0 && req
->context
)
602 ((fsg_routine_t
) (req
->context
))(fsg
);
606 /*-------------------------------------------------------------------------*/
608 /* Bulk and interrupt endpoint completion handlers.
609 * These always run in_irq. */
611 static void bulk_in_complete(struct usb_ep
*ep
, struct usb_request
*req
)
613 struct fsg_dev
*fsg
= ep
->driver_data
;
614 struct fsg_buffhd
*bh
= req
->context
;
616 if (req
->status
|| req
->actual
!= req
->length
)
617 DBG(fsg
, "%s --> %d, %u/%u\n", __func__
,
618 req
->status
, req
->actual
, req
->length
);
619 if (req
->status
== -ECONNRESET
) // Request was cancelled
620 usb_ep_fifo_flush(ep
);
622 /* Hold the lock while we update the request and buffer states */
624 spin_lock(&fsg
->lock
);
626 bh
->state
= BUF_STATE_EMPTY
;
628 spin_unlock(&fsg
->lock
);
631 static void bulk_out_complete(struct usb_ep
*ep
, struct usb_request
*req
)
633 struct fsg_dev
*fsg
= ep
->driver_data
;
634 struct fsg_buffhd
*bh
= req
->context
;
636 dump_msg(fsg
, "bulk-out", req
->buf
, req
->actual
);
637 if (req
->status
|| req
->actual
!= bh
->bulk_out_intended_length
)
638 DBG(fsg
, "%s --> %d, %u/%u\n", __func__
,
639 req
->status
, req
->actual
,
640 bh
->bulk_out_intended_length
);
641 if (req
->status
== -ECONNRESET
) // Request was cancelled
642 usb_ep_fifo_flush(ep
);
644 /* Hold the lock while we update the request and buffer states */
646 spin_lock(&fsg
->lock
);
648 bh
->state
= BUF_STATE_FULL
;
650 spin_unlock(&fsg
->lock
);
#ifdef CONFIG_USB_FILE_STORAGE_TEST
/* Completion handler for interrupt-IN transfers, used only by the CBI
 * transport in the TEST build (runs in_irq).  Clears the device-wide
 * intreq_busy flag (there is a single shared interrupt request), frees
 * the buffer head, and wakes the main thread. */
static void intr_in_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct fsg_dev		*fsg = ep->driver_data;
	struct fsg_buffhd	*bh = req->context;

	if (req->status || req->actual != req->length)
		DBG(fsg, "%s --> %d, %u/%u\n", __func__,
				req->status, req->actual, req->length);
	if (req->status == -ECONNRESET)		// Request was cancelled
		usb_ep_fifo_flush(ep);

	/* Hold the lock while we update the request and buffer states */
	smp_wmb();
	spin_lock(&fsg->lock);
	fsg->intreq_busy = 0;
	bh->state = BUF_STATE_EMPTY;
	wakeup_thread(fsg);
	spin_unlock(&fsg->lock);
}

#else
/* Non-TEST build: the interrupt endpoint is never used, so this is an
 * empty stub that satisfies the completion-callback slot. */
static void intr_in_complete(struct usb_ep *ep, struct usb_request *req)
{}
#endif /* CONFIG_USB_FILE_STORAGE_TEST */
681 /*-------------------------------------------------------------------------*/
683 /* Ep0 class-specific handlers. These always run in_irq. */
#ifdef CONFIG_USB_FILE_STORAGE_TEST
/* Continuation routine for a CB[I] Accept-Device-Specific-Command (ADSC)
 * transfer, invoked from ep0_complete() once the command bytes have
 * arrived (runs in_irq).  Validates the transfer, recognizes the special
 * CBI reset command, and otherwise stashes the command in cbbuf_cmnd for
 * the main thread (CB/CBI hosts need not wait for command completion, so
 * the command may arrive while a previous one is still being processed). */
static void received_cbi_adsc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
	struct usb_request	*req = fsg->ep0req;
	static u8		cbi_reset_cmnd[6] = {
			SC_SEND_DIAGNOSTIC, 4, 0xff, 0xff, 0xff, 0xff};

	/* Error in command transfer? */
	if (req->status || req->length != req->actual ||
			req->actual < 6 || req->actual > MAX_COMMAND_SIZE) {

		/* Not all controllers allow a protocol stall after
		 * receiving control-out data, but we'll try anyway. */
		fsg_set_halt(fsg, fsg->ep0);
		return;			// Wait for reset
	}

	/* Is it the special reset command? */
	if (req->actual >= sizeof cbi_reset_cmnd &&
			memcmp(req->buf, cbi_reset_cmnd,
				sizeof cbi_reset_cmnd) == 0) {

		/* Raise an exception to stop the current operation
		 * and reinitialize our state. */
		DBG(fsg, "cbi reset request\n");
		raise_exception(fsg, FSG_STATE_RESET);
		return;
	}

	VDBG(fsg, "CB[I] accept device-specific command\n");
	spin_lock(&fsg->lock);

	/* Save the command for later */
	if (fsg->cbbuf_cmnd_size)
		WARNING(fsg, "CB[I] overwriting previous command\n");
	fsg->cbbuf_cmnd_size = req->actual;
	memcpy(fsg->cbbuf_cmnd, req->buf, fsg->cbbuf_cmnd_size);

	wakeup_thread(fsg);
	spin_unlock(&fsg->lock);
}

#else
/* Non-TEST build: CB/CBI transports are compiled out; empty stub. */
static void received_cbi_adsc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{}
#endif /* CONFIG_USB_FILE_STORAGE_TEST */
733 static int class_setup_req(struct fsg_dev
*fsg
,
734 const struct usb_ctrlrequest
*ctrl
)
736 struct usb_request
*req
= fsg
->ep0req
;
737 int value
= -EOPNOTSUPP
;
738 u16 w_index
= le16_to_cpu(ctrl
->wIndex
);
739 u16 w_value
= le16_to_cpu(ctrl
->wValue
);
740 u16 w_length
= le16_to_cpu(ctrl
->wLength
);
745 /* Handle Bulk-only class-specific requests */
746 if (transport_is_bbb()) {
747 switch (ctrl
->bRequest
) {
749 case USB_BULK_RESET_REQUEST
:
750 if (ctrl
->bRequestType
!= (USB_DIR_OUT
|
751 USB_TYPE_CLASS
| USB_RECIP_INTERFACE
))
753 if (w_index
!= 0 || w_value
!= 0) {
758 /* Raise an exception to stop the current operation
759 * and reinitialize our state. */
760 DBG(fsg
, "bulk reset request\n");
761 raise_exception(fsg
, FSG_STATE_RESET
);
762 value
= DELAYED_STATUS
;
765 case USB_BULK_GET_MAX_LUN_REQUEST
:
766 if (ctrl
->bRequestType
!= (USB_DIR_IN
|
767 USB_TYPE_CLASS
| USB_RECIP_INTERFACE
))
769 if (w_index
!= 0 || w_value
!= 0) {
773 VDBG(fsg
, "get max LUN\n");
774 *(u8
*) req
->buf
= fsg
->nluns
- 1;
780 /* Handle CBI class-specific requests */
782 switch (ctrl
->bRequest
) {
784 case USB_CBI_ADSC_REQUEST
:
785 if (ctrl
->bRequestType
!= (USB_DIR_OUT
|
786 USB_TYPE_CLASS
| USB_RECIP_INTERFACE
))
788 if (w_index
!= 0 || w_value
!= 0) {
792 if (w_length
> MAX_COMMAND_SIZE
) {
797 fsg
->ep0req
->context
= received_cbi_adsc
;
802 if (value
== -EOPNOTSUPP
)
804 "unknown class-specific control req "
805 "%02x.%02x v%04x i%04x l%u\n",
806 ctrl
->bRequestType
, ctrl
->bRequest
,
807 le16_to_cpu(ctrl
->wValue
), w_index
, w_length
);
812 /*-------------------------------------------------------------------------*/
814 /* Ep0 standard request handlers. These always run in_irq. */
816 static int standard_setup_req(struct fsg_dev
*fsg
,
817 const struct usb_ctrlrequest
*ctrl
)
819 struct usb_request
*req
= fsg
->ep0req
;
820 int value
= -EOPNOTSUPP
;
821 u16 w_index
= le16_to_cpu(ctrl
->wIndex
);
822 u16 w_value
= le16_to_cpu(ctrl
->wValue
);
824 /* Usually this just stores reply data in the pre-allocated ep0 buffer,
825 * but config change events will also reconfigure hardware. */
826 switch (ctrl
->bRequest
) {
828 case USB_REQ_GET_DESCRIPTOR
:
829 if (ctrl
->bRequestType
!= (USB_DIR_IN
| USB_TYPE_STANDARD
|
832 switch (w_value
>> 8) {
835 VDBG(fsg
, "get device descriptor\n");
836 value
= sizeof device_desc
;
837 memcpy(req
->buf
, &device_desc
, value
);
839 case USB_DT_DEVICE_QUALIFIER
:
840 VDBG(fsg
, "get device qualifier\n");
841 if (!gadget_is_dualspeed(fsg
->gadget
))
843 value
= sizeof dev_qualifier
;
844 memcpy(req
->buf
, &dev_qualifier
, value
);
847 case USB_DT_OTHER_SPEED_CONFIG
:
848 VDBG(fsg
, "get other-speed config descriptor\n");
849 if (!gadget_is_dualspeed(fsg
->gadget
))
853 VDBG(fsg
, "get configuration descriptor\n");
855 value
= populate_config_buf(fsg
->gadget
,
862 VDBG(fsg
, "get string descriptor\n");
864 /* wIndex == language code */
865 value
= usb_gadget_get_string(&fsg_stringtab
,
866 w_value
& 0xff, req
->buf
);
871 /* One config, two speeds */
872 case USB_REQ_SET_CONFIGURATION
:
873 if (ctrl
->bRequestType
!= (USB_DIR_OUT
| USB_TYPE_STANDARD
|
876 VDBG(fsg
, "set configuration\n");
877 if (w_value
== CONFIG_VALUE
|| w_value
== 0) {
878 fsg
->new_config
= w_value
;
880 /* Raise an exception to wipe out previous transaction
881 * state (queued bufs, etc) and set the new config. */
882 raise_exception(fsg
, FSG_STATE_CONFIG_CHANGE
);
883 value
= DELAYED_STATUS
;
886 case USB_REQ_GET_CONFIGURATION
:
887 if (ctrl
->bRequestType
!= (USB_DIR_IN
| USB_TYPE_STANDARD
|
890 VDBG(fsg
, "get configuration\n");
891 *(u8
*) req
->buf
= fsg
->config
;
895 case USB_REQ_SET_INTERFACE
:
896 if (ctrl
->bRequestType
!= (USB_DIR_OUT
| USB_TYPE_STANDARD
|
897 USB_RECIP_INTERFACE
))
899 if (fsg
->config
&& w_index
== 0) {
901 /* Raise an exception to wipe out previous transaction
902 * state (queued bufs, etc) and install the new
903 * interface altsetting. */
904 raise_exception(fsg
, FSG_STATE_INTERFACE_CHANGE
);
905 value
= DELAYED_STATUS
;
908 case USB_REQ_GET_INTERFACE
:
909 if (ctrl
->bRequestType
!= (USB_DIR_IN
| USB_TYPE_STANDARD
|
910 USB_RECIP_INTERFACE
))
918 VDBG(fsg
, "get interface\n");
919 *(u8
*) req
->buf
= 0;
925 "unknown control req %02x.%02x v%04x i%04x l%u\n",
926 ctrl
->bRequestType
, ctrl
->bRequest
,
927 w_value
, w_index
, le16_to_cpu(ctrl
->wLength
));
934 static int fsg_setup(struct usb_gadget
*gadget
,
935 const struct usb_ctrlrequest
*ctrl
)
937 struct fsg_dev
*fsg
= get_gadget_data(gadget
);
939 int w_length
= le16_to_cpu(ctrl
->wLength
);
941 ++fsg
->ep0_req_tag
; // Record arrival of a new request
942 fsg
->ep0req
->context
= NULL
;
943 fsg
->ep0req
->length
= 0;
944 dump_msg(fsg
, "ep0-setup", (u8
*) ctrl
, sizeof(*ctrl
));
946 if ((ctrl
->bRequestType
& USB_TYPE_MASK
) == USB_TYPE_CLASS
)
947 rc
= class_setup_req(fsg
, ctrl
);
949 rc
= standard_setup_req(fsg
, ctrl
);
951 /* Respond with data/status or defer until later? */
952 if (rc
>= 0 && rc
!= DELAYED_STATUS
) {
953 rc
= min(rc
, w_length
);
954 fsg
->ep0req
->length
= rc
;
955 fsg
->ep0req
->zero
= rc
< w_length
;
956 fsg
->ep0req_name
= (ctrl
->bRequestType
& USB_DIR_IN
?
957 "ep0-in" : "ep0-out");
961 /* Device either stalls (rc < 0) or reports success */
966 /*-------------------------------------------------------------------------*/
968 /* All the following routines run in process context */
971 /* Use this for bulk or interrupt transfers, not ep0 */
972 static void start_transfer(struct fsg_dev
*fsg
, struct usb_ep
*ep
,
973 struct usb_request
*req
, int *pbusy
,
974 enum fsg_buffer_state
*state
)
978 if (ep
== fsg
->bulk_in
)
979 dump_msg(fsg
, "bulk-in", req
->buf
, req
->length
);
980 else if (ep
== fsg
->intr_in
)
981 dump_msg(fsg
, "intr-in", req
->buf
, req
->length
);
983 spin_lock_irq(&fsg
->lock
);
985 *state
= BUF_STATE_BUSY
;
986 spin_unlock_irq(&fsg
->lock
);
987 rc
= usb_ep_queue(ep
, req
, GFP_KERNEL
);
990 *state
= BUF_STATE_EMPTY
;
992 /* We can't do much more than wait for a reset */
994 /* Note: currently the net2280 driver fails zero-length
995 * submissions if DMA is enabled. */
996 if (rc
!= -ESHUTDOWN
&& !(rc
== -EOPNOTSUPP
&&
998 WARNING(fsg
, "error in submission: %s --> %d\n",
1004 static int sleep_thread(struct fsg_dev
*fsg
)
1008 /* Wait until a signal arrives or we are woken up */
1011 set_current_state(TASK_INTERRUPTIBLE
);
1012 if (signal_pending(current
)) {
1016 if (fsg
->thread_wakeup_needed
)
1020 __set_current_state(TASK_RUNNING
);
1021 fsg
->thread_wakeup_needed
= 0;
1026 /*-------------------------------------------------------------------------*/
1028 static int do_read(struct fsg_dev
*fsg
)
1030 struct fsg_lun
*curlun
= fsg
->curlun
;
1032 struct fsg_buffhd
*bh
;
1035 loff_t file_offset
, file_offset_tmp
;
1036 unsigned int amount
;
1037 unsigned int partial_page
;
1040 /* Get the starting Logical Block Address and check that it's
1042 if (fsg
->cmnd
[0] == SC_READ_6
)
1043 lba
= get_unaligned_be24(&fsg
->cmnd
[1]);
1045 lba
= get_unaligned_be32(&fsg
->cmnd
[2]);
1047 /* We allow DPO (Disable Page Out = don't save data in the
1048 * cache) and FUA (Force Unit Access = don't read from the
1049 * cache), but we don't implement them. */
1050 if ((fsg
->cmnd
[1] & ~0x18) != 0) {
1051 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1055 if (lba
>= curlun
->num_sectors
) {
1056 curlun
->sense_data
= SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE
;
1059 file_offset
= ((loff_t
) lba
) << 9;
1061 /* Carry out the file reads */
1062 amount_left
= fsg
->data_size_from_cmnd
;
1063 if (unlikely(amount_left
== 0))
1064 return -EIO
; // No default reply
1068 /* Figure out how much we need to read:
1069 * Try to read the remaining amount.
1070 * But don't read more than the buffer size.
1071 * And don't try to read past the end of the file.
1072 * Finally, if we're not at a page boundary, don't read past
1074 * If this means reading 0 then we were asked to read past
1075 * the end of file. */
1076 amount
= min((unsigned int) amount_left
, mod_data
.buflen
);
1077 amount
= min((loff_t
) amount
,
1078 curlun
->file_length
- file_offset
);
1079 partial_page
= file_offset
& (PAGE_CACHE_SIZE
- 1);
1080 if (partial_page
> 0)
1081 amount
= min(amount
, (unsigned int) PAGE_CACHE_SIZE
-
1084 /* Wait for the next buffer to become available */
1085 bh
= fsg
->next_buffhd_to_fill
;
1086 while (bh
->state
!= BUF_STATE_EMPTY
) {
1087 rc
= sleep_thread(fsg
);
1092 /* If we were asked to read past the end of file,
1093 * end with an empty buffer. */
1095 curlun
->sense_data
=
1096 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE
;
1097 curlun
->sense_data_info
= file_offset
>> 9;
1098 curlun
->info_valid
= 1;
1099 bh
->inreq
->length
= 0;
1100 bh
->state
= BUF_STATE_FULL
;
1104 /* Perform the read */
1105 file_offset_tmp
= file_offset
;
1106 nread
= vfs_read(curlun
->filp
,
1107 (char __user
*) bh
->buf
,
1108 amount
, &file_offset_tmp
);
1109 VLDBG(curlun
, "file read %u @ %llu -> %d\n", amount
,
1110 (unsigned long long) file_offset
,
1112 if (signal_pending(current
))
1116 LDBG(curlun
, "error in file read: %d\n",
1119 } else if (nread
< amount
) {
1120 LDBG(curlun
, "partial file read: %d/%u\n",
1121 (int) nread
, amount
);
1122 nread
-= (nread
& 511); // Round down to a block
1124 file_offset
+= nread
;
1125 amount_left
-= nread
;
1126 fsg
->residue
-= nread
;
1127 bh
->inreq
->length
= nread
;
1128 bh
->state
= BUF_STATE_FULL
;
1130 /* If an error occurred, report it and its position */
1131 if (nread
< amount
) {
1132 curlun
->sense_data
= SS_UNRECOVERED_READ_ERROR
;
1133 curlun
->sense_data_info
= file_offset
>> 9;
1134 curlun
->info_valid
= 1;
1138 if (amount_left
== 0)
1139 break; // No more left to read
1141 /* Send this buffer and go read some more */
1142 bh
->inreq
->zero
= 0;
1143 start_transfer(fsg
, fsg
->bulk_in
, bh
->inreq
,
1144 &bh
->inreq_busy
, &bh
->state
);
1145 fsg
->next_buffhd_to_fill
= bh
->next
;
1148 return -EIO
; // No default reply
1152 /*-------------------------------------------------------------------------*/
1154 static int do_write(struct fsg_dev
*fsg
)
1156 struct fsg_lun
*curlun
= fsg
->curlun
;
1158 struct fsg_buffhd
*bh
;
1160 u32 amount_left_to_req
, amount_left_to_write
;
1161 loff_t usb_offset
, file_offset
, file_offset_tmp
;
1162 unsigned int amount
;
1163 unsigned int partial_page
;
1168 curlun
->sense_data
= SS_WRITE_PROTECTED
;
1171 spin_lock(&curlun
->filp
->f_lock
);
1172 curlun
->filp
->f_flags
&= ~O_SYNC
; // Default is not to wait
1173 spin_unlock(&curlun
->filp
->f_lock
);
1175 /* Get the starting Logical Block Address and check that it's
1177 if (fsg
->cmnd
[0] == SC_WRITE_6
)
1178 lba
= get_unaligned_be24(&fsg
->cmnd
[1]);
1180 lba
= get_unaligned_be32(&fsg
->cmnd
[2]);
1182 /* We allow DPO (Disable Page Out = don't save data in the
1183 * cache) and FUA (Force Unit Access = write directly to the
1184 * medium). We don't implement DPO; we implement FUA by
1185 * performing synchronous output. */
1186 if ((fsg
->cmnd
[1] & ~0x18) != 0) {
1187 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1191 if (!curlun
->nofua
&& (fsg
->cmnd
[1] & 0x08)) {
1192 spin_lock(&curlun
->filp
->f_lock
);
1193 curlun
->filp
->f_flags
|= O_DSYNC
;
1194 spin_unlock(&curlun
->filp
->f_lock
);
1197 if (lba
>= curlun
->num_sectors
) {
1198 curlun
->sense_data
= SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE
;
1202 /* Carry out the file writes */
1204 file_offset
= usb_offset
= ((loff_t
) lba
) << 9;
1205 amount_left_to_req
= amount_left_to_write
= fsg
->data_size_from_cmnd
;
1207 while (amount_left_to_write
> 0) {
1209 /* Queue a request for more data from the host */
1210 bh
= fsg
->next_buffhd_to_fill
;
1211 if (bh
->state
== BUF_STATE_EMPTY
&& get_some_more
) {
1213 /* Figure out how much we want to get:
1214 * Try to get the remaining amount.
1215 * But don't get more than the buffer size.
1216 * And don't try to go past the end of the file.
1217 * If we're not at a page boundary,
1218 * don't go past the next page.
1219 * If this means getting 0, then we were asked
1220 * to write past the end of file.
1221 * Finally, round down to a block boundary. */
1222 amount
= min(amount_left_to_req
, mod_data
.buflen
);
1223 amount
= min((loff_t
) amount
, curlun
->file_length
-
1225 partial_page
= usb_offset
& (PAGE_CACHE_SIZE
- 1);
1226 if (partial_page
> 0)
1227 amount
= min(amount
,
1228 (unsigned int) PAGE_CACHE_SIZE
- partial_page
);
1232 curlun
->sense_data
=
1233 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE
;
1234 curlun
->sense_data_info
= usb_offset
>> 9;
1235 curlun
->info_valid
= 1;
1238 amount
-= (amount
& 511);
1241 /* Why were we were asked to transfer a
1247 /* Get the next buffer */
1248 usb_offset
+= amount
;
1249 fsg
->usb_amount_left
-= amount
;
1250 amount_left_to_req
-= amount
;
1251 if (amount_left_to_req
== 0)
1254 /* amount is always divisible by 512, hence by
1255 * the bulk-out maxpacket size */
1256 bh
->outreq
->length
= bh
->bulk_out_intended_length
=
1258 bh
->outreq
->short_not_ok
= 1;
1259 start_transfer(fsg
, fsg
->bulk_out
, bh
->outreq
,
1260 &bh
->outreq_busy
, &bh
->state
);
1261 fsg
->next_buffhd_to_fill
= bh
->next
;
1265 /* Write the received data to the backing file */
1266 bh
= fsg
->next_buffhd_to_drain
;
1267 if (bh
->state
== BUF_STATE_EMPTY
&& !get_some_more
)
1268 break; // We stopped early
1269 if (bh
->state
== BUF_STATE_FULL
) {
1271 fsg
->next_buffhd_to_drain
= bh
->next
;
1272 bh
->state
= BUF_STATE_EMPTY
;
1274 /* Did something go wrong with the transfer? */
1275 if (bh
->outreq
->status
!= 0) {
1276 curlun
->sense_data
= SS_COMMUNICATION_FAILURE
;
1277 curlun
->sense_data_info
= file_offset
>> 9;
1278 curlun
->info_valid
= 1;
1282 amount
= bh
->outreq
->actual
;
1283 if (curlun
->file_length
- file_offset
< amount
) {
1285 "write %u @ %llu beyond end %llu\n",
1286 amount
, (unsigned long long) file_offset
,
1287 (unsigned long long) curlun
->file_length
);
1288 amount
= curlun
->file_length
- file_offset
;
1291 /* Perform the write */
1292 file_offset_tmp
= file_offset
;
1293 nwritten
= vfs_write(curlun
->filp
,
1294 (char __user
*) bh
->buf
,
1295 amount
, &file_offset_tmp
);
1296 VLDBG(curlun
, "file write %u @ %llu -> %d\n", amount
,
1297 (unsigned long long) file_offset
,
1299 if (signal_pending(current
))
1300 return -EINTR
; // Interrupted!
1303 LDBG(curlun
, "error in file write: %d\n",
1306 } else if (nwritten
< amount
) {
1307 LDBG(curlun
, "partial file write: %d/%u\n",
1308 (int) nwritten
, amount
);
1309 nwritten
-= (nwritten
& 511);
1310 // Round down to a block
1312 file_offset
+= nwritten
;
1313 amount_left_to_write
-= nwritten
;
1314 fsg
->residue
-= nwritten
;
1316 /* If an error occurred, report it and its position */
1317 if (nwritten
< amount
) {
1318 curlun
->sense_data
= SS_WRITE_ERROR
;
1319 curlun
->sense_data_info
= file_offset
>> 9;
1320 curlun
->info_valid
= 1;
1324 /* Did the host decide to stop early? */
1325 if (bh
->outreq
->actual
!= bh
->outreq
->length
) {
1326 fsg
->short_packet_received
= 1;
1332 /* Wait for something to happen */
1333 rc
= sleep_thread(fsg
);
1338 return -EIO
; // No default reply
1342 /*-------------------------------------------------------------------------*/
1344 static int do_synchronize_cache(struct fsg_dev
*fsg
)
1346 struct fsg_lun
*curlun
= fsg
->curlun
;
1349 /* We ignore the requested LBA and write out all file's
1350 * dirty data buffers. */
1351 rc
= fsg_lun_fsync_sub(curlun
);
1353 curlun
->sense_data
= SS_WRITE_ERROR
;
1358 /*-------------------------------------------------------------------------*/
1360 static void invalidate_sub(struct fsg_lun
*curlun
)
1362 struct file
*filp
= curlun
->filp
;
1363 struct inode
*inode
= filp
->f_path
.dentry
->d_inode
;
1366 rc
= invalidate_mapping_pages(inode
->i_mapping
, 0, -1);
1367 VLDBG(curlun
, "invalidate_mapping_pages -> %ld\n", rc
);
1370 static int do_verify(struct fsg_dev
*fsg
)
1372 struct fsg_lun
*curlun
= fsg
->curlun
;
1374 u32 verification_length
;
1375 struct fsg_buffhd
*bh
= fsg
->next_buffhd_to_fill
;
1376 loff_t file_offset
, file_offset_tmp
;
1378 unsigned int amount
;
1381 /* Get the starting Logical Block Address and check that it's
1383 lba
= get_unaligned_be32(&fsg
->cmnd
[2]);
1384 if (lba
>= curlun
->num_sectors
) {
1385 curlun
->sense_data
= SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE
;
1389 /* We allow DPO (Disable Page Out = don't save data in the
1390 * cache) but we don't implement it. */
1391 if ((fsg
->cmnd
[1] & ~0x10) != 0) {
1392 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1396 verification_length
= get_unaligned_be16(&fsg
->cmnd
[7]);
1397 if (unlikely(verification_length
== 0))
1398 return -EIO
; // No default reply
1400 /* Prepare to carry out the file verify */
1401 amount_left
= verification_length
<< 9;
1402 file_offset
= ((loff_t
) lba
) << 9;
1404 /* Write out all the dirty buffers before invalidating them */
1405 fsg_lun_fsync_sub(curlun
);
1406 if (signal_pending(current
))
1409 invalidate_sub(curlun
);
1410 if (signal_pending(current
))
1413 /* Just try to read the requested blocks */
1414 while (amount_left
> 0) {
1416 /* Figure out how much we need to read:
1417 * Try to read the remaining amount, but not more than
1419 * And don't try to read past the end of the file.
1420 * If this means reading 0 then we were asked to read
1421 * past the end of file. */
1422 amount
= min((unsigned int) amount_left
, mod_data
.buflen
);
1423 amount
= min((loff_t
) amount
,
1424 curlun
->file_length
- file_offset
);
1426 curlun
->sense_data
=
1427 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE
;
1428 curlun
->sense_data_info
= file_offset
>> 9;
1429 curlun
->info_valid
= 1;
1433 /* Perform the read */
1434 file_offset_tmp
= file_offset
;
1435 nread
= vfs_read(curlun
->filp
,
1436 (char __user
*) bh
->buf
,
1437 amount
, &file_offset_tmp
);
1438 VLDBG(curlun
, "file read %u @ %llu -> %d\n", amount
,
1439 (unsigned long long) file_offset
,
1441 if (signal_pending(current
))
1445 LDBG(curlun
, "error in file verify: %d\n",
1448 } else if (nread
< amount
) {
1449 LDBG(curlun
, "partial file verify: %d/%u\n",
1450 (int) nread
, amount
);
1451 nread
-= (nread
& 511); // Round down to a sector
1454 curlun
->sense_data
= SS_UNRECOVERED_READ_ERROR
;
1455 curlun
->sense_data_info
= file_offset
>> 9;
1456 curlun
->info_valid
= 1;
1459 file_offset
+= nread
;
1460 amount_left
-= nread
;
1466 /*-------------------------------------------------------------------------*/
1468 static int do_inquiry(struct fsg_dev
*fsg
, struct fsg_buffhd
*bh
)
1470 u8
*buf
= (u8
*) bh
->buf
;
1472 static char vendor_id
[] = "Linux ";
1473 static char product_disk_id
[] = "File-Stor Gadget";
1474 static char product_cdrom_id
[] = "File-CD Gadget ";
1476 if (!fsg
->curlun
) { // Unsupported LUNs are okay
1477 fsg
->bad_lun_okay
= 1;
1479 buf
[0] = 0x7f; // Unsupported, no device-type
1480 buf
[4] = 31; // Additional length
1485 buf
[0] = (mod_data
.cdrom
? TYPE_CDROM
: TYPE_DISK
);
1486 if (mod_data
.removable
)
1488 buf
[2] = 2; // ANSI SCSI level 2
1489 buf
[3] = 2; // SCSI-2 INQUIRY data format
1490 buf
[4] = 31; // Additional length
1491 // No special options
1492 sprintf(buf
+ 8, "%-8s%-16s%04x", vendor_id
,
1493 (mod_data
.cdrom
? product_cdrom_id
:
1500 static int do_request_sense(struct fsg_dev
*fsg
, struct fsg_buffhd
*bh
)
1502 struct fsg_lun
*curlun
= fsg
->curlun
;
1503 u8
*buf
= (u8
*) bh
->buf
;
1508 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
1510 * If a REQUEST SENSE command is received from an initiator
1511 * with a pending unit attention condition (before the target
1512 * generates the contingent allegiance condition), then the
1513 * target shall either:
1514 * a) report any pending sense data and preserve the unit
1515 * attention condition on the logical unit, or,
1516 * b) report the unit attention condition, may discard any
1517 * pending sense data, and clear the unit attention
1518 * condition on the logical unit for that initiator.
1520 * FSG normally uses option a); enable this code to use option b).
1523 if (!curlun
) { // Unsupported LUNs are okay
1524 fsg
->bad_lun_okay
= 1;
1525 sd
= SS_LOGICAL_UNIT_NOT_SUPPORTED
;
1529 sd
= curlun
->sense_data
;
1530 sdinfo
= curlun
->sense_data_info
;
1531 valid
= curlun
->info_valid
<< 7;
1532 curlun
->sense_data
= SS_NO_SENSE
;
1533 curlun
->sense_data_info
= 0;
1534 curlun
->info_valid
= 0;
1538 buf
[0] = valid
| 0x70; // Valid, current error
1540 put_unaligned_be32(sdinfo
, &buf
[3]); /* Sense information */
1541 buf
[7] = 18 - 8; // Additional sense length
1548 static int do_read_capacity(struct fsg_dev
*fsg
, struct fsg_buffhd
*bh
)
1550 struct fsg_lun
*curlun
= fsg
->curlun
;
1551 u32 lba
= get_unaligned_be32(&fsg
->cmnd
[2]);
1552 int pmi
= fsg
->cmnd
[8];
1553 u8
*buf
= (u8
*) bh
->buf
;
1555 /* Check the PMI and LBA fields */
1556 if (pmi
> 1 || (pmi
== 0 && lba
!= 0)) {
1557 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1561 put_unaligned_be32(curlun
->num_sectors
- 1, &buf
[0]);
1562 /* Max logical block */
1563 put_unaligned_be32(512, &buf
[4]); /* Block length */
1568 static int do_read_header(struct fsg_dev
*fsg
, struct fsg_buffhd
*bh
)
1570 struct fsg_lun
*curlun
= fsg
->curlun
;
1571 int msf
= fsg
->cmnd
[1] & 0x02;
1572 u32 lba
= get_unaligned_be32(&fsg
->cmnd
[2]);
1573 u8
*buf
= (u8
*) bh
->buf
;
1575 if ((fsg
->cmnd
[1] & ~0x02) != 0) { /* Mask away MSF */
1576 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1579 if (lba
>= curlun
->num_sectors
) {
1580 curlun
->sense_data
= SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE
;
1585 buf
[0] = 0x01; /* 2048 bytes of user data, rest is EC */
1586 store_cdrom_address(&buf
[4], msf
, lba
);
1591 static int do_read_toc(struct fsg_dev
*fsg
, struct fsg_buffhd
*bh
)
1593 struct fsg_lun
*curlun
= fsg
->curlun
;
1594 int msf
= fsg
->cmnd
[1] & 0x02;
1595 int start_track
= fsg
->cmnd
[6];
1596 u8
*buf
= (u8
*) bh
->buf
;
1598 if ((fsg
->cmnd
[1] & ~0x02) != 0 || /* Mask away MSF */
1600 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1605 buf
[1] = (20-2); /* TOC data length */
1606 buf
[2] = 1; /* First track number */
1607 buf
[3] = 1; /* Last track number */
1608 buf
[5] = 0x16; /* Data track, copying allowed */
1609 buf
[6] = 0x01; /* Only track is number 1 */
1610 store_cdrom_address(&buf
[8], msf
, 0);
1612 buf
[13] = 0x16; /* Lead-out track is data */
1613 buf
[14] = 0xAA; /* Lead-out track number */
1614 store_cdrom_address(&buf
[16], msf
, curlun
->num_sectors
);
1619 static int do_mode_sense(struct fsg_dev
*fsg
, struct fsg_buffhd
*bh
)
1621 struct fsg_lun
*curlun
= fsg
->curlun
;
1622 int mscmnd
= fsg
->cmnd
[0];
1623 u8
*buf
= (u8
*) bh
->buf
;
1626 int changeable_values
, all_pages
;
1630 if ((fsg
->cmnd
[1] & ~0x08) != 0) { // Mask away DBD
1631 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1634 pc
= fsg
->cmnd
[2] >> 6;
1635 page_code
= fsg
->cmnd
[2] & 0x3f;
1637 curlun
->sense_data
= SS_SAVING_PARAMETERS_NOT_SUPPORTED
;
1640 changeable_values
= (pc
== 1);
1641 all_pages
= (page_code
== 0x3f);
1643 /* Write the mode parameter header. Fixed values are: default
1644 * medium type, no cache control (DPOFUA), and no block descriptors.
1645 * The only variable value is the WriteProtect bit. We will fill in
1646 * the mode data length later. */
1648 if (mscmnd
== SC_MODE_SENSE_6
) {
1649 buf
[2] = (curlun
->ro
? 0x80 : 0x00); // WP, DPOFUA
1652 } else { // SC_MODE_SENSE_10
1653 buf
[3] = (curlun
->ro
? 0x80 : 0x00); // WP, DPOFUA
1655 limit
= 65535; // Should really be mod_data.buflen
1658 /* No block descriptors */
1660 /* The mode pages, in numerical order. The only page we support
1661 * is the Caching page. */
1662 if (page_code
== 0x08 || all_pages
) {
1664 buf
[0] = 0x08; // Page code
1665 buf
[1] = 10; // Page length
1666 memset(buf
+2, 0, 10); // None of the fields are changeable
1668 if (!changeable_values
) {
1669 buf
[2] = 0x04; // Write cache enable,
1670 // Read cache not disabled
1671 // No cache retention priorities
1672 put_unaligned_be16(0xffff, &buf
[4]);
1673 /* Don't disable prefetch */
1674 /* Minimum prefetch = 0 */
1675 put_unaligned_be16(0xffff, &buf
[8]);
1676 /* Maximum prefetch */
1677 put_unaligned_be16(0xffff, &buf
[10]);
1678 /* Maximum prefetch ceiling */
1683 /* Check that a valid page was requested and the mode data length
1684 * isn't too long. */
1686 if (!valid_page
|| len
> limit
) {
1687 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1691 /* Store the mode data length */
1692 if (mscmnd
== SC_MODE_SENSE_6
)
1695 put_unaligned_be16(len
- 2, buf0
);
1700 static int do_start_stop(struct fsg_dev
*fsg
)
1702 struct fsg_lun
*curlun
= fsg
->curlun
;
1705 if (!mod_data
.removable
) {
1706 curlun
->sense_data
= SS_INVALID_COMMAND
;
1710 // int immed = fsg->cmnd[1] & 0x01;
1711 loej
= fsg
->cmnd
[4] & 0x02;
1712 start
= fsg
->cmnd
[4] & 0x01;
1714 #ifdef CONFIG_USB_FILE_STORAGE_TEST
1715 if ((fsg
->cmnd
[1] & ~0x01) != 0 || // Mask away Immed
1716 (fsg
->cmnd
[4] & ~0x03) != 0) { // Mask LoEj, Start
1717 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1723 /* Are we allowed to unload the media? */
1724 if (curlun
->prevent_medium_removal
) {
1725 LDBG(curlun
, "unload attempt prevented\n");
1726 curlun
->sense_data
= SS_MEDIUM_REMOVAL_PREVENTED
;
1729 if (loej
) { // Simulate an unload/eject
1730 up_read(&fsg
->filesem
);
1731 down_write(&fsg
->filesem
);
1732 fsg_lun_close(curlun
);
1733 up_write(&fsg
->filesem
);
1734 down_read(&fsg
->filesem
);
1738 /* Our emulation doesn't support mounting; the medium is
1739 * available for use as soon as it is loaded. */
1740 if (!fsg_lun_is_open(curlun
)) {
1741 curlun
->sense_data
= SS_MEDIUM_NOT_PRESENT
;
1750 static int do_prevent_allow(struct fsg_dev
*fsg
)
1752 struct fsg_lun
*curlun
= fsg
->curlun
;
1755 if (!mod_data
.removable
) {
1756 curlun
->sense_data
= SS_INVALID_COMMAND
;
1760 prevent
= fsg
->cmnd
[4] & 0x01;
1761 if ((fsg
->cmnd
[4] & ~0x01) != 0) { // Mask away Prevent
1762 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1766 if (curlun
->prevent_medium_removal
&& !prevent
)
1767 fsg_lun_fsync_sub(curlun
);
1768 curlun
->prevent_medium_removal
= prevent
;
1773 static int do_read_format_capacities(struct fsg_dev
*fsg
,
1774 struct fsg_buffhd
*bh
)
1776 struct fsg_lun
*curlun
= fsg
->curlun
;
1777 u8
*buf
= (u8
*) bh
->buf
;
1779 buf
[0] = buf
[1] = buf
[2] = 0;
1780 buf
[3] = 8; // Only the Current/Maximum Capacity Descriptor
1783 put_unaligned_be32(curlun
->num_sectors
, &buf
[0]);
1784 /* Number of blocks */
1785 put_unaligned_be32(512, &buf
[4]); /* Block length */
1786 buf
[4] = 0x02; /* Current capacity */
1791 static int do_mode_select(struct fsg_dev
*fsg
, struct fsg_buffhd
*bh
)
1793 struct fsg_lun
*curlun
= fsg
->curlun
;
1795 /* We don't support MODE SELECT */
1796 curlun
->sense_data
= SS_INVALID_COMMAND
;
1801 /*-------------------------------------------------------------------------*/
1803 static int halt_bulk_in_endpoint(struct fsg_dev
*fsg
)
1807 rc
= fsg_set_halt(fsg
, fsg
->bulk_in
);
1809 VDBG(fsg
, "delayed bulk-in endpoint halt\n");
1811 if (rc
!= -EAGAIN
) {
1812 WARNING(fsg
, "usb_ep_set_halt -> %d\n", rc
);
1817 /* Wait for a short time and then try again */
1818 if (msleep_interruptible(100) != 0)
1820 rc
= usb_ep_set_halt(fsg
->bulk_in
);
1825 static int wedge_bulk_in_endpoint(struct fsg_dev
*fsg
)
1829 DBG(fsg
, "bulk-in set wedge\n");
1830 rc
= usb_ep_set_wedge(fsg
->bulk_in
);
1832 VDBG(fsg
, "delayed bulk-in endpoint wedge\n");
1834 if (rc
!= -EAGAIN
) {
1835 WARNING(fsg
, "usb_ep_set_wedge -> %d\n", rc
);
1840 /* Wait for a short time and then try again */
1841 if (msleep_interruptible(100) != 0)
1843 rc
= usb_ep_set_wedge(fsg
->bulk_in
);
1848 static int pad_with_zeros(struct fsg_dev
*fsg
)
1850 struct fsg_buffhd
*bh
= fsg
->next_buffhd_to_fill
;
1851 u32 nkeep
= bh
->inreq
->length
;
1855 bh
->state
= BUF_STATE_EMPTY
; // For the first iteration
1856 fsg
->usb_amount_left
= nkeep
+ fsg
->residue
;
1857 while (fsg
->usb_amount_left
> 0) {
1859 /* Wait for the next buffer to be free */
1860 while (bh
->state
!= BUF_STATE_EMPTY
) {
1861 rc
= sleep_thread(fsg
);
1866 nsend
= min(fsg
->usb_amount_left
, (u32
) mod_data
.buflen
);
1867 memset(bh
->buf
+ nkeep
, 0, nsend
- nkeep
);
1868 bh
->inreq
->length
= nsend
;
1869 bh
->inreq
->zero
= 0;
1870 start_transfer(fsg
, fsg
->bulk_in
, bh
->inreq
,
1871 &bh
->inreq_busy
, &bh
->state
);
1872 bh
= fsg
->next_buffhd_to_fill
= bh
->next
;
1873 fsg
->usb_amount_left
-= nsend
;
1879 static int throw_away_data(struct fsg_dev
*fsg
)
1881 struct fsg_buffhd
*bh
;
1885 while ((bh
= fsg
->next_buffhd_to_drain
)->state
!= BUF_STATE_EMPTY
||
1886 fsg
->usb_amount_left
> 0) {
1888 /* Throw away the data in a filled buffer */
1889 if (bh
->state
== BUF_STATE_FULL
) {
1891 bh
->state
= BUF_STATE_EMPTY
;
1892 fsg
->next_buffhd_to_drain
= bh
->next
;
1894 /* A short packet or an error ends everything */
1895 if (bh
->outreq
->actual
!= bh
->outreq
->length
||
1896 bh
->outreq
->status
!= 0) {
1897 raise_exception(fsg
, FSG_STATE_ABORT_BULK_OUT
);
1903 /* Try to submit another request if we need one */
1904 bh
= fsg
->next_buffhd_to_fill
;
1905 if (bh
->state
== BUF_STATE_EMPTY
&& fsg
->usb_amount_left
> 0) {
1906 amount
= min(fsg
->usb_amount_left
,
1907 (u32
) mod_data
.buflen
);
1909 /* amount is always divisible by 512, hence by
1910 * the bulk-out maxpacket size */
1911 bh
->outreq
->length
= bh
->bulk_out_intended_length
=
1913 bh
->outreq
->short_not_ok
= 1;
1914 start_transfer(fsg
, fsg
->bulk_out
, bh
->outreq
,
1915 &bh
->outreq_busy
, &bh
->state
);
1916 fsg
->next_buffhd_to_fill
= bh
->next
;
1917 fsg
->usb_amount_left
-= amount
;
1921 /* Otherwise wait for something to happen */
1922 rc
= sleep_thread(fsg
);
1930 static int finish_reply(struct fsg_dev
*fsg
)
1932 struct fsg_buffhd
*bh
= fsg
->next_buffhd_to_fill
;
1935 switch (fsg
->data_dir
) {
1937 break; // Nothing to send
1939 /* If we don't know whether the host wants to read or write,
1940 * this must be CB or CBI with an unknown command. We mustn't
1941 * try to send or receive any data. So stall both bulk pipes
1942 * if we can and wait for a reset. */
1943 case DATA_DIR_UNKNOWN
:
1944 if (mod_data
.can_stall
) {
1945 fsg_set_halt(fsg
, fsg
->bulk_out
);
1946 rc
= halt_bulk_in_endpoint(fsg
);
1950 /* All but the last buffer of data must have already been sent */
1951 case DATA_DIR_TO_HOST
:
1952 if (fsg
->data_size
== 0)
1953 ; // Nothing to send
1955 /* If there's no residue, simply send the last buffer */
1956 else if (fsg
->residue
== 0) {
1957 bh
->inreq
->zero
= 0;
1958 start_transfer(fsg
, fsg
->bulk_in
, bh
->inreq
,
1959 &bh
->inreq_busy
, &bh
->state
);
1960 fsg
->next_buffhd_to_fill
= bh
->next
;
1963 /* There is a residue. For CB and CBI, simply mark the end
1964 * of the data with a short packet. However, if we are
1965 * allowed to stall, there was no data at all (residue ==
1966 * data_size), and the command failed (invalid LUN or
1967 * sense data is set), then halt the bulk-in endpoint
1969 else if (!transport_is_bbb()) {
1970 if (mod_data
.can_stall
&&
1971 fsg
->residue
== fsg
->data_size
&&
1972 (!fsg
->curlun
|| fsg
->curlun
->sense_data
!= SS_NO_SENSE
)) {
1973 bh
->state
= BUF_STATE_EMPTY
;
1974 rc
= halt_bulk_in_endpoint(fsg
);
1976 bh
->inreq
->zero
= 1;
1977 start_transfer(fsg
, fsg
->bulk_in
, bh
->inreq
,
1978 &bh
->inreq_busy
, &bh
->state
);
1979 fsg
->next_buffhd_to_fill
= bh
->next
;
1983 /* For Bulk-only, if we're allowed to stall then send the
1984 * short packet and halt the bulk-in endpoint. If we can't
1985 * stall, pad out the remaining data with 0's. */
1987 if (mod_data
.can_stall
) {
1988 bh
->inreq
->zero
= 1;
1989 start_transfer(fsg
, fsg
->bulk_in
, bh
->inreq
,
1990 &bh
->inreq_busy
, &bh
->state
);
1991 fsg
->next_buffhd_to_fill
= bh
->next
;
1992 rc
= halt_bulk_in_endpoint(fsg
);
1994 rc
= pad_with_zeros(fsg
);
1998 /* We have processed all we want from the data the host has sent.
1999 * There may still be outstanding bulk-out requests. */
2000 case DATA_DIR_FROM_HOST
:
2001 if (fsg
->residue
== 0)
2002 ; // Nothing to receive
2004 /* Did the host stop sending unexpectedly early? */
2005 else if (fsg
->short_packet_received
) {
2006 raise_exception(fsg
, FSG_STATE_ABORT_BULK_OUT
);
2010 /* We haven't processed all the incoming data. Even though
2011 * we may be allowed to stall, doing so would cause a race.
2012 * The controller may already have ACK'ed all the remaining
2013 * bulk-out packets, in which case the host wouldn't see a
2014 * STALL. Not realizing the endpoint was halted, it wouldn't
2015 * clear the halt -- leading to problems later on. */
2017 /* We can't stall. Read in the excess data and throw it
2020 rc
= throw_away_data(fsg
);
2027 static int send_status(struct fsg_dev
*fsg
)
2029 struct fsg_lun
*curlun
= fsg
->curlun
;
2030 struct fsg_buffhd
*bh
;
2032 u8 status
= USB_STATUS_PASS
;
2035 /* Wait for the next buffer to become available */
2036 bh
= fsg
->next_buffhd_to_fill
;
2037 while (bh
->state
!= BUF_STATE_EMPTY
) {
2038 rc
= sleep_thread(fsg
);
2044 sd
= curlun
->sense_data
;
2045 sdinfo
= curlun
->sense_data_info
;
2046 } else if (fsg
->bad_lun_okay
)
2049 sd
= SS_LOGICAL_UNIT_NOT_SUPPORTED
;
2051 if (fsg
->phase_error
) {
2052 DBG(fsg
, "sending phase-error status\n");
2053 status
= USB_STATUS_PHASE_ERROR
;
2054 sd
= SS_INVALID_COMMAND
;
2055 } else if (sd
!= SS_NO_SENSE
) {
2056 DBG(fsg
, "sending command-failure status\n");
2057 status
= USB_STATUS_FAIL
;
2058 VDBG(fsg
, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
2060 SK(sd
), ASC(sd
), ASCQ(sd
), sdinfo
);
2063 if (transport_is_bbb()) {
2064 struct bulk_cs_wrap
*csw
= bh
->buf
;
2066 /* Store and send the Bulk-only CSW */
2067 csw
->Signature
= cpu_to_le32(USB_BULK_CS_SIG
);
2068 csw
->Tag
= fsg
->tag
;
2069 csw
->Residue
= cpu_to_le32(fsg
->residue
);
2070 csw
->Status
= status
;
2072 bh
->inreq
->length
= USB_BULK_CS_WRAP_LEN
;
2073 bh
->inreq
->zero
= 0;
2074 start_transfer(fsg
, fsg
->bulk_in
, bh
->inreq
,
2075 &bh
->inreq_busy
, &bh
->state
);
2077 } else if (mod_data
.transport_type
== USB_PR_CB
) {
2079 /* Control-Bulk transport has no status phase! */
2082 } else { // USB_PR_CBI
2083 struct interrupt_data
*buf
= bh
->buf
;
2085 /* Store and send the Interrupt data. UFI sends the ASC
2086 * and ASCQ bytes. Everything else sends a Type (which
2087 * is always 0) and the status Value. */
2088 if (mod_data
.protocol_type
== USB_SC_UFI
) {
2089 buf
->bType
= ASC(sd
);
2090 buf
->bValue
= ASCQ(sd
);
2093 buf
->bValue
= status
;
2095 fsg
->intreq
->length
= CBI_INTERRUPT_DATA_LEN
;
2097 fsg
->intr_buffhd
= bh
; // Point to the right buffhd
2098 fsg
->intreq
->buf
= bh
->inreq
->buf
;
2099 fsg
->intreq
->context
= bh
;
2100 start_transfer(fsg
, fsg
->intr_in
, fsg
->intreq
,
2101 &fsg
->intreq_busy
, &bh
->state
);
2104 fsg
->next_buffhd_to_fill
= bh
->next
;
2109 /*-------------------------------------------------------------------------*/
2111 /* Check whether the command is properly formed and whether its data size
2112 * and direction agree with the values we already have. */
2113 static int check_command(struct fsg_dev
*fsg
, int cmnd_size
,
2114 enum data_direction data_dir
, unsigned int mask
,
2115 int needs_medium
, const char *name
)
2118 int lun
= fsg
->cmnd
[1] >> 5;
2119 static const char dirletter
[4] = {'u', 'o', 'i', 'n'};
2121 struct fsg_lun
*curlun
;
2123 /* Adjust the expected cmnd_size for protocol encapsulation padding.
2124 * Transparent SCSI doesn't pad. */
2125 if (protocol_is_scsi())
2128 /* There's some disagreement as to whether RBC pads commands or not.
2129 * We'll play it safe and accept either form. */
2130 else if (mod_data
.protocol_type
== USB_SC_RBC
) {
2131 if (fsg
->cmnd_size
== 12)
2134 /* All the other protocols pad to 12 bytes */
2139 if (fsg
->data_dir
!= DATA_DIR_UNKNOWN
)
2140 sprintf(hdlen
, ", H%c=%u", dirletter
[(int) fsg
->data_dir
],
2142 VDBG(fsg
, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n",
2143 name
, cmnd_size
, dirletter
[(int) data_dir
],
2144 fsg
->data_size_from_cmnd
, fsg
->cmnd_size
, hdlen
);
2146 /* We can't reply at all until we know the correct data direction
2148 if (fsg
->data_size_from_cmnd
== 0)
2149 data_dir
= DATA_DIR_NONE
;
2150 if (fsg
->data_dir
== DATA_DIR_UNKNOWN
) { // CB or CBI
2151 fsg
->data_dir
= data_dir
;
2152 fsg
->data_size
= fsg
->data_size_from_cmnd
;
2154 } else { // Bulk-only
2155 if (fsg
->data_size
< fsg
->data_size_from_cmnd
) {
2157 /* Host data size < Device data size is a phase error.
2158 * Carry out the command, but only transfer as much
2159 * as we are allowed. */
2160 fsg
->data_size_from_cmnd
= fsg
->data_size
;
2161 fsg
->phase_error
= 1;
2164 fsg
->residue
= fsg
->usb_amount_left
= fsg
->data_size
;
2166 /* Conflicting data directions is a phase error */
2167 if (fsg
->data_dir
!= data_dir
&& fsg
->data_size_from_cmnd
> 0) {
2168 fsg
->phase_error
= 1;
2172 /* Verify the length of the command itself */
2173 if (cmnd_size
!= fsg
->cmnd_size
) {
2175 if (cmnd_size
<= fsg
->cmnd_size
) {
2176 DBG(fsg
, "%s is buggy! Expected length %d "
2177 "but we got %d\n", name
,
2178 cmnd_size
, fsg
->cmnd_size
);
2179 cmnd_size
= fsg
->cmnd_size
;
2181 fsg
->phase_error
= 1;
2186 /* Check that the LUN values are consistent */
2187 if (transport_is_bbb()) {
2188 if (fsg
->lun
!= lun
)
2189 DBG(fsg
, "using LUN %d from CBW, "
2190 "not LUN %d from CDB\n",
2193 fsg
->lun
= lun
; // Use LUN from the command
2196 if (fsg
->lun
>= 0 && fsg
->lun
< fsg
->nluns
) {
2197 fsg
->curlun
= curlun
= &fsg
->luns
[fsg
->lun
];
2198 if (fsg
->cmnd
[0] != SC_REQUEST_SENSE
) {
2199 curlun
->sense_data
= SS_NO_SENSE
;
2200 curlun
->sense_data_info
= 0;
2201 curlun
->info_valid
= 0;
2204 fsg
->curlun
= curlun
= NULL
;
2205 fsg
->bad_lun_okay
= 0;
2207 /* INQUIRY and REQUEST SENSE commands are explicitly allowed
2208 * to use unsupported LUNs; all others may not. */
2209 if (fsg
->cmnd
[0] != SC_INQUIRY
&&
2210 fsg
->cmnd
[0] != SC_REQUEST_SENSE
) {
2211 DBG(fsg
, "unsupported LUN %d\n", fsg
->lun
);
2216 /* If a unit attention condition exists, only INQUIRY and
2217 * REQUEST SENSE commands are allowed; anything else must fail. */
2218 if (curlun
&& curlun
->unit_attention_data
!= SS_NO_SENSE
&&
2219 fsg
->cmnd
[0] != SC_INQUIRY
&&
2220 fsg
->cmnd
[0] != SC_REQUEST_SENSE
) {
2221 curlun
->sense_data
= curlun
->unit_attention_data
;
2222 curlun
->unit_attention_data
= SS_NO_SENSE
;
2226 /* Check that only command bytes listed in the mask are non-zero */
2227 fsg
->cmnd
[1] &= 0x1f; // Mask away the LUN
2228 for (i
= 1; i
< cmnd_size
; ++i
) {
2229 if (fsg
->cmnd
[i
] && !(mask
& (1 << i
))) {
2231 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
2236 /* If the medium isn't mounted and the command needs to access
2237 * it, return an error. */
2238 if (curlun
&& !fsg_lun_is_open(curlun
) && needs_medium
) {
2239 curlun
->sense_data
= SS_MEDIUM_NOT_PRESENT
;
/*
 * do_scsi_command -- parse the SCSI CDB in fsg->cmnd and dispatch to the
 * per-command handler (do_read, do_write, do_inquiry, ...).
 *
 * For each opcode: set fsg->data_size_from_cmnd (expected transfer length
 * derived from the CDB), validate the CDB via check_command(), then invoke
 * the handler.  On return, if the direction is TO_HOST, the reply buffer
 * is marked full so finish_reply() can send it.
 *
 * NOTE(review): this extract has gaps in the original numbering (case
 * labels for INQUIRY/READ/WRITE, `break`s, returns and closing braces are
 * missing); the tokens below are kept verbatim.
 */
2247 static int do_scsi_command(struct fsg_dev *fsg)
2249 struct fsg_buffhd *bh;
2251 int reply = -EINVAL;
/* Static scratch for the "Unknown x%02x" diagnostic name below --
 * safe only because all commands run on the single gadget thread. */
2253 static char unknown[16];
2257 /* Wait for the next buffer to become available for data or status */
2258 bh = fsg->next_buffhd_to_drain = fsg->next_buffhd_to_fill;
2259 while (bh->state != BUF_STATE_EMPTY) {
2260 rc = sleep_thread(fsg);
2264 fsg->phase_error = 0;
2265 fsg->short_packet_received = 0;
2267 down_read(&fsg->filesem); // We're using the backing file
2268 switch (fsg->cmnd[0]) {
/* INQUIRY: allocation length in CDB byte 4 */
2271 fsg->data_size_from_cmnd = fsg->cmnd[4];
2272 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2275 reply = do_inquiry(fsg, bh);
2278 case SC_MODE_SELECT_6:
2279 fsg->data_size_from_cmnd = fsg->cmnd[4];
2280 if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
2282 "MODE SELECT(6)")) == 0)
2283 reply = do_mode_select(fsg, bh);
2286 case SC_MODE_SELECT_10:
/* 10-byte CDBs carry a big-endian 16-bit length at offset 7 */
2287 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2288 if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
2290 "MODE SELECT(10)")) == 0)
2291 reply = do_mode_select(fsg, bh);
2294 case SC_MODE_SENSE_6:
2295 fsg->data_size_from_cmnd = fsg->cmnd[4];
2296 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2297 (1<<1) | (1<<2) | (1<<4), 0,
2298 "MODE SENSE(6)")) == 0)
2299 reply = do_mode_sense(fsg, bh);
2302 case SC_MODE_SENSE_10:
2303 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2304 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2305 (1<<1) | (1<<2) | (3<<7), 0,
2306 "MODE SENSE(10)")) == 0)
2307 reply = do_mode_sense(fsg, bh);
2310 case SC_PREVENT_ALLOW_MEDIUM_REMOVAL:
2311 fsg->data_size_from_cmnd = 0;
2312 if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
2314 "PREVENT-ALLOW MEDIUM REMOVAL")) == 0)
2315 reply = do_prevent_allow(fsg);
/* READ(6): length 0 means 256 blocks; << 9 converts blocks to bytes */
2320 fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
2321 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2324 reply = do_read(fsg);
2328 fsg->data_size_from_cmnd =
2329 get_unaligned_be16(&fsg->cmnd[7]) << 9;
2330 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2331 (1<<1) | (0xf<<2) | (3<<7), 1,
2333 reply = do_read(fsg);
2337 fsg->data_size_from_cmnd =
2338 get_unaligned_be32(&fsg->cmnd[6]) << 9;
2339 if ((reply = check_command(fsg, 12, DATA_DIR_TO_HOST,
2340 (1<<1) | (0xf<<2) | (0xf<<6), 1,
2342 reply = do_read(fsg);
2345 case SC_READ_CAPACITY:
2346 fsg->data_size_from_cmnd = 8;
2347 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2348 (0xf<<2) | (1<<8), 1,
2349 "READ CAPACITY")) == 0)
2350 reply = do_read_capacity(fsg, bh);
2353 case SC_READ_HEADER:
/* CD-ROM-only commands fall through to the unknown-command path
 * when the gadget is not emulating a CD-ROM */
2354 if (!mod_data.cdrom)
2356 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2357 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2358 (3<<7) | (0x1f<<1), 1,
2359 "READ HEADER")) == 0)
2360 reply = do_read_header(fsg, bh);
2364 if (!mod_data.cdrom)
2366 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2367 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2370 reply = do_read_toc(fsg, bh);
2373 case SC_READ_FORMAT_CAPACITIES:
2374 fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2375 if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2377 "READ FORMAT CAPACITIES")) == 0)
2378 reply = do_read_format_capacities(fsg, bh);
2381 case SC_REQUEST_SENSE:
2382 fsg->data_size_from_cmnd = fsg->cmnd[4];
2383 if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2385 "REQUEST SENSE")) == 0)
2386 reply = do_request_sense(fsg, bh);
2389 case SC_START_STOP_UNIT:
2390 fsg->data_size_from_cmnd = 0;
2391 if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
2393 "START-STOP UNIT")) == 0)
2394 reply = do_start_stop(fsg);
2397 case SC_SYNCHRONIZE_CACHE:
2398 fsg->data_size_from_cmnd = 0;
2399 if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
2400 (0xf<<2) | (3<<7), 1,
2401 "SYNCHRONIZE CACHE")) == 0)
2402 reply = do_synchronize_cache(fsg);
2405 case SC_TEST_UNIT_READY:
2406 fsg->data_size_from_cmnd = 0;
/* TEST UNIT READY has no handler: check_command()'s medium check
 * IS the whole command */
2407 reply = check_command(fsg, 6, DATA_DIR_NONE,
2412 /* Although optional, this command is used by MS-Windows. We
2413 * support a minimal version: BytChk must be 0. */
2415 fsg->data_size_from_cmnd = 0;
2416 if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
2417 (1<<1) | (0xf<<2) | (3<<7), 1,
2419 reply = do_verify(fsg);
/* WRITE(6): same 0-means-256 convention as READ(6) */
2424 fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
2425 if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
2428 reply = do_write(fsg);
2432 fsg->data_size_from_cmnd =
2433 get_unaligned_be16(&fsg->cmnd[7]) << 9;
2434 if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
2435 (1<<1) | (0xf<<2) | (3<<7), 1,
2437 reply = do_write(fsg);
2441 fsg->data_size_from_cmnd =
2442 get_unaligned_be32(&fsg->cmnd[6]) << 9;
2443 if ((reply = check_command(fsg, 12, DATA_DIR_FROM_HOST,
2444 (1<<1) | (0xf<<2) | (0xf<<6), 1,
2446 reply = do_write(fsg);
2449 /* Some mandatory commands that we recognize but don't implement.
2450 * They don't mean much in this setting. It's left as an exercise
2451 * for anyone interested to implement RESERVE and RELEASE in terms
2452 * of Posix locks. */
2453 case SC_FORMAT_UNIT:
2456 case SC_SEND_DIAGNOSTIC:
/* default: unrecognized opcode -- validate permissively (mask 0xff,
 * unknown direction) and report INVALID COMMAND sense data */
2461 fsg->data_size_from_cmnd = 0;
2462 sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]);
2463 if ((reply = check_command(fsg, fsg->cmnd_size,
2464 DATA_DIR_UNKNOWN, 0xff, 0, unknown)) == 0) {
2465 fsg->curlun->sense_data = SS_INVALID_COMMAND;
2470 up_read(&fsg->filesem);
2472 if (reply == -EINTR || signal_pending(current))
2475 /* Set up the single reply buffer for finish_reply() */
2476 if (reply == -EINVAL)
2477 reply = 0; // Error reply length
2478 if (reply >= 0 && fsg->data_dir == DATA_DIR_TO_HOST) {
/* Never send more than the host asked for */
2479 reply = min((u32) reply, fsg->data_size_from_cmnd);
2480 bh->inreq->length = reply;
2481 bh->state = BUF_STATE_FULL;
2482 fsg->residue -= reply;
2483 } // Otherwise it's already set
2489 /*-------------------------------------------------------------------------*/
/*
 * received_cbw -- validate a Bulk-only Command Block Wrapper that just
 * arrived on the bulk-out endpoint and unpack it into fsg's command state
 * (cmnd, cmnd_size, data_dir, data_size, lun, tag).
 *
 * An invalid CBW wedges the IN endpoint and sets IGNORE_BULK_OUT, as the
 * Bulk-Only Transport spec (sec. 6.6.1) requires; a syntactically valid
 * but non-meaningful CBW merely stalls the pipes when allowed.
 *
 * NOTE(review): error-return statements and closing braces are missing
 * from this extract (gaps in the original numbering).
 */
2491 static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2493 struct usb_request *req = bh->outreq;
2494 struct fsg_bulk_cb_wrap *cbw = req->buf;
2496 /* Was this a real packet? Should it be ignored? */
2497 if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
2500 /* Is the CBW valid? */
2501 if (req->actual != USB_BULK_CB_WRAP_LEN ||
2502 cbw->Signature != cpu_to_le32(
2504 DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
2506 le32_to_cpu(cbw->Signature));
2508 /* The Bulk-only spec says we MUST stall the IN endpoint
2509 * (6.6.1), so it's unavoidable. It also says we must
2510 * retain this state until the next reset, but there's
2511 * no way to tell the controller driver it should ignore
2512 * Clear-Feature(HALT) requests.
2514 * We aren't required to halt the OUT endpoint; instead
2515 * we can simply accept and discard any data received
2516 * until the next reset. */
2517 wedge_bulk_in_endpoint(fsg);
2518 set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
2522 /* Is the CBW meaningful? */
2523 if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
2524 cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
2525 DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
2527 cbw->Lun, cbw->Flags, cbw->Length);
2529 /* We can do anything we want here, so let's stall the
2530 * bulk pipes if we are allowed to. */
2531 if (mod_data.can_stall) {
2532 fsg_set_halt(fsg, fsg->bulk_out);
2533 halt_bulk_in_endpoint(fsg);
2538 /* Save the command for later */
2539 fsg->cmnd_size = cbw->Length;
2540 memcpy(fsg->cmnd, cbw->CDB, fsg->cmnd_size);
/* Direction bit in Flags selects IN (to host) vs OUT (from host) */
2541 if (cbw->Flags & USB_BULK_IN_FLAG)
2542 fsg->data_dir = DATA_DIR_TO_HOST;
2544 fsg->data_dir = DATA_DIR_FROM_HOST;
2545 fsg->data_size = le32_to_cpu(cbw->DataTransferLength);
/* Zero-length transfer overrides the direction bit */
2546 if (fsg->data_size == 0)
2547 fsg->data_dir = DATA_DIR_NONE;
2548 fsg->lun = cbw->Lun;
2549 fsg->tag = cbw->Tag;
/*
 * get_next_command -- block until the next SCSI command is available.
 *
 * Bulk-only (BBB): queue a bulk-out read for a CBW, sleep until it fills,
 * then parse it with received_cbw().  CB/CBI: the command arrives via an
 * ep0 control request into fsg->cbbuf_cmnd; sleep until that buffer is
 * non-empty, then copy it out under the spinlock.
 *
 * NOTE(review): error checks after sleep_thread() and the final return
 * are missing from this extract (gaps in the original numbering).
 */
2554 static int get_next_command(struct fsg_dev *fsg)
2556 struct fsg_buffhd *bh;
2559 if (transport_is_bbb()) {
2561 /* Wait for the next buffer to become available */
2562 bh = fsg->next_buffhd_to_fill;
2563 while (bh->state != BUF_STATE_EMPTY) {
2564 rc = sleep_thread(fsg);
2569 /* Queue a request to read a Bulk-only CBW */
2570 set_bulk_out_req_length(fsg, bh, USB_BULK_CB_WRAP_LEN);
/* A CBW shorter than USB_BULK_CB_WRAP_LEN is a protocol error */
2571 bh->outreq->short_not_ok = 1;
2572 start_transfer(fsg, fsg->bulk_out, bh->outreq,
2573 &bh->outreq_busy, &bh->state);
2575 /* We will drain the buffer in software, which means we
2576 * can reuse it for the next filling. No need to advance
2577 * next_buffhd_to_fill. */
2579 /* Wait for the CBW to arrive */
2580 while (bh->state != BUF_STATE_FULL) {
2581 rc = sleep_thread(fsg);
2586 rc = received_cbw(fsg, bh);
2587 bh->state = BUF_STATE_EMPTY;
2589 } else { // USB_PR_CB or USB_PR_CBI
2591 /* Wait for the next command to arrive */
2592 while (fsg->cbbuf_cmnd_size == 0) {
2593 rc = sleep_thread(fsg);
2598 /* Is the previous status interrupt request still busy?
2599 * The host is allowed to skip reading the status,
2600 * so we must cancel it. */
2601 if (fsg->intreq_busy)
2602 usb_ep_dequeue(fsg->intr_in, fsg->intreq);
2604 /* Copy the command and mark the buffer empty */
2605 fsg->data_dir = DATA_DIR_UNKNOWN;
/* Lock against the ep0 completion handler that refills cbbuf_cmnd */
2606 spin_lock_irq(&fsg->lock);
2607 fsg->cmnd_size = fsg->cbbuf_cmnd_size;
2608 memcpy(fsg->cmnd, fsg->cbbuf_cmnd, fsg->cmnd_size);
2609 fsg->cbbuf_cmnd_size = 0;
2610 spin_unlock_irq(&fsg->lock);
2616 /*-------------------------------------------------------------------------*/
/*
 * enable_endpoint -- claim an endpoint for this gadget and enable it with
 * the given descriptor; logs and (per the missing lines) reports failure.
 */
2618 static int enable_endpoint(struct fsg_dev *fsg, struct usb_ep *ep,
2619 const struct usb_endpoint_descriptor *d)
2623 ep->driver_data = fsg;
2624 rc = usb_ep_enable(ep, d);
2626 ERROR(fsg, "can't enable %s, result %d\n", ep->name, rc);
/*
 * alloc_request -- allocate a usb_request for an endpoint into *preq.
 * GFP_ATOMIC because this can run from contexts that must not sleep.
 */
2630 static int alloc_request(struct fsg_dev *fsg, struct usb_ep *ep,
2631 struct usb_request **preq)
2633 *preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
2636 ERROR(fsg, "can't allocate request for %s\n", ep->name);
/*
2641 * Reset interface setting and re-init endpoint state (toggle etc).
2642 * Call with altsetting < 0 to disable the interface. The only other
2643 * available altsetting is 0, which enables the interface.
 *
 * Teardown order: free requests, disable endpoints; bring-up order:
 * enable endpoints (speed-appropriate descriptors via fsg_ep_desc),
 * allocate one in/out request per buffer head, then post a
 * RESET_OCCURRED unit attention on every LUN.
 *
 * NOTE(review): the `if (bh->inreq)` / `if (bh->outreq)` guards, error
 * gotos and closing braces are missing from this extract.
 */
2645 static int do_set_interface(struct fsg_dev *fsg, int altsetting)
2649 const struct usb_endpoint_descriptor *d;
2652 DBG(fsg, "reset interface\n");
2655 /* Deallocate the requests */
2656 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2657 struct fsg_buffhd *bh = &fsg->buffhds[i];
2660 usb_ep_free_request(fsg->bulk_in, bh->inreq);
2664 usb_ep_free_request(fsg->bulk_out, bh->outreq);
2669 usb_ep_free_request(fsg->intr_in, fsg->intreq);
2673 /* Disable the endpoints */
2674 if (fsg->bulk_in_enabled) {
2675 usb_ep_disable(fsg->bulk_in);
2676 fsg->bulk_in_enabled = 0;
2678 if (fsg->bulk_out_enabled) {
2679 usb_ep_disable(fsg->bulk_out);
2680 fsg->bulk_out_enabled = 0;
2682 if (fsg->intr_in_enabled) {
2683 usb_ep_disable(fsg->intr_in);
2684 fsg->intr_in_enabled = 0;
/* Negative altsetting (or a prior error) means disable-only: stop here */
2688 if (altsetting < 0 || rc != 0)
2691 DBG(fsg, "set interface %d\n", altsetting);
2693 /* Enable the endpoints */
2694 d = fsg_ep_desc(fsg->gadget,
2695 &fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);
2696 if ((rc = enable_endpoint(fsg, fsg->bulk_in, d)) != 0)
2698 fsg->bulk_in_enabled = 1;
2700 d = fsg_ep_desc(fsg->gadget,
2701 &fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc);
2702 if ((rc = enable_endpoint(fsg, fsg->bulk_out, d)) != 0)
2704 fsg->bulk_out_enabled = 1;
2705 fsg->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize);
2706 clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
/* Interrupt endpoint only exists for CBI transport */
2708 if (transport_is_cbi()) {
2709 d = fsg_ep_desc(fsg->gadget,
2710 &fsg_fs_intr_in_desc, &fsg_hs_intr_in_desc);
2711 if ((rc = enable_endpoint(fsg, fsg->intr_in, d)) != 0)
2713 fsg->intr_in_enabled = 1;
2716 /* Allocate the requests */
2717 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2718 struct fsg_buffhd *bh = &fsg->buffhds[i];
2720 if ((rc = alloc_request(fsg, fsg->bulk_in, &bh->inreq)) != 0)
2722 if ((rc = alloc_request(fsg, fsg->bulk_out, &bh->outreq)) != 0)
/* in and out requests share the same data buffer */
2724 bh->inreq->buf = bh->outreq->buf = bh->buf;
2725 bh->inreq->context = bh->outreq->context = bh;
2726 bh->inreq->complete = bulk_in_complete;
2727 bh->outreq->complete = bulk_out_complete;
2729 if (transport_is_cbi()) {
2730 if ((rc = alloc_request(fsg, fsg->intr_in, &fsg->intreq)) != 0)
2732 fsg->intreq->complete = intr_in_complete;
/* A (re)enabled interface looks like a reset to the host */
2736 for (i = 0; i < fsg->nluns; ++i)
2737 fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
/*
2743 * Change our operational configuration. This code must agree with the code
2744 * that returns config descriptors, and with interface altsetting code.
2746 * It's also responsible for power management interactions. Some
2747 * configurations might not work with our current power sources.
2748 * For now we just assume the gadget is always self-powered.
 *
 * new_config == 0 deconfigures; non-zero enables the single interface
 * (altsetting 0), resetting fsg->config to 0 on failure.
 */
2750 static int do_set_config(struct fsg_dev *fsg, u8 new_config)
2754 /* Disable the single interface */
2755 if (fsg->config != 0) {
2756 DBG(fsg, "reset config\n");
2758 rc = do_set_interface(fsg, -1);
2761 /* Enable the interface */
2762 if (new_config != 0) {
2763 fsg->config = new_config;
2764 if ((rc = do_set_interface(fsg, 0)) != 0)
2765 fsg->config = 0; // Reset on errors
/* Log the negotiated bus speed for diagnostics */
2769 switch (fsg->gadget->speed) {
2770 case USB_SPEED_LOW: speed = "low"; break;
2771 case USB_SPEED_FULL: speed = "full"; break;
2772 case USB_SPEED_HIGH: speed = "high"; break;
2773 default: speed = "?"; break;
2775 INFO(fsg, "%s speed config #%d\n", speed, fsg->config);
2782 /*-------------------------------------------------------------------------*/
/*
 * handle_exception -- central recovery path for the gadget thread.
 *
 * Drains pending signals (anything but SIGUSR1 escalates to EXIT),
 * cancels and waits out all in-flight USB requests, flushes FIFOs,
 * resets buffer/SCSI state under the lock, then performs the action
 * specific to the exception that was raised (abort, reset, interface/
 * config change, disconnect, exit).
 */
2784 static void handle_exception(struct fsg_dev *fsg)
2790 struct fsg_buffhd *bh;
2791 enum fsg_state old_state;
2793 struct fsg_lun *curlun;
2794 unsigned int exception_req_tag;
2797 /* Clear the existing signals. Anything but SIGUSR1 is converted
2798 * into a high-priority EXIT exception. */
/* NOTE(review): "¤t" below is a mojibake of "&current" -- the
 * character encoding was damaged in this copy; restore "&current". */
2800 sig = dequeue_signal_lock(current, ¤t->blocked, &info);
2803 if (sig != SIGUSR1) {
2804 if (fsg->state < FSG_STATE_EXIT)
2805 DBG(fsg, "Main thread exiting on signal\n");
2806 raise_exception(fsg, FSG_STATE_EXIT);
2810 /* Cancel all the pending transfers */
2811 if (fsg->intreq_busy)
2812 usb_ep_dequeue(fsg->intr_in, fsg->intreq);
2813 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2814 bh = &fsg->buffhds[i];
2816 usb_ep_dequeue(fsg->bulk_in, bh->inreq);
2817 if (bh->outreq_busy)
2818 usb_ep_dequeue(fsg->bulk_out, bh->outreq);
2821 /* Wait until everything is idle */
2823 num_active = fsg->intreq_busy;
2824 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2825 bh = &fsg->buffhds[i];
2826 num_active += bh->inreq_busy + bh->outreq_busy;
2828 if (num_active == 0)
2830 if (sleep_thread(fsg))
2834 /* Clear out the controller's fifos */
2835 if (fsg->bulk_in_enabled)
2836 usb_ep_fifo_flush(fsg->bulk_in);
2837 if (fsg->bulk_out_enabled)
2838 usb_ep_fifo_flush(fsg->bulk_out);
2839 if (fsg->intr_in_enabled)
2840 usb_ep_fifo_flush(fsg->intr_in);
2842 /* Reset the I/O buffer states and pointers, the SCSI
2843 * state, and the exception. Then invoke the handler. */
2844 spin_lock_irq(&fsg->lock);
2846 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2847 bh = &fsg->buffhds[i];
2848 bh->state = BUF_STATE_EMPTY;
2850 fsg->next_buffhd_to_fill = fsg->next_buffhd_to_drain =
/* Snapshot the exception parameters before clearing the state */
2853 exception_req_tag = fsg->exception_req_tag;
2854 new_config = fsg->new_config;
2855 old_state = fsg->state;
2857 if (old_state == FSG_STATE_ABORT_BULK_OUT)
2858 fsg->state = FSG_STATE_STATUS_PHASE;
2860 for (i = 0; i < fsg->nluns; ++i) {
2861 curlun = &fsg->luns[i];
2862 curlun->prevent_medium_removal = 0;
2863 curlun->sense_data = curlun->unit_attention_data =
2865 curlun->sense_data_info = 0;
2866 curlun->info_valid = 0;
2868 fsg->state = FSG_STATE_IDLE;
2870 spin_unlock_irq(&fsg->lock);
2872 /* Carry out any extra actions required for the exception */
2873 switch (old_state) {
2877 case FSG_STATE_ABORT_BULK_OUT:
2879 spin_lock_irq(&fsg->lock);
/* Only go IDLE if no newer exception changed the state meanwhile */
2880 if (fsg->state == FSG_STATE_STATUS_PHASE)
2881 fsg->state = FSG_STATE_IDLE;
2882 spin_unlock_irq(&fsg->lock);
2885 case FSG_STATE_RESET:
2886 /* In case we were forced against our will to halt a
2887 * bulk endpoint, clear the halt now. (The SuperH UDC
2888 * requires this.) */
2889 if (test_and_clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
2890 usb_ep_clear_halt(fsg->bulk_in);
2892 if (transport_is_bbb()) {
2893 if (fsg->ep0_req_tag == exception_req_tag)
2894 ep0_queue(fsg); // Complete the status stage
2896 } else if (transport_is_cbi())
2897 send_status(fsg); // Status by interrupt pipe
2899 /* Technically this should go here, but it would only be
2900 * a waste of time. Ditto for the INTERFACE_CHANGE and
2901 * CONFIG_CHANGE cases. */
2902 // for (i = 0; i < fsg->nluns; ++i)
2903 // fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
2906 case FSG_STATE_INTERFACE_CHANGE:
2907 rc = do_set_interface(fsg, 0);
/* A newer ep0 request superseded this one: skip the status stage */
2908 if (fsg->ep0_req_tag != exception_req_tag)
2910 if (rc != 0) // STALL on errors
2911 fsg_set_halt(fsg, fsg->ep0);
2912 else // Complete the status stage
2916 case FSG_STATE_CONFIG_CHANGE:
2917 rc = do_set_config(fsg, new_config);
2918 if (fsg->ep0_req_tag != exception_req_tag)
2920 if (rc != 0) // STALL on errors
2921 fsg_set_halt(fsg, fsg->ep0);
2922 else // Complete the status stage
2926 case FSG_STATE_DISCONNECT:
/* Flush the backing files so no write is lost across unplug */
2927 for (i = 0; i < fsg->nluns; ++i)
2928 fsg_lun_fsync_sub(fsg->luns + i);
2929 do_set_config(fsg, 0); // Unconfigured state
2932 case FSG_STATE_EXIT:
2933 case FSG_STATE_TERMINATED:
2934 do_set_config(fsg, 0); // Free resources
2935 spin_lock_irq(&fsg->lock);
2936 fsg->state = FSG_STATE_TERMINATED; // Stop the thread
2937 spin_unlock_irq(&fsg->lock);
2943 /*-------------------------------------------------------------------------*/
/*
 * fsg_main_thread -- the gadget's single worker thread.
 *
 * Loop: handle any pending exception/signal; otherwise fetch a command,
 * walk it through DATA_PHASE (do_scsi_command + finish_reply) and
 * STATUS_PHASE (send_status), returning to IDLE.  Each phase transition
 * is taken under the spinlock and skipped if an exception arrived.
 * On termination the thread unregisters the driver (if it still owns the
 * REGISTERED bit) and signals thread_notifier for unbind/cleanup.
 */
2945 static int fsg_main_thread(void *fsg_)
2947 struct fsg_dev *fsg = fsg_;
2949 /* Allow the thread to be killed by a signal, but set the signal mask
2950 * to block everything but INT, TERM, KILL, and USR1. */
2951 allow_signal(SIGINT);
2952 allow_signal(SIGTERM);
2953 allow_signal(SIGKILL);
2954 allow_signal(SIGUSR1);
2956 /* Allow the thread to be frozen */
2959 /* Arrange for userspace references to be interpreted as kernel
2960 * pointers. That way we can pass a kernel pointer to a routine
2961 * that expects a __user pointer and it will work okay. */
2965 while (fsg->state != FSG_STATE_TERMINATED) {
2966 if (exception_in_progress(fsg) || signal_pending(current)) {
2967 handle_exception(fsg);
2971 if (!fsg->running) {
2976 if (get_next_command(fsg))
2979 spin_lock_irq(&fsg->lock);
2980 if (!exception_in_progress(fsg))
2981 fsg->state = FSG_STATE_DATA_PHASE;
2982 spin_unlock_irq(&fsg->lock);
2984 if (do_scsi_command(fsg) || finish_reply(fsg))
2987 spin_lock_irq(&fsg->lock);
2988 if (!exception_in_progress(fsg))
2989 fsg->state = FSG_STATE_STATUS_PHASE;
2990 spin_unlock_irq(&fsg->lock);
2992 if (send_status(fsg))
2995 spin_lock_irq(&fsg->lock);
2996 if (!exception_in_progress(fsg))
2997 fsg->state = FSG_STATE_IDLE;
2998 spin_unlock_irq(&fsg->lock);
/* Loop exited: detach the task pointer so nobody signals a dead thread */
3001 spin_lock_irq(&fsg->lock);
3002 fsg->thread_task = NULL;
3003 spin_unlock_irq(&fsg->lock);
3005 /* If we are exiting because of a signal, unregister the
3007 if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags))
3008 usb_gadget_unregister_driver(&fsg_driver);
3010 /* Let the unbind and cleanup routines know the thread has exited */
3011 complete_and_exit(&fsg->thread_notifier, 0);
3015 /*-------------------------------------------------------------------------*/
3018 /* The write permissions and store_xxx pointers are set in fsg_bind() */
/* ro and file start read-only (0444); fsg_bind() upgrades them to 0644
 * and installs the store hooks when the medium is removable.  nofua is
 * declared 0644 here but its store hook is also installed in fsg_bind()
 * -- NOTE(review): confirm bind always runs before these files become
 * writable. */
3019 static DEVICE_ATTR(ro, 0444, fsg_show_ro, NULL);
3020 static DEVICE_ATTR(nofua, 0644, fsg_show_nofua, NULL);
3021 static DEVICE_ATTR(file, 0444, fsg_show_file, NULL);
3024 /*-------------------------------------------------------------------------*/
/*
 * fsg_release -- kref destructor for the fsg_dev; frees the device
 * (freeing statements fall outside this extract).
 */
3026 static void fsg_release(struct kref *ref)
3028 struct fsg_dev *fsg = container_of(ref, struct fsg_dev, ref);
/*
 * lun_release -- release callback for a LUN's struct device.  drvdata
 * holds &fsg->filesem (set in fsg_bind), so container_of recovers the
 * owning fsg_dev; each registered LUN holds one kref on it.
 */
3034 static void lun_release(struct device *dev)
3036 struct rw_semaphore *filesem = dev_get_drvdata(dev);
3037 struct fsg_dev *fsg =
3038 container_of(filesem, struct fsg_dev, filesem);
3040 kref_put(&fsg->ref, fsg_release);
3043 static void /* __init_or_exit */ fsg_unbind(struct usb_gadget
*gadget
)
3045 struct fsg_dev
*fsg
= get_gadget_data(gadget
);
3047 struct fsg_lun
*curlun
;
3048 struct usb_request
*req
= fsg
->ep0req
;
3050 DBG(fsg
, "unbind\n");
3051 clear_bit(REGISTERED
, &fsg
->atomic_bitflags
);
3053 /* Unregister the sysfs attribute files and the LUNs */
3054 for (i
= 0; i
< fsg
->nluns
; ++i
) {
3055 curlun
= &fsg
->luns
[i
];
3056 if (curlun
->registered
) {
3057 device_remove_file(&curlun
->dev
, &dev_attr_ro
);
3058 device_remove_file(&curlun
->dev
, &dev_attr_file
);
3059 fsg_lun_close(curlun
);
3060 device_unregister(&curlun
->dev
);
3061 curlun
->registered
= 0;
3065 /* If the thread isn't already dead, tell it to exit now */
3066 if (fsg
->state
!= FSG_STATE_TERMINATED
) {
3067 raise_exception(fsg
, FSG_STATE_EXIT
);
3068 wait_for_completion(&fsg
->thread_notifier
);
3070 /* The cleanup routine waits for this completion also */
3071 complete(&fsg
->thread_notifier
);
3074 /* Free the data buffers */
3075 for (i
= 0; i
< FSG_NUM_BUFFERS
; ++i
)
3076 kfree(fsg
->buffhds
[i
].buf
);
3078 /* Free the request and buffer for endpoint 0 */
3081 usb_ep_free_request(fsg
->ep0
, req
);
3084 set_gadget_data(gadget
, NULL
);
/*
 * check_parameters -- validate and normalize the module parameters.
 *
 * Sets the default transport (Bulk-only) and protocol (Transparent SCSI);
 * derives bcdDevice from the controller number when `release` wasn't
 * given; under CONFIG_USB_FILE_STORAGE_TEST parses the transport/protocol
 * parameter strings and validates buflen; finally validates the
 * user-supplied serial string (uppercase hex, length per transport) or
 * synthesizes one from DRIVER_VERSION.
 *
 * NOTE(review): error returns and several closing braces are missing
 * from this extract (gaps in the original numbering).
 */
3088 static int __init check_parameters(struct fsg_dev *fsg)
3094 /* Store the default values */
3095 mod_data.transport_type = USB_PR_BULK;
3096 mod_data.transport_name = "Bulk-only";
3097 mod_data.protocol_type = USB_SC_SCSI;
3098 mod_data.protocol_name = "Transparent SCSI";
3100 /* Some peripheral controllers are known not to be able to
3101 * halt bulk endpoints correctly. If one of them is present,
3104 if (gadget_is_at91(fsg->gadget))
3105 mod_data.can_stall = 0;
3107 if (mod_data.release == 0xffff) { // Parameter wasn't set
3108 gcnum = usb_gadget_controller_number(fsg->gadget);
3110 mod_data.release = 0x0300 + gcnum;
3112 WARNING(fsg, "controller '%s' not recognized\n",
/* Fallback release value for unrecognized controllers */
3114 mod_data.release = 0x0399;
3118 prot = simple_strtol(mod_data.protocol_parm, NULL, 0);
3120 #ifdef CONFIG_USB_FILE_STORAGE_TEST
3121 if (strnicmp(mod_data.transport_parm, "BBB", 10) == 0) {
3122 ; // Use default setting
3123 } else if (strnicmp(mod_data.transport_parm, "CB", 10) == 0) {
3124 mod_data.transport_type = USB_PR_CB;
3125 mod_data.transport_name = "Control-Bulk";
3126 } else if (strnicmp(mod_data.transport_parm, "CBI", 10) == 0) {
3127 mod_data.transport_type = USB_PR_CBI;
3128 mod_data.transport_name = "Control-Bulk-Interrupt";
3130 ERROR(fsg, "invalid transport: %s\n", mod_data.transport_parm);
/* Protocol may be named or given numerically (prot parsed above) */
3134 if (strnicmp(mod_data.protocol_parm, "SCSI", 10) == 0 ||
3135 prot == USB_SC_SCSI) {
3136 ; // Use default setting
3137 } else if (strnicmp(mod_data.protocol_parm, "RBC", 10) == 0 ||
3138 prot == USB_SC_RBC) {
3139 mod_data.protocol_type = USB_SC_RBC;
3140 mod_data.protocol_name = "RBC";
3141 } else if (strnicmp(mod_data.protocol_parm, "8020", 4) == 0 ||
3142 strnicmp(mod_data.protocol_parm, "ATAPI", 10) == 0 ||
3143 prot == USB_SC_8020) {
3144 mod_data.protocol_type = USB_SC_8020;
3145 mod_data.protocol_name = "8020i (ATAPI)";
3146 } else if (strnicmp(mod_data.protocol_parm, "QIC", 3) == 0 ||
3147 prot == USB_SC_QIC) {
3148 mod_data.protocol_type = USB_SC_QIC;
3149 mod_data.protocol_name = "QIC-157";
3150 } else if (strnicmp(mod_data.protocol_parm, "UFI", 10) == 0 ||
3151 prot == USB_SC_UFI) {
3152 mod_data.protocol_type = USB_SC_UFI;
3153 mod_data.protocol_name = "UFI";
3154 } else if (strnicmp(mod_data.protocol_parm, "8070", 4) == 0 ||
3155 prot == USB_SC_8070) {
3156 mod_data.protocol_type = USB_SC_8070;
3157 mod_data.protocol_name = "8070i";
3159 ERROR(fsg, "invalid protocol: %s\n", mod_data.protocol_parm);
/* Round buflen down to a page-cache multiple; zero is invalid */
3163 mod_data.buflen &= PAGE_CACHE_MASK;
3164 if (mod_data.buflen <= 0) {
3165 ERROR(fsg, "invalid buflen\n");
3169 #endif /* CONFIG_USB_FILE_STORAGE_TEST */
3171 /* Serial string handling.
3172 * On a real device, the serial string would be loaded
3173 * from permanent storage. */
3174 if (mod_data.serial) {
3179 * The CB[I] specification limits the serial string to
3180 * 12 uppercase hexadecimal characters.
3181 * BBB need at least 12 uppercase hexadecimal characters,
3182 * with a maximum of 126. */
3183 for (ch = mod_data.serial; *ch; ++ch) {
3185 if ((*ch < '0' || *ch > '9') &&
3186 (*ch < 'A' || *ch > 'F')) { /* not uppercase hex */
3188 "Invalid serial string character: %c; "
3189 "Failing back to default\n",
/* Length rules: BBB needs >= 12 chars, CB/CBI at most 12 */
3195 (mod_data.transport_type == USB_PR_BULK && len < 12) ||
3196 (mod_data.transport_type != USB_PR_BULK && len > 12)) {
3198 "Invalid serial string length; "
3199 "Failing back to default\n");
3202 fsg_strings[FSG_STRING_SERIAL - 1].s = mod_data.serial;
3205 "Userspace failed to provide serial number; "
3206 "Failing back to default\n");
3208 /* Serial number not specified or invalid, make our own.
3209 * We just encode it from the driver version string,
3210 * 12 characters to comply with both CB[I] and BBB spec.
3211 * Warning : Two devices running the same kernel will have
3212 * the same fallback serial number. */
3213 for (i = 0; i < 12; i += 2) {
3214 unsigned char c = DRIVER_VERSION[i / 2];
3218 sprintf(&fsg_string_serial[i], "%02X", c);
/*
 * fsg_bind -- gadget bind callback: full driver bring-up.
 *
 * Order: wire up ep0, validate module parameters, enable writable sysfs
 * attributes for removable media, allocate and register the LUNs (each
 * with ro/nofua/file attributes and an open backing file), autoconfigure
 * the endpoints, patch the USB descriptors, allocate the ep0 request and
 * the data buffers, then create and wake the worker thread.
 *
 * NOTE(review): error-path labels (autoconf_fail etc.), returns and
 * closing braces are missing from this extract; the tokens below are
 * kept verbatim.
 */
3226 static int __ref fsg_bind(struct usb_gadget *gadget)
3228 struct fsg_dev *fsg = the_fsg;
3231 struct fsg_lun *curlun;
3233 struct usb_request *req;
3236 fsg->gadget = gadget;
3237 set_gadget_data(gadget, fsg);
3238 fsg->ep0 = gadget->ep0;
3239 fsg->ep0->driver_data = fsg;
3241 if ((rc = check_parameters(fsg)) != 0)
3244 if (mod_data.removable) { // Enable the store_xxx attributes
3245 dev_attr_file.attr.mode = 0644;
3246 dev_attr_file.store = fsg_store_file;
/* A CD-ROM is never writable, so leave `ro` read-only in that case */
3247 if (!mod_data.cdrom) {
3248 dev_attr_ro.attr.mode = 0644;
3249 dev_attr_ro.store = fsg_store_ro;
3253 /* Only for removable media? */
3254 dev_attr_nofua.attr.mode = 0644;
3255 dev_attr_nofua.store = fsg_store_nofua;
3257 /* Find out how many LUNs there should be */
3260 i = max(mod_data.num_filenames, 1u);
3261 if (i > FSG_MAX_LUNS) {
3262 ERROR(fsg, "invalid number of LUNs: %d\n", i);
3267 /* Create the LUNs, open their backing files, and register the
3268 * LUN devices in sysfs. */
3269 fsg->luns = kzalloc(i * sizeof(struct fsg_lun), GFP_KERNEL);
3276 for (i = 0; i < fsg->nluns; ++i) {
3277 curlun = &fsg->luns[i];
3278 curlun->cdrom = !!mod_data.cdrom;
/* CD-ROM forces read-only regardless of the per-LUN ro parameter */
3279 curlun->ro = mod_data.cdrom || mod_data.ro[i];
3280 curlun->initially_ro = curlun->ro;
3281 curlun->removable = mod_data.removable;
3282 curlun->nofua = mod_data.nofua[i];
3283 curlun->dev.release = lun_release;
3284 curlun->dev.parent = &gadget->dev;
3285 curlun->dev.driver = &fsg_driver.driver;
/* drvdata = &filesem lets lun_release() recover the fsg_dev */
3286 dev_set_drvdata(&curlun->dev, &fsg->filesem);
3287 dev_set_name(&curlun->dev,"%s-lun%d",
3288 dev_name(&gadget->dev), i);
3290 if ((rc = device_register(&curlun->dev)) != 0) {
3291 INFO(fsg, "failed to register LUN%d: %d\n", i, rc);
3294 if ((rc = device_create_file(&curlun->dev,
3295 &dev_attr_ro)) != 0 ||
3296 (rc = device_create_file(&curlun->dev,
3297 &dev_attr_nofua)) != 0 ||
3298 (rc = device_create_file(&curlun->dev,
3299 &dev_attr_file)) != 0) {
3300 device_unregister(&curlun->dev);
3303 curlun->registered = 1;
/* Each registered LUN pins the fsg_dev; dropped in lun_release() */
3304 kref_get(&fsg->ref);
3306 if (mod_data.file[i] && *mod_data.file[i]) {
3307 if ((rc = fsg_lun_open(curlun,
3308 mod_data.file[i])) != 0)
3310 } else if (!mod_data.removable) {
3311 ERROR(fsg, "no file given for LUN%d\n", i);
3317 /* Find all the endpoints we will use */
3318 usb_ep_autoconfig_reset(gadget);
3319 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
3322 ep->driver_data = fsg; // claim the endpoint
3325 ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
3328 ep->driver_data = fsg; // claim the endpoint
3331 if (transport_is_cbi()) {
3332 ep = usb_ep_autoconfig(gadget, &fsg_fs_intr_in_desc);
3335 ep->driver_data = fsg; // claim the endpoint
3339 /* Fix up the descriptors */
3340 device_desc.bMaxPacketSize0 = fsg->ep0->maxpacket;
3341 device_desc.idVendor = cpu_to_le16(mod_data.vendor);
3342 device_desc.idProduct = cpu_to_le16(mod_data.product);
3343 device_desc.bcdDevice = cpu_to_le16(mod_data.release);
3345 i = (transport_is_cbi() ? 3 : 2); // Number of endpoints
3346 fsg_intf_desc.bNumEndpoints = i;
3347 fsg_intf_desc.bInterfaceSubClass = mod_data.protocol_type;
3348 fsg_intf_desc.bInterfaceProtocol = mod_data.transport_type;
/* NULL-terminate the descriptor list after the last endpoint entry */
3349 fsg_fs_function[i + FSG_FS_FUNCTION_PRE_EP_ENTRIES] = NULL;
3351 if (gadget_is_dualspeed(gadget)) {
3352 fsg_hs_function[i + FSG_HS_FUNCTION_PRE_EP_ENTRIES] = NULL;
3354 /* Assume ep0 uses the same maxpacket value for both speeds */
3355 dev_qualifier.bMaxPacketSize0 = fsg->ep0->maxpacket;
3357 /* Assume endpoint addresses are the same for both speeds */
3358 fsg_hs_bulk_in_desc.bEndpointAddress =
3359 fsg_fs_bulk_in_desc.bEndpointAddress;
3360 fsg_hs_bulk_out_desc.bEndpointAddress =
3361 fsg_fs_bulk_out_desc.bEndpointAddress;
3362 fsg_hs_intr_in_desc.bEndpointAddress =
3363 fsg_fs_intr_in_desc.bEndpointAddress;
3366 if (gadget_is_otg(gadget))
3367 fsg_otg_desc.bmAttributes |= USB_OTG_HNP;
3371 /* Allocate the request and buffer for endpoint 0 */
3372 fsg->ep0req = req = usb_ep_alloc_request(fsg->ep0, GFP_KERNEL);
3375 req->buf = kmalloc(EP0_BUFSIZE, GFP_KERNEL);
3378 req->complete = ep0_complete;
3380 /* Allocate the data buffers */
3381 for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
3382 struct fsg_buffhd *bh = &fsg->buffhds[i];
3384 /* Allocate for the bulk-in endpoint. We assume that
3385 * the buffer will also work with the bulk-out (and
3386 * interrupt-in) endpoint. */
3387 bh->buf = kmalloc(mod_data.buflen, GFP_KERNEL);
/* Close the circular buffer-head list */
3392 fsg->buffhds[FSG_NUM_BUFFERS - 1].next = &fsg->buffhds[0];
3394 /* This should reflect the actual gadget power source */
3395 usb_gadget_set_selfpowered(gadget);
3397 snprintf(fsg_string_manufacturer, sizeof fsg_string_manufacturer,
3399 init_utsname()->sysname, init_utsname()->release,
3402 fsg->thread_task = kthread_create(fsg_main_thread, fsg,
3403 "file-storage-gadget");
3404 if (IS_ERR(fsg->thread_task)) {
3405 rc = PTR_ERR(fsg->thread_task);
3409 INFO(fsg, DRIVER_DESC ", version: " DRIVER_VERSION "\n");
3410 INFO(fsg, "Number of LUNs=%d\n", fsg->nluns);
/* Log each open LUN's backing-file path for diagnostics */
3412 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
3413 for (i = 0; i < fsg->nluns; ++i) {
3414 curlun = &fsg->luns[i];
3415 if (fsg_lun_is_open(curlun)) {
3418 p = d_path(&curlun->filp->f_path,
3423 LINFO(curlun, "ro=%d, nofua=%d, file: %s\n",
3424 curlun->ro, curlun->nofua, (p ? p : "(error)"));
3429 DBG(fsg, "transport=%s (x%02x)\n",
3430 mod_data.transport_name, mod_data.transport_type);
3431 DBG(fsg, "protocol=%s (x%02x)\n",
3432 mod_data.protocol_name, mod_data.protocol_type);
3433 DBG(fsg, "VendorID=x%04x, ProductID=x%04x, Release=x%04x\n",
3434 mod_data.vendor, mod_data.product, mod_data.release);
3435 DBG(fsg, "removable=%d, stall=%d, cdrom=%d, buflen=%u\n",
3436 mod_data.removable, mod_data.can_stall,
3437 mod_data.cdrom, mod_data.buflen);
3438 DBG(fsg, "I/O thread pid: %d\n", task_pid_nr(fsg->thread_task));
3440 set_bit(REGISTERED, &fsg->atomic_bitflags);
3442 /* Tell the thread to start working */
3443 wake_up_process(fsg->thread_task);
/* Error paths (labels elided in this extract) */
3447 ERROR(fsg, "unable to autoconfigure all endpoints\n");
3451 fsg->state = FSG_STATE_TERMINATED; // The thread is dead
3453 complete(&fsg->thread_notifier);
3458 /*-------------------------------------------------------------------------*/
/*
 * fsg_suspend -- gadget suspend callback: just record the state in the
 * SUSPENDED atomic flag.
 */
3460 static void fsg_suspend(struct usb_gadget *gadget)
3462 struct fsg_dev *fsg = get_gadget_data(gadget);
3464 DBG(fsg, "suspend\n");
3465 set_bit(SUSPENDED, &fsg->atomic_bitflags);
/*
 * fsg_resume -- gadget resume callback: clear the SUSPENDED flag set by
 * fsg_suspend().
 */
3468 static void fsg_resume(struct usb_gadget *gadget)
3470 struct fsg_dev *fsg = get_gadget_data(gadget);
3472 DBG(fsg, "resume\n");
3473 clear_bit(SUSPENDED, &fsg->atomic_bitflags);
3477 /*-------------------------------------------------------------------------*/
/*
 * fsg_driver -- the usb_gadget_driver registration table.  Max speed is
 * chosen at build time; bind/setup entries fall outside this extract.
 */
3479 static struct usb_gadget_driver fsg_driver = {
3480 #ifdef CONFIG_USB_GADGET_DUALSPEED
3481 .speed = USB_SPEED_HIGH,
3483 .speed = USB_SPEED_FULL,
3485 .function = (char *) fsg_string_product,
3487 .unbind = fsg_unbind,
3488 .disconnect = fsg_disconnect,
3490 .suspend = fsg_suspend,
3491 .resume = fsg_resume,
3494 .name = DRIVER_NAME,
3495 .owner = THIS_MODULE,
/*
 * fsg_alloc -- allocate and initialize the single fsg_dev instance
 * (spinlock, filesem rwsem, kref, thread-notifier completion).
 */
3503 static int __init fsg_alloc(void)
3505 struct fsg_dev *fsg;
3507 fsg = kzalloc(sizeof *fsg, GFP_KERNEL);
3510 spin_lock_init(&fsg->lock);
3511 init_rwsem(&fsg->filesem);
3512 kref_init(&fsg->ref);
3513 init_completion(&fsg->thread_notifier);
/*
 * fsg_init -- module entry point: allocate the device, then register the
 * gadget driver, dropping the kref on registration failure.
 *
 * NOTE(review): `fsg` is declared but never visibly assigned in this
 * extract -- the line assigning it (presumably from the_fsg after
 * fsg_alloc()) appears to be missing; confirm before relying on the
 * kref_put() below.
 */
3520 static int __init fsg_init(void)
3523 struct fsg_dev *fsg;
3525 if ((rc = fsg_alloc()) != 0)
3528 if ((rc = usb_gadget_register_driver(&fsg_driver)) != 0)
3529 kref_put(&fsg->ref, fsg_release);
3532 module_init(fsg_init);
/*
 * fsg_cleanup -- module exit: unregister the gadget driver unless the
 * worker thread already did so on its way out (REGISTERED bit handoff),
 * wait for the thread via thread_notifier, then drop the final kref.
 */
3535 static void __exit fsg_cleanup(void)
3537 struct fsg_dev *fsg = the_fsg;
3539 /* Unregister the driver iff the thread hasn't already done so */
3540 if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags))
3541 usb_gadget_unregister_driver(&fsg_driver);
3543 /* Wait for the thread to finish up */
3544 wait_for_completion(&fsg->thread_notifier);
3546 kref_put(&fsg->ref, fsg_release);
3548 module_exit(fsg_cleanup);