/*
 * file_storage.c -- File-backed USB Storage Gadget, for USB development
 *
 * Copyright (C) 2003-2008 Alan Stern
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * The File-backed Storage Gadget acts as a USB Mass Storage device,
 * appearing to the host as a disk drive or as a CD-ROM drive.  In addition
 * to providing an example of a genuinely useful gadget driver for a USB
 * device, it also illustrates a technique of double-buffering for increased
 * throughput.  Last but not least, it gives an easy way to probe the
 * behavior of the Mass Storage drivers in a USB host.
 *
 * Backing storage is provided by a regular file or a block device, specified
 * by the "file" module parameter.  Access can be limited to read-only by
 * setting the optional "ro" module parameter.  (For CD-ROM emulation,
 * access is always read-only.)  The gadget will indicate that it has
 * removable media if the optional "removable" module parameter is set.
 *
 * The gadget supports the Control-Bulk (CB), Control-Bulk-Interrupt (CBI),
 * and Bulk-Only (also known as Bulk-Bulk-Bulk or BBB) transports, selected
 * by the optional "transport" module parameter.  It also supports the
 * following protocols: RBC (0x01), ATAPI or SFF-8020i (0x02), QIC-157 (0x03),
 * UFI (0x04), SFF-8070i (0x05), and transparent SCSI (0x06), selected by
 * the optional "protocol" module parameter.  In addition, the default
 * Vendor ID, Product ID, release number and serial number can be overridden.
 *
 * There is support for multiple logical units (LUNs), each of which has
 * its own backing file.  The number of LUNs can be set using the optional
 * "luns" module parameter (anywhere from 1 to 8), and the corresponding
 * files are specified using comma-separated lists for "file" and "ro".
 * The default number of LUNs is taken from the number of "file" elements;
 * it is 1 if "file" is not given.  If "removable" is not set then a backing
 * file must be specified for each LUN.  If it is set, then an unspecified
 * or empty backing filename means the LUN's medium is not loaded.  Ideally
 * each LUN would be settable independently as a disk drive or a CD-ROM
 * drive, but currently all LUNs have to be the same type.  The CD-ROM
 * emulation includes a single data track and no audio tracks; hence there
 * need be only one backing file per LUN.  Note also that the CD-ROM block
 * length is set to 512 rather than the more common value 2048.
 *
 * Requirements are modest; only a bulk-in and a bulk-out endpoint are
 * needed (an interrupt-in endpoint is also needed for CBI).  The memory
 * requirement amounts to two 16K buffers, size configurable by a parameter.
 * Support is included for both full-speed and high-speed operation.
 *
 * Note that the driver is slightly non-portable in that it assumes a
 * single memory/DMA buffer will be useable for bulk-in, bulk-out, and
 * interrupt-in endpoints.  With most device controllers this isn't an
 * issue, but there may be some with hardware restrictions that prevent
 * a buffer from being used by more than one endpoint.
 *
 * Module options:
 *
 *	file=filename[,filename...]
 *				Required if "removable" is not set, names of
 *					the files or block devices used for
 *					backing storage
 *	serial=HHHH...		Required serial number (string of hex chars)
 *	ro=b[,b...]		Default false, booleans for read-only access
 *	removable		Default false, boolean for removable media
 *	luns=N			Default N = number of filenames, number of
 *					LUNs to support
 *	nofua=b[,b...]		Default false, booleans for ignore FUA flag
 *					in SCSI WRITE(10,12) commands
 *	stall			Default determined according to the type of
 *					USB device controller (usually true),
 *					boolean to permit the driver to halt
 *					bulk endpoints
 *	cdrom			Default false, boolean for whether to emulate
 *					a CD-ROM drive
 *	transport=XXX		Default BBB, transport name (CB, CBI, or BBB)
 *	protocol=YYY		Default SCSI, protocol name (RBC, 8020 or
 *					ATAPI, QIC, UFI, 8070, or SCSI;
 *					also 1 - 6)
 *	vendor=0xVVVV		Default 0x0525 (NetChip), USB Vendor ID
 *	product=0xPPPP		Default 0xa4a5 (FSG), USB Product ID
 *	release=0xRRRR		Override the USB release number (bcdDevice)
 *	buflen=N		Default N=16384, buffer size used (will be
 *					rounded down to a multiple of
 *					PAGE_CACHE_SIZE)
 *
 * If CONFIG_USB_FILE_STORAGE_TEST is not set, only the "file", "serial", "ro",
 * "removable", "luns", "nofua", "stall", and "cdrom" options are available;
 * default values are used for everything else.
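 *
 * For example, a two-LUN removable-media gadget could be loaded with a
 * command like the following (the image paths are hypothetical, shown only
 * to illustrate the comma-separated per-LUN lists):
 *
 *	modprobe g_file_storage file=/tmp/lun0.img,/tmp/lun1.img \
 *		ro=0,1 removable=1 serial=0123456789AB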
 *
 * The pathnames of the backing files and the ro settings are available in
 * the attribute files "file", "nofua", and "ro" in the lun<n> subdirectory of
 * the gadget's sysfs directory.  If the "removable" option is set, writing to
 * these files will simulate ejecting/loading the medium (writing an empty
 * line means eject) and adjusting a write-enable tab.  Changes to the ro
 * setting are not allowed when the medium is loaded or if CD-ROM emulation
 * is being used.
 *
 * This gadget driver is heavily based on "Gadget Zero" by David Brownell.
 * The driver's SCSI command interface was based on the "Information
 * technology - Small Computer System Interface - 2" document from
 * X3T9.2 Project 375D, Revision 10L, 7-SEP-93, available at
 * <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>.  The single exception
 * is opcode 0x23 (READ FORMAT CAPACITIES), which was based on the
 * "Universal Serial Bus Mass Storage Class UFI Command Specification"
 * document, Revision 1.0, December 14, 1998, available at
 * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
 */
/*
 * The FSG driver is fairly straightforward.  There is a main kernel
 * thread that handles most of the work.  Interrupt routines field
 * callbacks from the controller driver: bulk- and interrupt-request
 * completion notifications, endpoint-0 events, and disconnect events.
 * Completion events are passed to the main thread by wakeup calls.  Many
 * ep0 requests are handled at interrupt time, but SetInterface,
 * SetConfiguration, and device reset requests are forwarded to the
 * thread in the form of "exceptions" using SIGUSR1 signals (since they
 * should interrupt any ongoing file I/O operations).
 *
 * The thread's main routine implements the standard command/data/status
 * parts of a SCSI interaction.  It and its subroutines are full of tests
 * for pending signals/exceptions -- all this polling is necessary since
 * the kernel has no setjmp/longjmp equivalents.  (Maybe this is an
 * indication that the driver really wants to be running in userspace.)
 * An important point is that so long as the thread is alive it keeps an
 * open reference to the backing file.  This will prevent unmounting
 * the backing file's underlying filesystem and could cause problems
 * during system shutdown, for example.  To prevent such problems, the
 * thread catches INT, TERM, and KILL signals and converts them into
 * an EXIT exception.
 *
 * In normal operation the main thread is started during the gadget's
 * fsg_bind() callback and stopped during fsg_unbind().  But it can also
 * exit when it receives a signal, and there's no point leaving the
 * gadget running when the thread is dead.  So just before the thread
 * exits, it deregisters the gadget driver.  This makes things a little
 * tricky: The driver is deregistered at two places, and the exiting
 * thread can indirectly call fsg_unbind() which in turn can tell the
 * thread to exit.  The first problem is resolved through the use of the
 * REGISTERED atomic bitflag; the driver will only be deregistered once.
 * The second problem is resolved by having fsg_unbind() check
 * fsg->state; it won't try to stop the thread if the state is already
 * FSG_STATE_TERMINATED.
 *
 * To provide maximum throughput, the driver uses a circular pipeline of
 * buffer heads (struct fsg_buffhd).  In principle the pipeline can be
 * arbitrarily long; in practice the benefits don't justify having more
 * than 2 stages (i.e., double buffering).  But it helps to think of the
 * pipeline as being a long one.  Each buffer head contains a bulk-in and
 * a bulk-out request pointer (since the buffer can be used for both
 * output and input -- directions always are given from the host's
 * point of view) as well as a pointer to the buffer and various state
 * variables.
 *
 * Use of the pipeline follows a simple protocol.  There is a variable
 * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
 * At any time that buffer head may still be in use from an earlier
 * request, so each buffer head has a state variable indicating whether
 * it is EMPTY, FULL, or BUSY.  Typical use involves waiting for the
 * buffer head to be EMPTY, filling the buffer either by file I/O or by
 * USB I/O (during which the buffer head is BUSY), and marking the buffer
 * head FULL when the I/O is complete.  Then the buffer will be emptied
 * (again possibly by USB I/O, during which it is marked BUSY) and
 * finally marked EMPTY again (possibly by a completion routine).
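 *
 * A minimal sketch of the producer side of that protocol (illustrative
 * only -- the real loops are in do_read() and do_write() below):
 *
 *	bh = fsg->next_buffhd_to_fill;
 *	while (bh->state != BUF_STATE_EMPTY) {	// Wait until it drains
 *		rc = sleep_thread(fsg);
 *		if (rc)
 *			return rc;
 *	}
 *	// ... fill bh->buf by file or USB I/O; the buffer head is BUSY ...
 *	bh->state = BUF_STATE_FULL;		// Hand it to the consumer
 *	fsg->next_buffhd_to_fill = bh->next;	// Advance around the ring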
 *
 * A module parameter tells the driver to avoid stalling the bulk
 * endpoints wherever the transport specification allows.  This is
 * necessary for some UDCs like the SuperH, which cannot reliably clear a
 * halt on a bulk endpoint.  However, under certain circumstances the
 * Bulk-only specification requires a stall.  In such cases the driver
 * will halt the endpoint and set a flag indicating that it should clear
 * the halt in software during the next device reset.  Hopefully this
 * will permit everything to work correctly.  Furthermore, although the
 * specification allows the bulk-out endpoint to halt when the host sends
 * too much data, implementing this would cause an unavoidable race.
 * The driver will always use the "no-stall" approach for OUT transfers.
 *
 * One subtle point concerns sending status-stage responses for ep0
 * requests.  Some of these requests, such as device reset, can involve
 * interrupting an ongoing file I/O operation, which might take an
 * arbitrarily long time.  During that delay the host might give up on
 * the original ep0 request and issue a new one.  When that happens the
 * driver should not notify the host about completion of the original
 * request, as the host will no longer be waiting for it.  So the driver
 * assigns to each ep0 request a unique tag, and it keeps track of the
 * tag value of the request associated with a long-running exception
 * (device-reset, interface-change, or configuration-change).  When the
 * exception handler is finished, the status-stage response is submitted
 * only if the current ep0 request tag is equal to the exception request
 * tag.  Thus only the most recently received ep0 request will get a
 * status-stage response.
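 *
 * In code, the check at the end of the exception handler reduces to a
 * single comparison (shown here as an illustrative sketch):
 *
 *	if (fsg->ep0_req_tag == exception_req_tag)
 *		ep0_queue(fsg);		// Complete the delayed status stage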
 *
 * Warning: This driver source file is too long.  It ought to be split up
 * into a header file plus about 3 separate .c files, to handle the details
 * of the Gadget, USB Mass Storage, and SCSI protocols.
 */
/* #define VERBOSE_DEBUG */
/* #define DUMP_MSGS */

#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/dcache.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/kref.h>
#include <linux/kthread.h>
#include <linux/limits.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/freezer.h>
#include <linux/utsname.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "gadget_chips.h"
/*
 * Kbuild is not very cooperative with respect to linking separately
 * compiled library objects into one module.  So for now we won't use
 * separate compilation ... ensuring init/exit sections work to shrink
 * the runtime footprint, and giving us at least some parts of what
 * a "gcc --combine ... part1.c part2.c part3.c ..." build would.
 */
#include "usbstring.c"
#include "epautoconf.c"

/*-------------------------------------------------------------------------*/
#define DRIVER_DESC		"File-backed Storage Gadget"
#define DRIVER_NAME		"g_file_storage"
#define DRIVER_VERSION		"1 September 2010"

static char fsg_string_manufacturer[64];
static const char fsg_string_product[] = DRIVER_DESC;
static const char fsg_string_config[] = "Self-powered";
static const char fsg_string_interface[] = "Mass Storage";

#include "storage_common.c"

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Alan Stern");
MODULE_LICENSE("Dual BSD/GPL");

/*
 * This driver assumes self-powered hardware and has no way for users to
 * trigger remote wakeup.  It uses autoconfiguration to select endpoints
 * and endpoint addresses.
 */

/*-------------------------------------------------------------------------*/
/* Encapsulate the module parameter settings */

static struct {
	char		*file[FSG_MAX_LUNS];
	char		*serial;
	int		ro[FSG_MAX_LUNS];
	int		nofua[FSG_MAX_LUNS];
	unsigned int	num_filenames;
	unsigned int	num_ros;
	unsigned int	num_nofuas;
	unsigned int	nluns;

	int		removable;
	int		can_stall;
	int		cdrom;

	char		*transport_parm;
	char		*protocol_parm;
	unsigned short	vendor;
	unsigned short	product;
	unsigned short	release;
	unsigned int	buflen;

	int		transport_type;
	char		*transport_name;
	int		protocol_type;
	char		*protocol_name;

} mod_data = {					// Default values
	.transport_parm		= "BBB",
	.protocol_parm		= "SCSI",
	.removable		= 0,
	.can_stall		= 1,
	.cdrom			= 0,
	.vendor			= FSG_VENDOR_ID,
	.product		= FSG_PRODUCT_ID,
	.release		= 0xffff,	// Use controller chip type
	.buflen			= 16384,
	};
module_param_array_named(file, mod_data.file, charp, &mod_data.num_filenames,
		S_IRUGO);
MODULE_PARM_DESC(file, "names of backing files or devices");

module_param_named(serial, mod_data.serial, charp, S_IRUGO);
MODULE_PARM_DESC(serial, "USB serial number");

module_param_array_named(ro, mod_data.ro, bool, &mod_data.num_ros, S_IRUGO);
MODULE_PARM_DESC(ro, "true to force read-only");

module_param_array_named(nofua, mod_data.nofua, bool, &mod_data.num_nofuas,
		S_IRUGO);
MODULE_PARM_DESC(nofua, "true to ignore SCSI WRITE(10,12) FUA bit");

module_param_named(luns, mod_data.nluns, uint, S_IRUGO);
MODULE_PARM_DESC(luns, "number of LUNs");

module_param_named(removable, mod_data.removable, bool, S_IRUGO);
MODULE_PARM_DESC(removable, "true to simulate removable media");

module_param_named(stall, mod_data.can_stall, bool, S_IRUGO);
MODULE_PARM_DESC(stall, "false to prevent bulk stalls");

module_param_named(cdrom, mod_data.cdrom, bool, S_IRUGO);
MODULE_PARM_DESC(cdrom, "true to emulate cdrom instead of disk");
/* In the non-TEST version, only the module parameters listed above
 * are available. */
#ifdef CONFIG_USB_FILE_STORAGE_TEST

module_param_named(transport, mod_data.transport_parm, charp, S_IRUGO);
MODULE_PARM_DESC(transport, "type of transport (BBB, CBI, or CB)");

module_param_named(protocol, mod_data.protocol_parm, charp, S_IRUGO);
MODULE_PARM_DESC(protocol, "type of protocol (RBC, 8020, QIC, UFI, "
		"8070, or SCSI)");

module_param_named(vendor, mod_data.vendor, ushort, S_IRUGO);
MODULE_PARM_DESC(vendor, "USB Vendor ID");

module_param_named(product, mod_data.product, ushort, S_IRUGO);
MODULE_PARM_DESC(product, "USB Product ID");

module_param_named(release, mod_data.release, ushort, S_IRUGO);
MODULE_PARM_DESC(release, "USB release number");

module_param_named(buflen, mod_data.buflen, uint, S_IRUGO);
MODULE_PARM_DESC(buflen, "I/O buffer size");

#endif /* CONFIG_USB_FILE_STORAGE_TEST */
/*
 * These definitions will permit the compiler to avoid generating code for
 * parts of the driver that aren't used in the non-TEST version.  Even gcc
 * can recognize when a test of a constant expression yields a dead code
 * path.
 */

#ifdef CONFIG_USB_FILE_STORAGE_TEST

#define transport_is_bbb()	(mod_data.transport_type == USB_PR_BULK)
#define transport_is_cbi()	(mod_data.transport_type == USB_PR_CBI)
#define protocol_is_scsi()	(mod_data.protocol_type == USB_SC_SCSI)

#else

#define transport_is_bbb()	1
#define transport_is_cbi()	0
#define protocol_is_scsi()	1

#endif /* CONFIG_USB_FILE_STORAGE_TEST */
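/* For example, in the non-TEST build a test such as
 * "if (!transport_is_bbb())" becomes "if (0)", so the CB/CBI-only branch
 * it guards is discarded as dead code at compile time. */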
/*-------------------------------------------------------------------------*/

struct fsg_dev {
	/* lock protects: state, all the req_busy's, and cbbuf_cmnd */
	spinlock_t		lock;
	struct usb_gadget	*gadget;

	/* filesem protects: backing files in use */
	struct rw_semaphore	filesem;

	/* reference counting: wait until all LUNs are released */

	struct usb_ep		*ep0;		// Handy copy of gadget->ep0
	struct usb_request	*ep0req;	// For control responses
	unsigned int		ep0_req_tag;
	const char		*ep0req_name;

	struct usb_request	*intreq;	// For interrupt responses
	struct fsg_buffhd	*intr_buffhd;

	unsigned int		bulk_out_maxpacket;
	enum fsg_state		state;		// For exception handling
	unsigned int		exception_req_tag;

	u8			config, new_config;

	unsigned int		running : 1;
	unsigned int		bulk_in_enabled : 1;
	unsigned int		bulk_out_enabled : 1;
	unsigned int		intr_in_enabled : 1;
	unsigned int		phase_error : 1;
	unsigned int		short_packet_received : 1;
	unsigned int		bad_lun_okay : 1;

	unsigned long		atomic_bitflags;
#define IGNORE_BULK_OUT		1

	struct usb_ep		*bulk_in;
	struct usb_ep		*bulk_out;
	struct usb_ep		*intr_in;

	struct fsg_buffhd	*next_buffhd_to_fill;
	struct fsg_buffhd	*next_buffhd_to_drain;
	struct fsg_buffhd	buffhds[FSG_NUM_BUFFERS];

	int			thread_wakeup_needed;
	struct completion	thread_notifier;
	struct task_struct	*thread_task;

	u8			cmnd[MAX_COMMAND_SIZE];
	enum data_direction	data_dir;
	u32			data_size_from_cmnd;

	/* The CB protocol offers no way for a host to know when a command
	 * has completed.  As a result the next command may arrive early,
	 * and we will still have to handle it.  For that reason we need
	 * a buffer to store new commands when using CB (or CBI, which
	 * does not oblige a host to wait for command completion either). */
	u8			cbbuf_cmnd[MAX_COMMAND_SIZE];

	struct fsg_lun		*luns;
	struct fsg_lun		*curlun;
};

typedef void (*fsg_routine_t)(struct fsg_dev *);

static int exception_in_progress(struct fsg_dev *fsg)
{
	return (fsg->state > FSG_STATE_IDLE);
}
/* Make bulk-out requests be divisible by the maxpacket size */
static void set_bulk_out_req_length(struct fsg_dev *fsg,
		struct fsg_buffhd *bh, unsigned int length)
{
	unsigned int	rem;

	bh->bulk_out_intended_length = length;
	rem = length % fsg->bulk_out_maxpacket;
	if (rem > 0)
		length += fsg->bulk_out_maxpacket - rem;
	bh->outreq->length = length;
}
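/* A worked example of the rounding above (illustrative only): with a
 * 512-byte bulk-out maxpacket and an intended length of 600 bytes,
 * rem = 600 % 512 = 88, so the queued request length is padded to
 * 600 + (512 - 88) = 1024 bytes -- the next multiple of the maxpacket
 * size -- while bulk_out_intended_length still records the original 600. */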
static struct fsg_dev			*the_fsg;
static struct usb_gadget_driver		fsg_driver;


/*-------------------------------------------------------------------------*/
static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
{
	const char	*name;

	if (ep == fsg->bulk_in)
		name = "bulk-in";
	else if (ep == fsg->bulk_out)
		name = "bulk-out";
	else
		name = ep->name;
	DBG(fsg, "%s set halt\n", name);
	return usb_ep_set_halt(ep);
}


/*-------------------------------------------------------------------------*/
/*
 * DESCRIPTORS ... most are static, but strings and (full) configuration
 * descriptors are built on demand.  Also the (static) config and interface
 * descriptors are adjusted during fsg_bind().
 */

/* There is only one configuration. */
#define CONFIG_VALUE		1
static struct usb_device_descriptor
device_desc = {
	.bLength =		sizeof device_desc,
	.bDescriptorType =	USB_DT_DEVICE,

	.bcdUSB =		cpu_to_le16(0x0200),
	.bDeviceClass =		USB_CLASS_PER_INTERFACE,

	/* The next three values can be overridden by module parameters */
	.idVendor =		cpu_to_le16(FSG_VENDOR_ID),
	.idProduct =		cpu_to_le16(FSG_PRODUCT_ID),
	.bcdDevice =		cpu_to_le16(0xffff),

	.iManufacturer =	FSG_STRING_MANUFACTURER,
	.iProduct =		FSG_STRING_PRODUCT,
	.iSerialNumber =	FSG_STRING_SERIAL,
	.bNumConfigurations =	1,
};

static struct usb_config_descriptor
config_desc = {
	.bLength =		sizeof config_desc,
	.bDescriptorType =	USB_DT_CONFIG,

	/* wTotalLength computed by usb_gadget_config_buf() */
	.bConfigurationValue =	CONFIG_VALUE,
	.iConfiguration =	FSG_STRING_CONFIG,
	.bmAttributes =		USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
	.bMaxPower =		CONFIG_USB_GADGET_VBUS_DRAW / 2,
};

static struct usb_qualifier_descriptor
dev_qualifier = {
	.bLength =		sizeof dev_qualifier,
	.bDescriptorType =	USB_DT_DEVICE_QUALIFIER,

	.bcdUSB =		cpu_to_le16(0x0200),
	.bDeviceClass =		USB_CLASS_PER_INTERFACE,

	.bNumConfigurations =	1,
};
/*
 * Config descriptors must agree with the code that sets configurations
 * and with code managing interfaces and their altsettings.  They must
 * also handle different speeds and other-speed requests.
 */
static int populate_config_buf(struct usb_gadget *gadget,
		u8 *buf, u8 type, unsigned index)
{
	enum usb_device_speed			speed = gadget->speed;
	int					len;
	const struct usb_descriptor_header	**function;

	if (gadget_is_dualspeed(gadget) && type == USB_DT_OTHER_SPEED_CONFIG)
		speed = (USB_SPEED_FULL + USB_SPEED_HIGH) - speed;
	function = gadget_is_dualspeed(gadget) && speed == USB_SPEED_HIGH
		? (const struct usb_descriptor_header **)fsg_hs_function
		: (const struct usb_descriptor_header **)fsg_fs_function;

	/* for now, don't advertise srp-only devices */
	if (!gadget_is_otg(gadget))
		function++;

	len = usb_gadget_config_buf(&config_desc, buf, EP0_BUFSIZE, function);
	((struct usb_config_descriptor *) buf)->bDescriptorType = type;
	return len;
}


/*-------------------------------------------------------------------------*/
/* These routines may be called in process context or in_irq */

/* Caller must hold fsg->lock */
static void wakeup_thread(struct fsg_dev *fsg)
{
	/* Tell the main thread that something has happened */
	fsg->thread_wakeup_needed = 1;
	if (fsg->thread_task)
		wake_up_process(fsg->thread_task);
}
static void raise_exception(struct fsg_dev *fsg, enum fsg_state new_state)
{
	unsigned long	flags;

	/* Do nothing if a higher-priority exception is already in progress.
	 * If a lower-or-equal priority exception is in progress, preempt it
	 * and notify the main thread by sending it a signal. */
	spin_lock_irqsave(&fsg->lock, flags);
	if (fsg->state <= new_state) {
		fsg->exception_req_tag = fsg->ep0_req_tag;
		fsg->state = new_state;
		if (fsg->thread_task)
			send_sig_info(SIGUSR1, SEND_SIG_FORCED,
					fsg->thread_task);
	}
	spin_unlock_irqrestore(&fsg->lock, flags);
}


/*-------------------------------------------------------------------------*/
/* The disconnect callback and ep0 routines.  These always run in_irq,
 * except that ep0_queue() is called in the main thread to acknowledge
 * completion of various requests: set config, set interface, and
 * Bulk-only device reset. */

static void fsg_disconnect(struct usb_gadget *gadget)
{
	struct fsg_dev	*fsg = get_gadget_data(gadget);

	DBG(fsg, "disconnect or port reset\n");
	raise_exception(fsg, FSG_STATE_DISCONNECT);
}
static int ep0_queue(struct fsg_dev *fsg)
{
	int	rc;

	rc = usb_ep_queue(fsg->ep0, fsg->ep0req, GFP_ATOMIC);
	if (rc != 0 && rc != -ESHUTDOWN) {

		/* We can't do much more than wait for a reset */
		WARNING(fsg, "error in submission: %s --> %d\n",
				fsg->ep0->name, rc);
	}
	return rc;
}
static void ep0_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct fsg_dev	*fsg = ep->driver_data;

	dump_msg(fsg, fsg->ep0req_name, req->buf, req->actual);
	if (req->status || req->actual != req->length)
		DBG(fsg, "%s --> %d, %u/%u\n", __func__,
				req->status, req->actual, req->length);
	if (req->status == -ECONNRESET)		// Request was cancelled
		usb_ep_fifo_flush(ep);

	if (req->status == 0 && req->context)
		((fsg_routine_t) (req->context))(fsg);
}


/*-------------------------------------------------------------------------*/
/* Bulk and interrupt endpoint completion handlers.
 * These always run in_irq. */

static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct fsg_dev		*fsg = ep->driver_data;
	struct fsg_buffhd	*bh = req->context;

	if (req->status || req->actual != req->length)
		DBG(fsg, "%s --> %d, %u/%u\n", __func__,
				req->status, req->actual, req->length);
	if (req->status == -ECONNRESET)		// Request was cancelled
		usb_ep_fifo_flush(ep);

	/* Hold the lock while we update the request and buffer states */
	spin_lock(&fsg->lock);
	bh->inreq_busy = 0;
	bh->state = BUF_STATE_EMPTY;
	wakeup_thread(fsg);
	spin_unlock(&fsg->lock);
}
static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct fsg_dev		*fsg = ep->driver_data;
	struct fsg_buffhd	*bh = req->context;

	dump_msg(fsg, "bulk-out", req->buf, req->actual);
	if (req->status || req->actual != bh->bulk_out_intended_length)
		DBG(fsg, "%s --> %d, %u/%u\n", __func__,
				req->status, req->actual,
				bh->bulk_out_intended_length);
	if (req->status == -ECONNRESET)		// Request was cancelled
		usb_ep_fifo_flush(ep);

	/* Hold the lock while we update the request and buffer states */
	spin_lock(&fsg->lock);
	bh->outreq_busy = 0;
	bh->state = BUF_STATE_FULL;
	wakeup_thread(fsg);
	spin_unlock(&fsg->lock);
}
#ifdef CONFIG_USB_FILE_STORAGE_TEST
static void intr_in_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct fsg_dev		*fsg = ep->driver_data;
	struct fsg_buffhd	*bh = req->context;

	if (req->status || req->actual != req->length)
		DBG(fsg, "%s --> %d, %u/%u\n", __func__,
				req->status, req->actual, req->length);
	if (req->status == -ECONNRESET)		// Request was cancelled
		usb_ep_fifo_flush(ep);

	/* Hold the lock while we update the request and buffer states */
	spin_lock(&fsg->lock);
	fsg->intreq_busy = 0;
	bh->state = BUF_STATE_EMPTY;
	wakeup_thread(fsg);
	spin_unlock(&fsg->lock);
}

#else
static void intr_in_complete(struct usb_ep *ep, struct usb_request *req)
{}
#endif /* CONFIG_USB_FILE_STORAGE_TEST */
/*-------------------------------------------------------------------------*/

/* Ep0 class-specific handlers.  These always run in_irq. */

#ifdef CONFIG_USB_FILE_STORAGE_TEST
static void received_cbi_adsc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
	struct usb_request	*req = fsg->ep0req;
	static u8		cbi_reset_cmnd[6] = {
			SEND_DIAGNOSTIC, 4, 0xff, 0xff, 0xff, 0xff};

	/* Error in command transfer? */
	if (req->status || req->length != req->actual ||
			req->actual < 6 || req->actual > MAX_COMMAND_SIZE) {

		/* Not all controllers allow a protocol stall after
		 * receiving control-out data, but we'll try anyway. */
		fsg_set_halt(fsg, fsg->ep0);
		return;			// Wait for reset
	}

	/* Is it the special reset command? */
	if (req->actual >= sizeof cbi_reset_cmnd &&
			memcmp(req->buf, cbi_reset_cmnd,
				sizeof cbi_reset_cmnd) == 0) {

		/* Raise an exception to stop the current operation
		 * and reinitialize our state. */
		DBG(fsg, "cbi reset request\n");
		raise_exception(fsg, FSG_STATE_RESET);
		return;
	}

	VDBG(fsg, "CB[I] accept device-specific command\n");
	spin_lock(&fsg->lock);

	/* Save the command for later */
	if (fsg->cbbuf_cmnd_size)
		WARNING(fsg, "CB[I] overwriting previous command\n");
	fsg->cbbuf_cmnd_size = req->actual;
	memcpy(fsg->cbbuf_cmnd, req->buf, fsg->cbbuf_cmnd_size);

	wakeup_thread(fsg);
	spin_unlock(&fsg->lock);
}

#else
static void received_cbi_adsc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{}
#endif /* CONFIG_USB_FILE_STORAGE_TEST */
static int class_setup_req(struct fsg_dev *fsg,
		const struct usb_ctrlrequest *ctrl)
{
	struct usb_request	*req = fsg->ep0req;
	int			value = -EOPNOTSUPP;
	u16			w_index = le16_to_cpu(ctrl->wIndex);
	u16			w_value = le16_to_cpu(ctrl->wValue);
	u16			w_length = le16_to_cpu(ctrl->wLength);

	/* Handle Bulk-only class-specific requests */
	if (transport_is_bbb()) {
		switch (ctrl->bRequest) {

		case USB_BULK_RESET_REQUEST:
			if (ctrl->bRequestType != (USB_DIR_OUT |
					USB_TYPE_CLASS | USB_RECIP_INTERFACE))
				break;
			if (w_index != 0 || w_value != 0) {
				value = -EDOM;
				break;
			}

			/* Raise an exception to stop the current operation
			 * and reinitialize our state. */
			DBG(fsg, "bulk reset request\n");
			raise_exception(fsg, FSG_STATE_RESET);
			value = DELAYED_STATUS;
			break;

		case USB_BULK_GET_MAX_LUN_REQUEST:
			if (ctrl->bRequestType != (USB_DIR_IN |
					USB_TYPE_CLASS | USB_RECIP_INTERFACE))
				break;
			if (w_index != 0 || w_value != 0) {
				value = -EDOM;
				break;
			}
			VDBG(fsg, "get max LUN\n");
			*(u8 *) req->buf = fsg->nluns - 1;
			value = 1;
			break;
		}
	}

	/* Handle CBI class-specific requests */
	else {
		switch (ctrl->bRequest) {

		case USB_CBI_ADSC_REQUEST:
			if (ctrl->bRequestType != (USB_DIR_OUT |
					USB_TYPE_CLASS | USB_RECIP_INTERFACE))
				break;
			if (w_index != 0 || w_value != 0) {
				value = -EDOM;
				break;
			}
			if (w_length > MAX_COMMAND_SIZE) {
				value = -EOVERFLOW;
				break;
			}
			value = w_length;
			fsg->ep0req->context = received_cbi_adsc;
			break;
		}
	}

	if (value == -EOPNOTSUPP)
		VDBG(fsg,
			"unknown class-specific control req "
			"%02x.%02x v%04x i%04x l%u\n",
			ctrl->bRequestType, ctrl->bRequest,
			le16_to_cpu(ctrl->wValue), w_index, w_length);
	return value;
}


/*-------------------------------------------------------------------------*/
/* Ep0 standard request handlers.  These always run in_irq. */

static int standard_setup_req(struct fsg_dev *fsg,
		const struct usb_ctrlrequest *ctrl)
{
	struct usb_request	*req = fsg->ep0req;
	int			value = -EOPNOTSUPP;
	u16			w_index = le16_to_cpu(ctrl->wIndex);
	u16			w_value = le16_to_cpu(ctrl->wValue);

	/* Usually this just stores reply data in the pre-allocated ep0 buffer,
	 * but config change events will also reconfigure hardware. */
	switch (ctrl->bRequest) {

	case USB_REQ_GET_DESCRIPTOR:
		if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
				USB_RECIP_DEVICE))
			break;
		switch (w_value >> 8) {

		case USB_DT_DEVICE:
			VDBG(fsg, "get device descriptor\n");
			value = sizeof device_desc;
			memcpy(req->buf, &device_desc, value);
			break;
		case USB_DT_DEVICE_QUALIFIER:
			VDBG(fsg, "get device qualifier\n");
			if (!gadget_is_dualspeed(fsg->gadget))
				break;
			value = sizeof dev_qualifier;
			memcpy(req->buf, &dev_qualifier, value);
			break;

		case USB_DT_OTHER_SPEED_CONFIG:
			VDBG(fsg, "get other-speed config descriptor\n");
			if (!gadget_is_dualspeed(fsg->gadget))
				break;
			goto get_config;
		case USB_DT_CONFIG:
			VDBG(fsg, "get configuration descriptor\n");
get_config:
			value = populate_config_buf(fsg->gadget,
					req->buf,
					w_value >> 8,
					w_value & 0xff);
			break;

		case USB_DT_STRING:
			VDBG(fsg, "get string descriptor\n");

			/* wIndex == language code */
			value = usb_gadget_get_string(&fsg_stringtab,
					w_value & 0xff, req->buf);
			break;
		}
		break;

	/* One config, two speeds */
	case USB_REQ_SET_CONFIGURATION:
		if (ctrl->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD |
				USB_RECIP_DEVICE))
			break;
		VDBG(fsg, "set configuration\n");
		if (w_value == CONFIG_VALUE || w_value == 0) {
			fsg->new_config = w_value;

			/* Raise an exception to wipe out previous transaction
			 * state (queued bufs, etc) and set the new config. */
			raise_exception(fsg, FSG_STATE_CONFIG_CHANGE);
			value = DELAYED_STATUS;
		}
		break;
	case USB_REQ_GET_CONFIGURATION:
		if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
				USB_RECIP_DEVICE))
			break;
		VDBG(fsg, "get configuration\n");
		*(u8 *) req->buf = fsg->config;
		value = 1;
		break;

	case USB_REQ_SET_INTERFACE:
		if (ctrl->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD |
				USB_RECIP_INTERFACE))
			break;
		if (fsg->config && w_index == 0) {

			/* Raise an exception to wipe out previous transaction
			 * state (queued bufs, etc) and install the new
			 * interface altsetting. */
			raise_exception(fsg, FSG_STATE_INTERFACE_CHANGE);
			value = DELAYED_STATUS;
		}
		break;
	case USB_REQ_GET_INTERFACE:
		if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
				USB_RECIP_INTERFACE))
			break;
		if (!fsg->config)
			break;
		if (w_index != 0) {
			value = -EDOM;
			break;
		}
		VDBG(fsg, "get interface\n");
		*(u8 *) req->buf = 0;
		value = 1;
		break;

	default:
		VDBG(fsg,
			"unknown control req %02x.%02x v%04x i%04x l%u\n",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, w_index, le16_to_cpu(ctrl->wLength));
	}

	return value;
}
static int fsg_setup(struct usb_gadget *gadget,
		const struct usb_ctrlrequest *ctrl)
{
	struct fsg_dev		*fsg = get_gadget_data(gadget);
	int			rc;
	int			w_length = le16_to_cpu(ctrl->wLength);

	++fsg->ep0_req_tag;		// Record arrival of a new request
	fsg->ep0req->context = NULL;
	fsg->ep0req->length = 0;
	dump_msg(fsg, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl));

	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS)
		rc = class_setup_req(fsg, ctrl);
	else
		rc = standard_setup_req(fsg, ctrl);

	/* Respond with data/status or defer until later? */
	if (rc >= 0 && rc != DELAYED_STATUS) {
		rc = min(rc, w_length);
		fsg->ep0req->length = rc;
		fsg->ep0req->zero = rc < w_length;
		fsg->ep0req_name = (ctrl->bRequestType & USB_DIR_IN ?
				"ep0-in" : "ep0-out");
		rc = ep0_queue(fsg);
	}

	/* Device either stalls (rc < 0) or reports success */
	return rc;
}


/*-------------------------------------------------------------------------*/
/* All the following routines run in process context */


/* Use this for bulk or interrupt transfers, not ep0 */
static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
		struct usb_request *req, int *pbusy,
		enum fsg_buffer_state *state)
{
	int	rc;

	if (ep == fsg->bulk_in)
		dump_msg(fsg, "bulk-in", req->buf, req->length);
	else if (ep == fsg->intr_in)
		dump_msg(fsg, "intr-in", req->buf, req->length);

	spin_lock_irq(&fsg->lock);
	*pbusy = 1;
	*state = BUF_STATE_BUSY;
	spin_unlock_irq(&fsg->lock);
	rc = usb_ep_queue(ep, req, GFP_KERNEL);
	if (rc != 0) {
		*pbusy = 0;
		*state = BUF_STATE_EMPTY;

		/* We can't do much more than wait for a reset */

		/* Note: currently the net2280 driver fails zero-length
		 * submissions if DMA is enabled. */
		if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
						req->length == 0))
			WARNING(fsg, "error in submission: %s --> %d\n",
					ep->name, rc);
	}
}
static int sleep_thread(struct fsg_dev *fsg)
{
	int	rc = 0;

	/* Wait until a signal arrives or we are woken up */
	for (;;) {
		try_to_freeze();
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current)) {
			rc = -EINTR;
			break;
		}
		if (fsg->thread_wakeup_needed)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	fsg->thread_wakeup_needed = 0;
	return rc;
}


/*-------------------------------------------------------------------------*/
static int do_read(struct fsg_dev *fsg)
{
	struct fsg_lun		*curlun = fsg->curlun;
	u32			lba;
	struct fsg_buffhd	*bh;
	int			rc;
	u32			amount_left;
	loff_t			file_offset, file_offset_tmp;
	unsigned int		amount;
	unsigned int		partial_page;
	ssize_t			nread;

	/* Get the starting Logical Block Address and check that it's
	 * not too big */
	if (fsg->cmnd[0] == READ_6)
		lba = get_unaligned_be24(&fsg->cmnd[1]);
	else {
		lba = get_unaligned_be32(&fsg->cmnd[2]);

		/* We allow DPO (Disable Page Out = don't save data in the
		 * cache) and FUA (Force Unit Access = don't read from the
		 * cache), but we don't implement them. */
		if ((fsg->cmnd[1] & ~0x18) != 0) {
			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}
	file_offset = ((loff_t) lba) << 9;

	/* Carry out the file reads */
	amount_left = fsg->data_size_from_cmnd;
	if (unlikely(amount_left == 0))
		return -EIO;		// No default reply

	for (;;) {

		/* Figure out how much we need to read:
		 * Try to read the remaining amount.
		 * But don't read more than the buffer size.
		 * And don't try to read past the end of the file.
		 * Finally, if we're not at a page boundary, don't read past
		 * the next page.
		 * If this means reading 0 then we were asked to read past
		 * the end of file. */
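		/* For instance (illustrative numbers only): with buflen=16384,
		 * 8192 bytes left to read, and file_offset 3072 bytes into a
		 * 4096-byte page, the clamps below yield
		 * amount = min(8192, 16384) = 8192, then
		 * amount = min(8192, 4096 - 3072) = 1024, so this pass reads
		 * only up to the next page boundary. */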
		amount = min((unsigned int) amount_left, mod_data.buflen);
		amount = min((loff_t) amount,
				curlun->file_length - file_offset);
		partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
		if (partial_page > 0)
			amount = min(amount, (unsigned int) PAGE_CACHE_SIZE -
					partial_page);

		/* Wait for the next buffer to become available */
		bh = fsg->next_buffhd_to_fill;
		while (bh->state != BUF_STATE_EMPTY) {
			rc = sleep_thread(fsg);
			if (rc)
				return rc;
		}

		/* If we were asked to read past the end of file,
		 * end with an empty buffer. */
		if (amount == 0) {
			curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
			curlun->sense_data_info = file_offset >> 9;
			curlun->info_valid = 1;
			bh->inreq->length = 0;
			bh->state = BUF_STATE_FULL;
			break;
		}

		/* Perform the read */
		file_offset_tmp = file_offset;
		nread = vfs_read(curlun->filp,
				(char __user *) bh->buf,
				amount, &file_offset_tmp);
		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
				(unsigned long long) file_offset,
				(int) nread);
		if (signal_pending(current))
			return -EINTR;

		if (nread < 0) {
			LDBG(curlun, "error in file read: %d\n",
					(int) nread);
			nread = 0;
		} else if (nread < amount) {
			LDBG(curlun, "partial file read: %d/%u\n",
					(int) nread, amount);
			nread -= (nread & 511);	// Round down to a block
		}
		file_offset += nread;
		amount_left -= nread;
		fsg->residue -= nread;
		bh->inreq->length = nread;
		bh->state = BUF_STATE_FULL;

		/* If an error occurred, report it and its position */
		if (nread < amount) {
			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
			curlun->sense_data_info = file_offset >> 9;
			curlun->info_valid = 1;
			break;
		}

		if (amount_left == 0)
			break;		// No more left to read

		/* Send this buffer and go read some more */
		bh->inreq->zero = 0;
		start_transfer(fsg, fsg->bulk_in, bh->inreq,
				&bh->inreq_busy, &bh->state);
		fsg->next_buffhd_to_fill = bh->next;
	}

	return -EIO;		// No default reply
}


/*-------------------------------------------------------------------------*/
static int do_write(struct fsg_dev *fsg)
{
	struct fsg_lun		*curlun = fsg->curlun;
	u32			lba;
	struct fsg_buffhd	*bh;
	int			get_some_more;
	u32			amount_left_to_req, amount_left_to_write;
	loff_t			usb_offset, file_offset, file_offset_tmp;
	unsigned int		amount;
	unsigned int		partial_page;
	ssize_t			nwritten;
	int			rc;

	if (curlun->ro) {
		curlun->sense_data = SS_WRITE_PROTECTED;
		return -EINVAL;
	}
	spin_lock(&curlun->filp->f_lock);
	curlun->filp->f_flags &= ~O_SYNC;	// Default is not to wait
	spin_unlock(&curlun->filp->f_lock);

	/* Get the starting Logical Block Address and check that it's
	 * not too big */
	if (fsg->cmnd[0] == WRITE_6)
		lba = get_unaligned_be24(&fsg->cmnd[1]);
	else {
		lba = get_unaligned_be32(&fsg->cmnd[2]);

		/* We allow DPO (Disable Page Out = don't save data in the
		 * cache) and FUA (Force Unit Access = write directly to the
		 * medium).  We don't implement DPO; we implement FUA by
		 * performing synchronous output. */
		if ((fsg->cmnd[1] & ~0x18) != 0) {
			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
		if (!curlun->nofua && (fsg->cmnd[1] & 0x08)) {
			spin_lock(&curlun->filp->f_lock);
			curlun->filp->f_flags |= O_DSYNC;
			spin_unlock(&curlun->filp->f_lock);
		}
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	/* Carry out the file writes */
	get_some_more = 1;
	file_offset = usb_offset = ((loff_t) lba) << 9;
	amount_left_to_req = amount_left_to_write = fsg->data_size_from_cmnd;

	while (amount_left_to_write > 0) {

		/* Queue a request for more data from the host */
		bh = fsg->next_buffhd_to_fill;
		if (bh->state == BUF_STATE_EMPTY && get_some_more) {

			/* Figure out how much we want to get:
			 * Try to get the remaining amount.
			 * But don't get more than the buffer size.
			 * And don't try to go past the end of the file.
			 * If we're not at a page boundary,
			 * don't go past the next page.
			 * If this means getting 0, then we were asked
			 * to write past the end of file.
			 * Finally, round down to a block boundary. */
			amount = min(amount_left_to_req, mod_data.buflen);
			amount = min((loff_t) amount, curlun->file_length -
					usb_offset);
			partial_page = usb_offset & (PAGE_CACHE_SIZE - 1);
			if (partial_page > 0)
				amount = min(amount,
					(unsigned int) PAGE_CACHE_SIZE - partial_page);

			if (amount == 0) {
				get_some_more = 0;
				curlun->sense_data =
					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
				curlun->sense_data_info = usb_offset >> 9;
				curlun->info_valid = 1;
				continue;
			}
			amount -= (amount & 511);
			if (amount == 0) {

				/* Why were we asked to transfer a
				 * partial block? */
				get_some_more = 0;
				continue;
			}

			/* Get the next buffer */
			usb_offset += amount;
			fsg->usb_amount_left -= amount;
			amount_left_to_req -= amount;
			if (amount_left_to_req == 0)
				get_some_more = 0;

			/* amount is always divisible by 512, hence by
			 * the bulk-out maxpacket size */
			bh->outreq->length = bh->bulk_out_intended_length =
					amount;
			bh->outreq->short_not_ok = 1;
			start_transfer(fsg, fsg->bulk_out, bh->outreq,
					&bh->outreq_busy, &bh->state);
			fsg->next_buffhd_to_fill = bh->next;
			continue;
		}

		/* Write the received data to the backing file */
		bh = fsg->next_buffhd_to_drain;
		if (bh->state == BUF_STATE_EMPTY && !get_some_more)
			break;			// We stopped early
		if (bh->state == BUF_STATE_FULL) {
			fsg->next_buffhd_to_drain = bh->next;
			bh->state = BUF_STATE_EMPTY;

			/* Did something go wrong with the transfer? */
			if (bh->outreq->status != 0) {
				curlun->sense_data = SS_COMMUNICATION_FAILURE;
				curlun->sense_data_info = file_offset >> 9;
				curlun->info_valid = 1;
				break;
			}

			amount = bh->outreq->actual;
			if (curlun->file_length - file_offset < amount) {
				LERROR(curlun,
					"write %u @ %llu beyond end %llu\n",
					amount, (unsigned long long) file_offset,
					(unsigned long long) curlun->file_length);
				amount = curlun->file_length - file_offset;
			}

			/* Perform the write */
			file_offset_tmp = file_offset;
			nwritten = vfs_write(curlun->filp,
					(char __user *) bh->buf,
					amount, &file_offset_tmp);
			VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
					(unsigned long long) file_offset,
					(int) nwritten);
			if (signal_pending(current))
				return -EINTR;		// Interrupted!

			if (nwritten < 0) {
				LDBG(curlun, "error in file write: %d\n",
						(int) nwritten);
				nwritten = 0;
			} else if (nwritten < amount) {
				LDBG(curlun, "partial file write: %d/%u\n",
						(int) nwritten, amount);
				nwritten -= (nwritten & 511);
						// Round down to a block
			}
			file_offset += nwritten;
			amount_left_to_write -= nwritten;
			fsg->residue -= nwritten;

			/* If an error occurred, report it and its position */
			if (nwritten < amount) {
				curlun->sense_data = SS_WRITE_ERROR;
				curlun->sense_data_info = file_offset >> 9;
				curlun->info_valid = 1;
				break;
			}

			/* Did the host decide to stop early? */
			if (bh->outreq->actual != bh->outreq->length) {
				fsg->short_packet_received = 1;
				break;
			}
			continue;
		}

		/* Wait for something to happen */
		rc = sleep_thread(fsg);
		if (rc)
			return rc;
	}

	return -EIO;		// No default reply
}


/*-------------------------------------------------------------------------*/
static int do_synchronize_cache(struct fsg_dev *fsg)
{
	struct fsg_lun	*curlun = fsg->curlun;
	int		rc;

	/* We ignore the requested LBA and write out all file's
	 * dirty data buffers. */
	rc = fsg_lun_fsync_sub(curlun);
	if (rc)
		curlun->sense_data = SS_WRITE_ERROR;
	return 0;
}


/*-------------------------------------------------------------------------*/
static void invalidate_sub(struct fsg_lun *curlun)
{
	struct file	*filp = curlun->filp;
	struct inode	*inode = filp->f_path.dentry->d_inode;
	unsigned long	rc;

	rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
	VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
}
*fsg
)
1468 struct fsg_lun
*curlun
= fsg
->curlun
;
1470 u32 verification_length
;
1471 struct fsg_buffhd
*bh
= fsg
->next_buffhd_to_fill
;
1472 loff_t file_offset
, file_offset_tmp
;
1474 unsigned int amount
;
1477 /* Get the starting Logical Block Address and check that it's
1479 lba
= get_unaligned_be32(&fsg
->cmnd
[2]);
1480 if (lba
>= curlun
->num_sectors
) {
1481 curlun
->sense_data
= SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE
;
1485 /* We allow DPO (Disable Page Out = don't save data in the
1486 * cache) but we don't implement it. */
1487 if ((fsg
->cmnd
[1] & ~0x10) != 0) {
1488 curlun
->sense_data
= SS_INVALID_FIELD_IN_CDB
;
1492 verification_length
= get_unaligned_be16(&fsg
->cmnd
[7]);
1493 if (unlikely(verification_length
== 0))
1494 return -EIO
; // No default reply
1496 /* Prepare to carry out the file verify */
1497 amount_left
= verification_length
<< 9;
1498 file_offset
= ((loff_t
) lba
) << 9;
1500 /* Write out all the dirty buffers before invalidating them */
1501 fsg_lun_fsync_sub(curlun
);
1502 if (signal_pending(current
))
1505 invalidate_sub(curlun
);
1506 if (signal_pending(current
))
1509 /* Just try to read the requested blocks */
1510 while (amount_left
> 0) {
1512 /* Figure out how much we need to read:
1513 * Try to read the remaining amount, but not more than
1515 * And don't try to read past the end of the file.
1516 * If this means reading 0 then we were asked to read
1517 * past the end of file. */
1518 amount
= min((unsigned int) amount_left
, mod_data
.buflen
);
1519 amount
= min((loff_t
) amount
,
1520 curlun
->file_length
- file_offset
);
1522 curlun
->sense_data
=
1523 SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE
;
1524 curlun
->sense_data_info
= file_offset
>> 9;
1525 curlun
->info_valid
= 1;
1529 /* Perform the read */
1530 file_offset_tmp
= file_offset
;
1531 nread
= vfs_read(curlun
->filp
,
1532 (char __user
*) bh
->buf
,
1533 amount
, &file_offset_tmp
);
1534 VLDBG(curlun
, "file read %u @ %llu -> %d\n", amount
,
1535 (unsigned long long) file_offset
,
1537 if (signal_pending(current
))
1541 LDBG(curlun
, "error in file verify: %d\n",
1544 } else if (nread
< amount
) {
1545 LDBG(curlun
, "partial file verify: %d/%u\n",
1546 (int) nread
, amount
);
1547 nread
-= (nread
& 511); // Round down to a sector
1550 curlun
->sense_data
= SS_UNRECOVERED_READ_ERROR
;
1551 curlun
->sense_data_info
= file_offset
>> 9;
1552 curlun
->info_valid
= 1;
1555 file_offset
+= nread
;
1556 amount_left
-= nread
;
1562 /*-------------------------------------------------------------------------*/
static int do_inquiry(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
	u8	*buf = (u8 *) bh->buf;

	static char vendor_id[] = "Linux   ";
	static char product_disk_id[] = "File-Stor Gadget";
	static char product_cdrom_id[] = "File-CD Gadget  ";

	if (!fsg->curlun) {		// Unsupported LUNs are okay
		fsg->bad_lun_okay = 1;
		memset(buf, 0, 36);
		buf[0] = 0x7f;		// Unsupported, no device-type
		buf[4] = 31;		// Additional length
		return 36;
	}

	memset(buf, 0, 8);
	buf[0] = (mod_data.cdrom ? TYPE_ROM : TYPE_DISK);
	if (mod_data.removable)
		buf[1] = 0x80;
	buf[2] = 2;		// ANSI SCSI level 2
	buf[3] = 2;		// SCSI-2 INQUIRY data format
	buf[4] = 31;		// Additional length
				// No special options
	sprintf(buf + 8, "%-8s%-16s%04x", vendor_id,
			(mod_data.cdrom ? product_cdrom_id :
				product_disk_id),
			mod_data.release);
	return 36;
}
static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = fsg->curlun;
	u8		*buf = (u8 *) bh->buf;
	u32		sd, sdinfo;
	int		valid;

	/*
	 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
	 *
	 * If a REQUEST SENSE command is received from an initiator
	 * with a pending unit attention condition (before the target
	 * generates the contingent allegiance condition), then the
	 * target shall either:
	 *   a) report any pending sense data and preserve the unit
	 *	attention condition on the logical unit, or,
	 *   b) report the unit attention condition, may discard any
	 *	pending sense data, and clear the unit attention
	 *	condition on the logical unit for that initiator.
	 *
	 * FSG normally uses option a); enable this code to use option b).
	 */
#if 0
	if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
		curlun->sense_data = curlun->unit_attention_data;
		curlun->unit_attention_data = SS_NO_SENSE;
	}
#endif

	if (!curlun) {		// Unsupported LUNs are okay
		fsg->bad_lun_okay = 1;
		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
		sdinfo = 0;
		valid = 0;
	} else {
		sd = curlun->sense_data;
		sdinfo = curlun->sense_data_info;
		valid = curlun->info_valid << 7;
		curlun->sense_data = SS_NO_SENSE;
		curlun->sense_data_info = 0;
		curlun->info_valid = 0;
	}

	memset(buf, 0, 18);
	buf[0] = valid | 0x70;			// Valid, current error
	buf[2] = SK(sd);
	put_unaligned_be32(sdinfo, &buf[3]);	/* Sense information */
	buf[7] = 18 - 8;			// Additional sense length
	buf[12] = ASC(sd);
	buf[13] = ASCQ(sd);
	return 18;
}
static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = fsg->curlun;
	u32		lba = get_unaligned_be32(&fsg->cmnd[2]);
	int		pmi = fsg->cmnd[8];
	u8		*buf = (u8 *) bh->buf;

	/* Check the PMI and LBA fields */
	if (pmi > 1 || (pmi == 0 && lba != 0)) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
						/* Max logical block */
	put_unaligned_be32(512, &buf[4]);	/* Block length */
	return 8;
}
static int do_read_header(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = fsg->curlun;
	int		msf = fsg->cmnd[1] & 0x02;
	u32		lba = get_unaligned_be32(&fsg->cmnd[2]);
	u8		*buf = (u8 *) bh->buf;

	if ((fsg->cmnd[1] & ~0x02) != 0) {		/* Mask away MSF */
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}
	if (lba >= curlun->num_sectors) {
		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
		return -EINVAL;
	}

	memset(buf, 0, 8);
	buf[0] = 0x01;		/* 2048 bytes of user data, rest is EC */
	store_cdrom_address(&buf[4], msf, lba);
	return 8;
}
static int do_read_toc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = fsg->curlun;
	int		msf = fsg->cmnd[1] & 0x02;
	int		start_track = fsg->cmnd[6];
	u8		*buf = (u8 *) bh->buf;

	if ((fsg->cmnd[1] & ~0x02) != 0 ||		/* Mask away MSF */
			start_track > 1) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	memset(buf, 0, 20);
	buf[1] = (20-2);		/* TOC data length */
	buf[2] = 1;			/* First track number */
	buf[3] = 1;			/* Last track number */
	buf[5] = 0x16;			/* Data track, copying allowed */
	buf[6] = 0x01;			/* Only track is number 1 */
	store_cdrom_address(&buf[8], msf, 0);

	buf[13] = 0x16;			/* Lead-out track is data */
	buf[14] = 0xAA;			/* Lead-out track number */
	store_cdrom_address(&buf[16], msf, curlun->num_sectors);
	return 20;
}
static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = fsg->curlun;
	int		mscmnd = fsg->cmnd[0];
	u8		*buf = (u8 *) bh->buf;
	u8		*buf0 = buf;
	int		pc, page_code;
	int		changeable_values, all_pages;
	int		valid_page = 0;
	int		len, limit;

	if ((fsg->cmnd[1] & ~0x08) != 0) {		// Mask away DBD
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}
	pc = fsg->cmnd[2] >> 6;
	page_code = fsg->cmnd[2] & 0x3f;
	if (pc == 3) {
		curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
		return -EINVAL;
	}
	changeable_values = (pc == 1);
	all_pages = (page_code == 0x3f);

	/* Write the mode parameter header.  Fixed values are: default
	 * medium type, no cache control (DPOFUA), and no block descriptors.
	 * The only variable value is the WriteProtect bit.  We will fill in
	 * the mode data length later. */
	memset(buf, 0, 8);
	if (mscmnd == MODE_SENSE) {
		buf[2] = (curlun->ro ? 0x80 : 0x00);	// WP, DPOFUA
		buf += 4;
		limit = 255;
	} else {			// MODE_SENSE_10
		buf[3] = (curlun->ro ? 0x80 : 0x00);	// WP, DPOFUA
		buf += 8;
		limit = 65535;		// Should really be mod_data.buflen
	}

	/* No block descriptors */

	/* The mode pages, in numerical order.  The only page we support
	 * is the Caching page. */
	if (page_code == 0x08 || all_pages) {
		valid_page = 1;
		buf[0] = 0x08;		// Page code
		buf[1] = 10;		// Page length
		memset(buf+2, 0, 10);	// None of the fields are changeable

		if (!changeable_values) {
			buf[2] = 0x04;	// Write cache enable,
					// Read cache not disabled
					// No cache retention priorities
			put_unaligned_be16(0xffff, &buf[4]);
					/* Don't disable prefetch */
					/* Minimum prefetch = 0 */
			put_unaligned_be16(0xffff, &buf[8]);
					/* Maximum prefetch */
			put_unaligned_be16(0xffff, &buf[10]);
					/* Maximum prefetch ceiling */
		}
		buf += 12;
	}

	/* Check that a valid page was requested and the mode data length
	 * isn't too long. */
	len = buf - buf0;
	if (!valid_page || len > limit) {
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	/* Store the mode data length */
	if (mscmnd == MODE_SENSE)
		buf0[0] = len - 1;
	else
		put_unaligned_be16(len - 2, buf0);
	return len;
}
static int do_start_stop(struct fsg_dev *fsg)
{
	struct fsg_lun	*curlun = fsg->curlun;
	int		loej, start;

	if (!mod_data.removable) {
		curlun->sense_data = SS_INVALID_COMMAND;
		return -EINVAL;
	}

	// int immed = fsg->cmnd[1] & 0x01;
	loej = fsg->cmnd[4] & 0x02;
	start = fsg->cmnd[4] & 0x01;

#ifdef CONFIG_USB_FILE_STORAGE_TEST
	if ((fsg->cmnd[1] & ~0x01) != 0 ||		// Mask away Immed
			(fsg->cmnd[4] & ~0x03) != 0) {	// Mask LoEj, Start
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}
#endif

	if (!start) {

		/* Are we allowed to unload the media? */
		if (curlun->prevent_medium_removal) {
			LDBG(curlun, "unload attempt prevented\n");
			curlun->sense_data = SS_MEDIUM_REMOVAL_PREVENTED;
			return -EINVAL;
		}
		if (loej) {			// Simulate an unload/eject
			up_read(&fsg->filesem);
			down_write(&fsg->filesem);
			fsg_lun_close(curlun);
			up_write(&fsg->filesem);
			down_read(&fsg->filesem);
		}
	} else {

		/* Our emulation doesn't support mounting; the medium is
		 * available for use as soon as it is loaded. */
		if (!fsg_lun_is_open(curlun)) {
			curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
			return -EINVAL;
		}
	}
	return 0;
}
static int do_prevent_allow(struct fsg_dev *fsg)
{
	struct fsg_lun	*curlun = fsg->curlun;
	int		prevent;

	if (!mod_data.removable) {
		curlun->sense_data = SS_INVALID_COMMAND;
		return -EINVAL;
	}

	prevent = fsg->cmnd[4] & 0x01;
	if ((fsg->cmnd[4] & ~0x01) != 0) {		// Mask away Prevent
		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
		return -EINVAL;
	}

	if (curlun->prevent_medium_removal && !prevent)
		fsg_lun_fsync_sub(curlun);
	curlun->prevent_medium_removal = prevent;
	return 0;
}
static int do_read_format_capacities(struct fsg_dev *fsg,
		struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = fsg->curlun;
	u8		*buf = (u8 *) bh->buf;

	buf[0] = buf[1] = buf[2] = 0;
	buf[3] = 8;		// Only the Current/Maximum Capacity Descriptor
	buf += 4;

	put_unaligned_be32(curlun->num_sectors, &buf[0]);
						/* Number of blocks */
	put_unaligned_be32(512, &buf[4]);	/* Block length */
	buf[4] = 0x02;				/* Current capacity */
	return 12;
}
static int do_mode_select(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
	struct fsg_lun	*curlun = fsg->curlun;

	/* We don't support MODE SELECT */
	curlun->sense_data = SS_INVALID_COMMAND;
	return -EINVAL;
}


/*-------------------------------------------------------------------------*/
static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
{
	int	rc;

	rc = fsg_set_halt(fsg, fsg->bulk_in);
	if (rc == -EAGAIN)
		VDBG(fsg, "delayed bulk-in endpoint halt\n");
	while (rc != 0) {
		if (rc != -EAGAIN) {
			WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
			rc = 0;
			break;
		}

		/* Wait for a short time and then try again */
		if (msleep_interruptible(100) != 0)
			return -EINTR;
		rc = usb_ep_set_halt(fsg->bulk_in);
	}
	return rc;
}

static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
{
	int	rc;

	DBG(fsg, "bulk-in set wedge\n");
	rc = usb_ep_set_wedge(fsg->bulk_in);
	if (rc == -EAGAIN)
		VDBG(fsg, "delayed bulk-in endpoint wedge\n");
	while (rc != 0) {
		if (rc != -EAGAIN) {
			WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
			rc = 0;
			break;
		}

		/* Wait for a short time and then try again */
		if (msleep_interruptible(100) != 0)
			return -EINTR;
		rc = usb_ep_set_wedge(fsg->bulk_in);
	}
	return rc;
}
static int throw_away_data(struct fsg_dev *fsg)
{
	struct fsg_buffhd	*bh;
	u32			amount;
	int			rc;

	while ((bh = fsg->next_buffhd_to_drain)->state != BUF_STATE_EMPTY ||
			fsg->usb_amount_left > 0) {

		/* Throw away the data in a filled buffer */
		if (bh->state == BUF_STATE_FULL) {
			bh->state = BUF_STATE_EMPTY;
			fsg->next_buffhd_to_drain = bh->next;

			/* A short packet or an error ends everything */
			if (bh->outreq->actual != bh->outreq->length ||
					bh->outreq->status != 0) {
				raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
				return -EINTR;
			}
			continue;
		}

		/* Try to submit another request if we need one */
		bh = fsg->next_buffhd_to_fill;
		if (bh->state == BUF_STATE_EMPTY && fsg->usb_amount_left > 0) {
			amount = min(fsg->usb_amount_left,
					(u32) mod_data.buflen);

			/* amount is always divisible by 512, hence by
			 * the bulk-out maxpacket size */
			bh->outreq->length = bh->bulk_out_intended_length =
					amount;
			bh->outreq->short_not_ok = 1;
			start_transfer(fsg, fsg->bulk_out, bh->outreq,
					&bh->outreq_busy, &bh->state);
			fsg->next_buffhd_to_fill = bh->next;
			fsg->usb_amount_left -= amount;
			continue;
		}

		/* Otherwise wait for something to happen */
		rc = sleep_thread(fsg);
		if (rc)
			return rc;
	}
	return 0;
}
static int finish_reply(struct fsg_dev *fsg)
{
	struct fsg_buffhd	*bh = fsg->next_buffhd_to_fill;
	int			rc = 0;

	switch (fsg->data_dir) {
	case DATA_DIR_NONE:
		break;			// Nothing to send

	/* If we don't know whether the host wants to read or write,
	 * this must be CB or CBI with an unknown command.  We mustn't
	 * try to send or receive any data.  So stall both bulk pipes
	 * if we can and wait for a reset. */
	case DATA_DIR_UNKNOWN:
		if (mod_data.can_stall) {
			fsg_set_halt(fsg, fsg->bulk_out);
			rc = halt_bulk_in_endpoint(fsg);
		}
		break;

	/* All but the last buffer of data must have already been sent */
	case DATA_DIR_TO_HOST:
		if (fsg->data_size == 0)
			;		// Nothing to send

		/* If there's no residue, simply send the last buffer */
		else if (fsg->residue == 0) {
			bh->inreq->zero = 0;
			start_transfer(fsg, fsg->bulk_in, bh->inreq,
					&bh->inreq_busy, &bh->state);
			fsg->next_buffhd_to_fill = bh->next;
		}

		/* There is a residue.  For CB and CBI, simply mark the end
		 * of the data with a short packet.  However, if we are
		 * allowed to stall, there was no data at all (residue ==
		 * data_size), and the command failed (invalid LUN or
		 * sense data is set), then halt the bulk-in endpoint
		 * instead. */
		else if (!transport_is_bbb()) {
			if (mod_data.can_stall &&
					fsg->residue == fsg->data_size &&
					(!fsg->curlun ||
					 fsg->curlun->sense_data != SS_NO_SENSE)) {
				bh->state = BUF_STATE_EMPTY;
				rc = halt_bulk_in_endpoint(fsg);
			} else {
				bh->inreq->zero = 1;
				start_transfer(fsg, fsg->bulk_in, bh->inreq,
						&bh->inreq_busy, &bh->state);
				fsg->next_buffhd_to_fill = bh->next;
			}
		}

		/*
		 * For Bulk-only, mark the end of the data with a short
		 * packet.  If we are allowed to stall, halt the bulk-in
		 * endpoint.  (Note: This violates the Bulk-Only Transport
		 * specification, which requires us to pad the data if we
		 * don't halt the endpoint.  Presumably nobody will mind.)
		 */
		else {
			bh->inreq->zero = 1;
			start_transfer(fsg, fsg->bulk_in, bh->inreq,
					&bh->inreq_busy, &bh->state);
			fsg->next_buffhd_to_fill = bh->next;
			if (mod_data.can_stall)
				rc = halt_bulk_in_endpoint(fsg);
		}
		break;

	/* We have processed all we want from the data the host has sent.
	 * There may still be outstanding bulk-out requests. */
	case DATA_DIR_FROM_HOST:
		if (fsg->residue == 0)
			;		// Nothing to receive

		/* Did the host stop sending unexpectedly early? */
		else if (fsg->short_packet_received) {
			raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
			rc = -EINTR;
		}

		/* We haven't processed all the incoming data.  Even though
		 * we may be allowed to stall, doing so would cause a race.
		 * The controller may already have ACK'ed all the remaining
		 * bulk-out packets, in which case the host wouldn't see a
		 * STALL.  Not realizing the endpoint was halted, it wouldn't
		 * clear the halt -- leading to problems later on. */
#if 0
		else if (mod_data.can_stall) {
			fsg_set_halt(fsg, fsg->bulk_out);
			raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
			rc = -EINTR;
		}
#endif

		/* We can't stall.  Read in the excess data and throw it
		 * all away. */
		else
			rc = throw_away_data(fsg);
		break;
	}
	return rc;
}
static int send_status(struct fsg_dev *fsg)
{
	struct fsg_lun		*curlun = fsg->curlun;
	struct fsg_buffhd	*bh;
	int			rc;
	u8			status = USB_STATUS_PASS;
	u32			sd, sdinfo = 0;

	/* Wait for the next buffer to become available */
	bh = fsg->next_buffhd_to_fill;
	while (bh->state != BUF_STATE_EMPTY) {
		rc = sleep_thread(fsg);
		if (rc)
			return rc;
	}

	if (curlun) {
		sd = curlun->sense_data;
		sdinfo = curlun->sense_data_info;
	} else if (fsg->bad_lun_okay)
		sd = SS_NO_SENSE;
	else
		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;

	if (fsg->phase_error) {
		DBG(fsg, "sending phase-error status\n");
		status = USB_STATUS_PHASE_ERROR;
		sd = SS_INVALID_COMMAND;
	} else if (sd != SS_NO_SENSE) {
		DBG(fsg, "sending command-failure status\n");
		status = USB_STATUS_FAIL;
		VDBG(fsg, "  sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
				"  info x%x\n",
				SK(sd), ASC(sd), ASCQ(sd), sdinfo);
	}

	if (transport_is_bbb()) {
		struct bulk_cs_wrap	*csw = bh->buf;

		/* Store and send the Bulk-only CSW */
		csw->Signature = cpu_to_le32(USB_BULK_CS_SIG);
		csw->Tag = fsg->tag;
		csw->Residue = cpu_to_le32(fsg->residue);
		csw->Status = status;

		bh->inreq->length = USB_BULK_CS_WRAP_LEN;
		bh->inreq->zero = 0;
		start_transfer(fsg, fsg->bulk_in, bh->inreq,
				&bh->inreq_busy, &bh->state);

	} else if (mod_data.transport_type == USB_PR_CB) {

		/* Control-Bulk transport has no status phase! */
		return 0;

	} else {			// USB_PR_CBI
		struct interrupt_data	*buf = bh->buf;

		/* Store and send the Interrupt data.  UFI sends the ASC
		 * and ASCQ bytes.  Everything else sends a Type (which
		 * is always 0) and the status Value. */
		if (mod_data.protocol_type == USB_SC_UFI) {
			buf->bType = ASC(sd);
			buf->bValue = ASCQ(sd);
		} else {
			buf->bType = 0;
			buf->bValue = status;
		}
		fsg->intreq->length = CBI_INTERRUPT_DATA_LEN;

		fsg->intr_buffhd = bh;		// Point to the right buffhd
		fsg->intreq->buf = bh->inreq->buf;
		fsg->intreq->context = bh;
		start_transfer(fsg, fsg->intr_in, fsg->intreq,
				&fsg->intreq_busy, &bh->state);
	}

	fsg->next_buffhd_to_fill = bh->next;
	return 0;
}

/*-------------------------------------------------------------------------*/

/* Check whether the command is properly formed and whether its data size
 * and direction agree with the values we already have. */
static int check_command(struct fsg_dev *fsg, int cmnd_size,
		enum data_direction data_dir, unsigned int mask,
		int needs_medium, const char *name)
{
	int			i;
	int			lun = fsg->cmnd[1] >> 5;
	static const char	dirletter[4] = {'u', 'o', 'i', 'n'};
	char			hdlen[20];
	struct fsg_lun		*curlun;

	/* Adjust the expected cmnd_size for protocol encapsulation padding.
	 * Transparent SCSI doesn't pad. */
	if (protocol_is_scsi())
		;

	/* There's some disagreement as to whether RBC pads commands or not.
	 * We'll play it safe and accept either form. */
	else if (mod_data.protocol_type == USB_SC_RBC) {
		if (fsg->cmnd_size == 12)
			cmnd_size = 12;

	/* All the other protocols pad to 12 bytes */
	} else
		cmnd_size = 12;

	hdlen[0] = 0;
	if (fsg->data_dir != DATA_DIR_UNKNOWN)
		sprintf(hdlen, ", H%c=%u", dirletter[(int) fsg->data_dir],
				fsg->data_size);
	VDBG(fsg, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n",
			name, cmnd_size, dirletter[(int) data_dir],
			fsg->data_size_from_cmnd, fsg->cmnd_size, hdlen);

	/* We can't reply at all until we know the correct data direction
	 * and size. */
	if (fsg->data_size_from_cmnd == 0)
		data_dir = DATA_DIR_NONE;
	if (fsg->data_dir == DATA_DIR_UNKNOWN) {	// CB or CBI
		fsg->data_dir = data_dir;
		fsg->data_size = fsg->data_size_from_cmnd;

	} else {					// Bulk-only
		if (fsg->data_size < fsg->data_size_from_cmnd) {

			/* Host data size < Device data size is a phase error.
			 * Carry out the command, but only transfer as much
			 * as we are allowed. */
			fsg->data_size_from_cmnd = fsg->data_size;
			fsg->phase_error = 1;
		}
	}
	fsg->residue = fsg->usb_amount_left = fsg->data_size;

	/* Conflicting data directions is a phase error */
	if (fsg->data_dir != data_dir && fsg->data_size_from_cmnd > 0) {
		fsg->phase_error = 1;
		return -EINVAL;
	}

	/* Verify the length of the command itself */
	if (cmnd_size != fsg->cmnd_size) {

		/* Special case workaround: There are plenty of buggy SCSI
		 * implementations. Many have issues with cbw->Length
		 * field passing a wrong command size. For those cases we
		 * always try to work around the problem by using the length
		 * sent by the host side provided it is at least as large
		 * as the correct command length.
		 * Examples of such cases would be MS-Windows, which issues
		 * REQUEST SENSE with cbw->Length == 12 where it should
		 * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
		 * REQUEST SENSE with cbw->Length == 10 where it should
		 * be 6 as well.
		 */
		if (cmnd_size <= fsg->cmnd_size) {
			DBG(fsg, "%s is buggy! Expected length %d "
					"but we got %d\n", name,
					cmnd_size, fsg->cmnd_size);
			cmnd_size = fsg->cmnd_size;
		} else {
			fsg->phase_error = 1;
			return -EINVAL;
		}
	}

	/* Check that the LUN values are consistent */
	if (transport_is_bbb()) {
		if (fsg->lun != lun)
			DBG(fsg, "using LUN %d from CBW, "
					"not LUN %d from CDB\n",
					fsg->lun, lun);
	} else
		fsg->lun = lun;		// Use LUN from the command

	/* Check the LUN */
	if (fsg->lun < fsg->nluns) {
		fsg->curlun = curlun = &fsg->luns[fsg->lun];
		if (fsg->cmnd[0] != REQUEST_SENSE) {
			curlun->sense_data = SS_NO_SENSE;
			curlun->sense_data_info = 0;
			curlun->info_valid = 0;
		}
	} else {
		fsg->curlun = curlun = NULL;
		fsg->bad_lun_okay = 0;

		/* INQUIRY and REQUEST SENSE commands are explicitly allowed
		 * to use unsupported LUNs; all others may not. */
		if (fsg->cmnd[0] != INQUIRY &&
				fsg->cmnd[0] != REQUEST_SENSE) {
			DBG(fsg, "unsupported LUN %d\n", fsg->lun);
			return -EINVAL;
		}
	}

	/* If a unit attention condition exists, only INQUIRY and
	 * REQUEST SENSE commands are allowed; anything else must fail. */
	if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
			fsg->cmnd[0] != INQUIRY &&
			fsg->cmnd[0] != REQUEST_SENSE) {
		curlun->sense_data = curlun->unit_attention_data;
		curlun->unit_attention_data = SS_NO_SENSE;
		return -EINVAL;
	}

	/* Check that only command bytes listed in the mask are non-zero */
	fsg->cmnd[1] &= 0x1f;			// Mask away the LUN
	for (i = 1; i < cmnd_size; ++i) {
		if (fsg->cmnd[i] && !(mask & (1 << i))) {
			if (curlun)
				curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
			return -EINVAL;
		}
	}

	/* If the medium isn't mounted and the command needs to access
	 * it, return an error. */
	if (curlun && !fsg_lun_is_open(curlun) && needs_medium) {
		curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
		return -EINVAL;
	}

	return 0;
}
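
/*
 * Illustrative sketch (not part of the driver): how the "mask" argument
 * checked above is built by the callers in do_scsi_command().  Bit i set
 * means CDB byte i may be non-zero; byte 0 (the opcode) is never checked,
 * and the LUN bits in byte 1 are masked off first.  For example, the
 * MODE SENSE(6) caller passes (1<<1) | (1<<2) | (1<<4), i.e. only bytes
 * 1 (DBD), 2 (page code) and 4 (allocation length) may be non-zero.
 */
#if 0	/* example only, never compiled */
static int example_cdb_mask_ok(const u8 *cdb, int cmnd_size, unsigned int mask)
{
	int i;

	for (i = 1; i < cmnd_size; ++i) {
		if (cdb[i] && !(mask & (1 << i)))
			return 0;	/* would raise SS_INVALID_FIELD_IN_CDB */
	}
	return 1;
}
#endif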
static int do_scsi_command(struct fsg_dev *fsg)
{
	struct fsg_buffhd	*bh;
	int			rc;
	int			reply = -EINVAL;
	int			i;
	static char		unknown[16];

	/* Wait for the next buffer to become available for data or status */
	bh = fsg->next_buffhd_to_drain = fsg->next_buffhd_to_fill;
	while (bh->state != BUF_STATE_EMPTY) {
		rc = sleep_thread(fsg);
		if (rc)
			return rc;
	}
	fsg->phase_error = 0;
	fsg->short_packet_received = 0;

	down_read(&fsg->filesem);	// We're using the backing file
	switch (fsg->cmnd[0]) {

	case INQUIRY:
		fsg->data_size_from_cmnd = fsg->cmnd[4];
		if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
				(1<<4), 0,
				"INQUIRY")) == 0)
			reply = do_inquiry(fsg, bh);
		break;

	case MODE_SELECT:
		fsg->data_size_from_cmnd = fsg->cmnd[4];
		if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
				(1<<1) | (1<<4), 0,
				"MODE SELECT(6)")) == 0)
			reply = do_mode_select(fsg, bh);
		break;

	case MODE_SELECT_10:
		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
		if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
				(1<<1) | (3<<7), 0,
				"MODE SELECT(10)")) == 0)
			reply = do_mode_select(fsg, bh);
		break;

	case MODE_SENSE:
		fsg->data_size_from_cmnd = fsg->cmnd[4];
		if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
				(1<<1) | (1<<2) | (1<<4), 0,
				"MODE SENSE(6)")) == 0)
			reply = do_mode_sense(fsg, bh);
		break;

	case MODE_SENSE_10:
		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
				(1<<1) | (1<<2) | (3<<7), 0,
				"MODE SENSE(10)")) == 0)
			reply = do_mode_sense(fsg, bh);
		break;

	case ALLOW_MEDIUM_REMOVAL:
		fsg->data_size_from_cmnd = 0;
		if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
				(1<<4), 0,
				"PREVENT-ALLOW MEDIUM REMOVAL")) == 0)
			reply = do_prevent_allow(fsg);
		break;

	case READ_6:
		i = fsg->cmnd[4];
		fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
		if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
				(7<<1) | (1<<4), 1,
				"READ(6)")) == 0)
			reply = do_read(fsg);
		break;

	case READ_10:
		fsg->data_size_from_cmnd =
				get_unaligned_be16(&fsg->cmnd[7]) << 9;
		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
				(1<<1) | (0xf<<2) | (3<<7), 1,
				"READ(10)")) == 0)
			reply = do_read(fsg);
		break;

	case READ_12:
		fsg->data_size_from_cmnd =
				get_unaligned_be32(&fsg->cmnd[6]) << 9;
		if ((reply = check_command(fsg, 12, DATA_DIR_TO_HOST,
				(1<<1) | (0xf<<2) | (0xf<<6), 1,
				"READ(12)")) == 0)
			reply = do_read(fsg);
		break;

	case READ_CAPACITY:
		fsg->data_size_from_cmnd = 8;
		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
				(0xf<<2) | (1<<8), 1,
				"READ CAPACITY")) == 0)
			reply = do_read_capacity(fsg, bh);
		break;

	case READ_HEADER:
		if (!mod_data.cdrom)
			goto unknown_cmnd;
		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
				(3<<7) | (0x1f<<1), 1,
				"READ HEADER")) == 0)
			reply = do_read_header(fsg, bh);
		break;

	case READ_TOC:
		if (!mod_data.cdrom)
			goto unknown_cmnd;
		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
				(7<<6) | (1<<1), 1,
				"READ TOC")) == 0)
			reply = do_read_toc(fsg, bh);
		break;

	case READ_FORMAT_CAPACITIES:
		fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
		if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
				(3<<7), 1,
				"READ FORMAT CAPACITIES")) == 0)
			reply = do_read_format_capacities(fsg, bh);
		break;

	case REQUEST_SENSE:
		fsg->data_size_from_cmnd = fsg->cmnd[4];
		if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
				(1<<4), 0,
				"REQUEST SENSE")) == 0)
			reply = do_request_sense(fsg, bh);
		break;

	case START_STOP:
		fsg->data_size_from_cmnd = 0;
		if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
				(1<<1) | (1<<4), 0,
				"START-STOP UNIT")) == 0)
			reply = do_start_stop(fsg);
		break;

	case SYNCHRONIZE_CACHE:
		fsg->data_size_from_cmnd = 0;
		if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
				(0xf<<2) | (3<<7), 1,
				"SYNCHRONIZE CACHE")) == 0)
			reply = do_synchronize_cache(fsg);
		break;

	case TEST_UNIT_READY:
		fsg->data_size_from_cmnd = 0;
		reply = check_command(fsg, 6, DATA_DIR_NONE,
				0, 1,
				"TEST UNIT READY");
		break;

	/* Although optional, this command is used by MS-Windows.  We
	 * support a minimal version: BytChk must be 0. */
	case VERIFY:
		fsg->data_size_from_cmnd = 0;
		if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
				(1<<1) | (0xf<<2) | (3<<7), 1,
				"VERIFY")) == 0)
			reply = do_verify(fsg);
		break;

	case WRITE_6:
		i = fsg->cmnd[4];
		fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
		if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
				(7<<1) | (1<<4), 1,
				"WRITE(6)")) == 0)
			reply = do_write(fsg);
		break;

	case WRITE_10:
		fsg->data_size_from_cmnd =
				get_unaligned_be16(&fsg->cmnd[7]) << 9;
		if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
				(1<<1) | (0xf<<2) | (3<<7), 1,
				"WRITE(10)")) == 0)
			reply = do_write(fsg);
		break;

	case WRITE_12:
		fsg->data_size_from_cmnd =
				get_unaligned_be32(&fsg->cmnd[6]) << 9;
		if ((reply = check_command(fsg, 12, DATA_DIR_FROM_HOST,
				(1<<1) | (0xf<<2) | (0xf<<6), 1,
				"WRITE(12)")) == 0)
			reply = do_write(fsg);
		break;

	/* Some mandatory commands that we recognize but don't implement.
	 * They don't mean much in this setting.  It's left as an exercise
	 * for anyone interested to implement RESERVE and RELEASE in terms
	 * of Posix locks. */
	case FORMAT_UNIT:
	case RELEASE:
	case RESERVE:
	case SEND_DIAGNOSTIC:
		// Fall through

	default:
 unknown_cmnd:
		fsg->data_size_from_cmnd = 0;
		sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]);
		if ((reply = check_command(fsg, fsg->cmnd_size,
				DATA_DIR_UNKNOWN, 0xff, 0, unknown)) == 0) {
			fsg->curlun->sense_data = SS_INVALID_COMMAND;
			reply = -EINVAL;
		}
		break;
	}
	up_read(&fsg->filesem);

	if (reply == -EINTR || signal_pending(current))
		return -EINTR;

	/* Set up the single reply buffer for finish_reply() */
	if (reply == -EINVAL)
		reply = 0;		// Error reply length
	if (reply >= 0 && fsg->data_dir == DATA_DIR_TO_HOST) {
		reply = min((u32) reply, fsg->data_size_from_cmnd);
		bh->inreq->length = reply;
		bh->state = BUF_STATE_FULL;
		fsg->residue -= reply;
	}				// Otherwise it's already set

	return 0;
}
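
/*
 * Illustrative sketch (not part of the driver): the residue bookkeeping
 * done just above.  check_command() initializes fsg->residue to the
 * host-requested transfer length; a command handler then returns how many
 * bytes it actually prepared, and the code above clamps that value and
 * subtracts it from the residue that send_status() later reports in the
 * CSW.  The numbers below are hypothetical: the host asks for 252 bytes
 * of REQUEST SENSE data but the gadget only has an 18-byte sense buffer.
 */
#if 0	/* example only, never compiled */
static void example_residue_math(void)
{
	u32 data_size_from_cmnd = 252;	/* from the CDB / CBW */
	u32 residue = 252;		/* set up by check_command() */
	u32 reply = 18;			/* bytes produced by do_request_sense() */

	reply = min(reply, data_size_from_cmnd);	/* still 18 */
	residue -= reply;				/* CSW reports 234 */
}
#endif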

/*-------------------------------------------------------------------------*/

static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
{
	struct usb_request	*req = bh->outreq;
	struct fsg_bulk_cb_wrap	*cbw = req->buf;

	/* Was this a real packet?  Should it be ignored? */
	if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
		return -EINVAL;

	/* Is the CBW valid? */
	if (req->actual != USB_BULK_CB_WRAP_LEN ||
			cbw->Signature != cpu_to_le32(
				USB_BULK_CB_SIG)) {
		DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
				req->actual,
				le32_to_cpu(cbw->Signature));

		/* The Bulk-only spec says we MUST stall the IN endpoint
		 * (6.6.1), so it's unavoidable.  It also says we must
		 * retain this state until the next reset, but there's
		 * no way to tell the controller driver it should ignore
		 * Clear-Feature(HALT) requests.
		 *
		 * We aren't required to halt the OUT endpoint; instead
		 * we can simply accept and discard any data received
		 * until the next reset. */
		wedge_bulk_in_endpoint(fsg);
		set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
		return -EINVAL;
	}

	/* Is the CBW meaningful? */
	if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
			cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
		DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
				"cmdlen %u\n",
				cbw->Lun, cbw->Flags, cbw->Length);

		/* We can do anything we want here, so let's stall the
		 * bulk pipes if we are allowed to. */
		if (mod_data.can_stall) {
			fsg_set_halt(fsg, fsg->bulk_out);
			halt_bulk_in_endpoint(fsg);
		}
		return -EINVAL;
	}

	/* Save the command for later */
	fsg->cmnd_size = cbw->Length;
	memcpy(fsg->cmnd, cbw->CDB, fsg->cmnd_size);
	if (cbw->Flags & USB_BULK_IN_FLAG)
		fsg->data_dir = DATA_DIR_TO_HOST;
	else
		fsg->data_dir = DATA_DIR_FROM_HOST;
	fsg->data_size = le32_to_cpu(cbw->DataTransferLength);
	if (fsg->data_size == 0)
		fsg->data_dir = DATA_DIR_NONE;
	fsg->lun = cbw->Lun;
	fsg->tag = cbw->Tag;
	return 0;
}
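
/*
 * Illustrative sketch (not part of the driver): a 31-byte CBW that would
 * pass the checks above.  The example asks LUN 0 for a 36-byte INQUIRY;
 * signature, tag and transfer length are little-endian on the wire, and
 * bit 7 of bmCBWFlags (USB_BULK_IN_FLAG) selects the IN direction.  The
 * tag value is an arbitrary example.
 */
#if 0	/* example only, never compiled */
static const u8 example_cbw_inquiry[USB_BULK_CB_WRAP_LEN] = {
	0x55, 0x53, 0x42, 0x43,		/* dCBWSignature = 0x43425355 ("USBC") */
	0x78, 0x56, 0x34, 0x12,		/* dCBWTag = 0x12345678 */
	0x24, 0x00, 0x00, 0x00,		/* dCBWDataTransferLength = 36 */
	0x80,				/* bmCBWFlags: data IN */
	0x00,				/* bCBWLUN = 0 */
	0x06,				/* bCBWCBLength = 6 */
	0x12, 0x00, 0x00, 0x00, 0x24, 0x00,	/* INQUIRY CDB */
	0x00, 0x00, 0x00, 0x00, 0x00,		/* CDB padding */
	0x00, 0x00, 0x00, 0x00, 0x00,
};
#endif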
static int get_next_command(struct fsg_dev *fsg)
{
	struct fsg_buffhd	*bh;
	int			rc = 0;

	if (transport_is_bbb()) {

		/* Wait for the next buffer to become available */
		bh = fsg->next_buffhd_to_fill;
		while (bh->state != BUF_STATE_EMPTY) {
			rc = sleep_thread(fsg);
			if (rc)
				return rc;
		}

		/* Queue a request to read a Bulk-only CBW */
		set_bulk_out_req_length(fsg, bh, USB_BULK_CB_WRAP_LEN);
		bh->outreq->short_not_ok = 1;
		start_transfer(fsg, fsg->bulk_out, bh->outreq,
				&bh->outreq_busy, &bh->state);

		/* We will drain the buffer in software, which means we
		 * can reuse it for the next filling.  No need to advance
		 * next_buffhd_to_fill. */

		/* Wait for the CBW to arrive */
		while (bh->state != BUF_STATE_FULL) {
			rc = sleep_thread(fsg);
			if (rc)
				return rc;
		}
		rc = received_cbw(fsg, bh);
		bh->state = BUF_STATE_EMPTY;

	} else {		// USB_PR_CB or USB_PR_CBI

		/* Wait for the next command to arrive */
		while (fsg->cbbuf_cmnd_size == 0) {
			rc = sleep_thread(fsg);
			if (rc)
				return rc;
		}

		/* Is the previous status interrupt request still busy?
		 * The host is allowed to skip reading the status,
		 * so we must cancel it. */
		if (fsg->intreq_busy)
			usb_ep_dequeue(fsg->intr_in, fsg->intreq);

		/* Copy the command and mark the buffer empty */
		fsg->data_dir = DATA_DIR_UNKNOWN;
		spin_lock_irq(&fsg->lock);
		fsg->cmnd_size = fsg->cbbuf_cmnd_size;
		memcpy(fsg->cmnd, fsg->cbbuf_cmnd, fsg->cmnd_size);
		fsg->cbbuf_cmnd_size = 0;
		spin_unlock_irq(&fsg->lock);
	}
	return rc;
}

/*-------------------------------------------------------------------------*/

static int enable_endpoint(struct fsg_dev *fsg, struct usb_ep *ep,
		const struct usb_endpoint_descriptor *d)
{
	int	rc;

	ep->driver_data = fsg;
	rc = usb_ep_enable(ep, d);
	if (rc)
		ERROR(fsg, "can't enable %s, result %d\n", ep->name, rc);
	return rc;
}

static int alloc_request(struct fsg_dev *fsg, struct usb_ep *ep,
		struct usb_request **preq)
{
	*preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
	if (*preq)
		return 0;
	ERROR(fsg, "can't allocate request for %s\n", ep->name);
	return -ENOMEM;
}
/*
 * Reset interface setting and re-init endpoint state (toggle etc).
 * Call with altsetting < 0 to disable the interface.  The only other
 * available altsetting is 0, which enables the interface.
 */
static int do_set_interface(struct fsg_dev *fsg, int altsetting)
{
	int	rc = 0;
	int	i;
	const struct usb_endpoint_descriptor	*d;

	if (fsg->running)
		DBG(fsg, "reset interface\n");

reset:
	/* Deallocate the requests */
	for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
		struct fsg_buffhd *bh = &fsg->buffhds[i];

		if (bh->inreq) {
			usb_ep_free_request(fsg->bulk_in, bh->inreq);
			bh->inreq = NULL;
		}
		if (bh->outreq) {
			usb_ep_free_request(fsg->bulk_out, bh->outreq);
			bh->outreq = NULL;
		}
	}
	if (fsg->intreq) {
		usb_ep_free_request(fsg->intr_in, fsg->intreq);
		fsg->intreq = NULL;
	}

	/* Disable the endpoints */
	if (fsg->bulk_in_enabled) {
		usb_ep_disable(fsg->bulk_in);
		fsg->bulk_in_enabled = 0;
	}
	if (fsg->bulk_out_enabled) {
		usb_ep_disable(fsg->bulk_out);
		fsg->bulk_out_enabled = 0;
	}
	if (fsg->intr_in_enabled) {
		usb_ep_disable(fsg->intr_in);
		fsg->intr_in_enabled = 0;
	}

	fsg->running = 0;
	if (altsetting < 0 || rc != 0)
		return rc;

	DBG(fsg, "set interface %d\n", altsetting);

	/* Enable the endpoints */
	d = fsg_ep_desc(fsg->gadget,
			&fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc);
	if ((rc = enable_endpoint(fsg, fsg->bulk_in, d)) != 0)
		goto reset;
	fsg->bulk_in_enabled = 1;

	d = fsg_ep_desc(fsg->gadget,
			&fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc);
	if ((rc = enable_endpoint(fsg, fsg->bulk_out, d)) != 0)
		goto reset;
	fsg->bulk_out_enabled = 1;
	fsg->bulk_out_maxpacket = le16_to_cpu(d->wMaxPacketSize);
	clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);

	if (transport_is_cbi()) {
		d = fsg_ep_desc(fsg->gadget,
				&fsg_fs_intr_in_desc, &fsg_hs_intr_in_desc);
		if ((rc = enable_endpoint(fsg, fsg->intr_in, d)) != 0)
			goto reset;
		fsg->intr_in_enabled = 1;
	}

	/* Allocate the requests */
	for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
		struct fsg_buffhd	*bh = &fsg->buffhds[i];

		if ((rc = alloc_request(fsg, fsg->bulk_in, &bh->inreq)) != 0)
			goto reset;
		if ((rc = alloc_request(fsg, fsg->bulk_out, &bh->outreq)) != 0)
			goto reset;
		bh->inreq->buf = bh->outreq->buf = bh->buf;
		bh->inreq->context = bh->outreq->context = bh;
		bh->inreq->complete = bulk_in_complete;
		bh->outreq->complete = bulk_out_complete;
	}
	if (transport_is_cbi()) {
		if ((rc = alloc_request(fsg, fsg->intr_in, &fsg->intreq)) != 0)
			goto reset;
		fsg->intreq->complete = intr_in_complete;
	}

	fsg->running = 1;
	for (i = 0; i < fsg->nluns; ++i)
		fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
	return rc;
}
/*
 * Change our operational configuration.  This code must agree with the code
 * that returns config descriptors, and with interface altsetting code.
 *
 * It's also responsible for power management interactions.  Some
 * configurations might not work with our current power sources.
 * For now we just assume the gadget is always self-powered.
 */
static int do_set_config(struct fsg_dev *fsg, u8 new_config)
{
	int	rc = 0;

	/* Disable the single interface */
	if (fsg->config != 0) {
		DBG(fsg, "reset config\n");
		fsg->config = 0;
		rc = do_set_interface(fsg, -1);
	}

	/* Enable the interface */
	if (new_config != 0) {
		fsg->config = new_config;
		if ((rc = do_set_interface(fsg, 0)) != 0)
			fsg->config = 0;	// Reset on errors
		else {
			char *speed;

			switch (fsg->gadget->speed) {
			case USB_SPEED_LOW:	speed = "low"; break;
			case USB_SPEED_FULL:	speed = "full"; break;
			case USB_SPEED_HIGH:	speed = "high"; break;
			default:		speed = "?"; break;
			}
			INFO(fsg, "%s speed config #%d\n", speed, fsg->config);
		}
	}
	return rc;
}

/*-------------------------------------------------------------------------*/

static void handle_exception(struct fsg_dev *fsg)
{
	siginfo_t		info;
	int			sig;
	int			i;
	int			num_active;
	struct fsg_buffhd	*bh;
	enum fsg_state		old_state;
	u8			new_config;
	struct fsg_lun		*curlun;
	unsigned int		exception_req_tag;
	int			rc;

	/* Clear the existing signals.  Anything but SIGUSR1 is converted
	 * into a high-priority EXIT exception. */
	for (;;) {
		sig = dequeue_signal_lock(current, &current->blocked, &info);
		if (!sig)
			break;
		if (sig != SIGUSR1) {
			if (fsg->state < FSG_STATE_EXIT)
				DBG(fsg, "Main thread exiting on signal\n");
			raise_exception(fsg, FSG_STATE_EXIT);
		}
	}

	/* Cancel all the pending transfers */
	if (fsg->intreq_busy)
		usb_ep_dequeue(fsg->intr_in, fsg->intreq);
	for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
		bh = &fsg->buffhds[i];
		if (bh->inreq_busy)
			usb_ep_dequeue(fsg->bulk_in, bh->inreq);
		if (bh->outreq_busy)
			usb_ep_dequeue(fsg->bulk_out, bh->outreq);
	}

	/* Wait until everything is idle */
	for (;;) {
		num_active = fsg->intreq_busy;
		for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
			bh = &fsg->buffhds[i];
			num_active += bh->inreq_busy + bh->outreq_busy;
		}
		if (num_active == 0)
			break;
		if (sleep_thread(fsg))
			return;
	}

	/* Clear out the controller's fifos */
	if (fsg->bulk_in_enabled)
		usb_ep_fifo_flush(fsg->bulk_in);
	if (fsg->bulk_out_enabled)
		usb_ep_fifo_flush(fsg->bulk_out);
	if (fsg->intr_in_enabled)
		usb_ep_fifo_flush(fsg->intr_in);

	/* Reset the I/O buffer states and pointers, the SCSI
	 * state, and the exception.  Then invoke the handler. */
	spin_lock_irq(&fsg->lock);

	for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
		bh = &fsg->buffhds[i];
		bh->state = BUF_STATE_EMPTY;
	}
	fsg->next_buffhd_to_fill = fsg->next_buffhd_to_drain =
			&fsg->buffhds[0];

	exception_req_tag = fsg->exception_req_tag;
	new_config = fsg->new_config;
	old_state = fsg->state;

	if (old_state == FSG_STATE_ABORT_BULK_OUT)
		fsg->state = FSG_STATE_STATUS_PHASE;
	else {
		for (i = 0; i < fsg->nluns; ++i) {
			curlun = &fsg->luns[i];
			curlun->prevent_medium_removal = 0;
			curlun->sense_data = curlun->unit_attention_data =
					SS_NO_SENSE;
			curlun->sense_data_info = 0;
			curlun->info_valid = 0;
		}
		fsg->state = FSG_STATE_IDLE;
	}
	spin_unlock_irq(&fsg->lock);

	/* Carry out any extra actions required for the exception */
	switch (old_state) {
	default:
		break;

	case FSG_STATE_ABORT_BULK_OUT:
		send_status(fsg);
		spin_lock_irq(&fsg->lock);
		if (fsg->state == FSG_STATE_STATUS_PHASE)
			fsg->state = FSG_STATE_IDLE;
		spin_unlock_irq(&fsg->lock);
		break;

	case FSG_STATE_RESET:
		/* In case we were forced against our will to halt a
		 * bulk endpoint, clear the halt now.  (The SuperH UDC
		 * requires this.) */
		if (test_and_clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
			usb_ep_clear_halt(fsg->bulk_in);

		if (transport_is_bbb()) {
			if (fsg->ep0_req_tag == exception_req_tag)
				ep0_queue(fsg);	// Complete the status stage

		} else if (transport_is_cbi())
			send_status(fsg);	// Status by interrupt pipe

		/* Technically this should go here, but it would only be
		 * a waste of time.  Ditto for the INTERFACE_CHANGE and
		 * CONFIG_CHANGE cases. */
		// for (i = 0; i < fsg->nluns; ++i)
		//	fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
		break;

	case FSG_STATE_INTERFACE_CHANGE:
		rc = do_set_interface(fsg, 0);
		if (fsg->ep0_req_tag != exception_req_tag)
			break;
		if (rc != 0)			// STALL on errors
			fsg_set_halt(fsg, fsg->ep0);
		else				// Complete the status stage
			ep0_queue(fsg);
		break;

	case FSG_STATE_CONFIG_CHANGE:
		rc = do_set_config(fsg, new_config);
		if (fsg->ep0_req_tag != exception_req_tag)
			break;
		if (rc != 0)			// STALL on errors
			fsg_set_halt(fsg, fsg->ep0);
		else				// Complete the status stage
			ep0_queue(fsg);
		break;

	case FSG_STATE_DISCONNECT:
		for (i = 0; i < fsg->nluns; ++i)
			fsg_lun_fsync_sub(fsg->luns + i);
		do_set_config(fsg, 0);		// Unconfigured state
		break;

	case FSG_STATE_EXIT:
	case FSG_STATE_TERMINATED:
		do_set_config(fsg, 0);			// Free resources
		spin_lock_irq(&fsg->lock);
		fsg->state = FSG_STATE_TERMINATED;	// Stop the thread
		spin_unlock_irq(&fsg->lock);
		break;
	}
}

/*-------------------------------------------------------------------------*/

static int fsg_main_thread(void *fsg_)
{
	struct fsg_dev		*fsg = fsg_;

	/* Allow the thread to be killed by a signal, but set the signal mask
	 * to block everything but INT, TERM, KILL, and USR1. */
	allow_signal(SIGINT);
	allow_signal(SIGTERM);
	allow_signal(SIGKILL);
	allow_signal(SIGUSR1);

	/* Allow the thread to be frozen */
	set_freezable();

	/* Arrange for userspace references to be interpreted as kernel
	 * pointers.  That way we can pass a kernel pointer to a routine
	 * that expects a __user pointer and it will work okay. */
	set_fs(get_ds());

	/* The main loop */
	while (fsg->state != FSG_STATE_TERMINATED) {
		if (exception_in_progress(fsg) || signal_pending(current)) {
			handle_exception(fsg);
			continue;
		}

		if (!fsg->running) {
			sleep_thread(fsg);
			continue;
		}

		if (get_next_command(fsg))
			continue;

		spin_lock_irq(&fsg->lock);
		if (!exception_in_progress(fsg))
			fsg->state = FSG_STATE_DATA_PHASE;
		spin_unlock_irq(&fsg->lock);

		if (do_scsi_command(fsg) || finish_reply(fsg))
			continue;

		spin_lock_irq(&fsg->lock);
		if (!exception_in_progress(fsg))
			fsg->state = FSG_STATE_STATUS_PHASE;
		spin_unlock_irq(&fsg->lock);

		if (send_status(fsg))
			continue;

		spin_lock_irq(&fsg->lock);
		if (!exception_in_progress(fsg))
			fsg->state = FSG_STATE_IDLE;
		spin_unlock_irq(&fsg->lock);
	}

	spin_lock_irq(&fsg->lock);
	fsg->thread_task = NULL;
	spin_unlock_irq(&fsg->lock);

	/* If we are exiting because of a signal, unregister the
	 * gadget driver. */
	if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags))
		usb_gadget_unregister_driver(&fsg_driver);

	/* Let the unbind and cleanup routines know the thread has exited */
	complete_and_exit(&fsg->thread_notifier, 0);
}

/*-------------------------------------------------------------------------*/

/* The write permissions and store_xxx pointers are set in fsg_bind() */
static DEVICE_ATTR(ro, 0444, fsg_show_ro, NULL);
static DEVICE_ATTR(nofua, 0644, fsg_show_nofua, NULL);
static DEVICE_ATTR(file, 0444, fsg_show_file, NULL);

/*-------------------------------------------------------------------------*/

static void fsg_release(struct kref *ref)
{
	struct fsg_dev	*fsg = container_of(ref, struct fsg_dev, ref);

	kfree(fsg->luns);
	kfree(fsg);
}

static void lun_release(struct device *dev)
{
	struct rw_semaphore	*filesem = dev_get_drvdata(dev);
	struct fsg_dev		*fsg =
		container_of(filesem, struct fsg_dev, filesem);

	kref_put(&fsg->ref, fsg_release);
}

static void /* __init_or_exit */ fsg_unbind(struct usb_gadget *gadget)
{
	struct fsg_dev		*fsg = get_gadget_data(gadget);
	int			i;
	struct fsg_lun		*curlun;
	struct usb_request	*req = fsg->ep0req;

	DBG(fsg, "unbind\n");
	clear_bit(REGISTERED, &fsg->atomic_bitflags);

	/* Unregister the sysfs attribute files and the LUNs */
	for (i = 0; i < fsg->nluns; ++i) {
		curlun = &fsg->luns[i];
		if (curlun->registered) {
			device_remove_file(&curlun->dev, &dev_attr_nofua);
			device_remove_file(&curlun->dev, &dev_attr_ro);
			device_remove_file(&curlun->dev, &dev_attr_file);
			fsg_lun_close(curlun);
			device_unregister(&curlun->dev);
			curlun->registered = 0;
		}
	}

	/* If the thread isn't already dead, tell it to exit now */
	if (fsg->state != FSG_STATE_TERMINATED) {
		raise_exception(fsg, FSG_STATE_EXIT);
		wait_for_completion(&fsg->thread_notifier);

		/* The cleanup routine waits for this completion also */
		complete(&fsg->thread_notifier);
	}

	/* Free the data buffers */
	for (i = 0; i < FSG_NUM_BUFFERS; ++i)
		kfree(fsg->buffhds[i].buf);

	/* Free the request and buffer for endpoint 0 */
	if (req) {
		kfree(req->buf);
		usb_ep_free_request(fsg->ep0, req);
	}

	set_gadget_data(gadget, NULL);
}
static int __init check_parameters(struct fsg_dev *fsg)
{
	int	prot;
	int	gcnum;

	/* Store the default values */
	mod_data.transport_type = USB_PR_BULK;
	mod_data.transport_name = "Bulk-only";
	mod_data.protocol_type = USB_SC_SCSI;
	mod_data.protocol_name = "Transparent SCSI";

	/* Some peripheral controllers are known not to be able to
	 * halt bulk endpoints correctly.  If one of them is present,
	 * disable stalls.
	 */
	if (gadget_is_at91(fsg->gadget))
		mod_data.can_stall = 0;

	if (mod_data.release == 0xffff) {	// Parameter wasn't set
		gcnum = usb_gadget_controller_number(fsg->gadget);
		if (gcnum >= 0)
			mod_data.release = 0x0300 + gcnum;
		else {
			WARNING(fsg, "controller '%s' not recognized\n",
				fsg->gadget->name);
			mod_data.release = 0x0399;
		}
	}

	prot = simple_strtol(mod_data.protocol_parm, NULL, 0);

#ifdef CONFIG_USB_FILE_STORAGE_TEST
	if (strnicmp(mod_data.transport_parm, "BBB", 10) == 0) {
		;		// Use default setting
	} else if (strnicmp(mod_data.transport_parm, "CB", 10) == 0) {
		mod_data.transport_type = USB_PR_CB;
		mod_data.transport_name = "Control-Bulk";
	} else if (strnicmp(mod_data.transport_parm, "CBI", 10) == 0) {
		mod_data.transport_type = USB_PR_CBI;
		mod_data.transport_name = "Control-Bulk-Interrupt";
	} else {
		ERROR(fsg, "invalid transport: %s\n", mod_data.transport_parm);
		return -EINVAL;
	}

	if (strnicmp(mod_data.protocol_parm, "SCSI", 10) == 0 ||
			prot == USB_SC_SCSI) {
		;		// Use default setting
	} else if (strnicmp(mod_data.protocol_parm, "RBC", 10) == 0 ||
			prot == USB_SC_RBC) {
		mod_data.protocol_type = USB_SC_RBC;
		mod_data.protocol_name = "RBC";
	} else if (strnicmp(mod_data.protocol_parm, "8020", 4) == 0 ||
			strnicmp(mod_data.protocol_parm, "ATAPI", 10) == 0 ||
			prot == USB_SC_8020) {
		mod_data.protocol_type = USB_SC_8020;
		mod_data.protocol_name = "8020i (ATAPI)";
	} else if (strnicmp(mod_data.protocol_parm, "QIC", 3) == 0 ||
			prot == USB_SC_QIC) {
		mod_data.protocol_type = USB_SC_QIC;
		mod_data.protocol_name = "QIC-157";
	} else if (strnicmp(mod_data.protocol_parm, "UFI", 10) == 0 ||
			prot == USB_SC_UFI) {
		mod_data.protocol_type = USB_SC_UFI;
		mod_data.protocol_name = "UFI";
	} else if (strnicmp(mod_data.protocol_parm, "8070", 4) == 0 ||
			prot == USB_SC_8070) {
		mod_data.protocol_type = USB_SC_8070;
		mod_data.protocol_name = "8070i";
	} else {
		ERROR(fsg, "invalid protocol: %s\n", mod_data.protocol_parm);
		return -EINVAL;
	}

	mod_data.buflen &= PAGE_CACHE_MASK;
	if (mod_data.buflen <= 0) {
		ERROR(fsg, "invalid buflen\n");
		return -ETOOSMALL;
	}
#endif /* CONFIG_USB_FILE_STORAGE_TEST */

	/* Serial string handling.
	 * On a real device, the serial string would be loaded
	 * from permanent storage. */
	if (mod_data.serial) {
		const char *ch;
		unsigned len = 0;

		/* The CB[I] specification limits the serial string to
		 * 12 uppercase hexadecimal characters.
		 * BBB need at least 12 uppercase hexadecimal characters,
		 * with a maximum of 126. */
		for (ch = mod_data.serial; *ch; ++ch) {
			++len;
			if ((*ch < '0' || *ch > '9') &&
			    (*ch < 'A' || *ch > 'F')) { /* not uppercase hex */
				WARNING(fsg,
					"Invalid serial string character: %c\n",
					*ch);
				goto no_serial;
			}
		}
		if (len > 126 ||
		    (mod_data.transport_type == USB_PR_BULK && len < 12) ||
		    (mod_data.transport_type != USB_PR_BULK && len > 12)) {
			WARNING(fsg, "Invalid serial string length!\n");
			goto no_serial;
		}
		fsg_strings[FSG_STRING_SERIAL - 1].s = mod_data.serial;
	} else {
		WARNING(fsg, "No serial-number string provided!\n");
 no_serial:
		device_desc.iSerialNumber = 0;
	}

	return 0;
}
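
/*
 * Illustrative sketch (not part of the driver): the serial-string rule
 * enforced above, restated as a stand-alone check.  Only the characters
 * 0-9 and A-F are accepted; Bulk-only wants at least 12 (and at most 126)
 * of them, while CB/CBI allows at most 12.  A hypothetical valid value on
 * the module command line would be serial=0123456789AB.
 */
#if 0	/* example only, never compiled */
static int example_serial_ok(const char *s, int is_bulk_only)
{
	unsigned	len = 0;
	const char	*ch;

	for (ch = s; *ch; ++ch) {
		++len;
		if ((*ch < '0' || *ch > '9') && (*ch < 'A' || *ch > 'F'))
			return 0;	/* not uppercase hex */
	}
	if (len > 126)
		return 0;
	if (is_bulk_only)
		return len >= 12;	/* BBB: 12 to 126 characters */
	return len <= 12;		/* CB/CBI: at most 12 characters */
}
#endif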
static int __init fsg_bind(struct usb_gadget *gadget)
{
	struct fsg_dev		*fsg = the_fsg;
	int			rc;
	int			i;
	struct fsg_lun		*curlun;
	struct usb_ep		*ep;
	struct usb_request	*req;
	char			*pathbuf, *p;

	fsg->gadget = gadget;
	set_gadget_data(gadget, fsg);
	fsg->ep0 = gadget->ep0;
	fsg->ep0->driver_data = fsg;

	if ((rc = check_parameters(fsg)) != 0)
		goto out;

	if (mod_data.removable) {	// Enable the store_xxx attributes
		dev_attr_file.attr.mode = 0644;
		dev_attr_file.store = fsg_store_file;
		if (!mod_data.cdrom) {
			dev_attr_ro.attr.mode = 0644;
			dev_attr_ro.store = fsg_store_ro;
		}
	}

	/* Only for removable media? */
	dev_attr_nofua.attr.mode = 0644;
	dev_attr_nofua.store = fsg_store_nofua;

	/* Find out how many LUNs there should be */
	i = mod_data.nluns;
	if (i == 0)
		i = max(mod_data.num_filenames, 1u);
	if (i > FSG_MAX_LUNS) {
		ERROR(fsg, "invalid number of LUNs: %d\n", i);
		rc = -EINVAL;
		goto out;
	}

	/* Create the LUNs, open their backing files, and register the
	 * LUN devices in sysfs. */
	fsg->luns = kzalloc(i * sizeof(struct fsg_lun), GFP_KERNEL);
	if (!fsg->luns) {
		rc = -ENOMEM;
		goto out;
	}
	fsg->nluns = i;

	for (i = 0; i < fsg->nluns; ++i) {
		curlun = &fsg->luns[i];
		curlun->cdrom = !!mod_data.cdrom;
		curlun->ro = mod_data.cdrom || mod_data.ro[i];
		curlun->initially_ro = curlun->ro;
		curlun->removable = mod_data.removable;
		curlun->nofua = mod_data.nofua[i];
		curlun->dev.release = lun_release;
		curlun->dev.parent = &gadget->dev;
		curlun->dev.driver = &fsg_driver.driver;
		dev_set_drvdata(&curlun->dev, &fsg->filesem);
		dev_set_name(&curlun->dev,"%s-lun%d",
			     dev_name(&gadget->dev), i);

		kref_get(&fsg->ref);
		rc = device_register(&curlun->dev);
		if (rc) {
			INFO(fsg, "failed to register LUN%d: %d\n", i, rc);
			put_device(&curlun->dev);
			goto out;
		}
		curlun->registered = 1;

		rc = device_create_file(&curlun->dev, &dev_attr_ro);
		if (rc)
			goto out;
		rc = device_create_file(&curlun->dev, &dev_attr_nofua);
		if (rc)
			goto out;
		rc = device_create_file(&curlun->dev, &dev_attr_file);
		if (rc)
			goto out;

		if (mod_data.file[i] && *mod_data.file[i]) {
			rc = fsg_lun_open(curlun, mod_data.file[i]);
			if (rc)
				goto out;
		} else if (!mod_data.removable) {
			ERROR(fsg, "no file given for LUN%d\n", i);
			rc = -EINVAL;
			goto out;
		}
	}

	/* Find all the endpoints we will use */
	usb_ep_autoconfig_reset(gadget);
	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
	if (!ep)
		goto autoconf_fail;
	ep->driver_data = fsg;		// claim the endpoint
	fsg->bulk_in = ep;

	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
	if (!ep)
		goto autoconf_fail;
	ep->driver_data = fsg;		// claim the endpoint
	fsg->bulk_out = ep;

	if (transport_is_cbi()) {
		ep = usb_ep_autoconfig(gadget, &fsg_fs_intr_in_desc);
		if (!ep)
			goto autoconf_fail;
		ep->driver_data = fsg;		// claim the endpoint
		fsg->intr_in = ep;
	}

	/* Fix up the descriptors */
	device_desc.bMaxPacketSize0 = fsg->ep0->maxpacket;
	device_desc.idVendor = cpu_to_le16(mod_data.vendor);
	device_desc.idProduct = cpu_to_le16(mod_data.product);
	device_desc.bcdDevice = cpu_to_le16(mod_data.release);

	i = (transport_is_cbi() ? 3 : 2);	// Number of endpoints
	fsg_intf_desc.bNumEndpoints = i;
	fsg_intf_desc.bInterfaceSubClass = mod_data.protocol_type;
	fsg_intf_desc.bInterfaceProtocol = mod_data.transport_type;
	fsg_fs_function[i + FSG_FS_FUNCTION_PRE_EP_ENTRIES] = NULL;

	if (gadget_is_dualspeed(gadget)) {
		fsg_hs_function[i + FSG_HS_FUNCTION_PRE_EP_ENTRIES] = NULL;

		/* Assume ep0 uses the same maxpacket value for both speeds */
		dev_qualifier.bMaxPacketSize0 = fsg->ep0->maxpacket;

		/* Assume endpoint addresses are the same for both speeds */
		fsg_hs_bulk_in_desc.bEndpointAddress =
			fsg_fs_bulk_in_desc.bEndpointAddress;
		fsg_hs_bulk_out_desc.bEndpointAddress =
			fsg_fs_bulk_out_desc.bEndpointAddress;
		fsg_hs_intr_in_desc.bEndpointAddress =
			fsg_fs_intr_in_desc.bEndpointAddress;
	}

	if (gadget_is_otg(gadget))
		fsg_otg_desc.bmAttributes |= USB_OTG_HNP;

	rc = -ENOMEM;

	/* Allocate the request and buffer for endpoint 0 */
	fsg->ep0req = req = usb_ep_alloc_request(fsg->ep0, GFP_KERNEL);
	if (!req)
		goto out;
	req->buf = kmalloc(EP0_BUFSIZE, GFP_KERNEL);
	if (!req->buf)
		goto out;
	req->complete = ep0_complete;

	/* Allocate the data buffers */
	for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
		struct fsg_buffhd	*bh = &fsg->buffhds[i];

		/* Allocate for the bulk-in endpoint.  We assume that
		 * the buffer will also work with the bulk-out (and
		 * interrupt-in) endpoint. */
		bh->buf = kmalloc(mod_data.buflen, GFP_KERNEL);
		if (!bh->buf)
			goto out;
		bh->next = bh + 1;
	}
	fsg->buffhds[FSG_NUM_BUFFERS - 1].next = &fsg->buffhds[0];

	/* This should reflect the actual gadget power source */
	usb_gadget_set_selfpowered(gadget);

	snprintf(fsg_string_manufacturer, sizeof fsg_string_manufacturer,
			"%s %s with %s",
			init_utsname()->sysname, init_utsname()->release,
			gadget->name);

	fsg->thread_task = kthread_create(fsg_main_thread, fsg,
			"file-storage-gadget");
	if (IS_ERR(fsg->thread_task)) {
		rc = PTR_ERR(fsg->thread_task);
		goto out;
	}

	INFO(fsg, DRIVER_DESC ", version: " DRIVER_VERSION "\n");
	INFO(fsg, "Number of LUNs=%d\n", fsg->nluns);

	pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
	for (i = 0; i < fsg->nluns; ++i) {
		curlun = &fsg->luns[i];
		if (fsg_lun_is_open(curlun)) {
			p = NULL;
			if (pathbuf) {
				p = d_path(&curlun->filp->f_path,
					   pathbuf, PATH_MAX);
				if (IS_ERR(p))
					p = NULL;
			}
			LINFO(curlun, "ro=%d, nofua=%d, file: %s\n",
			      curlun->ro, curlun->nofua, (p ? p : "(error)"));
		}
	}
	kfree(pathbuf);

	DBG(fsg, "transport=%s (x%02x)\n",
			mod_data.transport_name, mod_data.transport_type);
	DBG(fsg, "protocol=%s (x%02x)\n",
			mod_data.protocol_name, mod_data.protocol_type);
	DBG(fsg, "VendorID=x%04x, ProductID=x%04x, Release=x%04x\n",
			mod_data.vendor, mod_data.product, mod_data.release);
	DBG(fsg, "removable=%d, stall=%d, cdrom=%d, buflen=%u\n",
			mod_data.removable, mod_data.can_stall,
			mod_data.cdrom, mod_data.buflen);
	DBG(fsg, "I/O thread pid: %d\n", task_pid_nr(fsg->thread_task));

	set_bit(REGISTERED, &fsg->atomic_bitflags);

	/* Tell the thread to start working */
	wake_up_process(fsg->thread_task);
	return 0;

autoconf_fail:
	ERROR(fsg, "unable to autoconfigure all endpoints\n");
	rc = -ENOTSUPP;

out:
	fsg->state = FSG_STATE_TERMINATED;	// The thread is dead
	fsg_unbind(gadget);
	complete(&fsg->thread_notifier);
	return rc;
}

/*-------------------------------------------------------------------------*/

static void fsg_suspend(struct usb_gadget *gadget)
{
	struct fsg_dev		*fsg = get_gadget_data(gadget);

	DBG(fsg, "suspend\n");
	set_bit(SUSPENDED, &fsg->atomic_bitflags);
}

static void fsg_resume(struct usb_gadget *gadget)
{
	struct fsg_dev		*fsg = get_gadget_data(gadget);

	DBG(fsg, "resume\n");
	clear_bit(SUSPENDED, &fsg->atomic_bitflags);
}

/*-------------------------------------------------------------------------*/

static struct usb_gadget_driver		fsg_driver = {
#ifdef CONFIG_USB_GADGET_DUALSPEED
	.speed		= USB_SPEED_HIGH,
#else
	.speed		= USB_SPEED_FULL,
#endif
	.function	= (char *) fsg_string_product,
	.unbind		= fsg_unbind,
	.disconnect	= fsg_disconnect,
	.setup		= fsg_setup,
	.suspend	= fsg_suspend,
	.resume		= fsg_resume,

	.driver		= {
		.name		= DRIVER_NAME,
		.owner		= THIS_MODULE,
	},
};
static int __init fsg_alloc(void)
{
	struct fsg_dev		*fsg;

	fsg = kzalloc(sizeof *fsg, GFP_KERNEL);
	if (!fsg)
		return -ENOMEM;
	spin_lock_init(&fsg->lock);
	init_rwsem(&fsg->filesem);
	kref_init(&fsg->ref);
	init_completion(&fsg->thread_notifier);

	the_fsg = fsg;
	return 0;
}


static int __init fsg_init(void)
{
	int		rc;
	struct fsg_dev	*fsg;

	if ((rc = fsg_alloc()) != 0)
		return rc;
	fsg = the_fsg;
	if ((rc = usb_gadget_probe_driver(&fsg_driver, fsg_bind)) != 0)
		kref_put(&fsg->ref, fsg_release);
	return rc;
}
module_init(fsg_init);


static void __exit fsg_cleanup(void)
{
	struct fsg_dev	*fsg = the_fsg;

	/* Unregister the driver iff the thread hasn't already done so */
	if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags))
		usb_gadget_unregister_driver(&fsg_driver);

	/* Wait for the thread to finish up */
	wait_for_completion(&fsg->thread_notifier);

	kref_put(&fsg->ref, fsg_release);
}
module_exit(fsg_cleanup);