Linux Kernel 3.7.1
file_storage.c
1 /*
2  * file_storage.c -- File-backed USB Storage Gadget, for USB development
3  *
4  * Copyright (C) 2003-2008 Alan Stern
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  * notice, this list of conditions, and the following disclaimer,
12  * without modification.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  * notice, this list of conditions and the following disclaimer in the
15  * documentation and/or other materials provided with the distribution.
16  * 3. The names of the above-listed copyright holders may not be used
17  * to endorse or promote products derived from this software without
18  * specific prior written permission.
19  *
20  * ALTERNATIVELY, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") as published by the Free Software
22  * Foundation, either version 2 of that License or (at your option) any
23  * later version.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
26  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
27  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
29  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
30  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
31  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
32  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 
39 /*
40  * The File-backed Storage Gadget acts as a USB Mass Storage device,
41  * appearing to the host as a disk drive or as a CD-ROM drive. In addition
42  * to providing an example of a genuinely useful gadget driver for a USB
43  * device, it also illustrates a technique of double-buffering for increased
44  * throughput. Last but not least, it gives an easy way to probe the
45  * behavior of the Mass Storage drivers in a USB host.
46  *
47  * Backing storage is provided by a regular file or a block device, specified
48  * by the "file" module parameter. Access can be limited to read-only by
49  * setting the optional "ro" module parameter. (For CD-ROM emulation,
50  * access is always read-only.) The gadget will indicate that it has
51  * removable media if the optional "removable" module parameter is set.
52  *
53  * The gadget supports the Control-Bulk (CB), Control-Bulk-Interrupt (CBI),
54  * and Bulk-Only (also known as Bulk-Bulk-Bulk or BBB) transports, selected
55  * by the optional "transport" module parameter. It also supports the
56  * following protocols: RBC (0x01), ATAPI or SFF-8020i (0x02), QIC-157 (0x03),
57  * UFI (0x04), SFF-8070i (0x05), and transparent SCSI (0x06), selected by
58  * the optional "protocol" module parameter. In addition, the default
59  * Vendor ID, Product ID, release number and serial number can be overridden.
60  *
61  * There is support for multiple logical units (LUNs), each of which has
62  * its own backing file. The number of LUNs can be set using the optional
63  * "luns" module parameter (anywhere from 1 to 8), and the corresponding
64  * files are specified using comma-separated lists for "file" and "ro".
65  * The default number of LUNs is taken from the number of "file" elements;
66  * it is 1 if "file" is not given. If "removable" is not set then a backing
67  * file must be specified for each LUN. If it is set, then an unspecified
68  * or empty backing filename means the LUN's medium is not loaded. Ideally
69  * each LUN would be settable independently as a disk drive or a CD-ROM
70  * drive, but currently all LUNs have to be the same type. The CD-ROM
71  * emulation includes a single data track and no audio tracks; hence there
72  * need be only one backing file per LUN.
73  *
74  * Requirements are modest; only a bulk-in and a bulk-out endpoint are
75  * needed (an interrupt-out endpoint is also needed for CBI). The memory
76  * requirement amounts to two 16K buffers, size configurable by a parameter.
77  * Support is included for both full-speed and high-speed operation.
78  *
79  * Note that the driver is slightly non-portable in that it assumes a
80  * single memory/DMA buffer will be useable for bulk-in, bulk-out, and
81  * interrupt-in endpoints. With most device controllers this isn't an
82  * issue, but there may be some with hardware restrictions that prevent
83  * a buffer from being used by more than one endpoint.
84  *
85  * Module options:
86  *
87  * file=filename[,filename...]
88  * Required if "removable" is not set, names of
89  * the files or block devices used for
90  * backing storage
91  * serial=HHHH... Required serial number (string of hex chars)
92  * ro=b[,b...] Default false, booleans for read-only access
93  * removable Default false, boolean for removable media
94  * luns=N Default N = number of filenames, number of
95  * LUNs to support
96  * nofua=b[,b...] Default false, booleans for ignore FUA flag
97  * in SCSI WRITE(10,12) commands
98  * stall Default determined according to the type of
99  * USB device controller (usually true),
100  * boolean to permit the driver to halt
101  * bulk endpoints
102  * cdrom Default false, boolean for whether to emulate
103  * a CD-ROM drive
104  * transport=XXX Default BBB, transport name (CB, CBI, or BBB)
105  * protocol=YYY Default SCSI, protocol name (RBC, 8020 or
106  * ATAPI, QIC, UFI, 8070, or SCSI;
107  * also 1 - 6)
108  * vendor=0xVVVV Default 0x0525 (NetChip), USB Vendor ID
109  * product=0xPPPP Default 0xa4a5 (FSG), USB Product ID
110  * release=0xRRRR Override the USB release number (bcdDevice)
111  * buflen=N Default N=16384, buffer size used (will be
112  * rounded down to a multiple of
113  * PAGE_CACHE_SIZE)
114  *
115  * If CONFIG_USB_FILE_STORAGE_TEST is not set, only the "file", "serial", "ro",
116  * "removable", "luns", "nofua", "stall", and "cdrom" options are available;
117  * default values are used for everything else.
118  *
119  * The pathnames of the backing files and the ro settings are available in
120  * the attribute files "file", "nofua", and "ro" in the lun<n> subdirectory of
121  * the gadget's sysfs directory. If the "removable" option is set, writing to
122  * these files will simulate ejecting/loading the medium (writing an empty
123  * line means eject) and adjusting a write-enable tab. Changes to the ro
124  * setting are not allowed when the medium is loaded or if CD-ROM emulation
125  * is being used.
126  *
127  * This gadget driver is heavily based on "Gadget Zero" by David Brownell.
128  * The driver's SCSI command interface was based on the "Information
129  * technology - Small Computer System Interface - 2" document from
130  * X3T9.2 Project 375D, Revision 10L, 7-SEP-93, available at
131  * <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>. The single exception
132  * is opcode 0x23 (READ FORMAT CAPACITIES), which was based on the
133  * "Universal Serial Bus Mass Storage Class UFI Command Specification"
134  * document, Revision 1.0, December 14, 1998, available at
135  * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
136  */
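/*
 * Example invocation (the backing-file paths, serial number, and LUN count
 * below are illustrative, not defaults):
 *
 *	modprobe g_file_storage file=/srv/lun0.img,/srv/lun1.img \
 *		ro=0,1 luns=2 removable=1 stall=0 serial=0123456789AB
 *
 * This exports two LUNs backed by the named image files, makes the second
 * one read-only, reports removable media, and tells the driver never to
 * halt the bulk endpoints.
 */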
137 
138 
139 /*
140  * Driver Design
141  *
142  * The FSG driver is fairly straightforward. There is a main kernel
143  * thread that handles most of the work. Interrupt routines field
144  * callbacks from the controller driver: bulk- and interrupt-request
145  * completion notifications, endpoint-0 events, and disconnect events.
146  * Completion events are passed to the main thread by wakeup calls. Many
147  * ep0 requests are handled at interrupt time, but SetInterface,
148  * SetConfiguration, and device reset requests are forwarded to the
149  * thread in the form of "exceptions" using SIGUSR1 signals (since they
150  * should interrupt any ongoing file I/O operations).
151  *
152  * The thread's main routine implements the standard command/data/status
153  * parts of a SCSI interaction. It and its subroutines are full of tests
154  * for pending signals/exceptions -- all this polling is necessary since
155  * the kernel has no setjmp/longjmp equivalents. (Maybe this is an
156  * indication that the driver really wants to be running in userspace.)
157  * An important point is that so long as the thread is alive it keeps an
158  * open reference to the backing file. This will prevent unmounting
159  * the backing file's underlying filesystem and could cause problems
160  * during system shutdown, for example. To prevent such problems, the
161  * thread catches INT, TERM, and KILL signals and converts them into
162  * an EXIT exception.
163  *
164  * In normal operation the main thread is started during the gadget's
165  * fsg_bind() callback and stopped during fsg_unbind(). But it can also
166  * exit when it receives a signal, and there's no point leaving the
167  * gadget running when the thread is dead. So just before the thread
168  * exits, it deregisters the gadget driver. This makes things a little
169  * tricky: The driver is deregistered at two places, and the exiting
170  * thread can indirectly call fsg_unbind() which in turn can tell the
171  * thread to exit. The first problem is resolved through the use of the
172  * REGISTERED atomic bitflag; the driver will only be deregistered once.
173  * The second problem is resolved by having fsg_unbind() check
174  * fsg->state; it won't try to stop the thread if the state is already
175  * FSG_STATE_TERMINATED.
176  *
177  * To provide maximum throughput, the driver uses a circular pipeline of
178  * buffer heads (struct fsg_buffhd). In principle the pipeline can be
179  * arbitrarily long; in practice the benefits don't justify having more
180  * than 2 stages (i.e., double buffering). But it helps to think of the
181  * pipeline as being a long one. Each buffer head contains a bulk-in and
182  * a bulk-out request pointer (since the buffer can be used for both
183  * output and input -- directions always are given from the host's
184  * point of view) as well as a pointer to the buffer and various state
185  * variables.
186  *
187  * Use of the pipeline follows a simple protocol. There is a variable
188  * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
189  * At any time that buffer head may still be in use from an earlier
190  * request, so each buffer head has a state variable indicating whether
191  * it is EMPTY, FULL, or BUSY. Typical use involves waiting for the
192  * buffer head to be EMPTY, filling the buffer either by file I/O or by
193  * USB I/O (during which the buffer head is BUSY), and marking the buffer
194  * head FULL when the I/O is complete. Then the buffer will be emptied
195  * (again possibly by USB I/O, during which it is marked BUSY) and
196  * finally marked EMPTY again (possibly by a completion routine).
197  *
198  * A module parameter tells the driver to avoid stalling the bulk
199  * endpoints wherever the transport specification allows. This is
200  * necessary for some UDCs like the SuperH, which cannot reliably clear a
201  * halt on a bulk endpoint. However, under certain circumstances the
202  * Bulk-only specification requires a stall. In such cases the driver
203  * will halt the endpoint and set a flag indicating that it should clear
204  * the halt in software during the next device reset. Hopefully this
205  * will permit everything to work correctly. Furthermore, although the
206  * specification allows the bulk-out endpoint to halt when the host sends
207  * too much data, implementing this would cause an unavoidable race.
208  * The driver will always use the "no-stall" approach for OUT transfers.
209  *
210  * One subtle point concerns sending status-stage responses for ep0
211  * requests. Some of these requests, such as device reset, can involve
212  * interrupting an ongoing file I/O operation, which might take an
213  * arbitrarily long time. During that delay the host might give up on
214  * the original ep0 request and issue a new one. When that happens the
215  * driver should not notify the host about completion of the original
216  * request, as the host will no longer be waiting for it. So the driver
217  * assigns to each ep0 request a unique tag, and it keeps track of the
218  * tag value of the request associated with a long-running exception
219  * (device-reset, interface-change, or configuration-change). When the
220  * exception handler is finished, the status-stage response is submitted
221  * only if the current ep0 request tag is equal to the exception request
222  * tag. Thus only the most recently received ep0 request will get a
223  * status-stage response.
224  *
225  * Warning: This driver source file is too long. It ought to be split up
226  * into a header file plus about 3 separate .c files, to handle the details
227  * of the Gadget, USB Mass Storage, and SCSI protocols.
228  */
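/*
 * A minimal sketch of the producer side of that pipeline, assuming data is
 * flowing from the backing file to the bulk-in endpoint (the real loops,
 * complete with error handling, are in do_read() and do_write()):
 *
 *	bh = fsg->next_buffhd_to_fill;
 *	while (bh->state != BUF_STATE_EMPTY)	// wait for the buffer
 *		sleep_thread(fsg);
 *	... fill bh->buf from the backing file ...
 *	bh->inreq->length = nread;
 *	bh->state = BUF_STATE_FULL;
 *	start_transfer(fsg, fsg->bulk_in, bh->inreq,
 *			&bh->inreq_busy, &bh->state);	// BUSY during USB I/O
 *	fsg->next_buffhd_to_fill = bh->next;	// completion marks it EMPTY
 */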
229 
230 
231 /* #define VERBOSE_DEBUG */
232 /* #define DUMP_MSGS */
233 
234 
235 #include <linux/blkdev.h>
236 #include <linux/completion.h>
237 #include <linux/dcache.h>
238 #include <linux/delay.h>
239 #include <linux/device.h>
240 #include <linux/fcntl.h>
241 #include <linux/file.h>
242 #include <linux/fs.h>
243 #include <linux/kref.h>
244 #include <linux/kthread.h>
245 #include <linux/limits.h>
246 #include <linux/module.h>
247 #include <linux/rwsem.h>
248 #include <linux/slab.h>
249 #include <linux/spinlock.h>
250 #include <linux/string.h>
251 #include <linux/freezer.h>
252 #include <linux/utsname.h>
253 
254 #include <linux/usb/composite.h>
255 #include <linux/usb/ch9.h>
256 #include <linux/usb/gadget.h>
257 
258 #include "gadget_chips.h"
259 
260 #define DRIVER_DESC "File-backed Storage Gadget"
261 #define DRIVER_NAME "g_file_storage"
262 #define DRIVER_VERSION "1 September 2010"
263 
264 static char fsg_string_manufacturer[64];
265 static const char fsg_string_product[] = DRIVER_DESC;
266 static const char fsg_string_config[] = "Self-powered";
267 static const char fsg_string_interface[] = "Mass Storage";
268 
269 
270 #include "storage_common.c"
271 
272 
273 MODULE_DESCRIPTION(DRIVER_DESC);
274 MODULE_AUTHOR("Alan Stern");
275 MODULE_LICENSE("Dual BSD/GPL");
276 
277 /*
278  * This driver assumes self-powered hardware and has no way for users to
279  * trigger remote wakeup. It uses autoconfiguration to select endpoints
280  * and endpoint addresses.
281  */
282 
283 
284 /*-------------------------------------------------------------------------*/
285 
286 
287 /* Encapsulate the module parameter settings */
288 
289 static struct {
290  char *file[FSG_MAX_LUNS];
291  char *serial;
292  bool ro[FSG_MAX_LUNS];
293  bool nofua[FSG_MAX_LUNS];
294  unsigned int num_filenames;
295  unsigned int num_ros;
296  unsigned int num_nofuas;
297  unsigned int nluns;
298 
299  bool removable;
300  bool can_stall;
301  bool cdrom;
302 
303  char *transport_parm;
304  char *protocol_parm;
305  unsigned short vendor;
306  unsigned short product;
307  unsigned short release;
308  unsigned int buflen;
309 
310  int transport_type;
311  char *transport_name;
312  int protocol_type;
313  char *protocol_name;
314 
315 } mod_data = { // Default values
316  .transport_parm = "BBB",
317  .protocol_parm = "SCSI",
318  .removable = 0,
319  .can_stall = 1,
320  .cdrom = 0,
321  .vendor = FSG_VENDOR_ID,
322  .product = FSG_PRODUCT_ID,
323  .release = 0xffff, // Use controller chip type
324  .buflen = 16384,
325  };
326 
327 
328 module_param_array_named(file, mod_data.file, charp, &mod_data.num_filenames,
329  S_IRUGO);
330 MODULE_PARM_DESC(file, "names of backing files or devices");
331 
332 module_param_named(serial, mod_data.serial, charp, S_IRUGO);
333 MODULE_PARM_DESC(serial, "USB serial number");
334 
335 module_param_array_named(ro, mod_data.ro, bool, &mod_data.num_ros, S_IRUGO);
336 MODULE_PARM_DESC(ro, "true to force read-only");
337 
338 module_param_array_named(nofua, mod_data.nofua, bool, &mod_data.num_nofuas,
339  S_IRUGO);
340 MODULE_PARM_DESC(nofua, "true to ignore SCSI WRITE(10,12) FUA bit");
341 
342 module_param_named(luns, mod_data.nluns, uint, S_IRUGO);
343 MODULE_PARM_DESC(luns, "number of LUNs");
344 
345 module_param_named(removable, mod_data.removable, bool, S_IRUGO);
346 MODULE_PARM_DESC(removable, "true to simulate removable media");
347 
348 module_param_named(stall, mod_data.can_stall, bool, S_IRUGO);
349 MODULE_PARM_DESC(stall, "false to prevent bulk stalls");
350 
351 module_param_named(cdrom, mod_data.cdrom, bool, S_IRUGO);
352 MODULE_PARM_DESC(cdrom, "true to emulate cdrom instead of disk");
353 
354 /* In the non-TEST version, only the module parameters listed above
355  * are available. */
356 #ifdef CONFIG_USB_FILE_STORAGE_TEST
357 
358 module_param_named(transport, mod_data.transport_parm, charp, S_IRUGO);
359 MODULE_PARM_DESC(transport, "type of transport (BBB, CBI, or CB)");
360 
361 module_param_named(protocol, mod_data.protocol_parm, charp, S_IRUGO);
362 MODULE_PARM_DESC(protocol, "type of protocol (RBC, 8020, QIC, UFI, "
363  "8070, or SCSI)");
364 
365 module_param_named(vendor, mod_data.vendor, ushort, S_IRUGO);
366 MODULE_PARM_DESC(vendor, "USB Vendor ID");
367 
368 module_param_named(product, mod_data.product, ushort, S_IRUGO);
369 MODULE_PARM_DESC(product, "USB Product ID");
370 
371 module_param_named(release, mod_data.release, ushort, S_IRUGO);
372 MODULE_PARM_DESC(release, "USB release number");
373 
374 module_param_named(buflen, mod_data.buflen, uint, S_IRUGO);
375 MODULE_PARM_DESC(buflen, "I/O buffer size");
376 
377 #endif /* CONFIG_USB_FILE_STORAGE_TEST */
378 
379 
380 /*
381  * These definitions will permit the compiler to avoid generating code for
382  * parts of the driver that aren't used in the non-TEST version. Even gcc
383  * can recognize when a test of a constant expression yields a dead code
384  * path.
385  */
386 
387 #ifdef CONFIG_USB_FILE_STORAGE_TEST
388 
389 #define transport_is_bbb() (mod_data.transport_type == USB_PR_BULK)
390 #define transport_is_cbi() (mod_data.transport_type == USB_PR_CBI)
391 #define protocol_is_scsi() (mod_data.protocol_type == USB_SC_SCSI)
392 
393 #else
394 
395 #define transport_is_bbb() 1
396 #define transport_is_cbi() 0
397 #define protocol_is_scsi() 1
398 
399 #endif /* CONFIG_USB_FILE_STORAGE_TEST */
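/*
 * For instance, in the non-TEST build a branch such as
 * "if (transport_is_cbi()) ..." reduces to "if (0) ...", so the compiler
 * drops the CBI-only code entirely.
 */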
400 
401 
402 /*-------------------------------------------------------------------------*/
403 
404 
405 struct fsg_dev {
406  /* lock protects: state, all the req_busy's, and cbbuf_cmnd */
407  spinlock_t lock;
408  struct usb_gadget *gadget;
409 
410  /* filesem protects: backing files in use */
411  struct rw_semaphore filesem;
412 
413  /* reference counting: wait until all LUNs are released */
414  struct kref ref;
415 
416  struct usb_ep *ep0; // Handy copy of gadget->ep0
417  struct usb_request *ep0req; // For control responses
418  unsigned int ep0_req_tag;
419  const char *ep0req_name;
420 
421  struct usb_request *intreq; // For interrupt responses
422  int intreq_busy;
423  struct fsg_buffhd *intr_buffhd;
424 
425  unsigned int bulk_out_maxpacket;
426  enum fsg_state state; // For exception handling
427  unsigned int exception_req_tag;
428 
429  u8 config, new_config;
430 
431  unsigned int running : 1;
432  unsigned int bulk_in_enabled : 1;
433  unsigned int bulk_out_enabled : 1;
434  unsigned int intr_in_enabled : 1;
435  unsigned int phase_error : 1;
436  unsigned int short_packet_received : 1;
437  unsigned int bad_lun_okay : 1;
438 
439  unsigned long atomic_bitflags;
440 #define REGISTERED 0
441 #define IGNORE_BULK_OUT 1
442 #define SUSPENDED 2
443 
444  struct usb_ep *bulk_in;
445  struct usb_ep *bulk_out;
446  struct usb_ep *intr_in;
447 
448  struct fsg_buffhd *next_buffhd_to_fill;
449  struct fsg_buffhd *next_buffhd_to_drain;
450 
451  int thread_wakeup_needed;
452  struct completion thread_notifier;
453  struct task_struct *thread_task;
454 
455  int cmnd_size;
456  u8 cmnd[MAX_COMMAND_SIZE];
457  enum data_direction data_dir;
458  u32 data_size;
459  u32 data_size_from_cmnd;
460  u32 tag;
461  unsigned int lun;
462  u32 residue;
463  u32 usb_amount_left;
464 
465  /* The CB protocol offers no way for a host to know when a command
466  * has completed. As a result the next command may arrive early,
467  * and we will still have to handle it. For that reason we need
468  * a buffer to store new commands when using CB (or CBI, which
469  * does not oblige a host to wait for command completion either). */
470  int cbbuf_cmnd_size;
471  u8 cbbuf_cmnd[MAX_COMMAND_SIZE];
472 
473  unsigned int nluns;
474  struct fsg_lun *luns;
475  struct fsg_lun *curlun;
476  /* Must be the last entry */
477  struct fsg_buffhd buffhds[];
478 };
479 
480 typedef void (*fsg_routine_t)(struct fsg_dev *);
481 
482 static int exception_in_progress(struct fsg_dev *fsg)
483 {
484  return (fsg->state > FSG_STATE_IDLE);
485 }
486 
487 /* Make bulk-out requests be divisible by the maxpacket size */
488 static void set_bulk_out_req_length(struct fsg_dev *fsg,
489  struct fsg_buffhd *bh, unsigned int length)
490 {
491  unsigned int rem;
492 
493  bh->bulk_out_intended_length = length;
494  rem = length % fsg->bulk_out_maxpacket;
495  if (rem > 0)
496  length += fsg->bulk_out_maxpacket - rem;
497  bh->outreq->length = length;
498 }
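/* For example, with a 512-byte bulk-out maxpacket a 13000-byte request is
 * padded up to 13312 bytes (26 full packets); a length that is already a
 * multiple of the maxpacket size is left unchanged. */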
499 
500 static struct fsg_dev *the_fsg;
501 static struct usb_gadget_driver fsg_driver;
502 
503 
504 /*-------------------------------------------------------------------------*/
505 
506 static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
507 {
508  const char *name;
509 
510  if (ep == fsg->bulk_in)
511  name = "bulk-in";
512  else if (ep == fsg->bulk_out)
513  name = "bulk-out";
514  else
515  name = ep->name;
516  DBG(fsg, "%s set halt\n", name);
517  return usb_ep_set_halt(ep);
518 }
519 
520 
521 /*-------------------------------------------------------------------------*/
522 
523 /*
524  * DESCRIPTORS ... most are static, but strings and (full) configuration
525  * descriptors are built on demand. Also the (static) config and interface
526  * descriptors are adjusted during fsg_bind().
527  */
528 
529 /* There is only one configuration. */
530 #define CONFIG_VALUE 1
531 
532 static struct usb_device_descriptor
533 device_desc = {
534  .bLength = sizeof device_desc,
535  .bDescriptorType = USB_DT_DEVICE,
536 
537  .bcdUSB = cpu_to_le16(0x0200),
538  .bDeviceClass = USB_CLASS_PER_INTERFACE,
539 
540  /* The next three values can be overridden by module parameters */
541  .idVendor = cpu_to_le16(FSG_VENDOR_ID),
542  .idProduct = cpu_to_le16(FSG_PRODUCT_ID),
543  .bcdDevice = cpu_to_le16(0xffff),
544 
545  .iManufacturer = FSG_STRING_MANUFACTURER,
546  .iProduct = FSG_STRING_PRODUCT,
547  .iSerialNumber = FSG_STRING_SERIAL,
548  .bNumConfigurations = 1,
549 };
550 
551 static struct usb_config_descriptor
552 config_desc = {
553  .bLength = sizeof config_desc,
554  .bDescriptorType = USB_DT_CONFIG,
555 
556  /* wTotalLength computed by usb_gadget_config_buf() */
557  .bNumInterfaces = 1,
558  .bConfigurationValue = CONFIG_VALUE,
559  .iConfiguration = FSG_STRING_CONFIG,
560  .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
561  .bMaxPower = CONFIG_USB_GADGET_VBUS_DRAW / 2,
562 };
563 
564 
565 static struct usb_qualifier_descriptor
566 dev_qualifier = {
567  .bLength = sizeof dev_qualifier,
568  .bDescriptorType = USB_DT_DEVICE_QUALIFIER,
569 
570  .bcdUSB = cpu_to_le16(0x0200),
571  .bDeviceClass = USB_CLASS_PER_INTERFACE,
572 
573  .bNumConfigurations = 1,
574 };
575 
576 static int populate_bos(struct fsg_dev *fsg, u8 *buf)
577 {
578  memcpy(buf, &fsg_bos_desc, USB_DT_BOS_SIZE);
579  buf += USB_DT_BOS_SIZE;
580 
581  memcpy(buf, &fsg_ext_cap_desc, USB_DT_USB_EXT_CAP_SIZE);
582  buf += USB_DT_USB_EXT_CAP_SIZE;
583 
584  memcpy(buf, &fsg_ss_cap_desc, USB_DT_USB_SS_CAP_SIZE);
585 
586  return USB_DT_BOS_SIZE + USB_DT_USB_EXT_CAP_SIZE
587  + USB_DT_USB_SS_CAP_SIZE;
588 }
589 
590 /*
591  * Config descriptors must agree with the code that sets configurations
592  * and with code managing interfaces and their altsettings. They must
593  * also handle different speeds and other-speed requests.
594  */
595 static int populate_config_buf(struct usb_gadget *gadget,
596  u8 *buf, u8 type, unsigned index)
597 {
598  enum usb_device_speed speed = gadget->speed;
599  int len;
600  const struct usb_descriptor_header **function;
601 
602  if (index > 0)
603  return -EINVAL;
604 
605  if (gadget_is_dualspeed(gadget) && type == USB_DT_OTHER_SPEED_CONFIG)
606  speed = (USB_SPEED_FULL + USB_SPEED_HIGH) - speed;
607  function = gadget_is_dualspeed(gadget) && speed == USB_SPEED_HIGH
608  ? (const struct usb_descriptor_header **)fsg_hs_function
609  : (const struct usb_descriptor_header **)fsg_fs_function;
610 
611  /* for now, don't advertise srp-only devices */
612  if (!gadget_is_otg(gadget))
613  function++;
614 
615  len = usb_gadget_config_buf(&config_desc, buf, EP0_BUFSIZE, function);
616  ((struct usb_config_descriptor *) buf)->bDescriptorType = type;
617  return len;
618 }
619 
620 
621 /*-------------------------------------------------------------------------*/
622 
623 /* These routines may be called in process context or in_irq */
624 
625 /* Caller must hold fsg->lock */
626 static void wakeup_thread(struct fsg_dev *fsg)
627 {
628  /* Tell the main thread that something has happened */
629  fsg->thread_wakeup_needed = 1;
630  if (fsg->thread_task)
631  wake_up_process(fsg->thread_task);
632 }
633 
634 
635 static void raise_exception(struct fsg_dev *fsg, enum fsg_state new_state)
636 {
637  unsigned long flags;
638 
639  /* Do nothing if a higher-priority exception is already in progress.
640  * If a lower-or-equal priority exception is in progress, preempt it
641  * and notify the main thread by sending it a signal. */
642  spin_lock_irqsave(&fsg->lock, flags);
643  if (fsg->state <= new_state) {
644  fsg->exception_req_tag = fsg->ep0_req_tag;
645  fsg->state = new_state;
646  if (fsg->thread_task)
647  send_sig_info(SIGUSR1, SEND_SIG_FORCED,
648  fsg->thread_task);
649  }
650  spin_unlock_irqrestore(&fsg->lock, flags);
651 }
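/*
 * The priority ordering is just the numeric order of enum fsg_state in
 * storage_common.c: for example, FSG_STATE_ABORT_BULK_OUT can be preempted
 * by FSG_STATE_RESET, which can in turn be preempted by FSG_STATE_DISCONNECT
 * or FSG_STATE_EXIT, while a new exception whose value is lower than the
 * current state is ignored.
 */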
652 
653 
654 /*-------------------------------------------------------------------------*/
655 
656 /* The disconnect callback and ep0 routines. These always run in_irq,
657  * except that ep0_queue() is called in the main thread to acknowledge
658  * completion of various requests: set config, set interface, and
659  * Bulk-only device reset. */
660 
661 static void fsg_disconnect(struct usb_gadget *gadget)
662 {
663  struct fsg_dev *fsg = get_gadget_data(gadget);
664 
665  DBG(fsg, "disconnect or port reset\n");
666  raise_exception(fsg, FSG_STATE_DISCONNECT);
667 }
668 
669 
670 static int ep0_queue(struct fsg_dev *fsg)
671 {
672  int rc;
673 
674  rc = usb_ep_queue(fsg->ep0, fsg->ep0req, GFP_ATOMIC);
675  if (rc != 0 && rc != -ESHUTDOWN) {
676 
677  /* We can't do much more than wait for a reset */
678  WARNING(fsg, "error in submission: %s --> %d\n",
679  fsg->ep0->name, rc);
680  }
681  return rc;
682 }
683 
684 static void ep0_complete(struct usb_ep *ep, struct usb_request *req)
685 {
686  struct fsg_dev *fsg = ep->driver_data;
687 
688  if (req->actual > 0)
689  dump_msg(fsg, fsg->ep0req_name, req->buf, req->actual);
690  if (req->status || req->actual != req->length)
691  DBG(fsg, "%s --> %d, %u/%u\n", __func__,
692  req->status, req->actual, req->length);
693  if (req->status == -ECONNRESET) // Request was cancelled
694  usb_ep_fifo_flush(ep);
695 
696  if (req->status == 0 && req->context)
697  ((fsg_routine_t) (req->context))(fsg);
698 }
699 
700 
701 /*-------------------------------------------------------------------------*/
702 
703 /* Bulk and interrupt endpoint completion handlers.
704  * These always run in_irq. */
705 
706 static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
707 {
708  struct fsg_dev *fsg = ep->driver_data;
709  struct fsg_buffhd *bh = req->context;
710 
711  if (req->status || req->actual != req->length)
712  DBG(fsg, "%s --> %d, %u/%u\n", __func__,
713  req->status, req->actual, req->length);
714  if (req->status == -ECONNRESET) // Request was cancelled
715  usb_ep_fifo_flush(ep);
716 
717  /* Hold the lock while we update the request and buffer states */
718  smp_wmb();
719  spin_lock(&fsg->lock);
720  bh->inreq_busy = 0;
721  bh->state = BUF_STATE_EMPTY;
722  wakeup_thread(fsg);
723  spin_unlock(&fsg->lock);
724 }
725 
726 static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
727 {
728  struct fsg_dev *fsg = ep->driver_data;
729  struct fsg_buffhd *bh = req->context;
730 
731  dump_msg(fsg, "bulk-out", req->buf, req->actual);
732  if (req->status || req->actual != bh->bulk_out_intended_length)
733  DBG(fsg, "%s --> %d, %u/%u\n", __func__,
734  req->status, req->actual,
735  bh->bulk_out_intended_length);
736  if (req->status == -ECONNRESET) // Request was cancelled
737  usb_ep_fifo_flush(ep);
738 
739  /* Hold the lock while we update the request and buffer states */
740  smp_wmb();
741  spin_lock(&fsg->lock);
742  bh->outreq_busy = 0;
743  bh->state = BUF_STATE_FULL;
744  wakeup_thread(fsg);
745  spin_unlock(&fsg->lock);
746 }
747 
748 
749 #ifdef CONFIG_USB_FILE_STORAGE_TEST
750 static void intr_in_complete(struct usb_ep *ep, struct usb_request *req)
751 {
752  struct fsg_dev *fsg = ep->driver_data;
753  struct fsg_buffhd *bh = req->context;
754 
755  if (req->status || req->actual != req->length)
756  DBG(fsg, "%s --> %d, %u/%u\n", __func__,
757  req->status, req->actual, req->length);
758  if (req->status == -ECONNRESET) // Request was cancelled
759  usb_ep_fifo_flush(ep);
760 
761  /* Hold the lock while we update the request and buffer states */
762  smp_wmb();
763  spin_lock(&fsg->lock);
764  fsg->intreq_busy = 0;
765  bh->state = BUF_STATE_EMPTY;
766  wakeup_thread(fsg);
767  spin_unlock(&fsg->lock);
768 }
769 
770 #else
771 static void intr_in_complete(struct usb_ep *ep, struct usb_request *req)
772 {}
773 #endif /* CONFIG_USB_FILE_STORAGE_TEST */
774 
775 
776 /*-------------------------------------------------------------------------*/
777 
778 /* Ep0 class-specific handlers. These always run in_irq. */
779 
780 #ifdef CONFIG_USB_FILE_STORAGE_TEST
781 static void received_cbi_adsc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
782 {
783  struct usb_request *req = fsg->ep0req;
784  static u8 cbi_reset_cmnd[6] = {
785  SEND_DIAGNOSTIC, 4, 0xff, 0xff, 0xff, 0xff};
786 
787  /* Error in command transfer? */
788  if (req->status || req->length != req->actual ||
789  req->actual < 6 || req->actual > MAX_COMMAND_SIZE) {
790 
791  /* Not all controllers allow a protocol stall after
792  * receiving control-out data, but we'll try anyway. */
793  fsg_set_halt(fsg, fsg->ep0);
794  return; // Wait for reset
795  }
796 
797  /* Is it the special reset command? */
798  if (req->actual >= sizeof cbi_reset_cmnd &&
799  memcmp(req->buf, cbi_reset_cmnd,
800  sizeof cbi_reset_cmnd) == 0) {
801 
802  /* Raise an exception to stop the current operation
803  * and reinitialize our state. */
804  DBG(fsg, "cbi reset request\n");
805  raise_exception(fsg, FSG_STATE_RESET);
806  return;
807  }
808 
809  VDBG(fsg, "CB[I] accept device-specific command\n");
810  spin_lock(&fsg->lock);
811 
812  /* Save the command for later */
813  if (fsg->cbbuf_cmnd_size)
814  WARNING(fsg, "CB[I] overwriting previous command\n");
815  fsg->cbbuf_cmnd_size = req->actual;
816  memcpy(fsg->cbbuf_cmnd, req->buf, fsg->cbbuf_cmnd_size);
817 
818  wakeup_thread(fsg);
819  spin_unlock(&fsg->lock);
820 }
821 
822 #else
823 static void received_cbi_adsc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
824 {}
825 #endif /* CONFIG_USB_FILE_STORAGE_TEST */
826 
827 
828 static int class_setup_req(struct fsg_dev *fsg,
829  const struct usb_ctrlrequest *ctrl)
830 {
831  struct usb_request *req = fsg->ep0req;
832  int value = -EOPNOTSUPP;
833  u16 w_index = le16_to_cpu(ctrl->wIndex);
834  u16 w_value = le16_to_cpu(ctrl->wValue);
835  u16 w_length = le16_to_cpu(ctrl->wLength);
836 
837  if (!fsg->config)
838  return value;
839 
840  /* Handle Bulk-only class-specific requests */
841  if (transport_is_bbb()) {
842  switch (ctrl->bRequest) {
843 
844  case US_BULK_RESET_REQUEST:
845  if (ctrl->bRequestType != (USB_DIR_OUT |
846  USB_TYPE_CLASS | USB_RECIP_INTERFACE))
847  break;
848  if (w_index != 0 || w_value != 0 || w_length != 0) {
849  value = -EDOM;
850  break;
851  }
852 
853  /* Raise an exception to stop the current operation
854  * and reinitialize our state. */
855  DBG(fsg, "bulk reset request\n");
856  raise_exception(fsg, FSG_STATE_RESET);
857  value = DELAYED_STATUS;
858  break;
859 
860  case US_BULK_GET_MAX_LUN:
861  if (ctrl->bRequestType != (USB_DIR_IN |
862  USB_TYPE_CLASS | USB_RECIP_INTERFACE))
863  break;
864  if (w_index != 0 || w_value != 0 || w_length != 1) {
865  value = -EDOM;
866  break;
867  }
868  VDBG(fsg, "get max LUN\n");
869  *(u8 *) req->buf = fsg->nluns - 1;
870  value = 1;
871  break;
872  }
873  }
874 
875  /* Handle CBI class-specific requests */
876  else {
877  switch (ctrl->bRequest) {
878 
879  case USB_CBI_ADSC_REQUEST:
880  if (ctrl->bRequestType != (USB_DIR_OUT |
881  USB_TYPE_CLASS | USB_RECIP_INTERFACE))
882  break;
883  if (w_index != 0 || w_value != 0) {
884  value = -EDOM;
885  break;
886  }
887  if (w_length > MAX_COMMAND_SIZE) {
888  value = -EOVERFLOW;
889  break;
890  }
891  value = w_length;
892  fsg->ep0req->context = received_cbi_adsc;
893  break;
894  }
895  }
896 
897  if (value == -EOPNOTSUPP)
898  VDBG(fsg,
899  "unknown class-specific control req "
900  "%02x.%02x v%04x i%04x l%u\n",
901  ctrl->bRequestType, ctrl->bRequest,
902  le16_to_cpu(ctrl->wValue), w_index, w_length);
903  return value;
904 }
905 
906 
907 /*-------------------------------------------------------------------------*/
908 
909 /* Ep0 standard request handlers. These always run in_irq. */
910 
911 static int standard_setup_req(struct fsg_dev *fsg,
912  const struct usb_ctrlrequest *ctrl)
913 {
914  struct usb_request *req = fsg->ep0req;
915  int value = -EOPNOTSUPP;
916  u16 w_index = le16_to_cpu(ctrl->wIndex);
917  u16 w_value = le16_to_cpu(ctrl->wValue);
918 
919  /* Usually this just stores reply data in the pre-allocated ep0 buffer,
920  * but config change events will also reconfigure hardware. */
921  switch (ctrl->bRequest) {
922 
923  case USB_REQ_GET_DESCRIPTOR:
924  if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
925  USB_RECIP_DEVICE))
926  break;
927  switch (w_value >> 8) {
928 
929  case USB_DT_DEVICE:
930  VDBG(fsg, "get device descriptor\n");
931  device_desc.bMaxPacketSize0 = fsg->ep0->maxpacket;
932  value = sizeof device_desc;
933  memcpy(req->buf, &device_desc, value);
934  break;
935  case USB_DT_DEVICE_QUALIFIER:
936  VDBG(fsg, "get device qualifier\n");
937  if (!gadget_is_dualspeed(fsg->gadget) ||
938  fsg->gadget->speed == USB_SPEED_SUPER)
939  break;
940  /*
941  * Assume ep0 uses the same maxpacket value for both
942  * speeds
943  */
944  dev_qualifier.bMaxPacketSize0 = fsg->ep0->maxpacket;
945  value = sizeof dev_qualifier;
946  memcpy(req->buf, &dev_qualifier, value);
947  break;
948 
949  case USB_DT_OTHER_SPEED_CONFIG:
950  VDBG(fsg, "get other-speed config descriptor\n");
951  if (!gadget_is_dualspeed(fsg->gadget) ||
952  fsg->gadget->speed == USB_SPEED_SUPER)
953  break;
954  goto get_config;
955  case USB_DT_CONFIG:
956  VDBG(fsg, "get configuration descriptor\n");
957 get_config:
958  value = populate_config_buf(fsg->gadget,
959  req->buf,
960  w_value >> 8,
961  w_value & 0xff);
962  break;
963 
964  case USB_DT_STRING:
965  VDBG(fsg, "get string descriptor\n");
966 
967  /* wIndex == language code */
968  value = usb_gadget_get_string(&fsg_stringtab,
969  w_value & 0xff, req->buf);
970  break;
971 
972  case USB_DT_BOS:
973  VDBG(fsg, "get bos descriptor\n");
974 
975  if (gadget_is_superspeed(fsg->gadget))
976  value = populate_bos(fsg, req->buf);
977  break;
978  }
979 
980  break;
981 
982  /* One config, two speeds */
983  case USB_REQ_SET_CONFIGURATION:
984  if (ctrl->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD |
985  USB_RECIP_DEVICE))
986  break;
987  VDBG(fsg, "set configuration\n");
988  if (w_value == CONFIG_VALUE || w_value == 0) {
989  fsg->new_config = w_value;
990 
991  /* Raise an exception to wipe out previous transaction
992  * state (queued bufs, etc) and set the new config. */
993  raise_exception(fsg, FSG_STATE_CONFIG_CHANGE);
994  value = DELAYED_STATUS;
995  }
996  break;
997  case USB_REQ_GET_CONFIGURATION:
998  if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
999  USB_RECIP_DEVICE))
1000  break;
1001  VDBG(fsg, "get configuration\n");
1002  *(u8 *) req->buf = fsg->config;
1003  value = 1;
1004  break;
1005 
1006  case USB_REQ_SET_INTERFACE:
1007  if (ctrl->bRequestType != (USB_DIR_OUT | USB_TYPE_STANDARD |
1008  USB_RECIP_INTERFACE))
1009  break;
1010  if (fsg->config && w_index == 0) {
1011 
1012  /* Raise an exception to wipe out previous transaction
1013  * state (queued bufs, etc) and install the new
1014  * interface altsetting. */
1015  raise_exception(fsg, FSG_STATE_INTERFACE_CHANGE);
1016  value = DELAYED_STATUS;
1017  }
1018  break;
1019  case USB_REQ_GET_INTERFACE:
1020  if (ctrl->bRequestType != (USB_DIR_IN | USB_TYPE_STANDARD |
1021  USB_RECIP_INTERFACE))
1022  break;
1023  if (!fsg->config)
1024  break;
1025  if (w_index != 0) {
1026  value = -EDOM;
1027  break;
1028  }
1029  VDBG(fsg, "get interface\n");
1030  *(u8 *) req->buf = 0;
1031  value = 1;
1032  break;
1033 
1034  default:
1035  VDBG(fsg,
1036  "unknown control req %02x.%02x v%04x i%04x l%u\n",
1037  ctrl->bRequestType, ctrl->bRequest,
1038  w_value, w_index, le16_to_cpu(ctrl->wLength));
1039  }
1040 
1041  return value;
1042 }
1043 
1044 
1045 static int fsg_setup(struct usb_gadget *gadget,
1046  const struct usb_ctrlrequest *ctrl)
1047 {
1048  struct fsg_dev *fsg = get_gadget_data(gadget);
1049  int rc;
1050  int w_length = le16_to_cpu(ctrl->wLength);
1051 
1052  ++fsg->ep0_req_tag; // Record arrival of a new request
1053  fsg->ep0req->context = NULL;
1054  fsg->ep0req->length = 0;
1055  dump_msg(fsg, "ep0-setup", (u8 *) ctrl, sizeof(*ctrl));
1056 
1057  if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS)
1058  rc = class_setup_req(fsg, ctrl);
1059  else
1060  rc = standard_setup_req(fsg, ctrl);
1061 
1062  /* Respond with data/status or defer until later? */
1063  if (rc >= 0 && rc != DELAYED_STATUS) {
1064  rc = min(rc, w_length);
1065  fsg->ep0req->length = rc;
1066  fsg->ep0req->zero = rc < w_length;
1067  fsg->ep0req_name = (ctrl->bRequestType & USB_DIR_IN ?
1068  "ep0-in" : "ep0-out");
1069  rc = ep0_queue(fsg);
1070  }
1071 
1072  /* Device either stalls (rc < 0) or reports success */
1073  return rc;
1074 }
1075 
1076 
1077 /*-------------------------------------------------------------------------*/
1078 
1079 /* All the following routines run in process context */
1080 
1081 
1082 /* Use this for bulk or interrupt transfers, not ep0 */
1083 static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
1084  struct usb_request *req, int *pbusy,
1085  enum fsg_buffer_state *state)
1086 {
1087  int rc;
1088 
1089  if (ep == fsg->bulk_in)
1090  dump_msg(fsg, "bulk-in", req->buf, req->length);
1091  else if (ep == fsg->intr_in)
1092  dump_msg(fsg, "intr-in", req->buf, req->length);
1093 
1094  spin_lock_irq(&fsg->lock);
1095  *pbusy = 1;
1096  *state = BUF_STATE_BUSY;
1097  spin_unlock_irq(&fsg->lock);
1098  rc = usb_ep_queue(ep, req, GFP_KERNEL);
1099  if (rc != 0) {
1100  *pbusy = 0;
1101  *state = BUF_STATE_EMPTY;
1102 
1103  /* We can't do much more than wait for a reset */
1104 
1105  /* Note: currently the net2280 driver fails zero-length
1106  * submissions if DMA is enabled. */
1107  if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
1108  req->length == 0))
1109  WARNING(fsg, "error in submission: %s --> %d\n",
1110  ep->name, rc);
1111  }
1112 }
1113 
1114 
1115 static int sleep_thread(struct fsg_dev *fsg)
1116 {
1117  int rc = 0;
1118 
1119  /* Wait until a signal arrives or we are woken up */
1120  for (;;) {
1121  try_to_freeze();
1122  set_current_state(TASK_INTERRUPTIBLE);
1123  if (signal_pending(current)) {
1124  rc = -EINTR;
1125  break;
1126  }
1127  if (fsg->thread_wakeup_needed)
1128  break;
1129  schedule();
1130  }
1131  __set_current_state(TASK_RUNNING);
1132  fsg->thread_wakeup_needed = 0;
1133  return rc;
1134 }
1135 
1136 
1137 /*-------------------------------------------------------------------------*/
1138 
1139 static int do_read(struct fsg_dev *fsg)
1140 {
1141  struct fsg_lun *curlun = fsg->curlun;
1142  u32 lba;
1143  struct fsg_buffhd *bh;
1144  int rc;
1145  u32 amount_left;
1146  loff_t file_offset, file_offset_tmp;
1147  unsigned int amount;
1148  ssize_t nread;
1149 
1150  /* Get the starting Logical Block Address and check that it's
1151  * not too big */
1152  if (fsg->cmnd[0] == READ_6)
1153  lba = get_unaligned_be24(&fsg->cmnd[1]);
1154  else {
1155  lba = get_unaligned_be32(&fsg->cmnd[2]);
1156 
1157  /* We allow DPO (Disable Page Out = don't save data in the
1158  * cache) and FUA (Force Unit Access = don't read from the
1159  * cache), but we don't implement them. */
1160  if ((fsg->cmnd[1] & ~0x18) != 0) {
1161  curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1162  return -EINVAL;
1163  }
1164  }
1165  if (lba >= curlun->num_sectors) {
1166  curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1167  return -EINVAL;
1168  }
1169  file_offset = ((loff_t) lba) << curlun->blkbits;
1170 
1171  /* Carry out the file reads */
1172  amount_left = fsg->data_size_from_cmnd;
1173  if (unlikely(amount_left == 0))
1174  return -EIO; // No default reply
1175 
1176  for (;;) {
1177 
1178  /* Figure out how much we need to read:
1179  * Try to read the remaining amount.
1180  * But don't read more than the buffer size.
1181  * And don't try to read past the end of the file.
1182  */
1183  amount = min((unsigned int) amount_left, mod_data.buflen);
1184  amount = min((loff_t) amount,
1185  curlun->file_length - file_offset);
1186 
1187  /* Wait for the next buffer to become available */
1188  bh = fsg->next_buffhd_to_fill;
1189  while (bh->state != BUF_STATE_EMPTY) {
1190  rc = sleep_thread(fsg);
1191  if (rc)
1192  return rc;
1193  }
1194 
1195  /* If we were asked to read past the end of file,
1196  * end with an empty buffer. */
1197  if (amount == 0) {
1198  curlun->sense_data =
1199  SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1200  curlun->sense_data_info = file_offset >> curlun->blkbits;
1201  curlun->info_valid = 1;
1202  bh->inreq->length = 0;
1203  bh->state = BUF_STATE_FULL;
1204  break;
1205  }
1206 
1207  /* Perform the read */
1208  file_offset_tmp = file_offset;
1209  nread = vfs_read(curlun->filp,
1210  (char __user *) bh->buf,
1211  amount, &file_offset_tmp);
1212  VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
1213  (unsigned long long) file_offset,
1214  (int) nread);
1215  if (signal_pending(current))
1216  return -EINTR;
1217 
1218  if (nread < 0) {
1219  LDBG(curlun, "error in file read: %d\n",
1220  (int) nread);
1221  nread = 0;
1222  } else if (nread < amount) {
1223  LDBG(curlun, "partial file read: %d/%u\n",
1224  (int) nread, amount);
1225  nread = round_down(nread, curlun->blksize);
1226  }
1227  file_offset += nread;
1228  amount_left -= nread;
1229  fsg->residue -= nread;
1230 
1231  /* Except at the end of the transfer, nread will be
1232  * equal to the buffer size, which is divisible by the
1233  * bulk-in maxpacket size.
1234  */
1235  bh->inreq->length = nread;
1236  bh->state = BUF_STATE_FULL;
1237 
1238  /* If an error occurred, report it and its position */
1239  if (nread < amount) {
1240  curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
1241  curlun->sense_data_info = file_offset >> curlun->blkbits;
1242  curlun->info_valid = 1;
1243  break;
1244  }
1245 
1246  if (amount_left == 0)
1247  break; // No more left to read
1248 
1249  /* Send this buffer and go read some more */
1250  bh->inreq->zero = 0;
1251  start_transfer(fsg, fsg->bulk_in, bh->inreq,
1252  &bh->inreq_busy, &bh->state);
1253  fsg->next_buffhd_to_fill = bh->next;
1254  }
1255 
1256  return -EIO; // No default reply
1257 }
1258 
1259 
1260 /*-------------------------------------------------------------------------*/
1261 
1262 static int do_write(struct fsg_dev *fsg)
1263 {
1264  struct fsg_lun *curlun = fsg->curlun;
1265  u32 lba;
1266  struct fsg_buffhd *bh;
1267  int get_some_more;
1268  u32 amount_left_to_req, amount_left_to_write;
1269  loff_t usb_offset, file_offset, file_offset_tmp;
1270  unsigned int amount;
1271  ssize_t nwritten;
1272  int rc;
1273 
1274  if (curlun->ro) {
1275  curlun->sense_data = SS_WRITE_PROTECTED;
1276  return -EINVAL;
1277  }
1278  spin_lock(&curlun->filp->f_lock);
1279  curlun->filp->f_flags &= ~O_SYNC; // Default is not to wait
1280  spin_unlock(&curlun->filp->f_lock);
1281 
1282  /* Get the starting Logical Block Address and check that it's
1283  * not too big */
1284  if (fsg->cmnd[0] == WRITE_6)
1285  lba = get_unaligned_be24(&fsg->cmnd[1]);
1286  else {
1287  lba = get_unaligned_be32(&fsg->cmnd[2]);
1288 
1289  /* We allow DPO (Disable Page Out = don't save data in the
1290  * cache) and FUA (Force Unit Access = write directly to the
1291  * medium). We don't implement DPO; we implement FUA by
1292  * performing synchronous output. */
1293  if ((fsg->cmnd[1] & ~0x18) != 0) {
1294  curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1295  return -EINVAL;
1296  }
1297  /* FUA */
1298  if (!curlun->nofua && (fsg->cmnd[1] & 0x08)) {
1299  spin_lock(&curlun->filp->f_lock);
1300  curlun->filp->f_flags |= O_DSYNC;
1301  spin_unlock(&curlun->filp->f_lock);
1302  }
1303  }
1304  if (lba >= curlun->num_sectors) {
1305  curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1306  return -EINVAL;
1307  }
1308 
1309  /* Carry out the file writes */
1310  get_some_more = 1;
1311  file_offset = usb_offset = ((loff_t) lba) << curlun->blkbits;
1312  amount_left_to_req = amount_left_to_write = fsg->data_size_from_cmnd;
1313 
1314  while (amount_left_to_write > 0) {
1315 
1316  /* Queue a request for more data from the host */
1317  bh = fsg->next_buffhd_to_fill;
1318  if (bh->state == BUF_STATE_EMPTY && get_some_more) {
1319 
1320  /* Figure out how much we want to get:
1321  * Try to get the remaining amount,
1322  * but not more than the buffer size.
1323  */
1324  amount = min(amount_left_to_req, mod_data.buflen);
1325 
1326  /* Beyond the end of the backing file? */
1327  if (usb_offset >= curlun->file_length) {
1328  get_some_more = 0;
1329  curlun->sense_data =
1330  SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1331  curlun->sense_data_info = usb_offset >> curlun->blkbits;
1332  curlun->info_valid = 1;
1333  continue;
1334  }
1335 
1336  /* Get the next buffer */
1337  usb_offset += amount;
1338  fsg->usb_amount_left -= amount;
1339  amount_left_to_req -= amount;
1340  if (amount_left_to_req == 0)
1341  get_some_more = 0;
1342 
1343  /* Except at the end of the transfer, amount will be
1344  * equal to the buffer size, which is divisible by
1345  * the bulk-out maxpacket size.
1346  */
1347  set_bulk_out_req_length(fsg, bh, amount);
1348  start_transfer(fsg, fsg->bulk_out, bh->outreq,
1349  &bh->outreq_busy, &bh->state);
1350  fsg->next_buffhd_to_fill = bh->next;
1351  continue;
1352  }
1353 
1354  /* Write the received data to the backing file */
1355  bh = fsg->next_buffhd_to_drain;
1356  if (bh->state == BUF_STATE_EMPTY && !get_some_more)
1357  break; // We stopped early
1358  if (bh->state == BUF_STATE_FULL) {
1359  smp_rmb();
1360  fsg->next_buffhd_to_drain = bh->next;
1361  bh->state = BUF_STATE_EMPTY;
1362 
1363  /* Did something go wrong with the transfer? */
1364  if (bh->outreq->status != 0) {
1365  curlun->sense_data = SS_COMMUNICATION_FAILURE;
1366  curlun->sense_data_info = file_offset >> curlun->blkbits;
1367  curlun->info_valid = 1;
1368  break;
1369  }
1370 
1371  amount = bh->outreq->actual;
1372  if (curlun->file_length - file_offset < amount) {
1373  LERROR(curlun,
1374  "write %u @ %llu beyond end %llu\n",
1375  amount, (unsigned long long) file_offset,
1376  (unsigned long long) curlun->file_length);
1377  amount = curlun->file_length - file_offset;
1378  }
1379 
1380  /* Don't accept excess data. The spec doesn't say
1381  * what to do in this case. We'll ignore the error.
1382  */
1383  amount = min(amount, bh->bulk_out_intended_length);
1384 
1385  /* Don't write a partial block */
1386  amount = round_down(amount, curlun->blksize);
1387  if (amount == 0)
1388  goto empty_write;
1389 
1390  /* Perform the write */
1391  file_offset_tmp = file_offset;
1392  nwritten = vfs_write(curlun->filp,
1393  (char __user *) bh->buf,
1394  amount, &file_offset_tmp);
1395  VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
1396  (unsigned long long) file_offset,
1397  (int) nwritten);
1398  if (signal_pending(current))
1399  return -EINTR; // Interrupted!
1400 
1401  if (nwritten < 0) {
1402  LDBG(curlun, "error in file write: %d\n",
1403  (int) nwritten);
1404  nwritten = 0;
1405  } else if (nwritten < amount) {
1406  LDBG(curlun, "partial file write: %d/%u\n",
1407  (int) nwritten, amount);
1408  nwritten = round_down(nwritten, curlun->blksize);
1409  }
1410  file_offset += nwritten;
1411  amount_left_to_write -= nwritten;
1412  fsg->residue -= nwritten;
1413 
1414  /* If an error occurred, report it and its position */
1415  if (nwritten < amount) {
1416  curlun->sense_data = SS_WRITE_ERROR;
1417  curlun->sense_data_info = file_offset >> curlun->blkbits;
1418  curlun->info_valid = 1;
1419  break;
1420  }
1421 
1422  empty_write:
1423  /* Did the host decide to stop early? */
1424  if (bh->outreq->actual < bh->bulk_out_intended_length) {
1425  fsg->short_packet_received = 1;
1426  break;
1427  }
1428  continue;
1429  }
1430 
1431  /* Wait for something to happen */
1432  rc = sleep_thread(fsg);
1433  if (rc)
1434  return rc;
1435  }
1436 
1437  return -EIO; // No default reply
1438 }
1439 
1440 
1441 /*-------------------------------------------------------------------------*/
1442 
1443 static int do_synchronize_cache(struct fsg_dev *fsg)
1444 {
1445  struct fsg_lun *curlun = fsg->curlun;
1446  int rc;
1447 
1448  /* We ignore the requested LBA and write out all file's
1449  * dirty data buffers. */
1450  rc = fsg_lun_fsync_sub(curlun);
1451  if (rc)
1452  curlun->sense_data = SS_WRITE_ERROR;
1453  return 0;
1454 }
1455 
1456 
1457 /*-------------------------------------------------------------------------*/
1458 
1459 static void invalidate_sub(struct fsg_lun *curlun)
1460 {
1461  struct file *filp = curlun->filp;
1462  struct inode *inode = filp->f_path.dentry->d_inode;
1463  unsigned long rc;
1464 
1465  rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
1466  VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
1467 }
1468 
1469 static int do_verify(struct fsg_dev *fsg)
1470 {
1471  struct fsg_lun *curlun = fsg->curlun;
1472  u32 lba;
1473  u32 verification_length;
1474  struct fsg_buffhd *bh = fsg->next_buffhd_to_fill;
1475  loff_t file_offset, file_offset_tmp;
1476  u32 amount_left;
1477  unsigned int amount;
1478  ssize_t nread;
1479 
1480  /* Get the starting Logical Block Address and check that it's
1481  * not too big */
1482  lba = get_unaligned_be32(&fsg->cmnd[2]);
1483  if (lba >= curlun->num_sectors) {
1484  curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1485  return -EINVAL;
1486  }
1487 
1488  /* We allow DPO (Disable Page Out = don't save data in the
1489  * cache) but we don't implement it. */
1490  if ((fsg->cmnd[1] & ~0x10) != 0) {
1491  curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1492  return -EINVAL;
1493  }
1494 
1495  verification_length = get_unaligned_be16(&fsg->cmnd[7]);
1496  if (unlikely(verification_length == 0))
1497  return -EIO; // No default reply
1498 
1499  /* Prepare to carry out the file verify */
1500  amount_left = verification_length << curlun->blkbits;
1501  file_offset = ((loff_t) lba) << curlun->blkbits;
1502 
1503  /* Write out all the dirty buffers before invalidating them */
1504  fsg_lun_fsync_sub(curlun);
1505  if (signal_pending(current))
1506  return -EINTR;
1507 
1508  invalidate_sub(curlun);
1509  if (signal_pending(current))
1510  return -EINTR;
1511 
1512  /* Just try to read the requested blocks */
1513  while (amount_left > 0) {
1514 
1515  /* Figure out how much we need to read:
1516  * Try to read the remaining amount, but not more than
1517  * the buffer size.
1518  * And don't try to read past the end of the file.
1519  */
1520  amount = min((unsigned int) amount_left, mod_data.buflen);
1521  amount = min((loff_t) amount,
1522  curlun->file_length - file_offset);
1523  if (amount == 0) {
1524  curlun->sense_data =
1525  SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1526  curlun->sense_data_info = file_offset >> curlun->blkbits;
1527  curlun->info_valid = 1;
1528  break;
1529  }
1530 
1531  /* Perform the read */
1532  file_offset_tmp = file_offset;
1533  nread = vfs_read(curlun->filp,
1534  (char __user *) bh->buf,
1535  amount, &file_offset_tmp);
1536  VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
1537  (unsigned long long) file_offset,
1538  (int) nread);
1539  if (signal_pending(current))
1540  return -EINTR;
1541 
1542  if (nread < 0) {
1543  LDBG(curlun, "error in file verify: %d\n",
1544  (int) nread);
1545  nread = 0;
1546  } else if (nread < amount) {
1547  LDBG(curlun, "partial file verify: %d/%u\n",
1548  (int) nread, amount);
1549  nread = round_down(nread, curlun->blksize);
1550  }
1551  if (nread == 0) {
1552  curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
1553  curlun->sense_data_info = file_offset >> curlun->blkbits;
1554  curlun->info_valid = 1;
1555  break;
1556  }
1557  file_offset += nread;
1558  amount_left -= nread;
1559  }
1560  return 0;
1561 }
1562 
1563 
1564 /*-------------------------------------------------------------------------*/
1565 
1566 static int do_inquiry(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1567 {
1568  u8 *buf = (u8 *) bh->buf;
1569 
1570  static char vendor_id[] = "Linux ";
1571  static char product_disk_id[] = "File-Stor Gadget";
1572  static char product_cdrom_id[] = "File-CD Gadget ";
1573 
1574  if (!fsg->curlun) { // Unsupported LUNs are okay
1575  fsg->bad_lun_okay = 1;
1576  memset(buf, 0, 36);
1577  buf[0] = 0x7f; // Unsupported, no device-type
1578  buf[4] = 31; // Additional length
1579  return 36;
1580  }
1581 
1582  memset(buf, 0, 8);
1583  buf[0] = (mod_data.cdrom ? TYPE_ROM : TYPE_DISK);
1584  if (mod_data.removable)
1585  buf[1] = 0x80;
1586  buf[2] = 2; // ANSI SCSI level 2
1587  buf[3] = 2; // SCSI-2 INQUIRY data format
1588  buf[4] = 31; // Additional length
1589  // No special options
1590  sprintf(buf + 8, "%-8s%-16s%04x", vendor_id,
1591  (mod_data.cdrom ? product_cdrom_id :
1592  product_disk_id),
1593  mod_data.release);
1594  return 36;
1595 }
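/*
 * For reference, the 36-byte reply built above looks like this for a
 * removable disk LUN; the vendor and product strings are space-padded
 * to 8 and 16 bytes:
 *
 *	buf[0]  = 0x00 (TYPE_DISK)	buf[1] = 0x80 (removable)
 *	buf[2]  = 0x02 (ANSI SCSI-2)	buf[3] = 0x02 (data format)
 *	buf[4]  = 31   (additional length)
 *	buf[8..15]  = "Linux   "	buf[16..31] = "File-Stor Gadget"
 *	buf[32..35] = release number as four hex digits
 */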
1596 
1597 
1598 static int do_request_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1599 {
1600  struct fsg_lun *curlun = fsg->curlun;
1601  u8 *buf = (u8 *) bh->buf;
1602  u32 sd, sdinfo;
1603  int valid;
1604 
1605  /*
1606  * From the SCSI-2 spec., section 7.9 (Unit attention condition):
1607  *
1608  * If a REQUEST SENSE command is received from an initiator
1609  * with a pending unit attention condition (before the target
1610  * generates the contingent allegiance condition), then the
1611  * target shall either:
1612  * a) report any pending sense data and preserve the unit
1613  * attention condition on the logical unit, or,
1614  * b) report the unit attention condition, may discard any
1615  * pending sense data, and clear the unit attention
1616  * condition on the logical unit for that initiator.
1617  *
1618  * FSG normally uses option a); enable this code to use option b).
1619  */
1620 #if 0
1621  if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
1622  curlun->sense_data = curlun->unit_attention_data;
1623  curlun->unit_attention_data = SS_NO_SENSE;
1624  }
1625 #endif
1626 
1627  if (!curlun) { // Unsupported LUNs are okay
1628  fsg->bad_lun_okay = 1;
1629  sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
1630  sdinfo = 0;
1631  valid = 0;
1632  } else {
1633  sd = curlun->sense_data;
1634  sdinfo = curlun->sense_data_info;
1635  valid = curlun->info_valid << 7;
1636  curlun->sense_data = SS_NO_SENSE;
1637  curlun->sense_data_info = 0;
1638  curlun->info_valid = 0;
1639  }
1640 
1641  memset(buf, 0, 18);
1642  buf[0] = valid | 0x70; // Valid, current error
1643  buf[2] = SK(sd);
1644  put_unaligned_be32(sdinfo, &buf[3]); /* Sense information */
1645  buf[7] = 18 - 8; // Additional sense length
1646  buf[12] = ASC(sd);
1647  buf[13] = ASCQ(sd);
1648  return 18;
1649 }
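/*
 * Example: after a READ beyond the end of the medium, the 18-byte reply
 * carries buf[0] = 0xf0 (valid, current error), buf[2] = 0x05 (ILLEGAL
 * REQUEST), buf[3..6] = the offending LBA, buf[7] = 10 (additional length),
 * and buf[12]/buf[13] = 0x21/0x00 (LOGICAL BLOCK ADDRESS OUT OF RANGE).
 */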
1650 
1651 
1652 static int do_read_capacity(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1653 {
1654  struct fsg_lun *curlun = fsg->curlun;
1655  u32 lba = get_unaligned_be32(&fsg->cmnd[2]);
1656  int pmi = fsg->cmnd[8];
1657  u8 *buf = (u8 *) bh->buf;
1658 
1659  /* Check the PMI and LBA fields */
1660  if (pmi > 1 || (pmi == 0 && lba != 0)) {
1661  curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1662  return -EINVAL;
1663  }
1664 
1665  put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
1666  /* Max logical block */
1667  put_unaligned_be32(curlun->blksize, &buf[4]); /* Block length */
1668  return 8;
1669 }
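/*
 * Worked example: a 1-GiB backing file with 512-byte blocks gives
 * num_sectors = 2097152, so the reply is 0x001fffff (last valid LBA)
 * followed by 0x00000200 (block length), both big-endian.
 */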
1670 
1671 
1672 static int do_read_header(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1673 {
1674  struct fsg_lun *curlun = fsg->curlun;
1675  int msf = fsg->cmnd[1] & 0x02;
1676  u32 lba = get_unaligned_be32(&fsg->cmnd[2]);
1677  u8 *buf = (u8 *) bh->buf;
1678 
1679  if ((fsg->cmnd[1] & ~0x02) != 0) { /* Mask away MSF */
1680  curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1681  return -EINVAL;
1682  }
1683  if (lba >= curlun->num_sectors) {
1684  curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1685  return -EINVAL;
1686  }
1687 
1688  memset(buf, 0, 8);
1689  buf[0] = 0x01; /* 2048 bytes of user data, rest is EC */
1690  store_cdrom_address(&buf[4], msf, lba);
1691  return 8;
1692 }
1693 
1694 
1695 static int do_read_toc(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1696 {
1697  struct fsg_lun *curlun = fsg->curlun;
1698  int msf = fsg->cmnd[1] & 0x02;
1699  int start_track = fsg->cmnd[6];
1700  u8 *buf = (u8 *) bh->buf;
1701 
1702  if ((fsg->cmnd[1] & ~0x02) != 0 || /* Mask away MSF */
1703  start_track > 1) {
1704  curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1705  return -EINVAL;
1706  }
1707 
1708  memset(buf, 0, 20);
1709  buf[1] = (20-2); /* TOC data length */
1710  buf[2] = 1; /* First track number */
1711  buf[3] = 1; /* Last track number */
1712  buf[5] = 0x16; /* Data track, copying allowed */
1713  buf[6] = 0x01; /* Only track is number 1 */
1714  store_cdrom_address(&buf[8], msf, 0);
1715 
1716  buf[13] = 0x16; /* Lead-out track is data */
1717  buf[14] = 0xAA; /* Lead-out track number */
1718  store_cdrom_address(&buf[16], msf, curlun->num_sectors);
1719  return 20;
1720 }
1721 
1722 
1723 static int do_mode_sense(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1724 {
1725  struct fsg_lun *curlun = fsg->curlun;
1726  int mscmnd = fsg->cmnd[0];
1727  u8 *buf = (u8 *) bh->buf;
1728  u8 *buf0 = buf;
1729  int pc, page_code;
1730  int changeable_values, all_pages;
1731  int valid_page = 0;
1732  int len, limit;
1733 
1734  if ((fsg->cmnd[1] & ~0x08) != 0) { // Mask away DBD
1735  curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1736  return -EINVAL;
1737  }
1738  pc = fsg->cmnd[2] >> 6;
1739  page_code = fsg->cmnd[2] & 0x3f;
1740  if (pc == 3) {
1741  curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
1742  return -EINVAL;
1743  }
1744  changeable_values = (pc == 1);
1745  all_pages = (page_code == 0x3f);
1746 
1747  /* Write the mode parameter header. Fixed values are: default
1748  * medium type, no cache control (DPOFUA), and no block descriptors.
1749  * The only variable value is the WriteProtect bit. We will fill in
1750  * the mode data length later. */
1751  memset(buf, 0, 8);
1752  if (mscmnd == MODE_SENSE) {
1753  buf[2] = (curlun->ro ? 0x80 : 0x00); // WP, DPOFUA
1754  buf += 4;
1755  limit = 255;
1756  } else { // MODE_SENSE_10
1757  buf[3] = (curlun->ro ? 0x80 : 0x00); // WP, DPOFUA
1758  buf += 8;
1759  limit = 65535; // Should really be mod_data.buflen
1760  }
1761 
1762  /* No block descriptors */
1763 
1764  /* The mode pages, in numerical order. The only page we support
1765  * is the Caching page. */
1766  if (page_code == 0x08 || all_pages) {
1767  valid_page = 1;
1768  buf[0] = 0x08; // Page code
1769  buf[1] = 10; // Page length
1770  memset(buf+2, 0, 10); // None of the fields are changeable
1771 
1772  if (!changeable_values) {
1773  buf[2] = 0x04; // Write cache enable,
1774  // Read cache not disabled
1775  // No cache retention priorities
1776  put_unaligned_be16(0xffff, &buf[4]);
1777  /* Don't disable prefetch */
1778  /* Minimum prefetch = 0 */
1779  put_unaligned_be16(0xffff, &buf[8]);
1780  /* Maximum prefetch */
1781  put_unaligned_be16(0xffff, &buf[10]);
1782  /* Maximum prefetch ceiling */
1783  }
1784  buf += 12;
1785  }
1786 
1787  /* Check that a valid page was requested and the mode data length
1788  * isn't too long. */
1789  len = buf - buf0;
1790  if (!valid_page || len > limit) {
1791  curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1792  return -EINVAL;
1793  }
1794 
1795  /* Store the mode data length */
1796  if (mscmnd == MODE_SENSE)
1797  buf0[0] = len - 1;
1798  else
1799  put_unaligned_be16(len - 2, buf0);
1800  return len;
1801 }
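/* So a successful reply here is 16 bytes for MODE SENSE(6) (4-byte
 * header plus the 12-byte Caching page) or 20 bytes for MODE SENSE(10)
 * (8-byte header plus the same page), with no block descriptors in
 * either case. */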
1802 
1803 
1804 static int do_start_stop(struct fsg_dev *fsg)
1805 {
1806  struct fsg_lun *curlun = fsg->curlun;
1807  int loej, start;
1808 
1809  if (!mod_data.removable) {
1810  curlun->sense_data = SS_INVALID_COMMAND;
1811  return -EINVAL;
1812  }
1813 
1814  // int immed = fsg->cmnd[1] & 0x01;
1815  loej = fsg->cmnd[4] & 0x02;
1816  start = fsg->cmnd[4] & 0x01;
1817 
1818 #ifdef CONFIG_USB_FILE_STORAGE_TEST
1819  if ((fsg->cmnd[1] & ~0x01) != 0 || // Mask away Immed
1820  (fsg->cmnd[4] & ~0x03) != 0) { // Mask LoEj, Start
1821  curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1822  return -EINVAL;
1823  }
1824 
1825  if (!start) {
1826 
1827  /* Are we allowed to unload the media? */
1828  if (curlun->prevent_medium_removal) {
1829  LDBG(curlun, "unload attempt prevented\n");
1830  curlun->sense_data = SS_MEDIUM_REMOVAL_PREVENTED;
1831  return -EINVAL;
1832  }
1833  if (loej) { // Simulate an unload/eject
1834  up_read(&fsg->filesem);
1835  down_write(&fsg->filesem);
1836  fsg_lun_close(curlun);
1837  up_write(&fsg->filesem);
1838  down_read(&fsg->filesem);
1839  }
1840  } else {
1841 
1842  /* Our emulation doesn't support mounting; the medium is
1843  * available for use as soon as it is loaded. */
1844  if (!fsg_lun_is_open(curlun)) {
1845  curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
1846  return -EINVAL;
1847  }
1848  }
1849 #endif
1850  return 0;
1851 }
1852 
1853 
1854 static int do_prevent_allow(struct fsg_dev *fsg)
1855 {
1856  struct fsg_lun *curlun = fsg->curlun;
1857  int prevent;
1858 
1859  if (!mod_data.removable) {
1860  curlun->sense_data = SS_INVALID_COMMAND;
1861  return -EINVAL;
1862  }
1863 
1864  prevent = fsg->cmnd[4] & 0x01;
1865  if ((fsg->cmnd[4] & ~0x01) != 0) { // Mask away Prevent
1866  curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1867  return -EINVAL;
1868  }
1869 
1870  if (curlun->prevent_medium_removal && !prevent)
1871  fsg_lun_fsync_sub(curlun);
1872  curlun->prevent_medium_removal = prevent;
1873  return 0;
1874 }
1875 
1876 
1877 static int do_read_format_capacities(struct fsg_dev *fsg,
1878  struct fsg_buffhd *bh)
1879 {
1880  struct fsg_lun *curlun = fsg->curlun;
1881  u8 *buf = (u8 *) bh->buf;
1882 
1883  buf[0] = buf[1] = buf[2] = 0;
1884  buf[3] = 8; // Only the Current/Maximum Capacity Descriptor
1885  buf += 4;
1886 
1887  put_unaligned_be32(curlun->num_sectors, &buf[0]);
1888  /* Number of blocks */
1889  put_unaligned_be32(curlun->blksize, &buf[4]); /* Block length */
1890  buf[4] = 0x02; /* Current capacity */
1891  return 12;
1892 }
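/* The 12-byte reply above is a Capacity List header (byte 3 = list
 * length of 8) followed by one Current/Maximum Capacity descriptor:
 * number of blocks in bytes 0-3, descriptor code 0x02 ("formatted
 * media") in byte 4, and the low three bytes of the block length in
 * bytes 5-7; the buf[4] assignment deliberately overwrites the top
 * byte stored by the preceding put_unaligned_be32(). */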
1893 
1894 
1895 static int do_mode_select(struct fsg_dev *fsg, struct fsg_buffhd *bh)
1896 {
1897  struct fsg_lun *curlun = fsg->curlun;
1898 
1899  /* We don't support MODE SELECT */
1900  curlun->sense_data = SS_INVALID_COMMAND;
1901  return -EINVAL;
1902 }
1903 
1904 
1905 /*-------------------------------------------------------------------------*/
1906 
1907 static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
1908 {
1909  int rc;
1910 
1911  rc = fsg_set_halt(fsg, fsg->bulk_in);
1912  if (rc == -EAGAIN)
1913  VDBG(fsg, "delayed bulk-in endpoint halt\n");
1914  while (rc != 0) {
1915  if (rc != -EAGAIN) {
1916  WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
1917  rc = 0;
1918  break;
1919  }
1920 
1921  /* Wait for a short time and then try again */
1922  if (msleep_interruptible(100) != 0)
1923  return -EINTR;
1924  rc = usb_ep_set_halt(fsg->bulk_in);
1925  }
1926  return rc;
1927 }
1928 
1929 static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
1930 {
1931  int rc;
1932 
1933  DBG(fsg, "bulk-in set wedge\n");
1934  rc = usb_ep_set_wedge(fsg->bulk_in);
1935  if (rc == -EAGAIN)
1936  VDBG(fsg, "delayed bulk-in endpoint wedge\n");
1937  while (rc != 0) {
1938  if (rc != -EAGAIN) {
1939  WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
1940  rc = 0;
1941  break;
1942  }
1943 
1944  /* Wait for a short time and then try again */
1945  if (msleep_interruptible(100) != 0)
1946  return -EINTR;
1947  rc = usb_ep_set_wedge(fsg->bulk_in);
1948  }
1949  return rc;
1950 }
1951 
1952 static int throw_away_data(struct fsg_dev *fsg)
1953 {
1954  struct fsg_buffhd *bh;
1955  u32 amount;
1956  int rc;
1957 
1958  while ((bh = fsg->next_buffhd_to_drain)->state != BUF_STATE_EMPTY ||
1959  fsg->usb_amount_left > 0) {
1960 
1961  /* Throw away the data in a filled buffer */
1962  if (bh->state == BUF_STATE_FULL) {
1963  smp_rmb();
1964  bh->state = BUF_STATE_EMPTY;
1965  fsg->next_buffhd_to_drain = bh->next;
1966 
1967  /* A short packet or an error ends everything */
1968  if (bh->outreq->actual < bh->bulk_out_intended_length ||
1969  bh->outreq->status != 0) {
1970  raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
1971  return -EINTR;
1972  }
1973  continue;
1974  }
1975 
1976  /* Try to submit another request if we need one */
1977  bh = fsg->next_buffhd_to_fill;
1978  if (bh->state == BUF_STATE_EMPTY && fsg->usb_amount_left > 0) {
1979  amount = min(fsg->usb_amount_left,
1980  (u32) mod_data.buflen);
1981 
1982  /* Except at the end of the transfer, amount will be
1983  * equal to the buffer size, which is divisible by
1984  * the bulk-out maxpacket size.
1985  */
1986  set_bulk_out_req_length(fsg, bh, amount);
1987  start_transfer(fsg, fsg->bulk_out, bh->outreq,
1988  &bh->outreq_busy, &bh->state);
1989  fsg->next_buffhd_to_fill = bh->next;
1990  fsg->usb_amount_left -= amount;
1991  continue;
1992  }
1993 
1994  /* Otherwise wait for something to happen */
1995  rc = sleep_thread(fsg);
1996  if (rc)
1997  return rc;
1998  }
1999  return 0;
2000 }
2001 
2002 
2003 static int finish_reply(struct fsg_dev *fsg)
2004 {
2005  struct fsg_buffhd *bh = fsg->next_buffhd_to_fill;
2006  int rc = 0;
2007 
2008  switch (fsg->data_dir) {
2009  case DATA_DIR_NONE:
2010  break; // Nothing to send
2011 
2012  /* If we don't know whether the host wants to read or write,
2013  * this must be CB or CBI with an unknown command. We mustn't
2014  * try to send or receive any data. So stall both bulk pipes
2015  * if we can and wait for a reset. */
2016  case DATA_DIR_UNKNOWN:
2017  if (mod_data.can_stall) {
2018  fsg_set_halt(fsg, fsg->bulk_out);
2019  rc = halt_bulk_in_endpoint(fsg);
2020  }
2021  break;
2022 
2023  /* All but the last buffer of data must have already been sent */
2024  case DATA_DIR_TO_HOST:
2025  if (fsg->data_size == 0)
2026  ; // Nothing to send
2027 
2028  /* If there's no residue, simply send the last buffer */
2029  else if (fsg->residue == 0) {
2030  bh->inreq->zero = 0;
2031  start_transfer(fsg, fsg->bulk_in, bh->inreq,
2032  &bh->inreq_busy, &bh->state);
2033  fsg->next_buffhd_to_fill = bh->next;
2034  }
2035 
2036  /* There is a residue. For CB and CBI, simply mark the end
2037  * of the data with a short packet. However, if we are
2038  * allowed to stall, there was no data at all (residue ==
2039  * data_size), and the command failed (invalid LUN or
2040  * sense data is set), then halt the bulk-in endpoint
2041  * instead. */
2042  else if (!transport_is_bbb()) {
2043  if (mod_data.can_stall &&
2044  fsg->residue == fsg->data_size &&
2045  (!fsg->curlun || fsg->curlun->sense_data != SS_NO_SENSE)) {
2046  bh->state = BUF_STATE_EMPTY;
2047  rc = halt_bulk_in_endpoint(fsg);
2048  } else {
2049  bh->inreq->zero = 1;
2050  start_transfer(fsg, fsg->bulk_in, bh->inreq,
2051  &bh->inreq_busy, &bh->state);
2052  fsg->next_buffhd_to_fill = bh->next;
2053  }
2054  }
2055 
2056  /*
2057  * For Bulk-only, mark the end of the data with a short
2058  * packet. If we are allowed to stall, halt the bulk-in
2059  * endpoint. (Note: This violates the Bulk-Only Transport
2060  * specification, which requires us to pad the data if we
2061  * don't halt the endpoint. Presumably nobody will mind.)
2062  */
2063  else {
2064  bh->inreq->zero = 1;
2065  start_transfer(fsg, fsg->bulk_in, bh->inreq,
2066  &bh->inreq_busy, &bh->state);
2067  fsg->next_buffhd_to_fill = bh->next;
2068  if (mod_data.can_stall)
2069  rc = halt_bulk_in_endpoint(fsg);
2070  }
2071  break;
2072 
2073  /* We have processed all we want from the data the host has sent.
2074  * There may still be outstanding bulk-out requests. */
2075  case DATA_DIR_FROM_HOST:
2076  if (fsg->residue == 0)
2077  ; // Nothing to receive
2078 
2079  /* Did the host stop sending unexpectedly early? */
2080  else if (fsg->short_packet_received) {
2081  raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
2082  rc = -EINTR;
2083  }
2084 
2085  /* We haven't processed all the incoming data. Even though
2086  * we may be allowed to stall, doing so would cause a race.
2087  * The controller may already have ACK'ed all the remaining
2088  * bulk-out packets, in which case the host wouldn't see a
2089  * STALL. Not realizing the endpoint was halted, it wouldn't
2090  * clear the halt -- leading to problems later on. */
2091 #if 0
2092  else if (mod_data.can_stall) {
2093  fsg_set_halt(fsg, fsg->bulk_out);
2094  raise_exception(fsg, FSG_STATE_ABORT_BULK_OUT);
2095  rc = -EINTR;
2096  }
2097 #endif
2098 
2099  /* We can't stall. Read in the excess data and throw it
2100  * all away. */
2101  else
2102  rc = throw_away_data(fsg);
2103  break;
2104  }
2105  return rc;
2106 }
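/* Throughout the cases above, fsg->residue still holds however much of
 * the expected transfer was never carried out; for Bulk-only transfers
 * that value is reported back to the host in the CSW built by
 * send_status() below. */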
2107 
2108 
2109 static int send_status(struct fsg_dev *fsg)
2110 {
2111  struct fsg_lun *curlun = fsg->curlun;
2112  struct fsg_buffhd *bh;
2113  int rc;
2114  u8 status = US_BULK_STAT_OK;
2115  u32 sd, sdinfo = 0;
2116 
2117  /* Wait for the next buffer to become available */
2118  bh = fsg->next_buffhd_to_fill;
2119  while (bh->state != BUF_STATE_EMPTY) {
2120  rc = sleep_thread(fsg);
2121  if (rc)
2122  return rc;
2123  }
2124 
2125  if (curlun) {
2126  sd = curlun->sense_data;
2127  sdinfo = curlun->sense_data_info;
2128  } else if (fsg->bad_lun_okay)
2129  sd = SS_NO_SENSE;
2130  else
2131  sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
2132 
2133  if (fsg->phase_error) {
2134  DBG(fsg, "sending phase-error status\n");
2135  status = US_BULK_STAT_PHASE;
2136  sd = SS_INVALID_COMMAND;
2137  } else if (sd != SS_NO_SENSE) {
2138  DBG(fsg, "sending command-failure status\n");
2139  status = US_BULK_STAT_FAIL;
2140  VDBG(fsg, " sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
2141  " info x%x\n",
2142  SK(sd), ASC(sd), ASCQ(sd), sdinfo);
2143  }
2144 
2145  if (transport_is_bbb()) {
2146  struct bulk_cs_wrap *csw = bh->buf;
2147 
2148  /* Store and send the Bulk-only CSW */
2149  csw->Signature = cpu_to_le32(US_BULK_CS_SIGN);
2150  csw->Tag = fsg->tag;
2151  csw->Residue = cpu_to_le32(fsg->residue);
2152  csw->Status = status;
2153 
2154  bh->inreq->length = US_BULK_CS_WRAP_LEN;
2155  bh->inreq->zero = 0;
2156  start_transfer(fsg, fsg->bulk_in, bh->inreq,
2157  &bh->inreq_busy, &bh->state);
2158 
2159  } else if (mod_data.transport_type == USB_PR_CB) {
2160 
2161  /* Control-Bulk transport has no status phase! */
2162  return 0;
2163 
2164  } else { // USB_PR_CBI
2165  struct interrupt_data *buf = bh->buf;
2166 
2167  /* Store and send the Interrupt data. UFI sends the ASC
2168  * and ASCQ bytes. Everything else sends a Type (which
2169  * is always 0) and the status Value. */
2170  if (mod_data.protocol_type == USB_SC_UFI) {
2171  buf->bType = ASC(sd);
2172  buf->bValue = ASCQ(sd);
2173  } else {
2174  buf->bType = 0;
2175  buf->bValue = status;
2176  }
2177  fsg->intreq->length = CBI_INTERRUPT_DATA_LEN;
2178 
2179  fsg->intr_buffhd = bh; // Point to the right buffhd
2180  fsg->intreq->buf = bh->inreq->buf;
2181  fsg->intreq->context = bh;
2182  start_transfer(fsg, fsg->intr_in, fsg->intreq,
2183  &fsg->intreq_busy, &bh->state);
2184  }
2185 
2186  fsg->next_buffhd_to_fill = bh->next;
2187  return 0;
2188 }
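/* For the Bulk-only case the 13-byte CSW sent above follows the usual
 * Bulk-Only Transport layout: dCSWSignature ('USBS') in bytes 0-3,
 * dCSWTag (echoing the matching CBW) in bytes 4-7, dCSWDataResidue in
 * bytes 8-11, and bCSWStatus in byte 12 (0 = passed, 1 = failed,
 * 2 = phase error). */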
2189 
2190 
2191 /*-------------------------------------------------------------------------*/
2192 
2193 /* Check whether the command is properly formed and whether its data size
2194  * and direction agree with the values we already have. */
2195 static int check_command(struct fsg_dev *fsg, int cmnd_size,
2196  enum data_direction data_dir, unsigned int mask,
2197  int needs_medium, const char *name)
2198 {
2199  int i;
2200  int lun = fsg->cmnd[1] >> 5;
2201  static const char dirletter[4] = {'u', 'o', 'i', 'n'};
2202  char hdlen[20];
2203  struct fsg_lun *curlun;
2204 
2205  /* Adjust the expected cmnd_size for protocol encapsulation padding.
2206  * Transparent SCSI doesn't pad. */
2207  if (protocol_is_scsi())
2208  ;
2209 
2210  /* There's some disagreement as to whether RBC pads commands or not.
2211  * We'll play it safe and accept either form. */
2212  else if (mod_data.protocol_type == USB_SC_RBC) {
2213  if (fsg->cmnd_size == 12)
2214  cmnd_size = 12;
2215 
2216  /* All the other protocols pad to 12 bytes */
2217  } else
2218  cmnd_size = 12;
2219 
2220  hdlen[0] = 0;
2221  if (fsg->data_dir != DATA_DIR_UNKNOWN)
2222  sprintf(hdlen, ", H%c=%u", dirletter[(int) fsg->data_dir],
2223  fsg->data_size);
2224  VDBG(fsg, "SCSI command: %s; Dc=%d, D%c=%u; Hc=%d%s\n",
2225  name, cmnd_size, dirletter[(int) data_dir],
2226  fsg->data_size_from_cmnd, fsg->cmnd_size, hdlen);
2227 
2228  /* We can't reply at all until we know the correct data direction
2229  * and size. */
2230  if (fsg->data_size_from_cmnd == 0)
2231  data_dir = DATA_DIR_NONE;
2232  if (fsg->data_dir == DATA_DIR_UNKNOWN) { // CB or CBI
2233  fsg->data_dir = data_dir;
2234  fsg->data_size = fsg->data_size_from_cmnd;
2235 
2236  } else { // Bulk-only
2237  if (fsg->data_size < fsg->data_size_from_cmnd) {
2238 
2239  /* Host data size < Device data size is a phase error.
2240  * Carry out the command, but only transfer as much
2241  * as we are allowed. */
2242  fsg->data_size_from_cmnd = fsg->data_size;
2243  fsg->phase_error = 1;
2244  }
2245  }
2246  fsg->residue = fsg->usb_amount_left = fsg->data_size;
2247 
2248  /* Conflicting data directions is a phase error */
2249  if (fsg->data_dir != data_dir && fsg->data_size_from_cmnd > 0) {
2250  fsg->phase_error = 1;
2251  return -EINVAL;
2252  }
2253 
2254  /* Verify the length of the command itself */
2255  if (cmnd_size != fsg->cmnd_size) {
2256 
2257  /* Special case workaround: There are plenty of buggy SCSI
2258  * implementations. Many have issues with cbw->Length
2259  * field passing a wrong command size. For those cases we
2260  * always try to work around the problem by using the length
2261  * sent by the host side provided it is at least as large
2262  * as the correct command length.
2263  * Examples of such cases would be MS-Windows, which issues
2264  * REQUEST SENSE with cbw->Length == 12 where it should
2265  * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
2266  * REQUEST SENSE with cbw->Length == 10 where it should
2267  * be 6 as well.
2268  */
2269  if (cmnd_size <= fsg->cmnd_size) {
2270  DBG(fsg, "%s is buggy! Expected length %d "
2271  "but we got %d\n", name,
2272  cmnd_size, fsg->cmnd_size);
2273  cmnd_size = fsg->cmnd_size;
2274  } else {
2275  fsg->phase_error = 1;
2276  return -EINVAL;
2277  }
2278  }
2279 
2280  /* Check that the LUN values are consistent */
2281  if (transport_is_bbb()) {
2282  if (fsg->lun != lun)
2283  DBG(fsg, "using LUN %d from CBW, "
2284  "not LUN %d from CDB\n",
2285  fsg->lun, lun);
2286  }
2287 
2288  /* Check the LUN */
2289  curlun = fsg->curlun;
2290  if (curlun) {
2291  if (fsg->cmnd[0] != REQUEST_SENSE) {
2292  curlun->sense_data = SS_NO_SENSE;
2293  curlun->sense_data_info = 0;
2294  curlun->info_valid = 0;
2295  }
2296  } else {
2297  fsg->bad_lun_okay = 0;
2298 
2299  /* INQUIRY and REQUEST SENSE commands are explicitly allowed
2300  * to use unsupported LUNs; all others may not. */
2301  if (fsg->cmnd[0] != INQUIRY &&
2302  fsg->cmnd[0] != REQUEST_SENSE) {
2303  DBG(fsg, "unsupported LUN %d\n", fsg->lun);
2304  return -EINVAL;
2305  }
2306  }
2307 
2308  /* If a unit attention condition exists, only INQUIRY and
2309  * REQUEST SENSE commands are allowed; anything else must fail. */
2310  if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
2311  fsg->cmnd[0] != INQUIRY &&
2312  fsg->cmnd[0] != REQUEST_SENSE) {
2313  curlun->sense_data = curlun->unit_attention_data;
2314  curlun->unit_attention_data = SS_NO_SENSE;
2315  return -EINVAL;
2316  }
2317 
2318  /* Check that only command bytes listed in the mask are non-zero */
2319  fsg->cmnd[1] &= 0x1f; // Mask away the LUN
2320  for (i = 1; i < cmnd_size; ++i) {
2321  if (fsg->cmnd[i] && !(mask & (1 << i))) {
2322  if (curlun)
2323  curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
2324  return -EINVAL;
2325  }
2326  }
2327 
2328  /* If the medium isn't mounted and the command needs to access
2329  * it, return an error. */
2330  if (curlun && !fsg_lun_is_open(curlun) && needs_medium) {
2331  curlun->sense_data = SS_MEDIUM_NOT_PRESENT;
2332  return -EINVAL;
2333  }
2334 
2335  return 0;
2336 }
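/* As an example of the "mask" argument: for READ(10) the caller passes
 * (1<<1) | (0xf<<2) | (3<<7), meaning that apart from the opcode only
 * CDB byte 1 (flags, once the LUN bits are masked off), bytes 2-5 (the
 * logical block address) and bytes 7-8 (the transfer length) may be
 * non-zero; any other non-zero byte earns INVALID FIELD IN CDB. */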
2337 
2338 /* wrapper of check_command for data size in blocks handling */
2339 static int check_command_size_in_blocks(struct fsg_dev *fsg, int cmnd_size,
2340  enum data_direction data_dir, unsigned int mask,
2341  int needs_medium, const char *name)
2342 {
2343  if (fsg->curlun)
2344  fsg->data_size_from_cmnd <<= fsg->curlun->blkbits;
2345  return check_command(fsg, cmnd_size, data_dir,
2346  mask, needs_medium, name);
2347 }
2348 
2349 static int do_scsi_command(struct fsg_dev *fsg)
2350 {
2351  struct fsg_buffhd *bh;
2352  int rc;
2353  int reply = -EINVAL;
2354  int i;
2355  static char unknown[16];
2356 
2357  dump_cdb(fsg);
2358 
2359  /* Wait for the next buffer to become available for data or status */
2360  bh = fsg->next_buffhd_to_drain = fsg->next_buffhd_to_fill;
2361  while (bh->state != BUF_STATE_EMPTY) {
2362  rc = sleep_thread(fsg);
2363  if (rc)
2364  return rc;
2365  }
2366  fsg->phase_error = 0;
2367  fsg->short_packet_received = 0;
2368 
2369  down_read(&fsg->filesem); // We're using the backing file
2370  switch (fsg->cmnd[0]) {
2371 
2372  case INQUIRY:
2373  fsg->data_size_from_cmnd = fsg->cmnd[4];
2374  if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2375  (1<<4), 0,
2376  "INQUIRY")) == 0)
2377  reply = do_inquiry(fsg, bh);
2378  break;
2379 
2380  case MODE_SELECT:
2381  fsg->data_size_from_cmnd = fsg->cmnd[4];
2382  if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
2383  (1<<1) | (1<<4), 0,
2384  "MODE SELECT(6)")) == 0)
2385  reply = do_mode_select(fsg, bh);
2386  break;
2387 
2388  case MODE_SELECT_10:
2389  fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2390  if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
2391  (1<<1) | (3<<7), 0,
2392  "MODE SELECT(10)")) == 0)
2393  reply = do_mode_select(fsg, bh);
2394  break;
2395 
2396  case MODE_SENSE:
2397  fsg->data_size_from_cmnd = fsg->cmnd[4];
2398  if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2399  (1<<1) | (1<<2) | (1<<4), 0,
2400  "MODE SENSE(6)")) == 0)
2401  reply = do_mode_sense(fsg, bh);
2402  break;
2403 
2404  case MODE_SENSE_10:
2405  fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2406  if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2407  (1<<1) | (1<<2) | (3<<7), 0,
2408  "MODE SENSE(10)")) == 0)
2409  reply = do_mode_sense(fsg, bh);
2410  break;
2411 
2412  case ALLOW_MEDIUM_REMOVAL:
2413  fsg->data_size_from_cmnd = 0;
2414  if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
2415  (1<<4), 0,
2416  "PREVENT-ALLOW MEDIUM REMOVAL")) == 0)
2417  reply = do_prevent_allow(fsg);
2418  break;
2419 
2420  case READ_6:
2421  i = fsg->cmnd[4];
2422  fsg->data_size_from_cmnd = (i == 0) ? 256 : i;
2423  if ((reply = check_command_size_in_blocks(fsg, 6,
2424  DATA_DIR_TO_HOST,
2425  (7<<1) | (1<<4), 1,
2426  "READ(6)")) == 0)
2427  reply = do_read(fsg);
2428  break;
2429 
2430  case READ_10:
2431  fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2432  if ((reply = check_command_size_in_blocks(fsg, 10,
2433  DATA_DIR_TO_HOST,
2434  (1<<1) | (0xf<<2) | (3<<7), 1,
2435  "READ(10)")) == 0)
2436  reply = do_read(fsg);
2437  break;
2438 
2439  case READ_12:
2440  fsg->data_size_from_cmnd = get_unaligned_be32(&fsg->cmnd[6]);
2441  if ((reply = check_command_size_in_blocks(fsg, 12,
2442  DATA_DIR_TO_HOST,
2443  (1<<1) | (0xf<<2) | (0xf<<6), 1,
2444  "READ(12)")) == 0)
2445  reply = do_read(fsg);
2446  break;
2447 
2448  case READ_CAPACITY:
2449  fsg->data_size_from_cmnd = 8;
2450  if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2451  (0xf<<2) | (1<<8), 1,
2452  "READ CAPACITY")) == 0)
2453  reply = do_read_capacity(fsg, bh);
2454  break;
2455 
2456  case READ_HEADER:
2457  if (!mod_data.cdrom)
2458  goto unknown_cmnd;
2459  fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2460  if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2461  (3<<7) | (0x1f<<1), 1,
2462  "READ HEADER")) == 0)
2463  reply = do_read_header(fsg, bh);
2464  break;
2465 
2466  case READ_TOC:
2467  if (!mod_data.cdrom)
2468  goto unknown_cmnd;
2469  fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2470  if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2471  (7<<6) | (1<<1), 1,
2472  "READ TOC")) == 0)
2473  reply = do_read_toc(fsg, bh);
2474  break;
2475 
2476  case READ_FORMAT_CAPACITIES:
2477  fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2478  if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
2479  (3<<7), 1,
2480  "READ FORMAT CAPACITIES")) == 0)
2481  reply = do_read_format_capacities(fsg, bh);
2482  break;
2483 
2484  case REQUEST_SENSE:
2485  fsg->data_size_from_cmnd = fsg->cmnd[4];
2486  if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
2487  (1<<4), 0,
2488  "REQUEST SENSE")) == 0)
2489  reply = do_request_sense(fsg, bh);
2490  break;
2491 
2492  case START_STOP:
2493  fsg->data_size_from_cmnd = 0;
2494  if ((reply = check_command(fsg, 6, DATA_DIR_NONE,
2495  (1<<1) | (1<<4), 0,
2496  "START-STOP UNIT")) == 0)
2497  reply = do_start_stop(fsg);
2498  break;
2499 
2500  case SYNCHRONIZE_CACHE:
2501  fsg->data_size_from_cmnd = 0;
2502  if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
2503  (0xf<<2) | (3<<7), 1,
2504  "SYNCHRONIZE CACHE")) == 0)
2505  reply = do_synchronize_cache(fsg);
2506  break;
2507 
2508  case TEST_UNIT_READY:
2509  fsg->data_size_from_cmnd = 0;
2510  reply = check_command(fsg, 6, DATA_DIR_NONE,
2511  0, 1,
2512  "TEST UNIT READY");
2513  break;
2514 
2515  /* Although optional, this command is used by MS-Windows. We
2516  * support a minimal version: BytChk must be 0. */
2517  case VERIFY:
2518  fsg->data_size_from_cmnd = 0;
2519  if ((reply = check_command(fsg, 10, DATA_DIR_NONE,
2520  (1<<1) | (0xf<<2) | (3<<7), 1,
2521  "VERIFY")) == 0)
2522  reply = do_verify(fsg);
2523  break;
2524 
2525  case WRITE_6:
2526  i = fsg->cmnd[4];
2527  fsg->data_size_from_cmnd = (i == 0) ? 256 : i;
2528  if ((reply = check_command_size_in_blocks(fsg, 6,
2529  DATA_DIR_FROM_HOST,
2530  (7<<1) | (1<<4), 1,
2531  "WRITE(6)")) == 0)
2532  reply = do_write(fsg);
2533  break;
2534 
2535  case WRITE_10:
2536  fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
2537  if ((reply = check_command_size_in_blocks(fsg, 10,
2538  DATA_DIR_FROM_HOST,
2539  (1<<1) | (0xf<<2) | (3<<7), 1,
2540  "WRITE(10)")) == 0)
2541  reply = do_write(fsg);
2542  break;
2543 
2544  case WRITE_12:
2545  fsg->data_size_from_cmnd = get_unaligned_be32(&fsg->cmnd[6]);
2546  if ((reply = check_command_size_in_blocks(fsg, 12,
2547  DATA_DIR_FROM_HOST,
2548  (1<<1) | (0xf<<2) | (0xf<<6), 1,
2549  "WRITE(12)")) == 0)
2550  reply = do_write(fsg);
2551  break;
2552 
2553  /* Some mandatory commands that we recognize but don't implement.
2554  * They don't mean much in this setting. It's left as an exercise
2555  * for anyone interested to implement RESERVE and RELEASE in terms
2556  * of Posix locks. */
2557  case FORMAT_UNIT:
2558  case RELEASE:
2559  case RESERVE:
2560  case SEND_DIAGNOSTIC:
2561  // Fall through
2562 
2563  default:
2564  unknown_cmnd:
2565  fsg->data_size_from_cmnd = 0;
2566  sprintf(unknown, "Unknown x%02x", fsg->cmnd[0]);
2567  if ((reply = check_command(fsg, fsg->cmnd_size,
2568  DATA_DIR_UNKNOWN, ~0, 0, unknown)) == 0) {
2569  fsg->curlun->sense_data = SS_INVALID_COMMAND;
2570  reply = -EINVAL;
2571  }
2572  break;
2573  }
2574  up_read(&fsg->filesem);
2575 
2576  if (reply == -EINTR || signal_pending(current))
2577  return -EINTR;
2578 
2579  /* Set up the single reply buffer for finish_reply() */
2580  if (reply == -EINVAL)
2581  reply = 0; // Error reply length
2582  if (reply >= 0 && fsg->data_dir == DATA_DIR_TO_HOST) {
2583  reply = min((u32) reply, fsg->data_size_from_cmnd);
2584  bh->inreq->length = reply;
2585  bh->state = BUF_STATE_FULL;
2586  fsg->residue -= reply;
2587  } // Otherwise it's already set
2588 
2589  return 0;
2590 }
2591 
2592 
2593 /*-------------------------------------------------------------------------*/
2594 
2595 static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2596 {
2597  struct usb_request *req = bh->outreq;
2598  struct bulk_cb_wrap *cbw = req->buf;
2599 
2600  /* Was this a real packet? Should it be ignored? */
2601  if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
2602  return -EINVAL;
2603 
2604  /* Is the CBW valid? */
2605  if (req->actual != US_BULK_CB_WRAP_LEN ||
2606  cbw->Signature != cpu_to_le32(
2607  US_BULK_CB_SIGN)) {
2608  DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
2609  req->actual,
2610  le32_to_cpu(cbw->Signature));
2611 
2612  /* The Bulk-only spec says we MUST stall the IN endpoint
2613  * (6.6.1), so it's unavoidable. It also says we must
2614  * retain this state until the next reset, but there's
2615  * no way to tell the controller driver it should ignore
2616  * Clear-Feature(HALT) requests.
2617  *
2618  * We aren't required to halt the OUT endpoint; instead
2619  * we can simply accept and discard any data received
2620  * until the next reset. */
2621  wedge_bulk_in_endpoint(fsg);
2622  set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
2623  return -EINVAL;
2624  }
2625 
2626  /* Is the CBW meaningful? */
2627  if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~US_BULK_FLAG_IN ||
2628  cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
2629  DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
2630  "cmdlen %u\n",
2631  cbw->Lun, cbw->Flags, cbw->Length);
2632 
2633  /* We can do anything we want here, so let's stall the
2634  * bulk pipes if we are allowed to. */
2635  if (mod_data.can_stall) {
2636  fsg_set_halt(fsg, fsg->bulk_out);
2637  halt_bulk_in_endpoint(fsg);
2638  }
2639  return -EINVAL;
2640  }
2641 
2642  /* Save the command for later */
2643  fsg->cmnd_size = cbw->Length;
2644  memcpy(fsg->cmnd, cbw->CDB, fsg->cmnd_size);
2645  if (cbw->Flags & US_BULK_FLAG_IN)
2646  fsg->data_dir = DATA_DIR_TO_HOST;
2647  else
2648  fsg->data_dir = DATA_DIR_FROM_HOST;
2649  fsg->data_size = le32_to_cpu(cbw->DataTransferLength);
2650  if (fsg->data_size == 0)
2651  fsg->data_dir = DATA_DIR_NONE;
2652  fsg->lun = cbw->Lun;
2653  fsg->tag = cbw->Tag;
2654  return 0;
2655 }
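/* The 31-byte CBW validated above follows the Bulk-Only Transport
 * layout: dCBWSignature ('USBC') in bytes 0-3, dCBWTag in bytes 4-7,
 * dCBWDataTransferLength in bytes 8-11, bmCBWFlags in byte 12 (bit 7
 * set means data flows IN, to the host), bCBWLUN in byte 13,
 * bCBWCBLength (1-16) in byte 14, and the CDB itself in bytes 15-30. */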
2656 
2657 
2658 static int get_next_command(struct fsg_dev *fsg)
2659 {
2660  struct fsg_buffhd *bh;
2661  int rc = 0;
2662 
2663  if (transport_is_bbb()) {
2664 
2665  /* Wait for the next buffer to become available */
2666  bh = fsg->next_buffhd_to_fill;
2667  while (bh->state != BUF_STATE_EMPTY) {
2668  rc = sleep_thread(fsg);
2669  if (rc)
2670  return rc;
2671  }
2672 
2673  /* Queue a request to read a Bulk-only CBW */
2674  set_bulk_out_req_length(fsg, bh, US_BULK_CB_WRAP_LEN);
2675  start_transfer(fsg, fsg->bulk_out, bh->outreq,
2676  &bh->outreq_busy, &bh->state);
2677 
2678  /* We will drain the buffer in software, which means we
2679  * can reuse it for the next filling. No need to advance
2680  * next_buffhd_to_fill. */
2681 
2682  /* Wait for the CBW to arrive */
2683  while (bh->state != BUF_STATE_FULL) {
2684  rc = sleep_thread(fsg);
2685  if (rc)
2686  return rc;
2687  }
2688  smp_rmb();
2689  rc = received_cbw(fsg, bh);
2690  bh->state = BUF_STATE_EMPTY;
2691 
2692  } else { // USB_PR_CB or USB_PR_CBI
2693 
2694  /* Wait for the next command to arrive */
2695  while (fsg->cbbuf_cmnd_size == 0) {
2696  rc = sleep_thread(fsg);
2697  if (rc)
2698  return rc;
2699  }
2700 
2701  /* Is the previous status interrupt request still busy?
2702  * The host is allowed to skip reading the status,
2703  * so we must cancel it. */
2704  if (fsg->intreq_busy)
2705  usb_ep_dequeue(fsg->intr_in, fsg->intreq);
2706 
2707  /* Copy the command and mark the buffer empty */
2708  fsg->data_dir = DATA_DIR_UNKNOWN;
2709  spin_lock_irq(&fsg->lock);
2710  fsg->cmnd_size = fsg->cbbuf_cmnd_size;
2711  memcpy(fsg->cmnd, fsg->cbbuf_cmnd, fsg->cmnd_size);
2712  fsg->cbbuf_cmnd_size = 0;
2713  spin_unlock_irq(&fsg->lock);
2714 
2715  /* Use LUN from the command */
2716  fsg->lun = fsg->cmnd[1] >> 5;
2717  }
2718 
2719  /* Update current lun */
2720  if (fsg->lun >= 0 && fsg->lun < fsg->nluns)
2721  fsg->curlun = &fsg->luns[fsg->lun];
2722  else
2723  fsg->curlun = NULL;
2724 
2725  return rc;
2726 }
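/* For CB and CBI the command block never arrives over the bulk-out
 * pipe: the class-specific ADSC control request delivers it on ep0 and
 * the ep0 completion code stashes it in fsg->cbbuf_cmnd, which is why
 * the non-Bulk-only branch above only has to wait for cbbuf_cmnd_size
 * to become non-zero and copy the bytes out under the lock. */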
2727 
2728 
2729 /*-------------------------------------------------------------------------*/
2730 
2731 static int enable_endpoint(struct fsg_dev *fsg, struct usb_ep *ep,
2732  const struct usb_endpoint_descriptor *d)
2733 {
2734  int rc;
2735 
2736  ep->driver_data = fsg;
2737  ep->desc = d;
2738  rc = usb_ep_enable(ep);
2739  if (rc)
2740  ERROR(fsg, "can't enable %s, result %d\n", ep->name, rc);
2741  return rc;
2742 }
2743 
2744 static int alloc_request(struct fsg_dev *fsg, struct usb_ep *ep,
2745  struct usb_request **preq)
2746 {
2747  *preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
2748  if (*preq)
2749  return 0;
2750  ERROR(fsg, "can't allocate request for %s\n", ep->name);
2751  return -ENOMEM;
2752 }
2753 
2754 /*
2755  * Reset interface setting and re-init endpoint state (toggle etc).
2756  * Call with altsetting < 0 to disable the interface. The only other
2757  * available altsetting is 0, which enables the interface.
2758  */
2759 static int do_set_interface(struct fsg_dev *fsg, int altsetting)
2760 {
2761  int rc = 0;
2762  int i;
2763  const struct usb_endpoint_descriptor *d;
2764 
2765  if (fsg->running)
2766  DBG(fsg, "reset interface\n");
2767 
2768 reset:
2769  /* Deallocate the requests */
2770  for (i = 0; i < fsg_num_buffers; ++i) {
2771  struct fsg_buffhd *bh = &fsg->buffhds[i];
2772 
2773  if (bh->inreq) {
2774  usb_ep_free_request(fsg->bulk_in, bh->inreq);
2775  bh->inreq = NULL;
2776  }
2777  if (bh->outreq) {
2778  usb_ep_free_request(fsg->bulk_out, bh->outreq);
2779  bh->outreq = NULL;
2780  }
2781  }
2782  if (fsg->intreq) {
2783  usb_ep_free_request(fsg->intr_in, fsg->intreq);
2784  fsg->intreq = NULL;
2785  }
2786 
2787  /* Disable the endpoints */
2788  if (fsg->bulk_in_enabled) {
2789  usb_ep_disable(fsg->bulk_in);
2790  fsg->bulk_in_enabled = 0;
2791  }
2792  if (fsg->bulk_out_enabled) {
2793  usb_ep_disable(fsg->bulk_out);
2794  fsg->bulk_out_enabled = 0;
2795  }
2796  if (fsg->intr_in_enabled) {
2797  usb_ep_disable(fsg->intr_in);
2798  fsg->intr_in_enabled = 0;
2799  }
2800 
2801  fsg->running = 0;
2802  if (altsetting < 0 || rc != 0)
2803  return rc;
2804 
2805  DBG(fsg, "set interface %d\n", altsetting);
2806 
2807  /* Enable the endpoints */
2808  d = fsg_ep_desc(fsg->gadget,
2809  &fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc,
2810  &fsg_ss_bulk_in_desc);
2811  if ((rc = enable_endpoint(fsg, fsg->bulk_in, d)) != 0)
2812  goto reset;
2813  fsg->bulk_in_enabled = 1;
2814 
2815  d = fsg_ep_desc(fsg->gadget,
2816  &fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc,
2817  &fsg_ss_bulk_out_desc);
2818  if ((rc = enable_endpoint(fsg, fsg->bulk_out, d)) != 0)
2819  goto reset;
2820  fsg->bulk_out_enabled = 1;
2821  fsg->bulk_out_maxpacket = usb_endpoint_maxp(d);
2822  clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
2823 
2824  if (transport_is_cbi()) {
2825  d = fsg_ep_desc(fsg->gadget,
2826  &fsg_fs_intr_in_desc, &fsg_hs_intr_in_desc,
2827  &fsg_ss_intr_in_desc);
2828  if ((rc = enable_endpoint(fsg, fsg->intr_in, d)) != 0)
2829  goto reset;
2830  fsg->intr_in_enabled = 1;
2831  }
2832 
2833  /* Allocate the requests */
2834  for (i = 0; i < fsg_num_buffers; ++i) {
2835  struct fsg_buffhd *bh = &fsg->buffhds[i];
2836 
2837  if ((rc = alloc_request(fsg, fsg->bulk_in, &bh->inreq)) != 0)
2838  goto reset;
2839  if ((rc = alloc_request(fsg, fsg->bulk_out, &bh->outreq)) != 0)
2840  goto reset;
2841  bh->inreq->buf = bh->outreq->buf = bh->buf;
2842  bh->inreq->context = bh->outreq->context = bh;
2843  bh->inreq->complete = bulk_in_complete;
2844  bh->outreq->complete = bulk_out_complete;
2845  }
2846  if (transport_is_cbi()) {
2847  if ((rc = alloc_request(fsg, fsg->intr_in, &fsg->intreq)) != 0)
2848  goto reset;
2849  fsg->intreq->complete = intr_in_complete;
2850  }
2851 
2852  fsg->running = 1;
2853  for (i = 0; i < fsg->nluns; ++i)
2854  fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
2855  return rc;
2856 }
2857 
2858 
2859 /*
2860  * Change our operational configuration. This code must agree with the code
2861  * that returns config descriptors, and with interface altsetting code.
2862  *
2863  * It's also responsible for power management interactions. Some
2864  * configurations might not work with our current power sources.
2865  * For now we just assume the gadget is always self-powered.
2866  */
2867 static int do_set_config(struct fsg_dev *fsg, u8 new_config)
2868 {
2869  int rc = 0;
2870 
2871  /* Disable the single interface */
2872  if (fsg->config != 0) {
2873  DBG(fsg, "reset config\n");
2874  fsg->config = 0;
2875  rc = do_set_interface(fsg, -1);
2876  }
2877 
2878  /* Enable the interface */
2879  if (new_config != 0) {
2880  fsg->config = new_config;
2881  if ((rc = do_set_interface(fsg, 0)) != 0)
2882  fsg->config = 0; // Reset on errors
2883  else
2884  INFO(fsg, "%s config #%d\n",
2885  usb_speed_string(fsg->gadget->speed),
2886  fsg->config);
2887  }
2888  return rc;
2889 }
2890 
2891 
2892 /*-------------------------------------------------------------------------*/
2893 
2894 static void handle_exception(struct fsg_dev *fsg)
2895 {
2896  siginfo_t info;
2897  int sig;
2898  int i;
2899  int num_active;
2900  struct fsg_buffhd *bh;
2901  enum fsg_state old_state;
2902  u8 new_config;
2903  struct fsg_lun *curlun;
2904  unsigned int exception_req_tag;
2905  int rc;
2906 
2907  /* Clear the existing signals. Anything but SIGUSR1 is converted
2908  * into a high-priority EXIT exception. */
2909  for (;;) {
2910  sig = dequeue_signal_lock(current, &current->blocked, &info);
2911  if (!sig)
2912  break;
2913  if (sig != SIGUSR1) {
2914  if (fsg->state < FSG_STATE_EXIT)
2915  DBG(fsg, "Main thread exiting on signal\n");
2916  raise_exception(fsg, FSG_STATE_EXIT);
2917  }
2918  }
2919 
2920  /* Cancel all the pending transfers */
2921  if (fsg->intreq_busy)
2922  usb_ep_dequeue(fsg->intr_in, fsg->intreq);
2923  for (i = 0; i < fsg_num_buffers; ++i) {
2924  bh = &fsg->buffhds[i];
2925  if (bh->inreq_busy)
2926  usb_ep_dequeue(fsg->bulk_in, bh->inreq);
2927  if (bh->outreq_busy)
2928  usb_ep_dequeue(fsg->bulk_out, bh->outreq);
2929  }
2930 
2931  /* Wait until everything is idle */
2932  for (;;) {
2933  num_active = fsg->intreq_busy;
2934  for (i = 0; i < fsg_num_buffers; ++i) {
2935  bh = &fsg->buffhds[i];
2936  num_active += bh->inreq_busy + bh->outreq_busy;
2937  }
2938  if (num_active == 0)
2939  break;
2940  if (sleep_thread(fsg))
2941  return;
2942  }
2943 
2944  /* Clear out the controller's fifos */
2945  if (fsg->bulk_in_enabled)
2946  usb_ep_fifo_flush(fsg->bulk_in);
2947  if (fsg->bulk_out_enabled)
2948  usb_ep_fifo_flush(fsg->bulk_out);
2949  if (fsg->intr_in_enabled)
2950  usb_ep_fifo_flush(fsg->intr_in);
2951 
2952  /* Reset the I/O buffer states and pointers, the SCSI
2953  * state, and the exception. Then invoke the handler. */
2954  spin_lock_irq(&fsg->lock);
2955 
2956  for (i = 0; i < fsg_num_buffers; ++i) {
2957  bh = &fsg->buffhds[i];
2958  bh->state = BUF_STATE_EMPTY;
2959  }
2960  fsg->next_buffhd_to_fill = fsg->next_buffhd_to_drain =
2961  &fsg->buffhds[0];
2962 
2963  exception_req_tag = fsg->exception_req_tag;
2964  new_config = fsg->new_config;
2965  old_state = fsg->state;
2966 
2967  if (old_state == FSG_STATE_ABORT_BULK_OUT)
2968  fsg->state = FSG_STATE_STATUS_PHASE;
2969  else {
2970  for (i = 0; i < fsg->nluns; ++i) {
2971  curlun = &fsg->luns[i];
2972  curlun->prevent_medium_removal = 0;
2973  curlun->sense_data = curlun->unit_attention_data =
2974  SS_NO_SENSE;
2975  curlun->sense_data_info = 0;
2976  curlun->info_valid = 0;
2977  }
2978  fsg->state = FSG_STATE_IDLE;
2979  }
2980  spin_unlock_irq(&fsg->lock);
2981 
2982  /* Carry out any extra actions required for the exception */
2983  switch (old_state) {
2984  default:
2985  break;
2986 
2987  case FSG_STATE_ABORT_BULK_OUT:
2988  send_status(fsg);
2989  spin_lock_irq(&fsg->lock);
2990  if (fsg->state == FSG_STATE_STATUS_PHASE)
2991  fsg->state = FSG_STATE_IDLE;
2992  spin_unlock_irq(&fsg->lock);
2993  break;
2994 
2995  case FSG_STATE_RESET:
2996  /* In case we were forced against our will to halt a
2997  * bulk endpoint, clear the halt now. (The SuperH UDC
2998  * requires this.) */
2999  if (test_and_clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
3000  usb_ep_clear_halt(fsg->bulk_in);
3001 
3002  if (transport_is_bbb()) {
3003  if (fsg->ep0_req_tag == exception_req_tag)
3004  ep0_queue(fsg); // Complete the status stage
3005 
3006  } else if (transport_is_cbi())
3007  send_status(fsg); // Status by interrupt pipe
3008 
3009  /* Technically this should go here, but it would only be
3010  * a waste of time. Ditto for the INTERFACE_CHANGE and
3011  * CONFIG_CHANGE cases. */
3012  // for (i = 0; i < fsg->nluns; ++i)
3013  // fsg->luns[i].unit_attention_data = SS_RESET_OCCURRED;
3014  break;
3015 
3016  case FSG_STATE_INTERFACE_CHANGE:
3017  rc = do_set_interface(fsg, 0);
3018  if (fsg->ep0_req_tag != exception_req_tag)
3019  break;
3020  if (rc != 0) // STALL on errors
3021  fsg_set_halt(fsg, fsg->ep0);
3022  else // Complete the status stage
3023  ep0_queue(fsg);
3024  break;
3025 
3026  case FSG_STATE_CONFIG_CHANGE:
3027  rc = do_set_config(fsg, new_config);
3028  if (fsg->ep0_req_tag != exception_req_tag)
3029  break;
3030  if (rc != 0) // STALL on errors
3031  fsg_set_halt(fsg, fsg->ep0);
3032  else // Complete the status stage
3033  ep0_queue(fsg);
3034  break;
3035 
3036  case FSG_STATE_DISCONNECT:
3037  for (i = 0; i < fsg->nluns; ++i)
3038  fsg_lun_fsync_sub(fsg->luns + i);
3039  do_set_config(fsg, 0); // Unconfigured state
3040  break;
3041 
3042  case FSG_STATE_EXIT:
3043  case FSG_STATE_TERMINATED:
3044  do_set_config(fsg, 0); // Free resources
3045  spin_lock_irq(&fsg->lock);
3046  fsg->state = FSG_STATE_TERMINATED; // Stop the thread
3047  spin_unlock_irq(&fsg->lock);
3048  break;
3049  }
3050 }
3051 
3052 
3053 /*-------------------------------------------------------------------------*/
3054 
3055 static int fsg_main_thread(void *fsg_)
3056 {
3057  struct fsg_dev *fsg = fsg_;
3058 
3059  /* Allow the thread to be killed by a signal, but set the signal mask
3060  * to block everything but INT, TERM, KILL, and USR1. */
3061  allow_signal(SIGINT);
3062  allow_signal(SIGTERM);
3063  allow_signal(SIGKILL);
3064  allow_signal(SIGUSR1);
3065 
3066  /* Allow the thread to be frozen */
3067  set_freezable();
3068 
3069  /* Arrange for userspace references to be interpreted as kernel
3070  * pointers. That way we can pass a kernel pointer to a routine
3071  * that expects a __user pointer and it will work okay. */
3072  set_fs(get_ds());
3073 
3074  /* The main loop */
3075  while (fsg->state != FSG_STATE_TERMINATED) {
3076  if (exception_in_progress(fsg) || signal_pending(current)) {
3077  handle_exception(fsg);
3078  continue;
3079  }
3080 
3081  if (!fsg->running) {
3082  sleep_thread(fsg);
3083  continue;
3084  }
3085 
3086  if (get_next_command(fsg))
3087  continue;
3088 
3089  spin_lock_irq(&fsg->lock);
3090  if (!exception_in_progress(fsg))
3091  fsg->state = FSG_STATE_DATA_PHASE;
3092  spin_unlock_irq(&fsg->lock);
3093 
3094  if (do_scsi_command(fsg) || finish_reply(fsg))
3095  continue;
3096 
3097  spin_lock_irq(&fsg->lock);
3098  if (!exception_in_progress(fsg))
3099  fsg->state = FSG_STATE_STATUS_PHASE;
3100  spin_unlock_irq(&fsg->lock);
3101 
3102  if (send_status(fsg))
3103  continue;
3104 
3105  spin_lock_irq(&fsg->lock);
3106  if (!exception_in_progress(fsg))
3107  fsg->state = FSG_STATE_IDLE;
3108  spin_unlock_irq(&fsg->lock);
3109  }
3110 
3111  spin_lock_irq(&fsg->lock);
3112  fsg->thread_task = NULL;
3113  spin_unlock_irq(&fsg->lock);
3114 
3115  /* If we are exiting because of a signal, unregister the
3116  * gadget driver. */
3117  if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags))
3118  usb_gadget_unregister_driver(&fsg_driver);
3119 
3120  /* Let the unbind and cleanup routines know the thread has exited */
3121  complete_and_exit(&fsg->thread_notifier, 0);
3122 }
3123 
3124 
3125 /*-------------------------------------------------------------------------*/
3126 
3127 
3128 /* The write permissions and store_xxx pointers are set in fsg_bind() */
3129 static DEVICE_ATTR(ro, 0444, fsg_show_ro, NULL);
3130 static DEVICE_ATTR(nofua, 0644, fsg_show_nofua, NULL);
3131 static DEVICE_ATTR(file, 0444, fsg_show_file, NULL);
3132 
3133 
3134 /*-------------------------------------------------------------------------*/
3135 
3136 static void fsg_release(struct kref *ref)
3137 {
3138  struct fsg_dev *fsg = container_of(ref, struct fsg_dev, ref);
3139 
3140  kfree(fsg->luns);
3141  kfree(fsg);
3142 }
3143 
3144 static void lun_release(struct device *dev)
3145 {
3146  struct rw_semaphore *filesem = dev_get_drvdata(dev);
3147  struct fsg_dev *fsg =
3148  container_of(filesem, struct fsg_dev, filesem);
3149 
3150  kref_put(&fsg->ref, fsg_release);
3151 }
3152 
3153 static void /* __init_or_exit */ fsg_unbind(struct usb_gadget *gadget)
3154 {
3155  struct fsg_dev *fsg = get_gadget_data(gadget);
3156  int i;
3157  struct fsg_lun *curlun;
3158  struct usb_request *req = fsg->ep0req;
3159 
3160  DBG(fsg, "unbind\n");
3161  clear_bit(REGISTERED, &fsg->atomic_bitflags);
3162 
3163  /* If the thread isn't already dead, tell it to exit now */
3164  if (fsg->state != FSG_STATE_TERMINATED) {
3165  raise_exception(fsg, FSG_STATE_EXIT);
3166  wait_for_completion(&fsg->thread_notifier);
3167 
3168  /* The cleanup routine waits for this completion also */
3169  complete(&fsg->thread_notifier);
3170  }
3171 
3172  /* Unregister the sysfs attribute files and the LUNs */
3173  for (i = 0; i < fsg->nluns; ++i) {
3174  curlun = &fsg->luns[i];
3175  if (curlun->registered) {
3176  device_remove_file(&curlun->dev, &dev_attr_nofua);
3177  device_remove_file(&curlun->dev, &dev_attr_ro);
3178  device_remove_file(&curlun->dev, &dev_attr_file);
3179  fsg_lun_close(curlun);
3180  device_unregister(&curlun->dev);
3181  curlun->registered = 0;
3182  }
3183  }
3184 
3185  /* Free the data buffers */
3186  for (i = 0; i < fsg_num_buffers; ++i)
3187  kfree(fsg->buffhds[i].buf);
3188 
3189  /* Free the request and buffer for endpoint 0 */
3190  if (req) {
3191  kfree(req->buf);
3192  usb_ep_free_request(fsg->ep0, req);
3193  }
3194 
3195  set_gadget_data(gadget, NULL);
3196 }
3197 
3198 
3199 static int __init check_parameters(struct fsg_dev *fsg)
3200 {
3201  int prot;
3202 
3203  /* Store the default values */
3204  mod_data.transport_type = USB_PR_BULK;
3205  mod_data.transport_name = "Bulk-only";
3206  mod_data.protocol_type = USB_SC_SCSI;
3207  mod_data.protocol_name = "Transparent SCSI";
3208 
3209  /* Some peripheral controllers are known not to be able to
3210  * halt bulk endpoints correctly. If one of them is present,
3211  * disable stalls.
3212  */
3213  if (gadget_is_at91(fsg->gadget))
3214  mod_data.can_stall = 0;
3215 
3216  if (mod_data.release == 0xffff)
3217  mod_data.release = get_default_bcdDevice();
3218 
3219  prot = simple_strtol(mod_data.protocol_parm, NULL, 0);
3220 
3221 #ifdef CONFIG_USB_FILE_STORAGE_TEST
3222  if (strnicmp(mod_data.transport_parm, "BBB", 10) == 0) {
3223  ; // Use default setting
3224  } else if (strnicmp(mod_data.transport_parm, "CB", 10) == 0) {
3225  mod_data.transport_type = USB_PR_CB;
3226  mod_data.transport_name = "Control-Bulk";
3227  } else if (strnicmp(mod_data.transport_parm, "CBI", 10) == 0) {
3228  mod_data.transport_type = USB_PR_CBI;
3229  mod_data.transport_name = "Control-Bulk-Interrupt";
3230  } else {
3231  ERROR(fsg, "invalid transport: %s\n", mod_data.transport_parm);
3232  return -EINVAL;
3233  }
3234 
3235  if (strnicmp(mod_data.protocol_parm, "SCSI", 10) == 0 ||
3236  prot == USB_SC_SCSI) {
3237  ; // Use default setting
3238  } else if (strnicmp(mod_data.protocol_parm, "RBC", 10) == 0 ||
3239  prot == USB_SC_RBC) {
3240  mod_data.protocol_type = USB_SC_RBC;
3241  mod_data.protocol_name = "RBC";
3242  } else if (strnicmp(mod_data.protocol_parm, "8020", 4) == 0 ||
3243  strnicmp(mod_data.protocol_parm, "ATAPI", 10) == 0 ||
3244  prot == USB_SC_8020) {
3245  mod_data.protocol_type = USB_SC_8020;
3246  mod_data.protocol_name = "8020i (ATAPI)";
3247  } else if (strnicmp(mod_data.protocol_parm, "QIC", 3) == 0 ||
3248  prot == USB_SC_QIC) {
3249  mod_data.protocol_type = USB_SC_QIC;
3250  mod_data.protocol_name = "QIC-157";
3251  } else if (strnicmp(mod_data.protocol_parm, "UFI", 10) == 0 ||
3252  prot == USB_SC_UFI) {
3253  mod_data.protocol_type = USB_SC_UFI;
3254  mod_data.protocol_name = "UFI";
3255  } else if (strnicmp(mod_data.protocol_parm, "8070", 4) == 0 ||
3256  prot == USB_SC_8070) {
3257  mod_data.protocol_type = USB_SC_8070;
3258  mod_data.protocol_name = "8070i";
3259  } else {
3260  ERROR(fsg, "invalid protocol: %s\n", mod_data.protocol_parm);
3261  return -EINVAL;
3262  }
3263 
3264  mod_data.buflen &= PAGE_CACHE_MASK;
3265  if (mod_data.buflen <= 0) {
3266  ERROR(fsg, "invalid buflen\n");
3267  return -ETOOSMALL;
3268  }
3269 
3270 #endif /* CONFIG_USB_FILE_STORAGE_TEST */
3271 
3272  /* Serial string handling.
3273  * On a real device, the serial string would be loaded
3274  * from permanent storage. */
3275  if (mod_data.serial) {
3276  const char *ch;
3277  unsigned len = 0;
3278 
3279  /* Sanity check :
3280  * The CB[I] specification limits the serial string to
3281  * 12 uppercase hexadecimal characters.
3282  * BBB need at least 12 uppercase hexadecimal characters,
3283  * with a maximum of 126. */
3284  for (ch = mod_data.serial; *ch; ++ch) {
3285  ++len;
3286  if ((*ch < '0' || *ch > '9') &&
3287  (*ch < 'A' || *ch > 'F')) { /* not uppercase hex */
3288  WARNING(fsg,
3289  "Invalid serial string character: %c\n",
3290  *ch);
3291  goto no_serial;
3292  }
3293  }
3294  if (len > 126 ||
3295  (mod_data.transport_type == USB_PR_BULK && len < 12) ||
3296  (mod_data.transport_type != USB_PR_BULK && len > 12)) {
3297  WARNING(fsg, "Invalid serial string length!\n");
3298  goto no_serial;
3299  }
3300  fsg_strings[FSG_STRING_SERIAL - 1].s = mod_data.serial;
3301  } else {
3302  WARNING(fsg, "No serial-number string provided!\n");
3303  no_serial:
3304  device_desc.iSerialNumber = 0;
3305  }
3306 
3307  return 0;
3308 }
3309 
3310 
3311 static int __init fsg_bind(struct usb_gadget *gadget,
3312  struct usb_gadget_driver *driver)
3313 {
3314  struct fsg_dev *fsg = the_fsg;
3315  int rc;
3316  int i;
3317  struct fsg_lun *curlun;
3318  struct usb_ep *ep;
3319  struct usb_request *req;
3320  char *pathbuf, *p;
3321 
3322  fsg->gadget = gadget;
3323  set_gadget_data(gadget, fsg);
3324  fsg->ep0 = gadget->ep0;
3325  fsg->ep0->driver_data = fsg;
3326 
3327  if ((rc = check_parameters(fsg)) != 0)
3328  goto out;
3329 
3330  if (mod_data.removable) { // Enable the store_xxx attributes
3331  dev_attr_file.attr.mode = 0644;
3332  dev_attr_file.store = fsg_store_file;
3333  if (!mod_data.cdrom) {
3334  dev_attr_ro.attr.mode = 0644;
3335  dev_attr_ro.store = fsg_store_ro;
3336  }
3337  }
3338 
3339  /* Only for removable media? */
3340  dev_attr_nofua.attr.mode = 0644;
3341  dev_attr_nofua.store = fsg_store_nofua;
3342 
3343  /* Find out how many LUNs there should be */
3344  i = mod_data.nluns;
3345  if (i == 0)
3346  i = max(mod_data.num_filenames, 1u);
3347  if (i > FSG_MAX_LUNS) {
3348  ERROR(fsg, "invalid number of LUNs: %d\n", i);
3349  rc = -EINVAL;
3350  goto out;
3351  }
3352 
3353  /* Create the LUNs, open their backing files, and register the
3354  * LUN devices in sysfs. */
3355  fsg->luns = kzalloc(i * sizeof(struct fsg_lun), GFP_KERNEL);
3356  if (!fsg->luns) {
3357  rc = -ENOMEM;
3358  goto out;
3359  }
3360  fsg->nluns = i;
3361 
3362  for (i = 0; i < fsg->nluns; ++i) {
3363  curlun = &fsg->luns[i];
3364  curlun->cdrom = !!mod_data.cdrom;
3365  curlun->ro = mod_data.cdrom || mod_data.ro[i];
3366  curlun->initially_ro = curlun->ro;
3367  curlun->removable = mod_data.removable;
3368  curlun->nofua = mod_data.nofua[i];
3369  curlun->dev.release = lun_release;
3370  curlun->dev.parent = &gadget->dev;
3371  curlun->dev.driver = &fsg_driver.driver;
3372  dev_set_drvdata(&curlun->dev, &fsg->filesem);
3373  dev_set_name(&curlun->dev,"%s-lun%d",
3374  dev_name(&gadget->dev), i);
3375 
3376  kref_get(&fsg->ref);
3377  rc = device_register(&curlun->dev);
3378  if (rc) {
3379  INFO(fsg, "failed to register LUN%d: %d\n", i, rc);
3380  put_device(&curlun->dev);
3381  goto out;
3382  }
3383  curlun->registered = 1;
3384 
3385  rc = device_create_file(&curlun->dev, &dev_attr_ro);
3386  if (rc)
3387  goto out;
3388  rc = device_create_file(&curlun->dev, &dev_attr_nofua);
3389  if (rc)
3390  goto out;
3391  rc = device_create_file(&curlun->dev, &dev_attr_file);
3392  if (rc)
3393  goto out;
3394 
3395  if (mod_data.file[i] && *mod_data.file[i]) {
3396  rc = fsg_lun_open(curlun, mod_data.file[i]);
3397  if (rc)
3398  goto out;
3399  } else if (!mod_data.removable) {
3400  ERROR(fsg, "no file given for LUN%d\n", i);
3401  rc = -EINVAL;
3402  goto out;
3403  }
3404  }
3405 
3406  /* Find all the endpoints we will use */
3407  usb_ep_autoconfig_reset(gadget);
3408  ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
3409  if (!ep)
3410  goto autoconf_fail;
3411  ep->driver_data = fsg; // claim the endpoint
3412  fsg->bulk_in = ep;
3413 
3414  ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
3415  if (!ep)
3416  goto autoconf_fail;
3417  ep->driver_data = fsg; // claim the endpoint
3418  fsg->bulk_out = ep;
3419 
3420  if (transport_is_cbi()) {
3421  ep = usb_ep_autoconfig(gadget, &fsg_fs_intr_in_desc);
3422  if (!ep)
3423  goto autoconf_fail;
3424  ep->driver_data = fsg; // claim the endpoint
3425  fsg->intr_in = ep;
3426  }
3427 
3428  /* Fix up the descriptors */
3429  device_desc.idVendor = cpu_to_le16(mod_data.vendor);
3430  device_desc.idProduct = cpu_to_le16(mod_data.product);
3431  device_desc.bcdDevice = cpu_to_le16(mod_data.release);
3432 
3433  i = (transport_is_cbi() ? 3 : 2); // Number of endpoints
3434  fsg_intf_desc.bNumEndpoints = i;
3435  fsg_intf_desc.bInterfaceSubClass = mod_data.protocol_type;
3436  fsg_intf_desc.bInterfaceProtocol = mod_data.transport_type;
3437  fsg_fs_function[i + FSG_FS_FUNCTION_PRE_EP_ENTRIES] = NULL;
3438 
3439  if (gadget_is_dualspeed(gadget)) {
3440  fsg_hs_function[i + FSG_HS_FUNCTION_PRE_EP_ENTRIES] = NULL;
3441 
3442  /* Assume endpoint addresses are the same for both speeds */
3443  fsg_hs_bulk_in_desc.bEndpointAddress =
3444  fsg_fs_bulk_in_desc.bEndpointAddress;
3445  fsg_hs_bulk_out_desc.bEndpointAddress =
3446  fsg_fs_bulk_out_desc.bEndpointAddress;
3447  fsg_hs_intr_in_desc.bEndpointAddress =
3448  fsg_fs_intr_in_desc.bEndpointAddress;
3449  }
3450 
3451  if (gadget_is_superspeed(gadget)) {
3452  unsigned max_burst;
3453 
3454  fsg_ss_function[i + FSG_SS_FUNCTION_PRE_EP_ENTRIES] = NULL;
3455 
3456  /* Calculate bMaxBurst, we know packet size is 1024 */
3457  max_burst = min_t(unsigned, mod_data.buflen / 1024, 15);
3458 
3459  /* Assume endpoint addresses are the same for both speeds */
3460  fsg_ss_bulk_in_desc.bEndpointAddress =
3461  fsg_fs_bulk_in_desc.bEndpointAddress;
3462  fsg_ss_bulk_in_comp_desc.bMaxBurst = max_burst;
3463 
3464  fsg_ss_bulk_out_desc.bEndpointAddress =
3465  fsg_fs_bulk_out_desc.bEndpointAddress;
3466  fsg_ss_bulk_out_comp_desc.bMaxBurst = max_burst;
3467  }
3468 
3469  if (gadget_is_otg(gadget))
3470  fsg_otg_desc.bmAttributes |= USB_OTG_HNP;
3471 
3472  rc = -ENOMEM;
3473 
3474  /* Allocate the request and buffer for endpoint 0 */
3475  fsg->ep0req = req = usb_ep_alloc_request(fsg->ep0, GFP_KERNEL);
3476  if (!req)
3477  goto out;
3478  req->buf = kmalloc(EP0_BUFSIZE, GFP_KERNEL);
3479  if (!req->buf)
3480  goto out;
3481  req->complete = ep0_complete;
3482 
3483  /* Allocate the data buffers */
3484  for (i = 0; i < fsg_num_buffers; ++i) {
3485  struct fsg_buffhd *bh = &fsg->buffhds[i];
3486 
3487  /* Allocate for the bulk-in endpoint. We assume that
3488  * the buffer will also work with the bulk-out (and
3489  * interrupt-in) endpoint. */
3490  bh->buf = kmalloc(mod_data.buflen, GFP_KERNEL);
3491  if (!bh->buf)
3492  goto out;
3493  bh->next = bh + 1;
3494  }
3495  fsg->buffhds[fsg_num_buffers - 1].next = &fsg->buffhds[0];
3496 
3497  /* This should reflect the actual gadget power source */
3498  usb_gadget_set_selfpowered(gadget);
3499 
3500  snprintf(fsg_string_manufacturer, sizeof fsg_string_manufacturer,
3501  "%s %s with %s",
3502  init_utsname()->sysname, init_utsname()->release,
3503  gadget->name);
3504 
3505  fsg->thread_task = kthread_create(fsg_main_thread, fsg,
3506  "file-storage-gadget");
3507  if (IS_ERR(fsg->thread_task)) {
3508  rc = PTR_ERR(fsg->thread_task);
3509  goto out;
3510  }
3511 
3512  INFO(fsg, DRIVER_DESC ", version: " DRIVER_VERSION "\n");
3513  INFO(fsg, "NOTE: This driver is deprecated. "
3514  "Consider using g_mass_storage instead.\n");
3515  INFO(fsg, "Number of LUNs=%d\n", fsg->nluns);
3516 
3517  pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
3518  for (i = 0; i < fsg->nluns; ++i) {
3519  curlun = &fsg->luns[i];
3520  if (fsg_lun_is_open(curlun)) {
3521  p = NULL;
3522  if (pathbuf) {
3523  p = d_path(&curlun->filp->f_path,
3524  pathbuf, PATH_MAX);
3525  if (IS_ERR(p))
3526  p = NULL;
3527  }
3528  LINFO(curlun, "ro=%d, nofua=%d, file: %s\n",
3529  curlun->ro, curlun->nofua, (p ? p : "(error)"));
3530  }
3531  }
3532  kfree(pathbuf);
3533 
3534  DBG(fsg, "transport=%s (x%02x)\n",
3535  mod_data.transport_name, mod_data.transport_type);
3536  DBG(fsg, "protocol=%s (x%02x)\n",
3537  mod_data.protocol_name, mod_data.protocol_type);
3538  DBG(fsg, "VendorID=x%04x, ProductID=x%04x, Release=x%04x\n",
3539  mod_data.vendor, mod_data.product, mod_data.release);
3540  DBG(fsg, "removable=%d, stall=%d, cdrom=%d, buflen=%u\n",
3541  mod_data.removable, mod_data.can_stall,
3542  mod_data.cdrom, mod_data.buflen);
3543  DBG(fsg, "I/O thread pid: %d\n", task_pid_nr(fsg->thread_task));
3544 
3545  set_bit(REGISTERED, &fsg->atomic_bitflags);
3546 
3547  /* Tell the thread to start working */
3548  wake_up_process(fsg->thread_task);
3549  return 0;
3550 
3551 autoconf_fail:
3552  ERROR(fsg, "unable to autoconfigure all endpoints\n");
3553  rc = -ENOTSUPP;
3554 
3555 out:
3556  fsg->state = FSG_STATE_TERMINATED; // The thread is dead
3557  fsg_unbind(gadget);
3558  complete(&fsg->thread_notifier);
3559  return rc;
3560 }
3561 
3562 
3563 /*-------------------------------------------------------------------------*/
3564 
3565 static void fsg_suspend(struct usb_gadget *gadget)
3566 {
3567  struct fsg_dev *fsg = get_gadget_data(gadget);
3568 
3569  DBG(fsg, "suspend\n");
3570  set_bit(SUSPENDED, &fsg->atomic_bitflags);
3571 }
3572 
3573 static void fsg_resume(struct usb_gadget *gadget)
3574 {
3575  struct fsg_dev *fsg = get_gadget_data(gadget);
3576 
3577  DBG(fsg, "resume\n");
3578  clear_bit(SUSPENDED, &fsg->atomic_bitflags);
3579 }
3580 
3581 
3582 /*-------------------------------------------------------------------------*/
3583 
3584 static __refdata struct usb_gadget_driver fsg_driver = {
3585  .max_speed = USB_SPEED_SUPER,
3586  .function = (char *) fsg_string_product,
3587  .bind = fsg_bind,
3588  .unbind = fsg_unbind,
3589  .disconnect = fsg_disconnect,
3590  .setup = fsg_setup,
3591  .suspend = fsg_suspend,
3592  .resume = fsg_resume,
3593 
3594  .driver = {
3595  .name = DRIVER_NAME,
3596  .owner = THIS_MODULE,
3597  // .release = ...
3598  // .suspend = ...
3599  // .resume = ...
3600  },
3601 };
3602 
3603 
3604 static int __init fsg_alloc(void)
3605 {
3606  struct fsg_dev *fsg;
3607 
3608  fsg = kzalloc(sizeof *fsg +
3609  fsg_num_buffers * sizeof *(fsg->buffhds), GFP_KERNEL);
3610 
3611  if (!fsg)
3612  return -ENOMEM;
3613  spin_lock_init(&fsg->lock);
3614  init_rwsem(&fsg->filesem);
3615  kref_init(&fsg->ref);
3616  init_completion(&fsg->thread_notifier);
3617 
3618  the_fsg = fsg;
3619  return 0;
3620 }
3621 
3622 
3623 static int __init fsg_init(void)
3624 {
3625  int rc;
3626  struct fsg_dev *fsg;
3627 
3628  rc = fsg_num_buffers_validate();
3629  if (rc != 0)
3630  return rc;
3631 
3632  if ((rc = fsg_alloc()) != 0)
3633  return rc;
3634  fsg = the_fsg;
3635  rc = usb_gadget_probe_driver(&fsg_driver);
3636  if (rc != 0)
3637  kref_put(&fsg->ref, fsg_release);
3638  return rc;
3639 }
3640 module_init(fsg_init);
3641 
3642 
3643 static void __exit fsg_cleanup(void)
3644 {
3645  struct fsg_dev *fsg = the_fsg;
3646 
3647  /* Unregister the driver iff the thread hasn't already done so */
3648  if (test_and_clear_bit(REGISTERED, &fsg->atomic_bitflags))
3649  usb_gadget_unregister_driver(&fsg_driver);
3650 
3651  /* Wait for the thread to finish up */
3652  wait_for_completion(&fsg->thread_notifier);
3653 
3654  kref_put(&fsg->ref, fsg_release);
3655 }
3656 module_exit(fsg_cleanup);