Linux Kernel 3.7.1
urb.c
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/usb.h>
#include <linux/wait.h>
#include <linux/usb/hcd.h>

#define to_urb(d) container_of(d, struct urb, kref)


static void urb_destroy(struct kref *kref)
{
        struct urb *urb = to_urb(kref);

        if (urb->transfer_flags & URB_FREE_BUFFER)
                kfree(urb->transfer_buffer);

        kfree(urb);
}

void usb_init_urb(struct urb *urb)
{
        if (urb) {
                memset(urb, 0, sizeof(*urb));
                kref_init(&urb->kref);
                INIT_LIST_HEAD(&urb->anchor_list);
        }
}

struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags)
{
        struct urb *urb;

        urb = kmalloc(sizeof(struct urb) +
                iso_packets * sizeof(struct usb_iso_packet_descriptor),
                mem_flags);
        if (!urb) {
                printk(KERN_ERR "alloc_urb: kmalloc failed\n");
                return NULL;
        }
        usb_init_urb(urb);
        return urb;
}

void usb_free_urb(struct urb *urb)
{
        if (urb)
                kref_put(&urb->kref, urb_destroy);
}

struct urb *usb_get_urb(struct urb *urb)
{
        if (urb)
                kref_get(&urb->kref);
        return urb;
}
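As a usage sketch (not part of urb.c): a driver allocates an URB with usb_alloc_urb(), takes an extra reference with usb_get_urb() only when another code path must keep the URB alive, and drops each reference with usb_free_urb(); the memory is released by urb_destroy() once the last kref is put. The my_driver names below are hypothetical.

/*
 * Usage sketch, not part of urb.c: reference-counted URB lifecycle.
 * "my_driver" and its members are hypothetical.
 */
#include <linux/usb.h>

struct my_driver {
        struct urb *urb;
};

static int my_driver_setup_urb(struct my_driver *drv)
{
        /* zero iso_packets: this URB is not used for isochronous I/O */
        drv->urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!drv->urb)
                return -ENOMEM;
        return 0;
}

static void my_driver_teardown_urb(struct my_driver *drv)
{
        /* drop the driver's reference; urb_destroy() runs when the
         * last reference (kref) is gone */
        usb_free_urb(drv->urb);
        drv->urb = NULL;
}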

void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
        unsigned long flags;

        spin_lock_irqsave(&anchor->lock, flags);
        usb_get_urb(urb);
        list_add_tail(&urb->anchor_list, &anchor->urb_list);
        urb->anchor = anchor;

        if (unlikely(anchor->poisoned)) {
                atomic_inc(&urb->reject);
        }

        spin_unlock_irqrestore(&anchor->lock, flags);
}

/* Callers must hold anchor->lock */
static void __usb_unanchor_urb(struct urb *urb, struct usb_anchor *anchor)
{
        urb->anchor = NULL;
        list_del(&urb->anchor_list);
        usb_put_urb(urb);
        if (list_empty(&anchor->urb_list))
                wake_up(&anchor->wait);
}

void usb_unanchor_urb(struct urb *urb)
{
        unsigned long flags;
        struct usb_anchor *anchor;

        if (!urb)
                return;

        anchor = urb->anchor;
        if (!anchor)
                return;

        spin_lock_irqsave(&anchor->lock, flags);
        /*
         * At this point, we could be competing with another thread which
         * has the same intention. To protect the urb from being unanchored
         * twice, only the winner of the race gets the job.
         */
        if (likely(anchor == urb->anchor))
                __usb_unanchor_urb(urb, anchor);
        spin_unlock_irqrestore(&anchor->lock, flags);
}
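A hedged sketch of how a driver might use the anchor API above (not part of urb.c): in-flight URBs are anchored so they can later be killed, unlinked, or waited on as a group; if submission fails the driver unanchors the URB again. The my_dev names are hypothetical, and the anchor is assumed to have been set up with init_usb_anchor() in probe().

/*
 * Usage sketch, not part of urb.c: grouping in-flight URBs on an anchor.
 * "my_dev" is hypothetical; dev->submitted was set up with init_usb_anchor().
 */
#include <linux/usb.h>

struct my_dev {
        struct usb_anchor submitted;
};

static int my_dev_submit(struct my_dev *dev, struct urb *urb)
{
        int ret;

        usb_anchor_urb(urb, &dev->submitted);
        ret = usb_submit_urb(urb, GFP_KERNEL);
        if (ret)
                /* submission failed, so take the URB off the anchor again */
                usb_unanchor_urb(urb);
        return ret;
}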

/*-------------------------------------------------------------------*/

int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
{
        int xfertype, max;
        struct usb_device *dev;
        struct usb_host_endpoint *ep;
        int is_out;

        if (!urb || urb->hcpriv || !urb->complete)
                return -EINVAL;
        dev = urb->dev;
        if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
                return -ENODEV;

        /* For now, get the endpoint from the pipe. Eventually drivers
         * will be required to set urb->ep directly and we will eliminate
         * urb->pipe.
         */
        ep = usb_pipe_endpoint(dev, urb->pipe);
        if (!ep)
                return -ENOENT;

        urb->ep = ep;
        urb->status = -EINPROGRESS;
        urb->actual_length = 0;

        /* Lots of sanity checks, so HCDs can rely on clean data
         * and don't need to duplicate tests
         */
        xfertype = usb_endpoint_type(&ep->desc);
        if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
                struct usb_ctrlrequest *setup =
                                (struct usb_ctrlrequest *) urb->setup_packet;

                if (!setup)
                        return -ENOEXEC;
                is_out = !(setup->bRequestType & USB_DIR_IN) ||
                                !setup->wLength;
        } else {
                is_out = usb_endpoint_dir_out(&ep->desc);
        }

        /* Clear the internal flags and cache the direction for later use */
        urb->transfer_flags &= ~(URB_DIR_MASK | URB_DMA_MAP_SINGLE |
                        URB_DMA_MAP_PAGE | URB_DMA_MAP_SG | URB_MAP_LOCAL |
                        URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL |
                        URB_DMA_SG_COMBINED);
        urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN);

        if (xfertype != USB_ENDPOINT_XFER_CONTROL &&
                        dev->state < USB_STATE_CONFIGURED)
                return -ENODEV;

        max = usb_endpoint_maxp(&ep->desc);
        if (max <= 0) {
                dev_dbg(&dev->dev,
                        "bogus endpoint ep%d%s in %s (bad maxpacket %d)\n",
                        usb_endpoint_num(&ep->desc), is_out ? "out" : "in",
                        __func__, max);
                return -EMSGSIZE;
        }

        /* periodic transfers limit size per frame/uframe,
         * but drivers only control those sizes for ISO.
         * while we're checking, initialize return status.
         */
        if (xfertype == USB_ENDPOINT_XFER_ISOC) {
                int n, len;

                /* SuperSpeed isoc endpoints have up to 16 bursts of up to
                 * 3 packets each
                 */
                if (dev->speed == USB_SPEED_SUPER) {
                        int burst = 1 + ep->ss_ep_comp.bMaxBurst;
                        int mult = USB_SS_MULT(ep->ss_ep_comp.bmAttributes);
                        max *= burst;
                        max *= mult;
                }

                /* "high bandwidth" mode, 1-3 packets/uframe? */
                if (dev->speed == USB_SPEED_HIGH) {
                        int mult = 1 + ((max >> 11) & 0x03);
                        max &= 0x07ff;
                        max *= mult;
                }

                if (urb->number_of_packets <= 0)
                        return -EINVAL;
                for (n = 0; n < urb->number_of_packets; n++) {
                        len = urb->iso_frame_desc[n].length;
                        if (len < 0 || len > max)
                                return -EMSGSIZE;
                        urb->iso_frame_desc[n].status = -EXDEV;
                        urb->iso_frame_desc[n].actual_length = 0;
                }
        }

        /* the I/O buffer must be mapped/unmapped, except when length=0 */
        if (urb->transfer_buffer_length > INT_MAX)
                return -EMSGSIZE;

#ifdef DEBUG
        /* stuff that drivers shouldn't do, but which shouldn't
         * cause problems in HCDs if they get it wrong.
         */
        {
        unsigned int allowed;
        static int pipetypes[4] = {
                PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
        };

        /* Check that the pipe's type matches the endpoint's type */
        if (usb_pipetype(urb->pipe) != pipetypes[xfertype])
                dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n",
                        usb_pipetype(urb->pipe), pipetypes[xfertype]);

        /* Check against a simple/standard policy */
        allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT | URB_DIR_MASK |
                        URB_FREE_BUFFER);
        switch (xfertype) {
        case USB_ENDPOINT_XFER_BULK:
                if (is_out)
                        allowed |= URB_ZERO_PACKET;
                /* FALLTHROUGH */
        case USB_ENDPOINT_XFER_CONTROL:
                allowed |= URB_NO_FSBR;         /* only affects UHCI */
                /* FALLTHROUGH */
        default:                                /* all non-iso endpoints */
                if (!is_out)
                        allowed |= URB_SHORT_NOT_OK;
                break;
        case USB_ENDPOINT_XFER_ISOC:
                allowed |= URB_ISO_ASAP;
                break;
        }
        allowed &= urb->transfer_flags;

        /* warn if submitter gave bogus flags */
        if (allowed != urb->transfer_flags)
                dev_WARN(&dev->dev, "BOGUS urb flags, %x --> %x\n",
                        urb->transfer_flags, allowed);
        }
#endif
        /*
         * Force periodic transfer intervals to be legal values that are
         * a power of two (so HCDs don't need to).
         *
         * FIXME want bus->{intr,iso}_sched_horizon values here. Each HC
         * supports different values... this uses EHCI/UHCI defaults (and
         * EHCI can use smaller non-default values).
         */
        switch (xfertype) {
        case USB_ENDPOINT_XFER_ISOC:
        case USB_ENDPOINT_XFER_INT:
                /* too small? */
                switch (dev->speed) {
                case USB_SPEED_WIRELESS:
                        if (urb->interval < 6)
                                return -EINVAL;
                        break;
                default:
                        if (urb->interval <= 0)
                                return -EINVAL;
                        break;
                }
                /* too big? */
                switch (dev->speed) {
                case USB_SPEED_SUPER:   /* units are 125us */
                        /* Handle up to 2^(16-1) microframes */
                        if (urb->interval > (1 << 15))
                                return -EINVAL;
                        max = 1 << 15;
                        break;
                case USB_SPEED_WIRELESS:
                        if (urb->interval > 16)
                                return -EINVAL;
                        break;
                case USB_SPEED_HIGH:    /* units are microframes */
                        /* NOTE usb handles 2^15 */
                        if (urb->interval > (1024 * 8))
                                urb->interval = 1024 * 8;
                        max = 1024 * 8;
                        break;
                case USB_SPEED_FULL:    /* units are frames/msec */
                case USB_SPEED_LOW:
                        if (xfertype == USB_ENDPOINT_XFER_INT) {
                                if (urb->interval > 255)
                                        return -EINVAL;
                                /* NOTE ohci only handles up to 32 */
                                max = 128;
                        } else {
                                if (urb->interval > 1024)
                                        urb->interval = 1024;
                                /* NOTE usb and ohci handle up to 2^15 */
                                max = 1024;
                        }
                        break;
                default:
                        return -EINVAL;
                }
                if (dev->speed != USB_SPEED_WIRELESS) {
                        /* Round down to a power of 2, no more than max */
                        urb->interval = min(max, 1 << ilog2(urb->interval));
                }
        }

        return usb_hcd_submit_urb(urb, mem_flags);
}
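For illustration only, not from urb.c: a minimal sketch of how a driver could fill and submit a bulk OUT URB that passes the sanity checks above. usb_fill_bulk_urb(), usb_sndbulkpipe() and URB_FREE_BUFFER are existing kernel APIs; the my_* names and the endpoint address 0x01 are assumptions.

/*
 * Usage sketch, not part of urb.c: fill and submit a bulk OUT URB.
 * "my_*" names and endpoint address 0x01 are hypothetical.
 */
#include <linux/slab.h>
#include <linux/usb.h>

static void my_write_complete(struct urb *urb)
{
        if (urb->status)
                dev_dbg(&urb->dev->dev, "bulk write failed: %d\n", urb->status);
        /* transfer buffer is freed by urb_destroy() because URB_FREE_BUFFER
         * is set; this call just drops the last reference */
        usb_free_urb(urb);
}

static int my_bulk_write(struct usb_device *udev, const void *data, int len)
{
        struct urb *urb;
        void *buf;
        int ret;

        urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!urb)
                return -ENOMEM;
        buf = kmemdup(data, len, GFP_KERNEL);
        if (!buf) {
                usb_free_urb(urb);
                return -ENOMEM;
        }
        usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 0x01),
                          buf, len, my_write_complete, NULL);
        urb->transfer_flags |= URB_FREE_BUFFER; /* kfree(buf) in urb_destroy() */
        ret = usb_submit_urb(urb, GFP_KERNEL);
        if (ret)
                usb_free_urb(urb);      /* also frees buf via URB_FREE_BUFFER */
        return ret;
}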

/*-------------------------------------------------------------------*/

int usb_unlink_urb(struct urb *urb)
{
        if (!urb)
                return -EINVAL;
        if (!urb->dev)
                return -ENODEV;
        if (!urb->ep)
                return -EIDRM;
        return usb_hcd_unlink_urb(urb, -ECONNRESET);
}
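A sketch, not from urb.c, of the asynchronous abort path: usb_unlink_urb() may be called where sleeping is not allowed, for example from a timer, and the unlinked URB completes later with status -ECONNRESET. The my_dev structure is hypothetical; the timer callback uses the 3.7-era unsigned long argument convention.

/*
 * Usage sketch, not part of urb.c: asynchronous abort from atomic context.
 * "my_dev" and its timeout timer are hypothetical.
 */
#include <linux/timer.h>
#include <linux/usb.h>

struct my_dev {
        struct urb *read_urb;
        struct timer_list timeout;
};

static void my_read_timeout(unsigned long data)
{
        struct my_dev *dev = (struct my_dev *)data;

        /* sleeping is not allowed here, so use the asynchronous unlink;
         * the URB's completion handler will run later with -ECONNRESET */
        usb_unlink_urb(dev->read_urb);
}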

void usb_kill_urb(struct urb *urb)
{
        might_sleep();
        if (!(urb && urb->dev && urb->ep))
                return;
        atomic_inc(&urb->reject);

        usb_hcd_unlink_urb(urb, -ENOENT);
        wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);

        atomic_dec(&urb->reject);
}

void usb_poison_urb(struct urb *urb)
{
        might_sleep();
        if (!(urb && urb->dev && urb->ep))
                return;
        atomic_inc(&urb->reject);

        usb_hcd_unlink_urb(urb, -ENOENT);
        wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
}

void usb_unpoison_urb(struct urb *urb)
{
        if (!urb)
                return;

        atomic_dec(&urb->reject);
}

void usb_block_urb(struct urb *urb)
{
        if (!urb)
                return;

        atomic_inc(&urb->reject);
}
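For illustration, not from urb.c: usb_kill_urb() is the usual choice in close() or disconnect() paths, while usb_poison_urb() additionally keeps urb->reject elevated so that further submissions of that URB fail (with -EPERM) until usb_unpoison_urb() is called; both may sleep. The my_dev names are hypothetical.

/*
 * Usage sketch, not part of urb.c: stopping an interrupt URB.
 * "my_dev" is hypothetical.
 */
#include <linux/usb.h>

struct my_dev {
        struct urb *int_urb;
};

static void my_dev_stop(struct my_dev *dev)
{
        /* may sleep: returns only after the completion handler has run
         * and the HCD no longer touches the URB */
        usb_kill_urb(dev->int_urb);
}

static void my_dev_suspend_io(struct my_dev *dev)
{
        /* like usb_kill_urb(), but resubmission keeps failing until
         * usb_unpoison_urb() below re-enables it */
        usb_poison_urb(dev->int_urb);
}

static void my_dev_resume_io(struct my_dev *dev)
{
        usb_unpoison_urb(dev->int_urb);
}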

void usb_kill_anchored_urbs(struct usb_anchor *anchor)
{
        struct urb *victim;

        spin_lock_irq(&anchor->lock);
        while (!list_empty(&anchor->urb_list)) {
                victim = list_entry(anchor->urb_list.prev, struct urb,
                                    anchor_list);
                /* we must make sure the URB isn't freed before we kill it */
                usb_get_urb(victim);
                spin_unlock_irq(&anchor->lock);
                /* this will unanchor the URB */
                usb_kill_urb(victim);
                usb_put_urb(victim);
                spin_lock_irq(&anchor->lock);
        }
        spin_unlock_irq(&anchor->lock);
}


void usb_poison_anchored_urbs(struct usb_anchor *anchor)
{
        struct urb *victim;

        spin_lock_irq(&anchor->lock);
        anchor->poisoned = 1;
        while (!list_empty(&anchor->urb_list)) {
                victim = list_entry(anchor->urb_list.prev, struct urb,
                                    anchor_list);
                /* we must make sure the URB isn't freed before we kill it */
                usb_get_urb(victim);
                spin_unlock_irq(&anchor->lock);
                /* this will unanchor the URB */
                usb_poison_urb(victim);
                usb_put_urb(victim);
                spin_lock_irq(&anchor->lock);
        }
        spin_unlock_irq(&anchor->lock);
}

void usb_unpoison_anchored_urbs(struct usb_anchor *anchor)
{
        unsigned long flags;
        struct urb *lazarus;

        spin_lock_irqsave(&anchor->lock, flags);
        list_for_each_entry(lazarus, &anchor->urb_list, anchor_list) {
                usb_unpoison_urb(lazarus);
        }
        anchor->poisoned = 0;
        spin_unlock_irqrestore(&anchor->lock, flags);
}

void usb_unlink_anchored_urbs(struct usb_anchor *anchor)
{
        struct urb *victim;

        while ((victim = usb_get_from_anchor(anchor)) != NULL) {
                usb_unlink_urb(victim);
                usb_put_urb(victim);
        }
}

int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
                                  unsigned int timeout)
{
        return wait_event_timeout(anchor->wait, list_empty(&anchor->urb_list),
                                  msecs_to_jiffies(timeout));
}
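A sketch, not from urb.c, that ties the anchor helpers together for a disconnect path: either kill everything synchronously, or unlink asynchronously and wait a bounded time for the anchor to drain. The my_dev structure and the 1000 ms timeout are hypothetical choices.

/*
 * Usage sketch, not part of urb.c: draining anchored URBs on disconnect.
 * "my_dev" and the 1000 ms timeout are hypothetical.
 */
#include <linux/usb.h>

struct my_dev {
        struct usb_anchor submitted;
};

static void my_dev_disconnect(struct my_dev *dev)
{
        /* synchronous: returns once no anchored URB is in flight */
        usb_kill_anchored_urbs(&dev->submitted);
}

static void my_dev_soft_stop(struct my_dev *dev)
{
        /* asynchronous variant: request unlinking, then give the
         * completion handlers up to a second before forcing the issue */
        usb_unlink_anchored_urbs(&dev->submitted);
        if (!usb_wait_anchor_empty_timeout(&dev->submitted, 1000))
                usb_kill_anchored_urbs(&dev->submitted);
}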

struct urb *usb_get_from_anchor(struct usb_anchor *anchor)
{
        struct urb *victim;
        unsigned long flags;

        spin_lock_irqsave(&anchor->lock, flags);
        if (!list_empty(&anchor->urb_list)) {
                victim = list_entry(anchor->urb_list.next, struct urb,
                                    anchor_list);
                usb_get_urb(victim);
                __usb_unanchor_urb(victim, anchor);
        } else {
                victim = NULL;
        }
        spin_unlock_irqrestore(&anchor->lock, flags);

        return victim;
}


void usb_scuttle_anchored_urbs(struct usb_anchor *anchor)
{
        struct urb *victim;
        unsigned long flags;

        spin_lock_irqsave(&anchor->lock, flags);
        while (!list_empty(&anchor->urb_list)) {
                victim = list_entry(anchor->urb_list.prev, struct urb,
                                    anchor_list);
                __usb_unanchor_urb(victim, anchor);
        }
        spin_unlock_irqrestore(&anchor->lock, flags);
}


int usb_anchor_empty(struct usb_anchor *anchor)
{
        return list_empty(&anchor->urb_list);
}