Linux Kernel 3.7.1
qset.c
/*
 * Wireless Host Controller (WHC) qset management.
 *
 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/uwb/umc.h>
#include <linux/usb.h>

#include "../../wusbcore/wusbhc.h"

#include "whcd.h"

struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags)
{
	struct whc_qset *qset;
	dma_addr_t dma;

	qset = dma_pool_alloc(whc->qset_pool, mem_flags, &dma);
	if (qset == NULL)
		return NULL;
	memset(qset, 0, sizeof(struct whc_qset));

	qset->qset_dma = dma;
	qset->whc = whc;

	INIT_LIST_HEAD(&qset->list_node);
	INIT_LIST_HEAD(&qset->stds);

	return qset;
}

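/**
 * qset_fill_qh - fill the static endpoint state in a qset's QHead
 * @whc:  the WHCI host controller
 * @qset: the qset whose QH needs initializing with static endpoint state
 * @urb:  an urb for a transfer to this endpoint
 */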
static void qset_fill_qh(struct whc *whc, struct whc_qset *qset, struct urb *urb)
{
	struct usb_device *usb_dev = urb->dev;
	struct wusb_dev *wusb_dev = usb_dev->wusb_dev;
	struct usb_wireless_ep_comp_descriptor *epcd;
	bool is_out;
	uint8_t phy_rate;

	is_out = usb_pipeout(urb->pipe);

	qset->max_packet = le16_to_cpu(urb->ep->desc.wMaxPacketSize);

	epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;
	if (epcd) {
		qset->max_seq = epcd->bMaxSequence;
		qset->max_burst = epcd->bMaxBurst;
	} else {
		qset->max_seq = 2;
		qset->max_burst = 1;
	}

	/*
	 * Initial PHY rate is 53.3 Mbit/s for control endpoints or
	 * the maximum supported by the device for other endpoints
	 * (unless limited by the user).
	 */
	if (usb_pipecontrol(urb->pipe))
		phy_rate = UWB_PHY_RATE_53;
	else {
		uint16_t phy_rates;

		phy_rates = le16_to_cpu(wusb_dev->wusb_cap_descr->wPHYRates);
		phy_rate = fls(phy_rates) - 1;
		if (phy_rate > whc->wusbhc.phy_rate)
			phy_rate = whc->wusbhc.phy_rate;
	}

	qset->qh.info1 = cpu_to_le32(
		QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
		| (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
		| usb_pipe_to_qh_type(urb->pipe)
		| QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
		| QH_INFO1_MAX_PKT_LEN(qset->max_packet)
		);
	qset->qh.info2 = cpu_to_le32(
		QH_INFO2_BURST(qset->max_burst)
		| QH_INFO2_DBP(0)
		| QH_INFO2_MAX_COUNT(3)
		| QH_INFO2_MAX_RETRY(3)
		| QH_INFO2_MAX_SEQ(qset->max_seq - 1)
		);
	/* FIXME: where can we obtain these Tx parameters from? Why
	 * doesn't the chip know what Tx power to use? It knows the Rx
	 * strength and can presumably guess the Tx power required
	 * from that? */
	qset->qh.info3 = cpu_to_le32(
		QH_INFO3_TX_RATE(phy_rate)
		| QH_INFO3_TX_PWR(0) /* 0 == max power */
		);

	qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
}

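/**
 * qset_clear - clear fields in a qset so it may be reinserted into a
 * schedule.
 *
 * The sequence number and current window are not cleared (see
 * qset_reset()).
 */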
void qset_clear(struct whc *whc, struct whc_qset *qset)
{
	qset->td_start = qset->td_end = qset->ntds = 0;

	qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
	qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
	qset->qh.err_count = 0;
	qset->qh.scratch[0] = 0;
	qset->qh.scratch[1] = 0;
	qset->qh.scratch[2] = 0;

	memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay));

	init_completion(&qset->remove_complete);
}

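/**
 * qset_reset - reset endpoint state in a qset.
 *
 * Clears the sequence number and current window.
 */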
void qset_reset(struct whc *whc, struct whc_qset *qset)
{
	qset->reset = 0;

	qset->qh.status &= ~QH_STATUS_SEQ_MASK;
	qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
}

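/**
 * get_qset - get the qset for an endpoint
 *
 * A new qset is created if one does not already exist.
 */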
struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
			  gfp_t mem_flags)
{
	struct whc_qset *qset;

	qset = urb->ep->hcpriv;
	if (qset == NULL) {
		qset = qset_alloc(whc, mem_flags);
		if (qset == NULL)
			return NULL;

		qset->ep = urb->ep;
		urb->ep->hcpriv = qset;
		qset_fill_qh(whc, qset, urb);
	}
	return qset;
}

void qset_remove_complete(struct whc *whc, struct whc_qset *qset)
{
	qset->remove = 0;
	list_del_init(&qset->list_node);
	complete(&qset->remove_complete);
}

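/**
 * qset_add_qtds - add qTDs for an URB to a qset
 *
 * Returns WHC_UPDATE_UPDATED if the ASL/PZL needs updating because a
 * newly activated qTD is the one currently pointed to by iCur.
 */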
enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset)
{
	struct whc_std *std;
	enum whc_update update = 0;

	list_for_each_entry(std, &qset->stds, list_node) {
		struct whc_qtd *qtd;
		uint32_t status;

		if (qset->ntds >= WHCI_QSET_TD_MAX
		    || (qset->pause_after_urb && std->urb != qset->pause_after_urb))
			break;

		if (std->qtd)
			continue; /* already has a qTD */

		qtd = std->qtd = &qset->qtd[qset->td_end];

		/* Fill in setup bytes for control transfers. */
		if (usb_pipecontrol(std->urb->pipe))
			memcpy(qtd->setup, std->urb->setup_packet, 8);

		status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len);

		if (whc_std_last(std) && usb_pipeout(std->urb->pipe))
			status |= QTD_STS_LAST_PKT;

		/*
		 * For an IN transfer the iAlt field should be set so
		 * the h/w will automatically advance to the next
		 * transfer. However, if there are 8 or more TDs
		 * remaining in this transfer then iAlt cannot be set
		 * as it could point to somewhere in this transfer.
		 */
		if (std->ntds_remaining < WHCI_QSET_TD_MAX) {
			int ialt;
			ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX;
			status |= QTD_STS_IALT(ialt);
		} else if (usb_pipein(std->urb->pipe))
			qset->pause_after_urb = std->urb;

		if (std->num_pointers)
			qtd->options = cpu_to_le32(QTD_OPT_IOC);
		else
			qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL);
		qtd->page_list_ptr = cpu_to_le64(std->dma_addr);

		qtd->status = cpu_to_le32(status);

		if (QH_STATUS_TO_ICUR(qset->qh.status) == qset->td_end)
			update = WHC_UPDATE_UPDATED;

		if (++qset->td_end >= WHCI_QSET_TD_MAX)
			qset->td_end = 0;
		qset->ntds++;
	}

	return update;
}

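/**
 * qset_remove_qtd - remove the first qTD from a qset.
 *
 * The qTD might still be active (if it is part of an IN URB that
 * resulted in a short read) so ensure it is deactivated.
 */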
static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
{
	qset->qtd[qset->td_start].status = 0;

	if (++qset->td_start >= WHCI_QSET_TD_MAX)
		qset->td_start = 0;
	qset->ntds--;
}

static void qset_copy_bounce_to_sg(struct whc *whc, struct whc_std *std)
{
	struct scatterlist *sg;
	void *bounce;
	size_t remaining, offset;

	bounce = std->bounce_buf;
	remaining = std->len;

	sg = std->bounce_sg;
	offset = std->bounce_offset;

	while (remaining) {
		size_t len;

		len = min(sg->length - offset, remaining);
		memcpy(sg_virt(sg) + offset, bounce, len);

		bounce += len;
		remaining -= len;

		offset += len;
		if (offset >= sg->length) {
			sg = sg_next(sg);
			offset = 0;
		}
	}
}

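/**
 * qset_free_std - remove an sTD and free it.
 * @whc: the WHCI host controller
 * @std: the sTD to remove and free.
 */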
void qset_free_std(struct whc *whc, struct whc_std *std)
{
	list_del(&std->list_node);
	if (std->bounce_buf) {
		bool is_out = usb_pipeout(std->urb->pipe);
		dma_addr_t dma_addr;

		if (std->num_pointers)
			dma_addr = le64_to_cpu(std->pl_virt[0].buf_ptr);
		else
			dma_addr = std->dma_addr;

		dma_unmap_single(whc->wusbhc.dev, dma_addr,
				 std->len, is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		if (!is_out)
			qset_copy_bounce_to_sg(whc, std);
		kfree(std->bounce_buf);
	}
	if (std->pl_virt) {
		if (std->dma_addr)
			dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
					 std->num_pointers * sizeof(struct whc_page_list_entry),
					 DMA_TO_DEVICE);
		kfree(std->pl_virt);
		std->pl_virt = NULL;
	}
	kfree(std);
}

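/**
 * qset_remove_qtds - remove an URB's qTDs (and sTDs).
 */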
static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset,
			     struct urb *urb)
{
	struct whc_std *std, *t;

	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
		if (std->urb != urb)
			break;
		if (std->qtd != NULL)
			qset_remove_qtd(whc, qset);
		qset_free_std(whc, std);
	}
}

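/**
 * qset_free_stds - free any remaining sTDs for an URB.
 */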
static void qset_free_stds(struct whc_qset *qset, struct urb *urb)
{
	struct whc_std *std, *t;

	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
		if (std->urb == urb)
			qset_free_std(qset->whc, std);
	}
}

static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags)
{
	dma_addr_t dma_addr = std->dma_addr;
	dma_addr_t sp, ep;
	size_t pl_len;
	int p;

	/* Short buffers don't need a page list. */
	if (std->len <= WHCI_PAGE_SIZE) {
		std->num_pointers = 0;
		return 0;
	}

	sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
	ep = dma_addr + std->len;
	std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);

	pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
	std->pl_virt = kmalloc(pl_len, mem_flags);
	if (std->pl_virt == NULL)
		return -ENOMEM;
	std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);

	for (p = 0; p < std->num_pointers; p++) {
		std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
		dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
	}

	return 0;
}

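/**
 * urb_dequeue_work - update the ASL/PZL and give the dequeued urb
 * back to the USB subsystem.
 */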
static void urb_dequeue_work(struct work_struct *work)
{
	struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work);
	struct whc_qset *qset = wurb->qset;
	struct whc *whc = qset->whc;
	unsigned long flags;

	if (wurb->is_async == true)
		asl_update(whc, WUSBCMD_ASYNC_UPDATED
			   | WUSBCMD_ASYNC_SYNCED_DB
			   | WUSBCMD_ASYNC_QSET_RM);
	else
		pzl_update(whc, WUSBCMD_PERIODIC_UPDATED
			   | WUSBCMD_PERIODIC_SYNCED_DB
			   | WUSBCMD_PERIODIC_QSET_RM);

	spin_lock_irqsave(&whc->lock, flags);
	qset_remove_urb(whc, qset, wurb->urb, wurb->status);
	spin_unlock_irqrestore(&whc->lock, flags);
}

static struct whc_std *qset_new_std(struct whc *whc, struct whc_qset *qset,
				    struct urb *urb, gfp_t mem_flags)
{
	struct whc_std *std;

	std = kzalloc(sizeof(struct whc_std), mem_flags);
	if (std == NULL)
		return NULL;

	std->urb = urb;
	std->qtd = NULL;

	INIT_LIST_HEAD(&std->list_node);
	list_add_tail(&std->list_node, &qset->stds);

	return std;
}

static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *urb,
			   gfp_t mem_flags)
{
	size_t remaining;
	struct scatterlist *sg;
	int i;
	int ntds = 0;
	struct whc_std *std = NULL;
	struct whc_page_list_entry *new_pl_virt;
	dma_addr_t prev_end = 0;
	size_t pl_len;
	int p = 0;

	remaining = urb->transfer_buffer_length;

	for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
		dma_addr_t dma_addr;
		size_t dma_remaining;
		dma_addr_t sp, ep;
		int num_pointers;

		if (remaining == 0) {
			break;
		}

		dma_addr = sg_dma_address(sg);
		dma_remaining = min_t(size_t, sg_dma_len(sg), remaining);

		while (dma_remaining) {
			size_t dma_len;

			/*
			 * We can use the previous std (if it exists) provided that:
			 * - the previous one ended on a page boundary.
			 * - the current one begins on a page boundary.
			 * - the previous one isn't full.
			 *
			 * If a new std is needed but the previous one
			 * was not a whole number of packets then this
			 * sg list cannot be mapped onto multiple
			 * qTDs. Return an error and let the caller
			 * sort it out.
			 */
			if (!std
			    || (prev_end & (WHCI_PAGE_SIZE-1))
			    || (dma_addr & (WHCI_PAGE_SIZE-1))
			    || std->len + WHCI_PAGE_SIZE > QTD_MAX_XFER_SIZE) {
				if (std && std->len % qset->max_packet != 0)
					return -EINVAL;
				std = qset_new_std(whc, qset, urb, mem_flags);
				if (std == NULL) {
					return -ENOMEM;
				}
				ntds++;
				p = 0;
			}

			dma_len = dma_remaining;

			/*
			 * If the remainder of this element doesn't
			 * fit in a single qTD, limit the qTD to a
			 * whole number of packets. This allows the
			 * remainder to go into the next qTD.
			 */
			if (std->len + dma_len > QTD_MAX_XFER_SIZE) {
				dma_len = (QTD_MAX_XFER_SIZE / qset->max_packet)
					* qset->max_packet - std->len;
			}

			std->len += dma_len;
			std->ntds_remaining = -1; /* filled in later */

			sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
			ep = dma_addr + dma_len;
			num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
			std->num_pointers += num_pointers;

			pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);

			new_pl_virt = krealloc(std->pl_virt, pl_len, mem_flags);
			if (new_pl_virt == NULL) {
				kfree(std->pl_virt);
				std->pl_virt = NULL;
				return -ENOMEM;
			}
			std->pl_virt = new_pl_virt;

			for (; p < std->num_pointers; p++) {
				std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
				dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
			}

			prev_end = dma_addr = ep;
			dma_remaining -= dma_len;
			remaining -= dma_len;
		}
	}

	/* Now the number of stds is known, go back and fill in
	   std->ntds_remaining. */
	list_for_each_entry(std, &qset->stds, list_node) {
		if (std->ntds_remaining == -1) {
			pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
			std->ntds_remaining = ntds--;
			std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt,
						       pl_len, DMA_TO_DEVICE);
		}
	}
	return 0;
}

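/**
 * qset_add_urb_sg_linearize - add an urb with sg list, copying the data
 *
 * If the URB contains an sg list whose elements cannot be directly
 * mapped to qTDs then the data must be transferred via bounce
 * buffers.
 */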
static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
				     struct urb *urb, gfp_t mem_flags)
{
	bool is_out = usb_pipeout(urb->pipe);
	size_t max_std_len;
	size_t remaining;
	int ntds = 0;
	struct whc_std *std = NULL;
	void *bounce = NULL;
	struct scatterlist *sg;
	int i;

	/* limit maximum bounce buffer to 16 * 3.5 KiB ~= 28 k */
	max_std_len = qset->max_burst * qset->max_packet;

	remaining = urb->transfer_buffer_length;

	for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
		size_t len;
		size_t sg_remaining;
		void *orig;

		if (remaining == 0) {
			break;
		}

		sg_remaining = min_t(size_t, remaining, sg->length);
		orig = sg_virt(sg);

		while (sg_remaining) {
			if (!std || std->len == max_std_len) {
				std = qset_new_std(whc, qset, urb, mem_flags);
				if (std == NULL)
					return -ENOMEM;
				std->bounce_buf = kmalloc(max_std_len, mem_flags);
				if (std->bounce_buf == NULL)
					return -ENOMEM;
				std->bounce_sg = sg;
				std->bounce_offset = orig - sg_virt(sg);
				bounce = std->bounce_buf;
				ntds++;
			}

			len = min(sg_remaining, max_std_len - std->len);

			if (is_out)
				memcpy(bounce, orig, len);

			std->len += len;
			std->ntds_remaining = -1; /* filled in later */

			bounce += len;
			orig += len;
			sg_remaining -= len;
			remaining -= len;
		}
	}

	/*
	 * For each of the new sTDs, map the bounce buffers, create
	 * page lists (if necessary), and fill in std->ntds_remaining.
	 */
	list_for_each_entry(std, &qset->stds, list_node) {
		if (std->ntds_remaining != -1)
			continue;

		std->dma_addr = dma_map_single(&whc->umc->dev, std->bounce_buf, std->len,
					       is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

		if (qset_fill_page_list(whc, std, mem_flags) < 0)
			return -ENOMEM;

		std->ntds_remaining = ntds--;
	}

	return 0;
}

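/**
 * qset_add_urb - add an urb to the qset's queue.
 *
 * The URB is chopped into sTDs, one for each qTD that will be
 * required. At least one qTD (and sTD) is required even if the
 * transfer has no data (e.g., for some control transfers).
 */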
int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
		 gfp_t mem_flags)
{
	struct whc_urb *wurb;
	int remaining = urb->transfer_buffer_length;
	u64 transfer_dma = urb->transfer_dma;
	int ntds_remaining;
	int ret;

	wurb = kzalloc(sizeof(struct whc_urb), mem_flags);
	if (wurb == NULL)
		goto err_no_mem;
	urb->hcpriv = wurb;
	wurb->qset = qset;
	wurb->urb = urb;
	INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);

	if (urb->num_sgs) {
		ret = qset_add_urb_sg(whc, qset, urb, mem_flags);
		if (ret == -EINVAL) {
			qset_free_stds(qset, urb);
			ret = qset_add_urb_sg_linearize(whc, qset, urb, mem_flags);
		}
		if (ret < 0)
			goto err_no_mem;
		return 0;
	}

	ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
	if (ntds_remaining == 0)
		ntds_remaining = 1;

	while (ntds_remaining) {
		struct whc_std *std;
		size_t std_len;

		std_len = remaining;
		if (std_len > QTD_MAX_XFER_SIZE)
			std_len = QTD_MAX_XFER_SIZE;

		std = qset_new_std(whc, qset, urb, mem_flags);
		if (std == NULL)
			goto err_no_mem;

		std->dma_addr = transfer_dma;
		std->len = std_len;
		std->ntds_remaining = ntds_remaining;

		if (qset_fill_page_list(whc, std, mem_flags) < 0)
			goto err_no_mem;

		ntds_remaining--;
		remaining -= std_len;
		transfer_dma += std_len;
	}

	return 0;

err_no_mem:
	qset_free_stds(qset, urb);
	return -ENOMEM;
}

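/**
 * qset_remove_urb - remove an URB from the urb queue.
 *
 * The URB is returned to the USB subsystem.
 */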
void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
		     struct urb *urb, int status)
{
	struct wusbhc *wusbhc = &whc->wusbhc;
	struct whc_urb *wurb = urb->hcpriv;

	usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb);
	/* Drop the lock as urb->complete() may enqueue another urb. */
	spin_unlock(&whc->lock);
	wusbhc_giveback_urb(wusbhc, urb, status);
	spin_lock(&whc->lock);

	kfree(wurb);
}

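/**
 * get_urb_status_from_qtd - get the completed urb status from qTD status
 * @urb:    completed urb
 * @status: qTD status
 */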
static int get_urb_status_from_qtd(struct urb *urb, u32 status)
{
	if (status & QTD_STS_HALTED) {
		if (status & QTD_STS_DBE)
			return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM;
		else if (status & QTD_STS_BABBLE)
			return -EOVERFLOW;
		else if (status & QTD_STS_RCE)
			return -ETIME;
		return -EPIPE;
	}
	if (usb_pipein(urb->pipe)
	    && (urb->transfer_flags & URB_SHORT_NOT_OK)
	    && urb->actual_length < urb->transfer_buffer_length)
		return -EREMOTEIO;
	return 0;
}

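/**
 * process_inactive_qtd - process an inactive (but not halted) qTD.
 *
 * Update the urb with the transferred data and, if the urb is
 * complete, return it to the USB subsystem.
 */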
void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
			  struct whc_qtd *qtd)
{
	struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
	struct urb *urb = std->urb;
	uint32_t status;
	bool complete;

	status = le32_to_cpu(qtd->status);

	urb->actual_length += std->len - QTD_STS_TO_LEN(status);

	if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT))
		complete = true;
	else
		complete = whc_std_last(std);

	qset_remove_qtd(whc, qset);
	qset_free_std(whc, std);

	/*
	 * Transfers for this URB are complete? Then return it to the
	 * USB subsystem.
	 */
	if (complete) {
		qset_remove_qtds(whc, qset, urb);
		qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status));

		/*
		 * If iAlt isn't valid then the hardware didn't
		 * advance iCur. Adjust the start and end pointers to
		 * match iCur.
		 */
		if (!(status & QTD_STS_IALT_VALID))
			qset->td_start = qset->td_end
				= QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status));
		qset->pause_after_urb = NULL;
	}
}

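/**
 * process_halted_qtd - process a halted qTD.
 *
 * Remove all the qTDs for the failed URB and return the failed URB to
 * the USB subsystem. Then remove all other qTDs so the qset can be
 * removed.
 */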
void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
			struct whc_qtd *qtd)
{
	struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
	struct urb *urb = std->urb;
	int urb_status;

	urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status));

	qset_remove_qtds(whc, qset, urb);
	qset_remove_urb(whc, qset, urb, urb_status);

	list_for_each_entry(std, &qset->stds, list_node) {
		if (qset->ntds == 0)
			break;
		qset_remove_qtd(whc, qset);
		std->qtd = NULL;
	}

	qset->remove = 1;
}

void qset_free(struct whc *whc, struct whc_qset *qset)
{
	dma_pool_free(whc->qset_pool, qset, qset->qset_dma);
}

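/**
 * qset_delete - wait for a qset to be unused, then free it.
 */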
void qset_delete(struct whc *whc, struct whc_qset *qset)
{
	wait_for_completion(&qset->remove_complete);
	qset_free(whc, qset);
}