#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/export.h>
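/*
 * Transfer lifecycle (roughly, per the code below): a URB is split into
 * wa_seg segments sized to the rpipe; each segment goes out as a
 * transfer-request URB plus, for outbound data, a DTO data URB; transfer
 * results come back on the DTI endpoint and are matched to their xfer by
 * ID; inbound data is then fetched with a BUF-IN URB; the original URB
 * is given back once every segment is accounted for.
 */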
static void wa_xfer_delayed_run(struct wa_rpipe *);
static void wa_seg_init(struct wa_seg *seg)
{
	kref_init(&seg->urb.kref);
}
	struct usb_host_endpoint *ep;		/* member of struct wa_xfer */
static inline void wa_xfer_init(struct wa_xfer *xfer)
static void wa_xfer_destroy(struct kref *_xfer)

	for (cnt = 0; cnt < xfer->segs; cnt++) {
		usb_put_urb(xfer->seg[cnt]->dto_urb);
		usb_put_urb(&xfer->seg[cnt]->urb);
static void wa_xfer_get(struct wa_xfer *xfer)
{
	kref_get(&xfer->refcnt);	/* body inferred: get counterpart of the put below */
}

static void wa_xfer_put(struct wa_xfer *xfer)
{
	kref_put(&xfer->refcnt, wa_xfer_destroy);
}
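/*
 * xfer lifetime is kref-based: wa_xfer_destroy() runs when the last
 * wa_xfer_put() drops the count, releasing the per-segment URBs.
 */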
static void wa_xfer_giveback(struct wa_xfer *xfer)

	spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags);
static void wa_xfer_completion(struct wa_xfer *xfer)

	rpipe_put(xfer->ep->hcpriv);
	wa_xfer_giveback(xfer);
static unsigned __wa_xfer_is_done(struct wa_xfer *xfer)

	unsigned found_short = 0;

	urb->actual_length = 0;
	for (cnt = 0; cnt < xfer->segs; cnt++) {

		if (found_short && seg->result > 0) {
			dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n",

		urb->actual_length += seg->result;
		    && cnt != xfer->segs - 1)
			dev_dbg(dev, "xfer %p#%u: DONE short %d result %zu urb->actual_length %d\n",

		dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n",

		dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n",
			xfer, seg->index, urb->status);
		xfer->result = urb->status;

		dev_warn(dev, "xfer %p#%u: is_done bad state %d\n",
static void wa_xfer_id_init(struct wa_xfer *xfer)
		/* presumably inside wa_xfer_get_by_id(): match by ID, take a ref */
		if (id == xfer_itr->id) {
			wa_xfer_get(xfer_itr);
static void __wa_xfer_abort_cb(struct urb *urb)

	usb_put_urb(&b->urb);
static void __wa_xfer_abort(struct wa_xfer *xfer)

	struct device *dev = &xfer->wa->usb_iface->dev;

	b->cmd.bLength = sizeof(b->cmd);
	b->cmd.wRPipe = rpipe->descr.wRPipeIndex;
	b->cmd.dwTransferID = wa_xfer_id(xfer);

	usb_fill_bulk_urb(&b->urb, xfer->wa->usb_dev,
		usb_sndbulkpipe(xfer->wa->usb_dev,
				xfer->wa->dto_epd->bEndpointAddress),
		&b->cmd, sizeof(b->cmd), __wa_xfer_abort_cb, b);
	if (printk_ratelimit())
		dev_err(dev, "xfer %p: Can't submit abort request: %d\n",
	/* __wa_xfer_setup_sizes(): compute seg_size and the segment count */
	struct device *dev = &xfer->wa->usb_iface->dev;

	struct urb *urb = xfer->urb;

	switch (rpipe->descr.bmAttribute & 0x3) {

		dev_err(dev, "FIXME: ISOC not implemented\n");

	xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;

		* 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);

		dev_err(dev, "HW BUG? seg_size %zu smaller than maxpktsize %zu\n",
			xfer->seg_size, maxpktsize);

	xfer->segs = (urb->transfer_buffer_length + xfer->seg_size - 1)

		dev_err(dev, "BUG? oops, number of segments %d bigger than %d\n",
			(int)(urb->transfer_buffer_length / xfer->seg_size),
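/*
 * Worked example of the sizing math above (values are illustrative, not
 * from the source): with wBlocks = 4 and bRPipeBlockSize = 7,
 * seg_size = 4 * (1 << 6) = 256 bytes; a 1000-byte URB then needs
 * (1000 + 256 - 1) / 256 = 4 segments, the last one short (232 bytes).
 */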
static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer,
				 struct wa_xfer_hdr *xfer_hdr0,	/* middle params inferred */
				 enum wa_xfer_type xfer_type,	/* from the call site below */
				 size_t xfer_hdr_size)

	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
	xfer_hdr0->bLength = xfer_hdr_size;
static void wa_seg_dto_cb(struct urb *urb)

	struct wa_seg *seg = urb->context;

	unsigned rpipe_ready = 0;

	switch (urb->status) {

		dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n",
			xfer, seg->index, urb->actual_length);

		seg->result = urb->actual_length;
		spin_unlock_irqrestore(&xfer->lock, flags);

		rpipe = xfer->ep->hcpriv;
		dev_dbg(dev, "xfer %p#%u: data out error %d\n",
			xfer, seg->index, urb->status);

			dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");

		seg->result = urb->status;

		__wa_xfer_abort(xfer);
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);

		spin_unlock_irqrestore(&xfer->lock, flags);

		wa_xfer_completion(xfer);

		wa_xfer_delayed_run(rpipe);
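/*
 * DTO completion: on success only the byte count is recorded; on error
 * the segment result is set to the URB status, the whole xfer is
 * aborted, the rpipe slot is returned, and completion plus delayed-run
 * are driven after dropping xfer->lock. The reset message above fires
 * when too many URB errors accumulate in a short time.
 */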
static void wa_seg_cb(struct urb *urb)

	struct wa_seg *seg = urb->context;

	unsigned rpipe_ready;

	switch (urb->status) {

		dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index);

		spin_unlock_irqrestore(&xfer->lock, flags);

		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: request error %d\n",
				xfer, seg->index, urb->status);

			dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");

		seg->result = urb->status;

		__wa_xfer_abort(xfer);
		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);

		wa_xfer_completion(xfer);

		wa_xfer_delayed_run(rpipe);
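/*
 * wa_seg_cb() is the transfer-request counterpart of wa_seg_dto_cb():
 * the same unlock-then-complete pattern and the same
 * abort-and-release-slot error path, applied to the header/request URB
 * instead of the data URB.
 */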
static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)

	size_t alloc_size = sizeof(*xfer->seg[0])
		- sizeof(xfer->seg[0]->xfer_hdr) + xfer_hdr_size;
	struct usb_device *usb_dev = xfer->wa->usb_dev;

	size_t buf_itr, buf_size, buf_itr_size;

		goto error_segs_kzalloc;

	buf_size = xfer->urb->transfer_buffer_length;
	for (cnt = 0; cnt < xfer->segs; cnt++) {

			goto error_seg_kzalloc;

		usb_fill_bulk_urb(&seg->urb, usb_dev,
			usb_sndbulkpipe(usb_dev,

		buf_itr_size = buf_size > xfer->seg_size ?
			xfer->seg_size : buf_size;	/* ternary completion inferred */

				goto error_dto_alloc;

			usb_sndbulkpipe(usb_dev,

			NULL, 0, wa_seg_dto_cb, seg);

			seg->dto_urb->transfer_dma =	/* LHS inferred from the non-DMA branch */
				xfer->urb->transfer_dma + buf_itr;
			seg->dto_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

			seg->dto_urb->transfer_buffer =
				xfer->urb->transfer_buffer + buf_itr;
			seg->dto_urb->transfer_buffer_length = buf_itr_size;

		buf_itr += buf_itr_size;
		buf_size -= buf_itr_size;

	for (; cnt > 0; cnt--) {
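/*
 * Note the allocation trick above: each wa_seg is allocated with
 * alloc_size = sizeof(*seg) - sizeof(seg->xfer_hdr) + xfer_hdr_size, so
 * the variable-size wire header lives inline at the tail of the struct.
 * Each segment's DTO URB then points buf_itr bytes into the caller's
 * buffer, in DMA or virtual-address form depending on
 * URB_NO_TRANSFER_DMA_MAP, and the trailing loop unwinds the segments
 * already built when an allocation fails.
 */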
static int __wa_xfer_setup(struct wa_xfer *xfer, struct urb *urb)

	struct device *dev = &xfer->wa->usb_iface->dev;

	result = __wa_xfer_setup_sizes(xfer, &xfer_type);

		goto error_setup_sizes;

	result = __wa_xfer_setup_segs(xfer, xfer_hdr_size);

		dev_err(dev, "xfer %p: Failed to allocate %d segments: %d\n",
			xfer, xfer->segs, result);
		goto error_setup_segs;

	xfer_hdr0 = &xfer->seg[0]->xfer_hdr;
	wa_xfer_id_init(xfer);
	__wa_xfer_setup_hdr0(xfer, xfer_hdr0, xfer_type, xfer_hdr_size);

	xfer_hdr = xfer_hdr0;
	transfer_size = urb->transfer_buffer_length;

	for (cnt = 1; cnt < xfer->segs; cnt++) {
		xfer_hdr = &xfer->seg[cnt]->xfer_hdr;
		memcpy(xfer_hdr, xfer_hdr0, xfer_hdr_size);
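/*
 * Header construction: the wire header is built once for segment 0 and
 * then memcpy()ed into segments 1..segs-1; presumably only the
 * per-segment fields need patching afterwards, which is cheaper than
 * rebuilding the header for every segment.
 */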
static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
			   struct wa_seg *seg)	/* third param inferred from call sites */

			xfer, seg->index, result);
		goto error_seg_submit;

			xfer, seg->index, result);
		goto error_dto_submit;

	rpipe_avail_dec(rpipe);
static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)

	struct device *dev = &rpipe->wa->usb_iface->dev;

		result = __wa_seg_submit(rpipe, xfer, seg);
		dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n",

		spin_unlock_irqrestore(&rpipe->seg_lock, flags);

			__wa_xfer_abort(xfer);

			spin_unlock_irqrestore(&xfer->lock, flags);

	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
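/*
 * wa_xfer_delayed_run() drains segments that were parked on
 * rpipe->seg_list when no rpipe slots were free, submitting them as
 * slots open up; rpipe->seg_lock guards the queue, xfer->lock the
 * per-transfer state.
 */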
static int __wa_xfer_submit(struct wa_xfer *xfer)

	struct wahc *wa = xfer->wa;

	for (cnt = 0; cnt < xfer->segs; cnt++) {

		empty = list_empty(&rpipe->seg_list);

		dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n",
			xfer, cnt, available, empty,
			available == 0 || !empty ? "delayed" : "submitted");
		if (available == 0 || !empty) {
			dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt);

		result = __wa_seg_submit(rpipe, xfer, seg);

			__wa_xfer_abort(xfer);
			goto error_seg_submit;

	spin_unlock_irqrestore(&rpipe->seg_lock, flags);
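/*
 * Ordering detail: a segment is delayed not only when no slots are
 * available but also when seg_list is non-empty, so segments already
 * queued cannot be overtaken by later ones.
 */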
static void wa_urb_enqueue_b(struct wa_xfer *xfer)

	struct urb *urb = xfer->urb;
	struct wahc *wa = xfer->wa;

		goto error_rpipe_get;

	if (urb->dev == NULL) {

	if (wusb_dev == NULL) {

		result = urb->status;

	result = __wa_xfer_setup(xfer, urb);

		goto error_xfer_setup;
	result = __wa_xfer_submit(xfer);

		goto error_xfer_submit;
	spin_unlock_irqrestore(&xfer->lock, flags);

	spin_unlock_irqrestore(&xfer->lock, flags);

	wusb_dev_put(wusb_dev);

	rpipe_put(xfer->ep->hcpriv);

	wa_xfer_giveback(xfer);

	done = __wa_xfer_is_done(xfer);

	spin_unlock_irqrestore(&xfer->lock, flags);

	wa_xfer_completion(xfer);
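/*
 * wa_urb_enqueue_b() is the tail of the enqueue path, called both
 * inline and from the deferred path below: grab the rpipe, check the
 * target device is still around, then setup and submit; each error
 * label unwinds exactly what succeeded before it (wusb_dev ref, rpipe
 * ref) and gives the URB back with its status.
 */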
		/* presumably in the deferred-enqueue work function: */
		wa_urb_enqueue_b(xfer);
int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,	/* head inferred */
		   struct urb *urb, gfp_t gfp)

	unsigned long my_flags;

	if (urb->transfer_buffer == NULL
	    && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
	    && urb->transfer_buffer_length != 0) {
		dev_err(dev, "BUG? urb %p: NULL xfer buffer & NODMA\n", urb);

	xfer = kzalloc(sizeof(*xfer), gfp);

		goto error_dequeued;

	xfer->wa = wa_get(wa);

	dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n",
		xfer, urb, urb->pipe, urb->transfer_buffer_length,
		urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma",
		urb->pipe & USB_DIR_IN ? "inbound" : "outbound",
		cant_sleep ? "deferred" : "inline");

		wa_urb_enqueue_b(xfer);
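/*
 * The "deferred" vs "inline" split logged above: when the caller cannot
 * sleep (cant_sleep), the transfer is queued and wa_urb_enqueue_b()
 * runs later from process context; otherwise it is called directly.
 */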
	/* presumably in wa_urb_dequeue(): cancel an in-flight transfer */
	unsigned rpipe_ready = 0;

	rpipe = xfer->ep->hcpriv;

		goto dequeue_delayed;

	__wa_xfer_abort(xfer);
	for (cnt = 0; cnt < xfer->segs; cnt++) {

			rpipe_ready = rpipe_avail_inc(rpipe);
			spin_unlock_irqrestore(&rpipe->seg_lock, flags2);

			rpipe_ready = rpipe_avail_inc(rpipe);

			rpipe_ready = rpipe_avail_inc(rpipe);

			rpipe_ready = rpipe_avail_inc(rpipe);

	xfer->result = urb->status;
	__wa_xfer_is_done(xfer);
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_completion(xfer);

	wa_xfer_delayed_run(rpipe);

	spin_unlock_irqrestore(&xfer->lock, flags);

dequeue_delayed:	/* label position inferred from the goto above */
	xfer->result = urb->status;
	spin_unlock_irqrestore(&xfer->lock, flags);
	wa_xfer_giveback(xfer);
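/*
 * Dequeue walks every segment and returns an rpipe slot for each one
 * that held one, hence the repeated rpipe_avail_inc() calls, one per
 * segment state that owned a slot; a transfer still sitting on the
 * delayed list short-circuits via dequeue_delayed and is given back
 * immediately.
 */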
static int wa_xfer_status_to_errno(u8 status)

	static int xlat[] = {

		       "Unknown WA transfer status 0x%02x\n",
		       __func__, real_status);

		       "Inconsistent WA status: 0x%02x\n",
		       __func__, real_status);
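/*
 * A minimal sketch of how such a status-to-errno translation table
 * typically reads; the entry names and errno values here are
 * illustrative assumptions, not the source's actual table:
 *
 *	static int xlat[] = {
 *		[WA_XFER_STATUS_SUCCESS] = 0,
 *		[WA_XFER_STATUS_HALTED]  = -EPIPE,
 *		[WA_XFER_STATUS_ABORTED] = -ENOENT,
 *	};
 *	return xlat[real_status];	(after masking the 0x80/0x40 flag bits
 *					 tested in wa_xfer_result_chew() below)
 */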
static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)

	unsigned long flags;

	unsigned rpipe_ready = 0;

	seg = xfer->seg[seg_idx];
	rpipe = xfer->ep->hcpriv;

	dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n",
		xfer, seg_idx, usb_status, seg->status);

		goto segment_aborted;

		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: Bad segment state %u\n",
				xfer, seg_idx, seg->status);

	if (usb_status & 0x80) {
		seg->result = wa_xfer_status_to_errno(usb_status);
		dev_err(dev, "DTI: xfer %p#%u failed (0x%02x)\n",
			xfer, seg->index, usb_status);
		goto error_complete;

	if (usb_status & 0x40)

			xfer->urb->transfer_dma
			|= URB_NO_TRANSFER_DMA_MAP;

			xfer->urb->transfer_buffer
			&= ~URB_NO_TRANSFER_DMA_MAP;

			goto error_submit_buf_in;

	rpipe_ready = rpipe_avail_inc(rpipe);
	done = __wa_xfer_is_done(xfer);

	spin_unlock_irqrestore(&xfer->lock, flags);

	wa_xfer_completion(xfer);

	wa_xfer_delayed_run(rpipe);

error_submit_buf_in:

		dev_err(dev, "DTI: URB max acceptable errors exceeded, resetting device\n");

	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: can't submit DTI data phase: %d\n",
			xfer, seg_idx, result);

	rpipe_ready = rpipe_avail_inc(rpipe);
	__wa_xfer_abort(xfer);
	done = __wa_xfer_is_done(xfer);
	spin_unlock_irqrestore(&xfer->lock, flags);

	wa_xfer_completion(xfer);

	wa_xfer_delayed_run(rpipe);

	spin_unlock_irqrestore(&xfer->lock, flags);

	if (printk_ratelimit())
		dev_err(dev, "xfer %p#%u: bad segment\n", xfer, seg_idx);

		dev_err(dev, "DTI: URB max acceptable errors exceeded, resetting device\n");

	spin_unlock_irqrestore(&xfer->lock, flags);
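/*
 * "Chewing" a result: look up the segment the DTI result refers to,
 * verify it is in a state that may legally receive a result, translate
 * the wire status (bit 0x80 = error) into an errno, and for inbound
 * data point the shared BUF-IN URB into the caller's buffer at the
 * segment's offset (DMA address or virtual pointer, matching
 * URB_NO_TRANSFER_DMA_MAP) before submitting it to fetch the payload.
 */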
static void wa_buf_in_cb(struct urb *urb)

	struct wa_seg *seg = urb->context;

	unsigned rpipe_ready;
	unsigned long flags;

	switch (urb->status) {

		rpipe = xfer->ep->hcpriv;
		dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
			xfer, seg->index, (size_t)urb->actual_length);

		seg->result = urb->actual_length;

		rpipe_ready = rpipe_avail_inc(rpipe);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);

		wa_xfer_completion(xfer);

		wa_xfer_delayed_run(rpipe);

		rpipe = xfer->ep->hcpriv;
		if (printk_ratelimit())
			dev_err(dev, "xfer %p#%u: data in error %d\n",
				xfer, seg->index, urb->status);

			dev_err(dev, "DTI: URB max acceptable errors exceeded, resetting device\n");

		seg->result = urb->status;

		rpipe_ready = rpipe_avail_inc(rpipe);
		__wa_xfer_abort(xfer);
		done = __wa_xfer_is_done(xfer);
		spin_unlock_irqrestore(&xfer->lock, flags);

		wa_xfer_completion(xfer);

		wa_xfer_delayed_run(rpipe);
static void wa_xfer_result_cb(struct urb *urb)

	struct wahc *wa = urb->context;

	switch (wa->dti_urb->status) {

		dev_dbg(dev, "DTI: xfer result %d bytes at %p\n",
			urb->actual_length, urb->transfer_buffer);
		if (wa->dti_urb->actual_length != sizeof(*xfer_result)) {
			dev_err(dev, "DTI Error: xfer result--bad size xfer result (%d bytes vs %zu needed)\n",
				urb->actual_length, sizeof(*xfer_result));

		if (xfer_result->hdr.bLength != sizeof(*xfer_result)) {
			dev_err(dev, "DTI Error: xfer result--bad header length %u\n",
				xfer_result->hdr.bLength);

			dev_err(dev, "DTI Error: xfer result--bad header type 0x%02x\n",
				xfer_result->hdr.bNotifyType);

		xfer = wa_xfer_get_by_id(wa, xfer_id);

			dev_err(dev, "DTI Error: xfer result--unknown xfer 0x%08x (status 0x%02x)\n",
				xfer_id, usb_status);

		wa_xfer_result_chew(wa, xfer);

		dev_dbg(dev, "DTI: going down! %d\n", urb->status);

		dev_err(dev, "DTI: URB max acceptable errors exceeded, resetting device\n");

		if (printk_ratelimit())
			dev_err(dev, "DTI: URB error %d\n", urb->status);

	dev_err(dev, "DTI Error: Could not submit DTI URB (%d), resetting\n", result);
	dev_err(dev, "BUG: DTI ep is %u, not %u (hack me)\n",

		dev_err(dev, "Can't allocate DTI URB\n");
		goto error_dti_urb_alloc;

			  wa_xfer_result_cb, wa);

		dev_err(dev, "Can't allocate BUF-IN URB\n");
		goto error_buf_in_urb_alloc;

			  NULL, 0, wa_buf_in_cb, wa);

		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), resetting\n", result);
		goto error_dti_urb_submit;

error_dti_urb_submit:

error_buf_in_urb_alloc:

error_dti_urb_alloc:
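/*
 * DTI setup error unwinding follows the usual kernel pattern: the
 * labels appear in reverse order of acquisition, so a failure at any
 * step falls through and releases only the resources obtained before
 * it (submit failure unwinds the BUF-IN URB, which unwinds the DTI
 * URB).
 */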