25 #include <linux/slab.h>
/* Final (handshake) stage of a USB control transfer; the matching
 * SETUP/DATA stage values are elided from this chunk -- TODO confirm. */
36 #define STATUS_STAGE 2
/* NOTE(review): members of an endpoint bookkeeping structure whose
 * opening/closing braces lie outside this chunk (presumably the ep_data
 * struct dereferenced by td_udev() below -- verify against full file). */
46 struct usb_host_endpoint *
hep;	/* USB-core endpoint this entry shadows */
47 struct usb_device *
dev;	/* device owning the endpoint; read via td_udev() */
/* Transfer-type codes; values 0 and 2 (presumably control and bulk) are
 * elided from this chunk.  Presumably stored in the TX_TYPE field of the
 * TD retry/control byte (TD_RETRYCNTMASK_TX_TYPE) -- TODO confirm. */
67 #define TT_ISOCHRONOUS 1
69 #define TT_INTERRUPT 3
/* Walk from a TD to the usb_device owning its endpoint. */
96 #define td_udev(td) ((td)->ep_data->dev)

/* Byte offsets and bit masks within the on-chip transfer descriptor. */
100 #define TD_PIDEP_OFFSET 0x04	/* combined PID/endpoint byte */
101 #define TD_PIDEPMASK_PID 0xF0	/* token PID, high nibble (cf. TD_PID_IN) */
102 #define TD_PIDEPMASK_EP 0x0F	/* endpoint number, low nibble */
103 #define TD_PORTLENMASK_DL 0x02FF	/* data length field (see td_length) */
104 #define TD_PORTLENMASK_PN 0xC000	/* port number field (written as port << 14) */
/* Status byte of the TD and its individual result/error bits. */
106 #define TD_STATUS_OFFSET 0x07
107 #define TD_STATUSMASK_ACK 0x01	/* transfer was acknowledged */
108 #define TD_STATUSMASK_ERR 0x02
109 #define TD_STATUSMASK_TMOUT 0x04
110 #define TD_STATUSMASK_SEQ 0x08	/* data-toggle sequence bit (see td_sequence_ok) */
111 #define TD_STATUSMASK_SETUP 0x10
112 #define TD_STATUSMASK_OVF 0x20
113 #define TD_STATUSMASK_NAK 0x40
114 #define TD_STATUSMASK_STALL 0x80
/* NOTE(review): the continuation line(s) of TD_ERROR_MASK are elided from
 * this chunk; only the ERR and TMOUT terms are visible here. */
116 #define TD_ERROR_MASK (TD_STATUSMASK_ERR | TD_STATUSMASK_TMOUT | \
/* Retry/control byte of the TD. */
119 #define TD_RETRYCNT_OFFSET 0x08
120 #define TD_RETRYCNTMASK_ACT_FLG 0x10	/* TD still active (see td_active) */
121 #define TD_RETRYCNTMASK_TX_TYPE 0x0C	/* transfer type field */
122 #define TD_RETRYCNTMASK_RTY_CNT 0x03	/* remaining retry count */

124 #define TD_RESIDUE_OVERFLOW 0x80	/* sign bit of the (signed) residue byte;
					 * presumably flags residue overflow -- confirm */

/* IN token PID, already shifted into the high nibble (TD_PIDEPMASK_PID). */
126 #define TD_PID_IN 0x90
/* Accessors for TD fields; multi-byte fields are little-endian on the chip. */
129 #define td_residue(td) ((__s8)(td->residue))	/* signed residue (unused byte count) */
130 #define td_ly_base_addr(td) (__le16_to_cpu((td)->ly_base_addr))
131 #define td_port_length(td) (__le16_to_cpu((td)->port_length))
132 #define td_next_td_addr(td) (__le16_to_cpu((td)->next_td_addr))

/* Nonzero while the chip has not yet completed this TD. */
134 #define td_active(td) ((td)->retry_cnt & TD_RETRYCNTMASK_ACT_FLG)
/* Requested data length, extracted from the combined port/length word. */
135 #define td_length(td) (td_port_length(td) & TD_PORTLENMASK_DL)

/* Toggle-sequence check: the SEQ status bit must agree with the SEQ_SEL
 * bit programmed in the control register; a zero status also counts OK. */
137 #define td_sequence_ok(td) (!td->status || \
138 (!(td->status & TD_STATUSMASK_SEQ) == \
139 !(td->ctrl_reg & SEQ_SEL)))

/* TD acknowledged: either no status was reported, or the ACK bit is set. */
141 #define td_acked(td) (!td->status || \
142 (td->status & TD_STATUSMASK_ACK))
143 #define td_actual_bytes(td) (td_length(td) - td_residue(td))	/* bytes actually transferred */
158 dev_dbg(dev,
"endpoint: %4d\n", usb_pipeendpoint(td->
pipe));
159 dev_dbg(dev,
"pipeout: %4d\n", usb_pipeout(td->
pipe));
183 static inline u16 c67x00_get_current_frame_number(
struct c67x00_hcd *c67x00)
200 static inline int frame_after(
u16 a,
u16 b)
203 (HOST_FRAME_MASK / 2);
209 static inline int frame_after_eq(
u16 a,
u16 b)
212 (HOST_FRAME_MASK / 2);
231 if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
257 struct usb_host_endpoint *
hep = urb->ep;
261 c67x00->current_frame = c67x00_get_current_frame_number(c67x00);
265 ep_data = hep->hcpriv;
266 if (frame_after(c67x00->current_frame, ep_data->
next_frame))
268 frame_add(c67x00->current_frame, 1);
273 ep_data = kzalloc(
sizeof(*ep_data),
GFP_ATOMIC);
277 INIT_LIST_HEAD(&ep_data->
queue);
278 INIT_LIST_HEAD(&ep_data->
node);
287 ep_data->
next_frame = frame_add(c67x00->current_frame, 1);
291 type = usb_pipetype(urb->pipe);
292 if (list_empty(&ep_data->
node)) {
293 list_add(&ep_data->
node, &c67x00->list[type]);
298 if (prev->
hep->desc.bEndpointAddress >
299 hep->desc.bEndpointAddress) {
300 list_add(&ep_data->
node, prev->
node.prev);
309 static int c67x00_ep_data_free(
struct usb_host_endpoint *hep)
316 if (!list_empty(&ep_data->
queue))
331 struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
334 if (!list_empty(&ep->urb_list))
340 while (c67x00_ep_data_free(ep)) {
342 spin_unlock_irqrestore(&c67x00->
lock, flags);
354 spin_unlock_irqrestore(&c67x00->
lock, flags);
359 static inline int get_root_port(
struct usb_device *
dev)
361 while (dev->parent->parent)
367 struct urb *urb,
gfp_t mem_flags)
372 struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
373 int port = get_root_port(urb->dev)-1;
378 if (!HC_IS_RUNNING(hcd->state)) {
388 urbp = kzalloc(
sizeof(*urbp), mem_flags);
398 urbp->
ep_data = c67x00_ep_data_alloc(c67x00, urb);
410 urb->actual_length = 0;
412 switch (usb_pipetype(urb->pipe)) {
420 case PIPE_ISOCHRONOUS:
425 if (list_empty(&urbp->
ep_data->queue))
426 urb->start_frame = urbp->
ep_data->next_frame;
429 struct urb *last_urb;
435 frame_add(last_urb->start_frame,
436 last_urb->number_of_packets *
451 spin_unlock_irqrestore(&c67x00->
lock, flags);
460 spin_unlock_irqrestore(&c67x00->
lock, flags);
467 struct c67x00_hcd *c67x00 = hcd_to_c67x00_hcd(hcd);
476 c67x00_release_urb(c67x00, urb);
479 spin_unlock(&c67x00->
lock);
481 spin_lock(&c67x00->
lock);
483 spin_unlock_irqrestore(&c67x00->
lock, flags);
488 spin_unlock_irqrestore(&c67x00->
lock, flags);
510 c67x00_release_urb(c67x00, urb);
512 spin_unlock(&c67x00->
lock);
514 spin_lock(&c67x00->
lock);
519 static int c67x00_claim_frame_bw(
struct c67x00_hcd *c67x00,
struct urb *urb,
520 int len,
int periodic)
542 if (usb_pipein(urb->pipe))
543 bit_time = 80240 + 7578*len;
545 bit_time = 80260 + 7467*len;
548 if (usb_pipeisoc(urb->pipe))
549 bit_time = usb_pipein(urb->pipe) ? 9050 : 7840;
557 bit_time = ((bit_time+50) / 100) + 106;
587 static int c67x00_create_td(
struct c67x00_hcd *c67x00,
struct urb *urb,
589 unsigned long privdata)
593 const __u8 active_flag = 1, retry_cnt = 1;
597 if (c67x00_claim_frame_bw(c67x00, urb, len, usb_pipeisoc(urb->pipe)
598 || usb_pipeint(urb->pipe)))
605 td->
pipe = urb->pipe;
612 switch (usb_pipetype(td->
pipe)) {
613 case PIPE_ISOCHRONOUS:
640 (urbp->
port << 14) | (len & 0x3FF));
642 (usb_pipeendpoint(td->
pipe) & 0xF);
661 static inline void c67x00_release_td(
struct c67x00_td *td)
669 static int c67x00_add_data_urb(
struct c67x00_hcd *c67x00,
struct urb *urb)
678 toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe),
679 usb_pipeout(urb->pipe));
680 remaining = urb->transfer_buffer_length - urb->actual_length;
682 maxps = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
684 need_empty = (urb->transfer_flags & URB_ZERO_PACKET) &&
685 usb_pipeout(urb->pipe) && !(remaining % maxps);
687 while (remaining || need_empty) {
691 len = (remaining > maxps) ? maxps : remaining;
696 td_buf = urb->transfer_buffer + urb->transfer_buffer_length -
698 ret = c67x00_create_td(c67x00, urb, td_buf, len, pid, toggle,
705 if (usb_pipecontrol(urb->pipe))
715 static int c67x00_add_ctrl_urb(
struct c67x00_hcd *c67x00,
struct urb *urb)
720 switch (urb->interval) {
723 ret = c67x00_create_td(c67x00, urb, urb->setup_packet,
728 usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
729 usb_pipeout(urb->pipe), 1);
732 if (urb->transfer_buffer_length) {
733 ret = c67x00_add_data_urb(c67x00, urb);
740 ret = c67x00_create_td(c67x00, urb, NULL, 0, pid, 1,
753 static int c67x00_add_int_urb(
struct c67x00_hcd *c67x00,
struct urb *urb)
759 frame_add(urbp->
ep_data->next_frame, urb->interval);
760 return c67x00_add_data_urb(c67x00, urb);
765 static int c67x00_add_iso_urb(
struct c67x00_hcd *c67x00,
struct urb *urb)
773 BUG_ON(urbp->
cnt >= urb->number_of_packets);
775 td_buf = urb->transfer_buffer +
776 urb->iso_frame_desc[urbp->
cnt].offset;
777 len = urb->iso_frame_desc[urbp->
cnt].length;
780 ret = c67x00_create_td(c67x00, urb, td_buf, len, pid, 0,
784 urb->iso_frame_desc[urbp->
cnt].actual_length = 0;
785 urb->iso_frame_desc[urbp->
cnt].status =
ret;
786 if (urbp->
cnt + 1 == urb->number_of_packets)
787 c67x00_giveback_urb(c67x00, urb, 0);
791 frame_add(urbp->
ep_data->next_frame, urb->interval);
799 static void c67x00_fill_from_list(
struct c67x00_hcd *c67x00,
int type,
807 if (!list_empty(&ep_data->
queue)) {
818 static void c67x00_fill_frame(
struct c67x00_hcd *c67x00)
823 if (!list_empty(&c67x00->
td_list)) {
825 "TD list not empty! This should not happen!\n");
827 dbg_td(c67x00, td,
"Unprocessed td");
828 c67x00_release_td(td);
840 c67x00_fill_from_list(c67x00, PIPE_ISOCHRONOUS, c67x00_add_iso_urb);
841 c67x00_fill_from_list(c67x00, PIPE_INTERRUPT, c67x00_add_int_urb);
842 c67x00_fill_from_list(c67x00, PIPE_CONTROL, c67x00_add_ctrl_urb);
843 c67x00_fill_from_list(c67x00, PIPE_BULK, c67x00_add_data_urb);
865 dbg_td(c67x00, td,
"ERROR_FLAG");
873 dbg_td(c67x00, td,
"TIMEOUT");
880 static inline int c67x00_end_of_data(
struct c67x00_td *td)
882 int maxps, need_empty, remaining;
883 struct urb *urb = td->
urb;
896 remaining = urb->transfer_buffer_length - urb->actual_length;
897 need_empty = (urb->transfer_flags & URB_ZERO_PACKET) &&
898 usb_pipeout(urb->pipe) && !(remaining % maxps);
900 if (
unlikely(!remaining && !need_empty))
911 static inline void c67x00_clear_pipe(
struct c67x00_hcd *c67x00,
920 c67x00_release_td(td);
929 static void c67x00_handle_successful_td(
struct c67x00_hcd *c67x00,
932 struct urb *urb = td->
urb;
939 switch (usb_pipetype(td->
pipe)) {
945 urb->transfer_buffer_length ?
948 urb->actual_length = 0;
952 if (c67x00_end_of_data(td)) {
954 c67x00_clear_pipe(c67x00, td);
960 c67x00_giveback_urb(c67x00, urb, 0);
967 if (
unlikely(c67x00_end_of_data(td))) {
968 c67x00_clear_pipe(c67x00, td);
969 c67x00_giveback_urb(c67x00, urb, 0);
977 struct urb *urb = td->
urb;
991 urb->iso_frame_desc[
cnt].status = c67x00_td_to_error(c67x00, td);
992 if (cnt + 1 == urb->number_of_packets)
993 c67x00_giveback_urb(c67x00, urb, 0);
1002 static inline void c67x00_check_td_list(
struct c67x00_hcd *c67x00)
1011 c67x00_parse_td(c67x00, td);
1017 if (usb_pipeisoc(td->
pipe)) {
1019 c67x00_handle_isoc(c67x00, td);
1027 c67x00_giveback_urb(c67x00, urb,
1028 c67x00_td_to_error(c67x00, td));
1042 c67x00_giveback_urb(c67x00, urb, -
EOVERFLOW);
1048 c67x00_handle_successful_td(c67x00, td);
1052 c67x00_clear_pipe(c67x00, td);
1054 usb_settoggle(
td_udev(td), usb_pipeendpoint(td->
pipe),
1055 usb_pipeout(td->
pipe),
1059 c67x00_release_td(td);
1065 static inline int c67x00_all_tds_processed(
struct c67x00_hcd *c67x00)
1088 static void c67x00_send_frame(
struct c67x00_hcd *c67x00)
1092 if (list_empty(&c67x00->
td_list))
1094 "%s: td list should not be empty here!\n",
1101 c67x00_send_td(c67x00, td);
1112 static void c67x00_do_work(
struct c67x00_hcd *c67x00)
1114 spin_lock(&c67x00->
lock);
1116 if (!c67x00_all_tds_processed(c67x00))
1119 c67x00_check_td_list(c67x00);
1125 if (!list_empty(&c67x00->
td_list))
1128 c67x00->
current_frame = c67x00_get_current_frame_number(c67x00);
1139 c67x00_fill_frame(c67x00);
1140 if (!list_empty(&c67x00->
td_list))
1142 c67x00_send_frame(c67x00);
1145 spin_unlock(&c67x00->
lock);
1150 static void c67x00_sched_tasklet(
unsigned long __c67x00)
1153 c67x00_do_work(c67x00);
1158 tasklet_hi_schedule(&c67x00->
tasklet);
1164 (
unsigned long)c67x00);