Linux Kernel  3.7.1
htc_pipe.c
1 /*
2  * Copyright (c) 2007-2011 Atheros Communications Inc.
3  *
4  * Permission to use, copy, modify, and/or distribute this software for any
5  * purpose with or without fee is hereby granted, provided that the above
6  * copyright notice and this permission notice appear in all copies.
7  *
8  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15  */
16 
17 #include "core.h"
18 #include "debug.h"
19 #include "hif-ops.h"
20 
21 #define HTC_PACKET_CONTAINER_ALLOCATION 32
22 #define HTC_CONTROL_BUFFER_SIZE (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH)
23 
24 static int ath6kl_htc_pipe_tx(struct htc_target *handle,
25  struct htc_packet *packet);
26 static void ath6kl_htc_pipe_cleanup(struct htc_target *handle);
27 
28 /* htc pipe tx path */
29 static inline void restore_tx_packet(struct htc_packet *packet)
30 {
31  if (packet->info.tx.flags & HTC_FLAGS_TX_FIXUP_NETBUF) {
32  skb_pull(packet->skb, sizeof(struct htc_frame_hdr));
33  packet->info.tx.flags &= ~HTC_FLAGS_TX_FIXUP_NETBUF;
34  }
35 }
36 
37 static void do_send_completion(struct htc_endpoint *ep,
38  struct list_head *queue_to_indicate)
39 {
40  struct htc_packet *packet;
41 
42  if (list_empty(queue_to_indicate)) {
43  /* nothing to indicate */
44  return;
45  }
46 
47  if (ep->ep_cb.tx_comp_multi != NULL) {
48  ath6kl_dbg(ATH6KL_DBG_HTC,
49  "%s: calling ep %d, send complete multiple callback (%d pkts)\n",
50  __func__, ep->eid,
51  get_queue_depth(queue_to_indicate));
52  /*
53  * a multiple send complete handler is being used,
54  * pass the queue to the handler
55  */
56  ep->ep_cb.tx_comp_multi(ep->target, queue_to_indicate);
57  /*
58  * all packets are now owned by the callback,
59  * reset queue to be safe
60  */
61  INIT_LIST_HEAD(queue_to_indicate);
62  } else {
63  /* using legacy EpTxComplete */
64  do {
65  packet = list_first_entry(queue_to_indicate,
66  struct htc_packet, list);
67 
68  list_del(&packet->list);
69  ath6kl_dbg(ATH6KL_DBG_HTC,
70  "%s: calling ep %d send complete callback on packet 0x%p\n",
71  __func__, ep->eid, packet);
72  ep->ep_cb.tx_complete(ep->target, packet);
73  } while (!list_empty(queue_to_indicate));
74  }
75 }
76 
77 static void send_packet_completion(struct htc_target *target,
78  struct htc_packet *packet)
79 {
80  struct htc_endpoint *ep = &target->endpoint[packet->endpoint];
81  struct list_head container;
82 
83  restore_tx_packet(packet);
84  INIT_LIST_HEAD(&container);
85  list_add_tail(&packet->list, &container);
86 
87  /* do completion */
88  do_send_completion(ep, &container);
89 }
90 
91 static void get_htc_packet_credit_based(struct htc_target *target,
92  struct htc_endpoint *ep,
93  struct list_head *queue)
94 {
95  int credits_required;
96  int remainder;
97  u8 send_flags;
98  struct htc_packet *packet;
99  unsigned int transfer_len;
100 
101  /* NOTE : the TX lock is held when this function is called */
102 
103  /* loop until we can grab as many packets out of the queue as we can */
104  while (true) {
105  send_flags = 0;
106  if (list_empty(&ep->txq))
107  break;
108 
109  /* get packet at head, but don't remove it */
110  packet = list_first_entry(&ep->txq, struct htc_packet, list);
111 
112  ath6kl_dbg(ATH6KL_DBG_HTC,
113  "%s: got head packet:0x%p , queue depth: %d\n",
114  __func__, packet, get_queue_depth(&ep->txq));
115 
116  transfer_len = packet->act_len + HTC_HDR_LENGTH;
117 
118  if (transfer_len <= target->tgt_cred_sz) {
119  credits_required = 1;
120  } else {
121  /* figure out how many credits this message requires */
122  credits_required = transfer_len / target->tgt_cred_sz;
123  remainder = transfer_len % target->tgt_cred_sz;
124 
125  if (remainder)
126  credits_required++;
127  }
128 
129  ath6kl_dbg(ATH6KL_DBG_HTC, "%s: creds required:%d got:%d\n",
130  __func__, credits_required, ep->cred_dist.credits);
131 
132  if (ep->eid == ENDPOINT_0) {
133  /*
134  * endpoint 0 is special, it always has a credit and
135  * does not require credit based flow control
136  */
137  credits_required = 0;
138 
139  } else {
140 
141  if (ep->cred_dist.credits < credits_required)
142  break;
143 
144  ep->cred_dist.credits -= credits_required;
145  ep->ep_st.cred_cosumd += credits_required;
146 
147  /* check if we need credits back from the target */
148  if (ep->cred_dist.credits <
149  ep->cred_dist.cred_per_msg) {
150  /* tell the target we need credits ASAP! */
151  send_flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
152  ep->ep_st.cred_low_indicate += 1;
153  ath6kl_dbg(ATH6KL_DBG_HTC,
154  "%s: host needs credits\n",
155  __func__);
156  }
157  }
158 
159  /* now we can fully dequeue */
160  packet = list_first_entry(&ep->txq, struct htc_packet, list);
161 
162  list_del(&packet->list);
163  /* save the number of credits this packet consumed */
164  packet->info.tx.cred_used = credits_required;
165  /* save send flags */
166  packet->info.tx.flags = send_flags;
167  packet->info.tx.seqno = ep->seqno;
168  ep->seqno++;
169  /* queue this packet into the caller's queue */
170  list_add_tail(&packet->list, queue);
171  }
172 
173 }
174 
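/*
 * Illustrative sketch (not part of the upstream file): the credit cost
 * computed above is simply ceil(transfer_len / tgt_cred_sz). A minimal
 * stand-alone version, with hypothetical example values in the comment:
 */
#if 0
static unsigned int example_credits_required(unsigned int act_len,
					     unsigned int htc_hdr_len,
					     unsigned int tgt_cred_sz)
{
	unsigned int transfer_len = act_len + htc_hdr_len;

	/* e.g. act_len = 1500, htc_hdr_len = 6, tgt_cred_sz = 1664 -> 1 credit;
	 * act_len = 3000 -> 3006 bytes -> 2 credits
	 */
	return (transfer_len + tgt_cred_sz - 1) / tgt_cred_sz;
}
#endif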
175 static void get_htc_packet(struct htc_target *target,
176  struct htc_endpoint *ep,
177  struct list_head *queue, int resources)
178 {
179  struct htc_packet *packet;
180 
181  /* NOTE : the TX lock is held when this function is called */
182 
183  /* loop until we can grab as many packets out of the queue as we can */
184  while (resources) {
185  if (list_empty(&ep->txq))
186  break;
187 
188  packet = list_first_entry(&ep->txq, struct htc_packet, list);
189  list_del(&packet->list);
190 
191  ath6kl_dbg(ATH6KL_DBG_HTC,
192  "%s: got packet:0x%p , new queue depth: %d\n",
193  __func__, packet, get_queue_depth(&ep->txq));
194  packet->info.tx.seqno = ep->seqno;
195  packet->info.tx.flags = 0;
196  packet->info.tx.cred_used = 0;
197  ep->seqno++;
198 
199  /* queue this packet into the caller's queue */
200  list_add_tail(&packet->list, queue);
201  resources--;
202  }
203 }
204 
205 static int htc_issue_packets(struct htc_target *target,
206  struct htc_endpoint *ep,
207  struct list_head *pkt_queue)
208 {
209  int status = 0;
210  u16 payload_len;
211  struct sk_buff *skb;
212  struct htc_frame_hdr *htc_hdr;
213  struct htc_packet *packet;
214 
215  ath6kl_dbg(ATH6KL_DBG_HTC,
216  "%s: queue: 0x%p, pkts %d\n", __func__,
217  pkt_queue, get_queue_depth(pkt_queue));
218 
219  while (!list_empty(pkt_queue)) {
220  packet = list_first_entry(pkt_queue, struct htc_packet, list);
221  list_del(&packet->list);
222 
223  skb = packet->skb;
224  if (!skb) {
225  WARN_ON_ONCE(1);
226  status = -EINVAL;
227  break;
228  }
229 
230  payload_len = packet->act_len;
231 
232  /* setup HTC frame header */
233  htc_hdr = (struct htc_frame_hdr *) skb_push(skb,
234  sizeof(*htc_hdr));
235  if (!htc_hdr) {
236  WARN_ON_ONCE(1);
237  status = -EINVAL;
238  break;
239  }
240 
241  packet->info.tx.flags |= HTC_FLAGS_TX_FIXUP_NETBUF;
242 
243  /* Endianness? */
244  put_unaligned((u16) payload_len, &htc_hdr->payld_len);
245  htc_hdr->flags = packet->info.tx.flags;
246  htc_hdr->eid = (u8) packet->endpoint;
247  htc_hdr->ctrl[0] = 0;
248  htc_hdr->ctrl[1] = (u8) packet->info.tx.seqno;
249 
250  spin_lock_bh(&target->tx_lock);
251 
252  /* store in look up queue to match completions */
253  list_add_tail(&packet->list, &ep->pipe.tx_lookup_queue);
254  ep->ep_st.tx_issued += 1;
255  spin_unlock_bh(&target->tx_lock);
256 
257  status = ath6kl_hif_pipe_send(target->dev->ar,
258  ep->pipe.pipeid_ul, NULL, skb);
259 
260  if (status != 0) {
261  if (status != -ENOMEM) {
262  /* TODO: if more than 1 endpoint maps to the
263  * same PipeID, it is possible to run out of
264  * resources in the HIF layer.
265  * Don't emit the error
266  */
267  ath6kl_dbg(ATH6KL_DBG_HTC,
268  "%s: failed status:%d\n",
269  __func__, status);
270  }
271  spin_lock_bh(&target->tx_lock);
272  list_del(&packet->list);
273 
274  /* reclaim credits */
275  ep->cred_dist.credits += packet->info.tx.cred_used;
276  spin_unlock_bh(&target->tx_lock);
277 
278  /* put it back into the callers queue */
279  list_add(&packet->list, pkt_queue);
280  break;
281  }
282 
283  }
284 
285  if (status != 0) {
286  while (!list_empty(pkt_queue)) {
287  if (status != -ENOMEM) {
288  ath6kl_dbg(ATH6KL_DBG_HTC,
289  "%s: failed pkt:0x%p status:%d\n",
290  __func__, packet, status);
291  }
292 
293  packet = list_first_entry(pkt_queue,
294  struct htc_packet, list);
295  list_del(&packet->list);
296  packet->status = status;
297  send_packet_completion(target, packet);
298  }
299  }
300 
301  return status;
302 }
303 
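/*
 * Illustrative note (not part of the upstream file): the header built by
 * htc_issue_packets() above is the HTC frame header (HTC_HDR_LENGTH bytes).
 * One layout consistent with the field accesses here (eid, flags, payld_len,
 * ctrl[]) would be the following; the authoritative definition lives in htc.h:
 *
 *	struct htc_frame_hdr {
 *		u8     eid;        target endpoint ID
 *		u8     flags;      e.g. HTC_FLAGS_NEED_CREDIT_UPDATE
 *		__le16 payld_len;  payload length, header excluded
 *		u8     ctrl[2];    ctrl[1] carries the TX sequence number
 *	} __packed;
 */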
304 static enum htc_send_queue_result htc_try_send(struct htc_target *target,
305  struct htc_endpoint *ep,
306  struct list_head *txq)
307 {
308  struct list_head send_queue; /* temp queue to hold packets */
309  struct htc_packet *packet, *tmp_pkt;
310  struct ath6kl *ar = target->dev->ar;
311  enum htc_send_full_action action;
312  int tx_resources, overflow, txqueue_depth, i, good_pkts;
313  u8 pipeid;
314 
315  ath6kl_dbg(ATH6KL_DBG_HTC, "%s: (queue:0x%p depth:%d)\n",
316  __func__, txq,
317  (txq == NULL) ? 0 : get_queue_depth(txq));
318 
319  /* init the local send queue */
320  INIT_LIST_HEAD(&send_queue);
321 
322  /*
323  * a NULL txq means the caller did not
324  * provide a queue; just check the endpoint
325  * queues and send what we can
326  */
327  if (txq != NULL) {
328  if (list_empty(txq)) {
329  /* empty queue */
330  return HTC_SEND_QUEUE_DROP;
331  }
332 
333  spin_lock_bh(&target->tx_lock);
334  txqueue_depth = get_queue_depth(&ep->txq);
335  spin_unlock_bh(&target->tx_lock);
336 
337  if (txqueue_depth >= ep->max_txq_depth) {
338  /* we've already overflowed */
339  overflow = get_queue_depth(txq);
340  } else {
341  /* get how much we will overflow by */
342  overflow = txqueue_depth;
343  overflow += get_queue_depth(txq);
344  /* get how much we will overflow the TX queue by */
345  overflow -= ep->max_txq_depth;
346  }
347 
348  /* if overflow is negative or zero, we are okay */
349  if (overflow > 0) {
350  ath6kl_dbg(ATH6KL_DBG_HTC,
351  "%s: Endpoint %d, TX queue will overflow :%d, Tx Depth:%d, Max:%d\n",
352  __func__, ep->eid, overflow, txqueue_depth,
353  ep->max_txq_depth);
354  }
355  if ((overflow <= 0) ||
356  (ep->ep_cb.tx_full == NULL)) {
357  /*
358  * all packets will fit or caller did not provide send
359  * full indication handler -- just move all of them
360  * to the local send_queue object
361  */
362  list_splice_tail_init(txq, &send_queue);
363  } else {
364  good_pkts = get_queue_depth(txq) - overflow;
365  if (good_pkts < 0) {
366  WARN_ON_ONCE(1);
367  return HTC_SEND_QUEUE_DROP;
368  }
369 
370  /* we have overflowed, and a callback is provided */
371  /* dequeue all non-overflow packets to the sendqueue */
372  for (i = 0; i < good_pkts; i++) {
373  /* pop off caller's queue */
374  packet = list_first_entry(txq,
375  struct htc_packet,
376  list);
377  list_del(&packet->list);
378  /* insert into local queue */
379  list_add_tail(&packet->list, &send_queue);
380  }
381 
382  /*
383  * the caller's queue has all the packets that won't fit
384  * walk through the caller's queue and indicate each to
385  * the send full handler
386  */
387  list_for_each_entry_safe(packet, tmp_pkt,
388  txq, list) {
389 
390  ath6kl_dbg(ATH6KL_DBG_HTC,
391  "%s: Indicat overflowed TX pkts: %p\n",
392  __func__, packet);
393  action = ep->ep_cb.tx_full(ep->target, packet);
394  if (action == HTC_SEND_FULL_DROP) {
395  /* callback wants the packet dropped */
396  ep->ep_st.tx_dropped += 1;
397 
398  /* leave this one in the caller's queue
399  * for cleanup */
400  } else {
401  /* callback wants to keep this packet,
402  * remove from caller's queue */
403  list_del(&packet->list);
404  /* put it in the send queue */
405  list_add_tail(&packet->list,
406  &send_queue);
407  }
408 
409  }
410 
411  if (list_empty(&send_queue)) {
412  /* no packets made it in, caller will cleanup */
413  return HTC_SEND_QUEUE_DROP;
414  }
415  }
416  }
417 
418  if (!ep->pipe.tx_credit_flow_enabled) {
419  tx_resources =
420  ath6kl_hif_pipe_get_free_queue_number(ar,
421  ep->pipe.pipeid_ul);
422  } else {
423  tx_resources = 0;
424  }
425 
426  spin_lock_bh(&target->tx_lock);
427  if (!list_empty(&send_queue)) {
428  /* transfer packets to tail */
429  list_splice_tail_init(&send_queue, &ep->txq);
430  if (!list_empty(&send_queue)) {
431  WARN_ON_ONCE(1);
432  spin_unlock_bh(&target->tx_lock);
433  return HTC_SEND_QUEUE_DROP;
434  }
435  INIT_LIST_HEAD(&send_queue);
436  }
437 
438  /* increment tx processing count on entry */
439  ep->tx_proc_cnt++;
440 
441  if (ep->tx_proc_cnt > 1) {
442  /*
443  * Another thread or task is draining the TX queues on this
444  * endpoint; that thread will reset the tx processing count
445  * when the queue is drained.
446  */
447  ep->tx_proc_cnt--;
448  spin_unlock_bh(&target->tx_lock);
449  return HTC_SEND_QUEUE_OK;
450  }
451 
452  /***** beyond this point only 1 thread may enter ******/
453 
454  /*
455  * Now drain the endpoint TX queue for transmission as long as we have
456  * enough transmit resources.
457  */
458  while (true) {
459 
460  if (get_queue_depth(&ep->txq) == 0)
461  break;
462 
463  if (ep->pipe.tx_credit_flow_enabled) {
464  /*
465  * Credit based mechanism provides flow control
466  * based on target transmit resource availability,
467  * we assume that the HIF layer will always have
468  * bus resources greater than target transmit
469  * resources.
470  */
471  get_htc_packet_credit_based(target, ep, &send_queue);
472  } else {
473  /*
474  * Get all packets for this endpoint that we can
475  * for this pass.
476  */
477  get_htc_packet(target, ep, &send_queue, tx_resources);
478  }
479 
480  if (get_queue_depth(&send_queue) == 0) {
481  /*
482  * Didn't get packets because we ran out of resources
483  * or the TX queue was drained.
484  */
485  break;
486  }
487 
488  spin_unlock_bh(&target->tx_lock);
489 
490  /* send what we can */
491  htc_issue_packets(target, ep, &send_queue);
492 
493  if (!ep->pipe.tx_credit_flow_enabled) {
494  pipeid = ep->pipe.pipeid_ul;
495  tx_resources =
496  ath6kl_hif_pipe_get_free_queue_number(ar, pipeid);
497  }
498 
499  spin_lock_bh(&target->tx_lock);
500 
501  }
502  /* done with this endpoint, we can clear the count */
503  ep->tx_proc_cnt = 0;
504  spin_unlock_bh(&target->tx_lock);
505 
506  return HTC_SEND_QUEUE_OK;
507 }
508 
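/*
 * Editorial note (not part of the upstream file): htc_try_send() serializes
 * draining with ep->tx_proc_cnt. Any context may splice packets onto ep->txq
 * under tx_lock, but only the context that raises tx_proc_cnt from 0 to 1
 * becomes the drainer; later callers simply enqueue and return
 * HTC_SEND_QUEUE_OK, relying on the active drainer (or a later credit
 * report / send completion) to flush what they queued.
 */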
509 /* htc control packet manipulation */
510 static void destroy_htc_txctrl_packet(struct htc_packet *packet)
511 {
512  struct sk_buff *skb;
513  skb = packet->skb;
514  if (skb != NULL)
515  dev_kfree_skb(skb);
516 
517  kfree(packet);
518 }
519 
520 static struct htc_packet *build_htc_txctrl_packet(void)
521 {
522  struct htc_packet *packet = NULL;
523  struct sk_buff *skb;
524 
525  packet = kzalloc(sizeof(struct htc_packet), GFP_KERNEL);
526  if (packet == NULL)
527  return NULL;
528 
529  skb = __dev_alloc_skb(HTC_CONTROL_BUFFER_SIZE, GFP_KERNEL);
530 
531  if (skb == NULL) {
532  kfree(packet);
533  return NULL;
534  }
535  packet->skb = skb;
536 
537  return packet;
538 }
539 
540 static void htc_free_txctrl_packet(struct htc_target *target,
541  struct htc_packet *packet)
542 {
543  destroy_htc_txctrl_packet(packet);
544 }
545 
546 static struct htc_packet *htc_alloc_txctrl_packet(struct htc_target *target)
547 {
548  return build_htc_txctrl_packet();
549 }
550 
551 static void htc_txctrl_complete(struct htc_target *target,
552  struct htc_packet *packet)
553 {
554  htc_free_txctrl_packet(target, packet);
555 }
556 
557 #define MAX_MESSAGE_SIZE 1536
558 
559 static int htc_setup_target_buffer_assignments(struct htc_target *target)
560 {
561  int status, credits, credit_per_maxmsg, i;
562  struct htc_pipe_txcredit_alloc *entry;
563  unsigned int hif_usbaudioclass = 0;
564 
565  credit_per_maxmsg = MAX_MESSAGE_SIZE / target->tgt_cred_sz;
566  if (MAX_MESSAGE_SIZE % target->tgt_cred_sz)
567  credit_per_maxmsg++;
568 
569  /* TODO, this should be configured by the caller! */
570 
571  credits = target->tgt_creds;
572  entry = &target->pipe.txcredit_alloc[0];
573 
574  status = -ENOMEM;
575 
576  /* FIXME: hif_usbaudioclass is always zero */
577  if (hif_usbaudioclass) {
578  ath6kl_dbg(ATH6KL_DBG_HTC,
579  "%s: For USB Audio Class- Total:%d\n",
580  __func__, credits);
581  entry++;
582  entry++;
583  /* Setup VO Service To have Max Credits */
584  entry->service_id = WMI_DATA_VO_SVC;
585  entry->credit_alloc = (credits - 6);
586  if (entry->credit_alloc == 0)
587  entry->credit_alloc++;
588 
589  credits -= (int) entry->credit_alloc;
590  if (credits <= 0)
591  return status;
592 
593  entry++;
594  entry->service_id = WMI_CONTROL_SVC;
595  entry->credit_alloc = credit_per_maxmsg;
596  credits -= (int) entry->credit_alloc;
597  if (credits <= 0)
598  return status;
599 
600  /* leftovers go to best effort */
601  entry++;
602  entry++;
603  entry->service_id = WMI_DATA_BE_SVC;
604  entry->credit_alloc = (u8) credits;
605  status = 0;
606  } else {
607  entry++;
608  entry->service_id = WMI_DATA_VI_SVC;
609  entry->credit_alloc = credits / 4;
610  if (entry->credit_alloc == 0)
611  entry->credit_alloc++;
612 
613  credits -= (int) entry->credit_alloc;
614  if (credits <= 0)
615  return status;
616 
617  entry++;
618  entry->service_id = WMI_DATA_VO_SVC;
619  entry->credit_alloc = credits / 4;
620  if (entry->credit_alloc == 0)
621  entry->credit_alloc++;
622 
623  credits -= (int) entry->credit_alloc;
624  if (credits <= 0)
625  return status;
626 
627  entry++;
628  entry->service_id = WMI_CONTROL_SVC;
629  entry->credit_alloc = credit_per_maxmsg;
630  credits -= (int) entry->credit_alloc;
631  if (credits <= 0)
632  return status;
633 
634  entry++;
635  entry->service_id = WMI_DATA_BK_SVC;
636  entry->credit_alloc = credit_per_maxmsg;
637  credits -= (int) entry->credit_alloc;
638  if (credits <= 0)
639  return status;
640 
641  /* leftovers go to best effort */
642  entry++;
643  entry->service_id = WMI_DATA_BE_SVC;
644  entry->credit_alloc = (u8) credits;
645  status = 0;
646  }
647 
648  if (status == 0) {
649  for (i = 0; i < ENDPOINT_MAX; i++) {
650  if (target->pipe.txcredit_alloc[i].service_id != 0) {
651  ath6kl_dbg(ATH6KL_DBG_HTC,
652  "HTC Service Index : %d TX : 0x%2.2X : alloc:%d\n",
653  i,
654  target->pipe.txcredit_alloc[i].
655  service_id,
656  target->pipe.txcredit_alloc[i].
657  credit_alloc);
658  }
659  }
660  }
661  return status;
662 }
663 
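/*
 * Illustrative trace (not part of the upstream file) of the default,
 * non-USB-audio branch above, using hypothetical numbers tgt_creds = 60 and
 * tgt_cred_sz = 1664 (so credit_per_maxmsg = 1):
 *
 *	VI  : 60 / 4 = 15 credits, 45 left
 *	VO  : 45 / 4 = 11 credits, 34 left
 *	CTRL: 1 credit (credit_per_maxmsg), 33 left
 *	BK  : 1 credit (credit_per_maxmsg), 32 left
 *	BE  : the remaining 32 credits
 */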
664 /* process credit reports and call distribution function */
665 static void htc_process_credit_report(struct htc_target *target,
666  struct htc_credit_report *rpt,
667  int num_entries,
668  enum htc_endpoint_id from_ep)
669 {
670  int total_credits = 0, i;
671  struct htc_endpoint *ep;
672 
673  /* lock out TX while we update credits */
674  spin_lock_bh(&target->tx_lock);
675 
676  for (i = 0; i < num_entries; i++, rpt++) {
677  if (rpt->eid >= ENDPOINT_MAX) {
678  WARN_ON_ONCE(1);
679  spin_unlock_bh(&target->tx_lock);
680  return;
681  }
682 
683  ep = &target->endpoint[rpt->eid];
684  ep->cred_dist.credits += rpt->credits;
685 
686  if (ep->cred_dist.credits && get_queue_depth(&ep->txq)) {
687  spin_unlock_bh(&target->tx_lock);
688  htc_try_send(target, ep, NULL);
689  spin_lock_bh(&target->tx_lock);
690  }
691 
692  total_credits += rpt->credits;
693  }
694  ath6kl_dbg(ATH6KL_DBG_HTC,
695  "Report indicated %d credits to distribute\n",
696  total_credits);
697 
698  spin_unlock_bh(&target->tx_lock);
699 }
700 
701 /* flush endpoint TX queue */
702 static void htc_flush_tx_endpoint(struct htc_target *target,
703  struct htc_endpoint *ep, u16 tag)
704 {
705  struct htc_packet *packet;
706 
707  spin_lock_bh(&target->tx_lock);
708  while (get_queue_depth(&ep->txq)) {
709  packet = list_first_entry(&ep->txq, struct htc_packet, list);
710  list_del(&packet->list);
711  packet->status = 0;
712  send_packet_completion(target, packet);
713  }
714  spin_unlock_bh(&target->tx_lock);
715 }
716 
717 /*
718  * In the adapted HIF layer, struct sk_buff pointers are passed between HIF and
719  * HTC; since upper layers expect struct htc_packet containers, we take the
720  * completed skb and look up its corresponding HTC packet buffer in a lookup list.
721  * This is extra overhead that can be fixed by re-aligning HIF interfaces with
722  * HTC.
723  */
724 static struct htc_packet *htc_lookup_tx_packet(struct htc_target *target,
725  struct htc_endpoint *ep,
726  struct sk_buff *skb)
727 {
728  struct htc_packet *packet, *tmp_pkt, *found_packet = NULL;
729 
730  spin_lock_bh(&target->tx_lock);
731 
732  /*
733  * iterate from the front of the tx lookup queue;
734  * this lookup should be fast since lower layers complete in order,
735  * so the completed packet should generally be at the head of the list
736  */
737  list_for_each_entry_safe(packet, tmp_pkt, &ep->pipe.tx_lookup_queue,
738  list) {
739  /* check for removal */
740  if (skb == packet->skb) {
741  /* found it */
742  list_del(&packet->list);
743  found_packet = packet;
744  break;
745  }
746  }
747 
748  spin_unlock_bh(&target->tx_lock);
749 
750  return found_packet;
751 }
752 
753 static int ath6kl_htc_pipe_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
754 {
755  struct htc_target *target = ar->htc_target;
756  struct htc_frame_hdr *htc_hdr;
757  struct htc_endpoint *ep;
758  struct htc_packet *packet;
759  u8 ep_id, *netdata;
760  u32 netlen;
761 
762  netdata = skb->data;
763  netlen = skb->len;
764 
765  htc_hdr = (struct htc_frame_hdr *) netdata;
766 
767  ep_id = htc_hdr->eid;
768  ep = &target->endpoint[ep_id];
769 
770  packet = htc_lookup_tx_packet(target, ep, skb);
771  if (packet == NULL) {
772  /* may have already been flushed and freed */
773  ath6kl_err("HTC TX lookup failed!\n");
774  } else {
775  /* will be giving this buffer back to upper layers */
776  packet->status = 0;
777  send_packet_completion(target, packet);
778  }
779  skb = NULL;
780 
781  if (!ep->pipe.tx_credit_flow_enabled) {
782  /*
783  * note: when using TX credit flow, the re-checking of queues
784  * happens when credits flow back from the target. in the
785  * non-TX credit case, we recheck after the packet completes
786  */
787  htc_try_send(target, ep, NULL);
788  }
789 
790  return 0;
791 }
792 
793 static int htc_send_packets_multiple(struct htc_target *target,
794  struct list_head *pkt_queue)
795 {
796  struct htc_endpoint *ep;
797  struct htc_packet *packet, *tmp_pkt;
798 
799  if (list_empty(pkt_queue))
800  return -EINVAL;
801 
802  /* get first packet to find out which ep the packets will go into */
803  packet = list_first_entry(pkt_queue, struct htc_packet, list);
804 
805  if (packet->endpoint >= ENDPOINT_MAX) {
806  WARN_ON_ONCE(1);
807  return -EINVAL;
808  }
809  ep = &target->endpoint[packet->endpoint];
810 
811  htc_try_send(target, ep, pkt_queue);
812 
813  /* do completion on any packets that couldn't get in */
814  if (!list_empty(pkt_queue)) {
815  list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
816  packet->status = -ENOMEM;
817  }
818 
819  do_send_completion(ep, pkt_queue);
820  }
821 
822  return 0;
823 }
824 
825 /* htc pipe rx path */
826 static struct htc_packet *alloc_htc_packet_container(struct htc_target *target)
827 {
828  struct htc_packet *packet;
829  spin_lock_bh(&target->rx_lock);
830 
831  if (target->pipe.htc_packet_pool == NULL) {
832  spin_unlock_bh(&target->rx_lock);
833  return NULL;
834  }
835 
836  packet = target->pipe.htc_packet_pool;
837  target->pipe.htc_packet_pool = (struct htc_packet *) packet->list.next;
838 
839  spin_unlock_bh(&target->rx_lock);
840 
841  packet->list.next = NULL;
842  return packet;
843 }
844 
845 static void free_htc_packet_container(struct htc_target *target,
846  struct htc_packet *packet)
847 {
848  struct list_head *lh;
849 
850  spin_lock_bh(&target->rx_lock);
851 
852  if (target->pipe.htc_packet_pool == NULL) {
853  target->pipe.htc_packet_pool = packet;
854  packet->list.next = NULL;
855  } else {
856  lh = (struct list_head *) target->pipe.htc_packet_pool;
857  packet->list.next = lh;
858  target->pipe.htc_packet_pool = packet;
859  }
860 
861  spin_unlock_bh(&target->rx_lock);
862 }
863 
864 static int htc_process_trailer(struct htc_target *target, u8 *buffer,
865  int len, enum htc_endpoint_id from_ep)
866 {
867  struct htc_credit_report *report;
868  struct htc_record_hdr *record;
869  u8 *record_buf, *orig_buf;
870  int orig_len, status;
871 
872  orig_buf = buffer;
873  orig_len = len;
874  status = 0;
875 
876  while (len > 0) {
877  if (len < sizeof(struct htc_record_hdr)) {
878  status = -EINVAL;
879  break;
880  }
881 
882  /* these are byte aligned structs */
883  record = (struct htc_record_hdr *) buffer;
884  len -= sizeof(struct htc_record_hdr);
885  buffer += sizeof(struct htc_record_hdr);
886 
887  if (record->len > len) {
888  /* no room left in buffer for record */
889  ath6kl_dbg(ATH6KL_DBG_HTC,
890  "invalid length: %d (id:%d) buffer has: %d bytes left\n",
891  record->len, record->rec_id, len);
892  status = -EINVAL;
893  break;
894  }
895 
896  /* start of record follows the header */
897  record_buf = buffer;
898 
899  switch (record->rec_id) {
900  case HTC_RECORD_CREDITS:
901  if (record->len < sizeof(struct htc_credit_report)) {
902  WARN_ON_ONCE(1);
903  return -EINVAL;
904  }
905 
906  report = (struct htc_credit_report *) record_buf;
907  htc_process_credit_report(target, report,
908  record->len / sizeof(*report),
909  from_ep);
910  break;
911  default:
912  ath6kl_dbg(ATH6KL_DBG_HTC,
913  "unhandled record: id:%d length:%d\n",
914  record->rec_id, record->len);
915  break;
916  }
917 
918  if (status != 0)
919  break;
920 
921  /* advance buffer past this record for next time around */
922  buffer += record->len;
923  len -= record->len;
924  }
925 
926  return status;
927 }
928 
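/*
 * Illustrative note (not part of the upstream file): the trailer parsed above
 * is a packed sequence of records, each prefixed by a byte-aligned
 * htc_record_hdr (rec_id, len) and followed by len bytes of payload:
 *
 *	| rec_id | len | payload[len] | rec_id | len | payload[len] | ...
 *
 * For HTC_RECORD_CREDITS the payload is an array of htc_credit_report
 * entries ({ eid, credits }), which is why the code divides record->len by
 * sizeof(*report) to get the entry count.
 */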
929 static void do_recv_completion(struct htc_endpoint *ep,
930  struct list_head *queue_to_indicate)
931 {
932  struct htc_packet *packet;
933 
934  if (list_empty(queue_to_indicate)) {
935  /* nothing to indicate */
936  return;
937  }
938 
939  /* using legacy EpRecv */
940  while (!list_empty(queue_to_indicate)) {
941  packet = list_first_entry(queue_to_indicate,
942  struct htc_packet, list);
943  list_del(&packet->list);
944  ep->ep_cb.rx(ep->target, packet);
945  }
946 
947  return;
948 }
949 
950 static void recv_packet_completion(struct htc_target *target,
951  struct htc_endpoint *ep,
952  struct htc_packet *packet)
953 {
954  struct list_head container;
955  INIT_LIST_HEAD(&container);
956  list_add_tail(&packet->list, &container);
957 
958  /* do completion */
959  do_recv_completion(ep, &container);
960 }
961 
962 static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
963  u8 pipeid)
964 {
965  struct htc_target *target = ar->htc_target;
966  u8 *netdata, *trailer, hdr_info;
967  struct htc_frame_hdr *htc_hdr;
968  u32 netlen, trailerlen = 0;
969  struct htc_packet *packet;
970  struct htc_endpoint *ep;
971  u16 payload_len;
972  int status = 0;
973 
974  netdata = skb->data;
975  netlen = skb->len;
976 
977  htc_hdr = (struct htc_frame_hdr *) netdata;
978 
979  ep = &target->endpoint[htc_hdr->eid];
980 
981  if (htc_hdr->eid >= ENDPOINT_MAX) {
982  ath6kl_dbg(ATH6KL_DBG_HTC,
983  "HTC Rx: invalid EndpointID=%d\n",
984  htc_hdr->eid);
985  status = -EINVAL;
986  goto free_skb;
987  }
988 
989  payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));
990 
991  if (netlen < (payload_len + HTC_HDR_LENGTH)) {
992  ath6kl_dbg(ATH6KL_DBG_HTC,
993  "HTC Rx: insufficient length, got:%d expected =%u\n",
994  netlen, payload_len + HTC_HDR_LENGTH);
995  status = -EINVAL;
996  goto free_skb;
997  }
998 
999  /* get flags to check for trailer */
1000  hdr_info = htc_hdr->flags;
1001  if (hdr_info & HTC_FLG_RX_TRAILER) {
1002  /* extract the trailer length */
1003  hdr_info = htc_hdr->ctrl[0];
1004  if ((hdr_info < sizeof(struct htc_record_hdr)) ||
1005  (hdr_info > payload_len)) {
1006  ath6kl_dbg(ATH6KL_DBG_HTC,
1007  "invalid header: payloadlen should be %d, CB[0]: %d\n",
1008  payload_len, hdr_info);
1009  status = -EINVAL;
1010  goto free_skb;
1011  }
1012 
1013  trailerlen = hdr_info;
1014  /* process trailer after hdr/apps payload */
1015  trailer = (u8 *) htc_hdr + HTC_HDR_LENGTH +
1016  payload_len - hdr_info;
1017  status = htc_process_trailer(target, trailer, hdr_info,
1018  htc_hdr->eid);
1019  if (status != 0)
1020  goto free_skb;
1021  }
1022 
1023  if (((int) payload_len - (int) trailerlen) <= 0) {
1024  /* zero length packet with trailer, just drop these */
1025  goto free_skb;
1026  }
1027 
1028  if (htc_hdr->eid == ENDPOINT_0) {
1029  /* handle HTC control message */
1030  if (target->htc_flags & HTC_OP_STATE_SETUP_COMPLETE) {
1031  /*
1032  * fatal: target should not send unsolicited
1033  * messages on endpoint 0
1034  */
1035  ath6kl_dbg(ATH6KL_DBG_HTC,
1036  "HTC ignores Rx Ctrl after setup complete\n");
1037  status = -EINVAL;
1038  goto free_skb;
1039  }
1040 
1041  /* remove HTC header */
1042  skb_pull(skb, HTC_HDR_LENGTH);
1043 
1044  netdata = skb->data;
1045  netlen = skb->len;
1046 
1047  spin_lock_bh(&target->rx_lock);
1048 
1049  target->pipe.ctrl_response_valid = true;
1050  target->pipe.ctrl_response_len = min_t(int, netlen,
1051  HTC_MAX_CTRL_MSG_LEN);
1052  memcpy(target->pipe.ctrl_response_buf, netdata,
1053  target->pipe.ctrl_response_len);
1054 
1055  spin_unlock_bh(&target->rx_lock);
1056 
1057  dev_kfree_skb(skb);
1058  skb = NULL;
1059  goto free_skb;
1060  }
1061 
1062  /*
1063  * TODO: the message based HIF architecture allocates net bufs
1064  * for recv packets; since it bridges HIF to upper layers,
1065  * which expect HTC packets, we form the packets here
1066  */
1067  packet = alloc_htc_packet_container(target);
1068  if (packet == NULL) {
1069  status = -ENOMEM;
1070  goto free_skb;
1071  }
1072 
1073  packet->status = 0;
1074  packet->endpoint = htc_hdr->eid;
1075  packet->pkt_cntxt = skb;
1076 
1077  /* TODO: for backwards compatibility */
1078  packet->buf = skb_push(skb, 0) + HTC_HDR_LENGTH;
1079  packet->act_len = netlen - HTC_HDR_LENGTH - trailerlen;
1080 
1081  /*
1082  * TODO: this is a hack because the driver layer will set the
1083  * actual len of the skb again which will just double the len
1084  */
1085  skb_trim(skb, 0);
1086 
1087  recv_packet_completion(target, ep, packet);
1088 
1089  /* recover the packet container */
1090  free_htc_packet_container(target, packet);
1091  skb = NULL;
1092 
1093 free_skb:
1094  if (skb != NULL)
1095  dev_kfree_skb(skb);
1096 
1097  return status;
1098 
1099 }
1100 
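/*
 * Editorial note (not part of the upstream file): the ENDPOINT_0 branch above
 * is the producer half of a simple polled handshake. The control payload is
 * copied into target->pipe.ctrl_response_buf and ctrl_response_valid is set
 * under rx_lock; htc_wait_recv_ctrl_message() below is the consumer that
 * polls and clears the flag, after which callers such as
 * ath6kl_htc_pipe_wait_target() and ath6kl_htc_pipe_conn_service() parse
 * ctrl_response_buf.
 */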
1101 static void htc_flush_rx_queue(struct htc_target *target,
1102  struct htc_endpoint *ep)
1103 {
1104  struct list_head container;
1105  struct htc_packet *packet;
1106 
1107  spin_lock_bh(&target->rx_lock);
1108 
1109  while (1) {
1110  if (list_empty(&ep->rx_bufq))
1111  break;
1112 
1113  packet = list_first_entry(&ep->rx_bufq,
1114  struct htc_packet, list);
1115  list_del(&packet->list);
1116 
1117  spin_unlock_bh(&target->rx_lock);
1118  packet->status = -ECANCELED;
1119  packet->act_len = 0;
1120 
1121  ath6kl_dbg(ATH6KL_DBG_HTC,
1122  "Flushing RX packet:0x%p, length:%d, ep:%d\n",
1123  packet, packet->buf_len,
1124  packet->endpoint);
1125 
1126  INIT_LIST_HEAD(&container);
1127  list_add_tail(&packet->list, &container);
1128 
1129  /* give the packet back */
1130  do_recv_completion(ep, &container);
1131  spin_lock_bh(&target->rx_lock);
1132  }
1133 
1134  spin_unlock_bh(&target->rx_lock);
1135 }
1136 
1137 /* polling routine to wait for a control packet to be received */
1138 static int htc_wait_recv_ctrl_message(struct htc_target *target)
1139 {
1140  int count = HTC_TARGET_RESPONSE_POLL_COUNT;
1141 
1142  while (count > 0) {
1143  spin_lock_bh(&target->rx_lock);
1144 
1145  if (target->pipe.ctrl_response_valid) {
1146  target->pipe.ctrl_response_valid = false;
1147  spin_unlock_bh(&target->rx_lock);
1148  break;
1149  }
1150 
1151  spin_unlock_bh(&target->rx_lock);
1152 
1153  count--;
1154 
1155  msleep_interruptible(HTC_TARGET_RESPONSE_POLL_WAIT);
1156  }
1157 
1158  if (count <= 0) {
1159  ath6kl_dbg(ATH6KL_DBG_HTC, "%s: Timeout!\n", __func__);
1160  return -ECOMM;
1161  }
1162 
1163  return 0;
1164 }
1165 
1166 static void htc_rxctrl_complete(struct htc_target *context,
1167  struct htc_packet *packet)
1168 {
1169  /* TODO, can't really receive HTC control messages yet.... */
1170  ath6kl_dbg(ATH6KL_DBG_HTC, "%s: invalid call function\n", __func__);
1171 }
1172 
1173 /* htc pipe initialization */
1174 static void reset_endpoint_states(struct htc_target *target)
1175 {
1176  struct htc_endpoint *ep;
1177  int i;
1178 
1179  for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
1180  ep = &target->endpoint[i];
1181  ep->svc_id = 0;
1182  ep->len_max = 0;
1183  ep->max_txq_depth = 0;
1184  ep->eid = i;
1185  INIT_LIST_HEAD(&ep->txq);
1186  INIT_LIST_HEAD(&ep->pipe.tx_lookup_queue);
1187  INIT_LIST_HEAD(&ep->rx_bufq);
1188  ep->target = target;
1189  ep->pipe.tx_credit_flow_enabled = (bool) 1; /* FIXME */
1190  }
1191 }
1192 
1193 /* start HTC, this is called after all services are connected */
1194 static int htc_config_target_hif_pipe(struct htc_target *target)
1195 {
1196  return 0;
1197 }
1198 
1199 /* htc service functions */
1200 static u8 htc_get_credit_alloc(struct htc_target *target, u16 service_id)
1201 {
1202  u8 allocation = 0;
1203  int i;
1204 
1205  for (i = 0; i < ENDPOINT_MAX; i++) {
1206  if (target->pipe.txcredit_alloc[i].service_id == service_id)
1207  allocation =
1208  target->pipe.txcredit_alloc[i].credit_alloc;
1209  }
1210 
1211  if (allocation == 0) {
1212  ath6kl_dbg(ATH6KL_DBG_HTC,
1213  "HTC Service TX : 0x%2.2X : allocation is zero!\n",
1214  service_id);
1215  }
1216 
1217  return allocation;
1218 }
1219 
1220 static int ath6kl_htc_pipe_conn_service(struct htc_target *target,
1221  struct htc_service_connect_req *conn_req,
1222  struct htc_service_connect_resp *conn_resp)
1223 {
1224  struct ath6kl *ar = target->dev->ar;
1225  struct htc_packet *packet = NULL;
1226  struct htc_conn_service_resp *resp_msg;
1227  struct htc_conn_service_msg *conn_msg;
1228  enum htc_endpoint_id assigned_epid = ENDPOINT_MAX;
1229  bool disable_credit_flowctrl = false;
1230  unsigned int max_msg_size = 0;
1231  struct htc_endpoint *ep;
1232  int length, status = 0;
1233  struct sk_buff *skb;
1234  u8 tx_alloc;
1235  u16 flags;
1236 
1237  if (conn_req->svc_id == 0) {
1238  WARN_ON_ONCE(1);
1239  status = -EINVAL;
1240  goto free_packet;
1241  }
1242 
1243  if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
1244  /* special case for pseudo control service */
1245  assigned_epid = ENDPOINT_0;
1246  max_msg_size = HTC_MAX_CTRL_MSG_LEN;
1247  tx_alloc = 0;
1248 
1249  } else {
1250 
1251  tx_alloc = htc_get_credit_alloc(target, conn_req->svc_id);
1252  if (tx_alloc == 0) {
1253  status = -ENOMEM;
1254  goto free_packet;
1255  }
1256 
1257  /* allocate a packet to send to the target */
1258  packet = htc_alloc_txctrl_packet(target);
1259 
1260  if (packet == NULL) {
1261  WARN_ON_ONCE(1);
1262  status = -ENOMEM;
1263  goto free_packet;
1264  }
1265 
1266  skb = packet->skb;
1267  length = sizeof(struct htc_conn_service_msg);
1268 
1269  /* assemble connect service message */
1270  conn_msg = (struct htc_conn_service_msg *) skb_put(skb,
1271  length);
1272  if (conn_msg == NULL) {
1273  WARN_ON_ONCE(1);
1274  status = -EINVAL;
1275  goto free_packet;
1276  }
1277 
1278  memset(conn_msg, 0,
1279  sizeof(struct htc_conn_service_msg));
1280  conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
1281  conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
1282  conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags &
1283  ~HTC_CONN_FLGS_SET_RECV_ALLOC_MASK);
1284 
1285  /* tell target desired recv alloc for this ep */
1286  flags = tx_alloc << HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT;
1287  conn_msg->conn_flags |= cpu_to_le16(flags);
1288 
1289  if (conn_req->conn_flags &
1290  HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL) {
1291  disable_credit_flowctrl = true;
1292  }
1293 
1294  set_htc_pkt_info(packet, NULL, (u8 *) conn_msg,
1295  length,
1296  ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
1297 
1298  status = ath6kl_htc_pipe_tx(target, packet);
1299 
1300  /* we don't own it anymore */
1301  packet = NULL;
1302  if (status != 0)
1303  goto free_packet;
1304 
1305  /* wait for response */
1306  status = htc_wait_recv_ctrl_message(target);
1307  if (status != 0)
1308  goto free_packet;
1309 
1310  /* we controlled the buffer creation so it has to be
1311  * properly aligned
1312  */
1313  resp_msg = (struct htc_conn_service_resp *)
1314  target->pipe.ctrl_response_buf;
1315 
1316  if (resp_msg->msg_id != cpu_to_le16(HTC_MSG_CONN_SVC_RESP_ID) ||
1317  (target->pipe.ctrl_response_len < sizeof(*resp_msg))) {
1318  /* this message is not valid */
1319  WARN_ON_ONCE(1);
1320  status = -EINVAL;
1321  goto free_packet;
1322  }
1323 
1324  ath6kl_dbg(ATH6KL_DBG_TRC,
1325  "%s: service 0x%X conn resp: status: %d ep: %d\n",
1326  __func__, resp_msg->svc_id, resp_msg->status,
1327  resp_msg->eid);
1328 
1329  conn_resp->resp_code = resp_msg->status;
1330  /* check response status */
1331  if (resp_msg->status != HTC_SERVICE_SUCCESS) {
1332  ath6kl_dbg(ATH6KL_DBG_HTC,
1333  "Target failed service 0x%X connect request (status:%d)\n",
1334  resp_msg->svc_id, resp_msg->status);
1335  status = -EINVAL;
1336  goto free_packet;
1337  }
1338 
1339  assigned_epid = (enum htc_endpoint_id) resp_msg->eid;
1340  max_msg_size = le16_to_cpu(resp_msg->max_msg_sz);
1341  }
1342 
1343  /* the rest are parameter checks so set the error status */
1344  status = -EINVAL;
1345 
1346  if (assigned_epid >= ENDPOINT_MAX) {
1347  WARN_ON_ONCE(1);
1348  goto free_packet;
1349  }
1350 
1351  if (max_msg_size == 0) {
1352  WARN_ON_ONCE(1);
1353  goto free_packet;
1354  }
1355 
1356  ep = &target->endpoint[assigned_epid];
1357  ep->eid = assigned_epid;
1358  if (ep->svc_id != 0) {
1359  /* endpoint already in use! */
1360  WARN_ON_ONCE(1);
1361  goto free_packet;
1362  }
1363 
1364  /* return assigned endpoint to caller */
1365  conn_resp->endpoint = assigned_epid;
1366  conn_resp->len_max = max_msg_size;
1367 
1368  /* setup the endpoint */
1369  ep->svc_id = conn_req->svc_id; /* this marks ep in use */
1370  ep->max_txq_depth = conn_req->max_txq_depth;
1371  ep->len_max = max_msg_size;
1372  ep->cred_dist.credits = tx_alloc;
1373  ep->cred_dist.cred_sz = target->tgt_cred_sz;
1374  ep->cred_dist.cred_per_msg = max_msg_size / target->tgt_cred_sz;
1375  if (max_msg_size % target->tgt_cred_sz)
1376  ep->cred_dist.cred_per_msg++;
1377 
1378  /* copy all the callbacks */
1379  ep->ep_cb = conn_req->ep_cb;
1380 
1381  /* initialize tx_drop_packet_threshold */
1382  ep->tx_drop_packet_threshold = MAX_HI_COOKIE_NUM;
1383 
1384  status = ath6kl_hif_pipe_map_service(ar, ep->svc_id,
1385  &ep->pipe.pipeid_ul,
1386  &ep->pipe.pipeid_dl);
1387  if (status != 0)
1388  goto free_packet;
1389 
1390  ath6kl_dbg(ATH6KL_DBG_HTC,
1391  "SVC Ready: 0x%4.4X: ULpipe:%d DLpipe:%d id:%d\n",
1392  ep->svc_id, ep->pipe.pipeid_ul,
1393  ep->pipe.pipeid_dl, ep->eid);
1394 
1395  if (disable_credit_flowctrl && ep->pipe.tx_credit_flow_enabled) {
1396  ep->pipe.tx_credit_flow_enabled = false;
1397  ath6kl_dbg(ATH6KL_DBG_HTC,
1398  "SVC: 0x%4.4X ep:%d TX flow control off\n",
1399  ep->svc_id, assigned_epid);
1400  }
1401 
1402 free_packet:
1403  if (packet != NULL)
1404  htc_free_txctrl_packet(target, packet);
1405  return status;
1406 }
1407 
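/*
 * Illustrative (hypothetical) caller of ath6kl_htc_pipe_conn_service(), not
 * part of the upstream file. It is modelled on the pseudo control-service
 * connect performed in ath6kl_htc_pipe_wait_target() below; the queue depth
 * and the choice of WMI_CONTROL_SVC are example values only.
 */
#if 0
static int example_connect_service(struct htc_target *target)
{
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp resp;

	memset(&connect, 0, sizeof(connect));
	memset(&resp, 0, sizeof(resp));

	connect.svc_id = WMI_CONTROL_SVC;	/* service to bind */
	connect.max_txq_depth = 16;		/* hypothetical depth */
	/* connect.ep_cb would carry the caller's tx/rx completion hooks */

	return ath6kl_htc_pipe_conn_service(target, &connect, &resp);
}
#endif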
1408 /* htc export functions */
1409 static void *ath6kl_htc_pipe_create(struct ath6kl *ar)
1410 {
1411  int status = 0;
1412  struct htc_endpoint *ep = NULL;
1413  struct htc_target *target = NULL;
1414  struct htc_packet *packet;
1415  int i;
1416 
1417  target = kzalloc(sizeof(struct htc_target), GFP_KERNEL);
1418  if (target == NULL) {
1419  ath6kl_err("htc create unable to allocate memory\n");
1420  status = -ENOMEM;
1421  goto fail_htc_create;
1422  }
1423 
1424  spin_lock_init(&target->htc_lock);
1425  spin_lock_init(&target->rx_lock);
1426  spin_lock_init(&target->tx_lock);
1427 
1428  reset_endpoint_states(target);
1429 
1430  for (i = 0; i < HTC_PACKET_CONTAINER_ALLOCATION; i++) {
1431  packet = kzalloc(sizeof(struct htc_packet), GFP_KERNEL);
1432 
1433  if (packet != NULL)
1434  free_htc_packet_container(target, packet);
1435  }
1436 
1437  target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
1438  if (!target->dev) {
1439  ath6kl_err("unable to allocate memory\n");
1440  status = -ENOMEM;
1441  goto fail_htc_create;
1442  }
1443  target->dev->ar = ar;
1444  target->dev->htc_cnxt = target;
1445 
1446  /* Get HIF default pipe for HTC message exchange */
1447  ep = &target->endpoint[ENDPOINT_0];
1448 
1449  ath6kl_hif_pipe_get_default(ar, &ep->pipe.pipeid_ul,
1450  &ep->pipe.pipeid_dl);
1451 
1452  return target;
1453 
1454 fail_htc_create:
1455  if (status != 0) {
1456  if (target != NULL)
1457  ath6kl_htc_pipe_cleanup(target);
1458 
1459  target = NULL;
1460  }
1461  return target;
1462 }
1463 
1464 /* cleanup the HTC instance */
1465 static void ath6kl_htc_pipe_cleanup(struct htc_target *target)
1466 {
1467  struct htc_packet *packet;
1468 
1469  while (true) {
1470  packet = alloc_htc_packet_container(target);
1471  if (packet == NULL)
1472  break;
1473  kfree(packet);
1474  }
1475 
1476  kfree(target->dev);
1477 
1478  /* kfree our instance */
1479  kfree(target);
1480 }
1481 
1482 static int ath6kl_htc_pipe_start(struct htc_target *target)
1483 {
1484  struct sk_buff *skb;
1485  struct htc_setup_comp_ext_msg *setup;
1486  struct htc_packet *packet;
1487 
1488  htc_config_target_hif_pipe(target);
1489 
1490  /* allocate a buffer to send */
1491  packet = htc_alloc_txctrl_packet(target);
1492  if (packet == NULL) {
1493  WARN_ON_ONCE(1);
1494  return -ENOMEM;
1495  }
1496 
1497  skb = packet->skb;
1498 
1499  /* assemble setup complete message */
1500  setup = (struct htc_setup_comp_ext_msg *) skb_put(skb,
1501  sizeof(*setup));
1502  memset(setup, 0, sizeof(struct htc_setup_comp_ext_msg));
1503  setup->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);
1504 
1505  ath6kl_dbg(ATH6KL_DBG_HTC, "HTC using TX credit flow control\n");
1506 
1507  set_htc_pkt_info(packet, NULL, (u8 *) setup,
1508  sizeof(struct htc_setup_comp_ext_msg),
1509  ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);
1510 
1511  target->htc_flags |= HTC_OP_STATE_SETUP_COMPLETE;
1512 
1513  return ath6kl_htc_pipe_tx(target, packet);
1514 }
1515 
1516 static void ath6kl_htc_pipe_stop(struct htc_target *target)
1517 {
1518  int i;
1519  struct htc_endpoint *ep;
1520 
1521  /* cleanup endpoints */
1522  for (i = 0; i < ENDPOINT_MAX; i++) {
1523  ep = &target->endpoint[i];
1524  htc_flush_rx_queue(target, ep);
1525  htc_flush_tx_endpoint(target, ep, HTC_TX_PACKET_TAG_ALL);
1526  }
1527 
1528  reset_endpoint_states(target);
1529  target->htc_flags &= ~HTC_OP_STATE_SETUP_COMPLETE;
1530 }
1531 
1532 static int ath6kl_htc_pipe_get_rxbuf_num(struct htc_target *target,
1533  enum htc_endpoint_id endpoint)
1534 {
1535  int num;
1536 
1537  spin_lock_bh(&target->rx_lock);
1538  num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
1539  spin_unlock_bh(&target->rx_lock);
1540 
1541  return num;
1542 }
1543 
1544 static int ath6kl_htc_pipe_tx(struct htc_target *target,
1545  struct htc_packet *packet)
1546 {
1547  struct list_head queue;
1548 
1549  ath6kl_dbg(ATH6KL_DBG_HTC,
1550  "%s: endPointId: %d, buffer: 0x%p, length: %d\n",
1551  __func__, packet->endpoint, packet->buf,
1552  packet->act_len);
1553 
1554  INIT_LIST_HEAD(&queue);
1555  list_add_tail(&packet->list, &queue);
1556 
1557  return htc_send_packets_multiple(target, &queue);
1558 }
1559 
1560 static int ath6kl_htc_pipe_wait_target(struct htc_target *target)
1561 {
1562  struct htc_ready_ext_msg *ready_msg;
1563  struct htc_service_connect_req connect;
1564  struct htc_service_connect_resp resp;
1565  int status = 0;
1566 
1567  status = htc_wait_recv_ctrl_message(target);
1568 
1569  if (status != 0)
1570  return status;
1571 
1572  if (target->pipe.ctrl_response_len < sizeof(*ready_msg)) {
1573  ath6kl_dbg(ATH6KL_DBG_HTC, "invalid htc ready msg len:%d!\n",
1574  target->pipe.ctrl_response_len);
1575  return -ECOMM;
1576  }
1577 
1578  ready_msg = (struct htc_ready_ext_msg *) target->pipe.ctrl_response_buf;
1579 
1580  if (ready_msg->ver2_0_info.msg_id != cpu_to_le16(HTC_MSG_READY_ID)) {
1581  ath6kl_dbg(ATH6KL_DBG_HTC, "invalid htc ready msg : 0x%X !\n",
1582  ready_msg->ver2_0_info.msg_id);
1583  return -ECOMM;
1584  }
1585 
1586  ath6kl_dbg(ATH6KL_DBG_HTC,
1587  "Target Ready! : transmit resources : %d size:%d\n",
1588  ready_msg->ver2_0_info.cred_cnt,
1589  ready_msg->ver2_0_info.cred_sz);
1590 
1591  target->tgt_creds = le16_to_cpu(ready_msg->ver2_0_info.cred_cnt);
1592  target->tgt_cred_sz = le16_to_cpu(ready_msg->ver2_0_info.cred_sz);
1593 
1594  if ((target->tgt_creds == 0) || (target->tgt_cred_sz == 0))
1595  return -ECOMM;
1596 
1597  htc_setup_target_buffer_assignments(target);
1598 
1599  /* setup our pseudo HTC control endpoint connection */
1600  memset(&connect, 0, sizeof(connect));
1601  memset(&resp, 0, sizeof(resp));
1602  connect.ep_cb.tx_complete = htc_txctrl_complete;
1603  connect.ep_cb.rx = htc_rxctrl_complete;
1604  connect.max_txq_depth = NUM_CONTROL_TX_BUFFERS;
1605  connect.svc_id = HTC_CTRL_RSVD_SVC;
1606 
1607  /* connect fake service */
1608  status = ath6kl_htc_pipe_conn_service(target, &connect, &resp);
1609 
1610  return status;
1611 }
1612 
1613 static void ath6kl_htc_pipe_flush_txep(struct htc_target *target,
1614  enum htc_endpoint_id endpoint, u16 tag)
1615 {
1616  struct htc_endpoint *ep = &target->endpoint[endpoint];
1617 
1618  if (ep->svc_id == 0) {
1619  WARN_ON_ONCE(1);
1620  /* not in use.. */
1621  return;
1622  }
1623 
1624  htc_flush_tx_endpoint(target, ep, tag);
1625 }
1626 
1627 static int ath6kl_htc_pipe_add_rxbuf_multiple(struct htc_target *target,
1628  struct list_head *pkt_queue)
1629 {
1630  struct htc_packet *packet, *tmp_pkt, *first;
1631  struct htc_endpoint *ep;
1632  int status = 0;
1633 
1634  if (list_empty(pkt_queue))
1635  return -EINVAL;
1636 
1637  first = list_first_entry(pkt_queue, struct htc_packet, list);
1638 
1639  if (first->endpoint >= ENDPOINT_MAX) {
1640  WARN_ON_ONCE(1);
1641  return -EINVAL;
1642  }
1643 
1644  ath6kl_dbg(ATH6KL_DBG_HTC, "%s: epid: %d, cnt:%d, len: %d\n",
1645  __func__, first->endpoint, get_queue_depth(pkt_queue),
1646  first->buf_len);
1647 
1648  ep = &target->endpoint[first->endpoint];
1649 
1650  spin_lock_bh(&target->rx_lock);
1651 
1652  /* store receive packets */
1653  list_splice_tail_init(pkt_queue, &ep->rx_bufq);
1654 
1655  spin_unlock_bh(&target->rx_lock);
1656 
1657  if (status != 0) {
1658  /* walk through queue and mark each one canceled */
1659  list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
1660  packet->status = -ECANCELED;
1661  }
1662 
1663  do_recv_completion(ep, pkt_queue);
1664  }
1665 
1666  return status;
1667 }
1668 
1669 static void ath6kl_htc_pipe_activity_changed(struct htc_target *target,
1670  enum htc_endpoint_id ep,
1671  bool active)
1672 {
1673  /* TODO */
1674 }
1675 
1676 static void ath6kl_htc_pipe_flush_rx_buf(struct htc_target *target)
1677 {
1678  /* TODO */
1679 }
1680 
1681 static int ath6kl_htc_pipe_credit_setup(struct htc_target *target,
1682  struct ath6kl_htc_credit_info *info)
1683 {
1684  return 0;
1685 }
1686 
1687 static const struct ath6kl_htc_ops ath6kl_htc_pipe_ops = {
1688  .create = ath6kl_htc_pipe_create,
1689  .wait_target = ath6kl_htc_pipe_wait_target,
1690  .start = ath6kl_htc_pipe_start,
1691  .conn_service = ath6kl_htc_pipe_conn_service,
1692  .tx = ath6kl_htc_pipe_tx,
1693  .stop = ath6kl_htc_pipe_stop,
1694  .cleanup = ath6kl_htc_pipe_cleanup,
1695  .flush_txep = ath6kl_htc_pipe_flush_txep,
1696  .flush_rx_buf = ath6kl_htc_pipe_flush_rx_buf,
1697  .activity_changed = ath6kl_htc_pipe_activity_changed,
1698  .get_rxbuf_num = ath6kl_htc_pipe_get_rxbuf_num,
1699  .add_rxbuf_multiple = ath6kl_htc_pipe_add_rxbuf_multiple,
1700  .credit_setup = ath6kl_htc_pipe_credit_setup,
1701  .tx_complete = ath6kl_htc_pipe_tx_complete,
1702  .rx_complete = ath6kl_htc_pipe_rx_complete,
1703 };
1704 
1705 void ath6kl_htc_pipe_attach(struct ath6kl *ar)
1706 {
1707  ar->htc_ops = &ath6kl_htc_pipe_ops;
1708 }
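/*
 * Editorial note (not part of the upstream file): a hedged sketch of typical
 * usage. The HIF/core code (outside this file) is assumed to attach the pipe
 * implementation once and then reach it only through the ops table above,
 * e.g.:
 *
 *	ath6kl_htc_pipe_attach(ar);
 *	ar->htc_target = ar->htc_ops->create(ar);
 */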