Linux Kernel 3.7.1
mci.c
/*
 * Copyright (c) 2010-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <linux/slab.h>

#include "ath9k.h"
#include "mci.h"

/*
 * BT duty cycle (percentage of the coex period granted to BT),
 * indexed by the number of active BT profiles.
 */
static const u8 ath_mci_duty_cycle[] = { 55, 50, 60, 70, 80, 85, 90, 95, 98 };

static struct ath_mci_profile_info *
ath_mci_find_profile(struct ath_mci_profile *mci,
                     struct ath_mci_profile_info *info)
{
        struct ath_mci_profile_info *entry;

        if (list_empty(&mci->info))
                return NULL;

        list_for_each_entry(entry, &mci->info, list) {
                if (entry->conn_handle == info->conn_handle)
                        return entry;
        }
        return NULL;
}

static bool ath_mci_add_profile(struct ath_common *common,
                                struct ath_mci_profile *mci,
                                struct ath_mci_profile_info *info)
{
        struct ath_mci_profile_info *entry;

        if ((mci->num_sco == ATH_MCI_MAX_SCO_PROFILE) &&
            (info->type == MCI_GPM_COEX_PROFILE_VOICE))
                return false;

        if (((NUM_PROF(mci) - mci->num_sco) == ATH_MCI_MAX_ACL_PROFILE) &&
            (info->type != MCI_GPM_COEX_PROFILE_VOICE))
                return false;

        entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry)
                return false;

        memcpy(entry, info, 10);
        INC_PROF(mci, info);
        list_add_tail(&entry->list, &mci->info);

        return true;
}
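
/*
 * Note on the 10-byte memcpy() above: it copies only the profile fields
 * that arrive in the GPM COEX payload (the same length is used when the
 * payload is extracted in ath_mci_msg() below); the entry's list linkage
 * is set up separately via list_add_tail() and appears not to be covered
 * by the copy.
 */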

static void ath_mci_del_profile(struct ath_common *common,
                                struct ath_mci_profile *mci,
                                struct ath_mci_profile_info *entry)
{
        if (!entry)
                return;

        DEC_PROF(mci, entry);
        list_del(&entry->list);
        kfree(entry);
}

void ath_mci_flush_profile(struct ath_mci_profile *mci)
{
        struct ath_mci_profile_info *info, *tinfo;

        mci->aggr_limit = 0;
        mci->num_mgmt = 0;

        if (list_empty(&mci->info))
                return;

        list_for_each_entry_safe(info, tinfo, &mci->info, list) {
                list_del(&info->list);
                DEC_PROF(mci, info);
                kfree(info);
        }
}

static void ath_mci_adjust_aggr_limit(struct ath_btcoex *btcoex)
{
        struct ath_mci_profile *mci = &btcoex->mci;
        u32 wlan_airtime = btcoex->btcoex_period *
                                (100 - btcoex->duty_cycle) / 100;

        /*
         * Scale: wlan_airtime is in ms, aggr_limit is in 0.25 ms.
         * When wlan_airtime is less than 4 ms, the aggregation limit has
         * to be capped at half of wlan_airtime so that an aggregate can
         * fit in without colliding with BT traffic.
         */
        if ((wlan_airtime <= 4) &&
            (!mci->aggr_limit || (mci->aggr_limit > (2 * wlan_airtime))))
                mci->aggr_limit = 2 * wlan_airtime;
}
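
/*
 * Worked example (illustrative values only): with btcoex_period = 40 ms and
 * duty_cycle = 90, wlan_airtime = 40 * (100 - 90) / 100 = 4 ms. Since
 * 4 <= 4, aggr_limit becomes 2 * 4 = 8, i.e. 8 * 0.25 ms = 2 ms of
 * aggregation time, half of the available WLAN airtime.
 */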

static void ath_mci_update_scheme(struct ath_softc *sc)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_btcoex *btcoex = &sc->btcoex;
        struct ath_mci_profile *mci = &btcoex->mci;
        struct ath9k_hw_mci *mci_hw = &sc->sc_ah->btcoex_hw.mci;
        struct ath_mci_profile_info *info;
        u32 num_profile = NUM_PROF(mci);

        if (mci_hw->config & ATH_MCI_CONFIG_DISABLE_TUNING)
                goto skip_tuning;

        mci->aggr_limit = 0;
        btcoex->duty_cycle = ath_mci_duty_cycle[num_profile];
        btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD;
        if (NUM_PROF(mci))
                btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
        else
                btcoex->bt_stomp_type = mci->num_mgmt ? ATH_BTCOEX_STOMP_ALL :
                                                        ATH_BTCOEX_STOMP_LOW;

        if (num_profile == 1) {
                info = list_first_entry(&mci->info,
                                        struct ath_mci_profile_info,
                                        list);
                if (mci->num_sco) {
                        if (info->T == 12)
                                mci->aggr_limit = 8;
                        else if (info->T == 6) {
                                mci->aggr_limit = 6;
                                btcoex->duty_cycle = 30;
                        } else
                                mci->aggr_limit = 6;
                        ath_dbg(common, MCI,
                                "Single SCO, aggregation limit %d 1/4 ms\n",
                                mci->aggr_limit);
                } else if (mci->num_pan || mci->num_other_acl) {
                        /*
                         * For a single PAN/FTP profile, allocate 35% for BT
                         * to improve WLAN throughput.
                         */
                        btcoex->duty_cycle = 35;
                        btcoex->btcoex_period = 53;
                        ath_dbg(common, MCI,
                                "Single PAN/FTP bt period %d ms dutycycle %d\n",
                                btcoex->btcoex_period, btcoex->duty_cycle);
                } else if (mci->num_hid) {
                        btcoex->duty_cycle = 30;
                        mci->aggr_limit = 6;
                        ath_dbg(common, MCI,
                                "Multiple attempt/timeout single HID "
                                "aggregation limit 1.5 ms dutycycle 30%%\n");
                }
        } else if (num_profile == 2) {
                if (mci->num_hid == 2)
                        btcoex->duty_cycle = 30;
                mci->aggr_limit = 6;
                ath_dbg(common, MCI,
                        "Two BT profiles aggr limit 1.5 ms dutycycle %d%%\n",
                        btcoex->duty_cycle);
        } else if (num_profile >= 3) {
                mci->aggr_limit = 4;
                ath_dbg(common, MCI,
                        "Three or more profiles aggregation limit 1 ms\n");
        }

skip_tuning:
        if (IS_CHAN_2GHZ(sc->sc_ah->curchan)) {
                if (IS_CHAN_HT(sc->sc_ah->curchan))
                        ath_mci_adjust_aggr_limit(btcoex);
                else
                        btcoex->btcoex_period >>= 1;
        }

        ath9k_btcoex_timer_pause(sc);
        ath9k_hw_btcoex_disable(sc->sc_ah);

        if (IS_CHAN_5GHZ(sc->sc_ah->curchan))
                return;

        btcoex->duty_cycle += (mci->num_bdr ? ATH_MCI_BDR_DUTY_CYCLE : 0);
        if (btcoex->duty_cycle > ATH_MCI_MAX_DUTY_CYCLE)
                btcoex->duty_cycle = ATH_MCI_MAX_DUTY_CYCLE;

        btcoex->btcoex_no_stomp = btcoex->btcoex_period * 1000 *
                                  (100 - btcoex->duty_cycle) / 100;

        ath9k_hw_btcoex_enable(sc->sc_ah);
        ath9k_btcoex_timer_resume(sc);
}
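
/*
 * Worked example (illustrative values only): with btcoex_period = 40 ms and
 * duty_cycle = 55, btcoex_no_stomp = 40 * 1000 * (100 - 55) / 100 = 18000,
 * i.e. the non-BT share of the period converted from milliseconds to
 * microseconds.
 */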

static void ath_mci_wait_btcal_done(struct ath_softc *sc)
{
        struct ath_hw *ah = sc->sc_ah;

        /* Stop tx & rx */
        ieee80211_stop_queues(sc->hw);
        ath_stoprecv(sc);
        ath_drain_all_txq(sc, false);

        /* Wait for cal done */
        ar9003_mci_start_reset(ah, ah->curchan);

        /* Resume tx & rx */
        ath_startrecv(sc);
        ieee80211_wake_queues(sc->hw);
}

static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
        u32 payload[4] = {0, 0, 0, 0};

        switch (opcode) {
        case MCI_GPM_BT_CAL_REQ:
                if (mci_hw->bt_state == MCI_BT_AWAKE) {
                        mci_hw->bt_state = MCI_BT_CAL_START;
                        ath_mci_wait_btcal_done(sc);
                }
                ath_dbg(common, MCI, "MCI State : %d\n", mci_hw->bt_state);
                break;
        case MCI_GPM_BT_CAL_GRANT:
                MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_DONE);
                ar9003_mci_send_message(sc->sc_ah, MCI_GPM, 0, payload,
                                        16, false, true);
                break;
        default:
                ath_dbg(common, MCI, "Unknown GPM CAL message\n");
                break;
        }
}
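
/*
 * Sketch of the BT calibration handshake handled above (as far as it is
 * visible in this file): on MCI_GPM_BT_CAL_REQ while BT is awake, the WLAN
 * side enters MCI_BT_CAL_START and ath_mci_wait_btcal_done() quiesces TX/RX
 * until the calibration completes; the grant case then appears to answer
 * with a 16-byte GPM via ar9003_mci_send_message() to report that the WLAN
 * side is done.
 */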

static void ath9k_mci_work(struct work_struct *work)
{
        struct ath_softc *sc = container_of(work, struct ath_softc, mci_work);

        ath_mci_update_scheme(sc);
}

static u8 ath_mci_process_profile(struct ath_softc *sc,
                                  struct ath_mci_profile_info *info)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_btcoex *btcoex = &sc->btcoex;
        struct ath_mci_profile *mci = &btcoex->mci;
        struct ath_mci_profile_info *entry = NULL;

        entry = ath_mci_find_profile(mci, info);
        if (entry) {
                /*
                 * Two MCI interrupts are generated while connecting to
                 * headset and A2DP profiles, but only one MCI interrupt
                 * (carrying the last-added profile type) is generated when
                 * both profiles are disconnected. So, when the second
                 * profile type is added, decrement the count of the first
                 * one.
                 */
                if (entry->type != info->type) {
                        DEC_PROF(mci, entry);
                        INC_PROF(mci, info);
                }
                memcpy(entry, info, 10);
        }

        if (info->start) {
                if (!entry && !ath_mci_add_profile(common, mci, info))
                        return 0;
        } else
                ath_mci_del_profile(common, mci, entry);

        return 1;
}

static u8 ath_mci_process_status(struct ath_softc *sc,
                                 struct ath_mci_profile_status *status)
{
        struct ath_btcoex *btcoex = &sc->btcoex;
        struct ath_mci_profile *mci = &btcoex->mci;
        struct ath_mci_profile_info info;
        int i = 0, old_num_mgmt = mci->num_mgmt;

        /* Link status types are not handled */
        if (status->is_link)
                return 0;

        info.conn_handle = status->conn_handle;
        if (ath_mci_find_profile(mci, &info))
                return 0;

        if (status->conn_handle >= ATH_MCI_MAX_PROFILE)
                return 0;

        if (status->is_critical)
                __set_bit(status->conn_handle, mci->status);
        else
                __clear_bit(status->conn_handle, mci->status);

        mci->num_mgmt = 0;
        do {
                if (test_bit(i, mci->status))
                        mci->num_mgmt++;
        } while (++i < ATH_MCI_MAX_PROFILE);

        if (old_num_mgmt != mci->num_mgmt)
                return 1;

        return 0;
}
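
/*
 * The do/while loop above is effectively a population count of mci->status
 * over ATH_MCI_MAX_PROFILE bits; assuming mci->status is a standard
 * unsigned-long bitmap (it is manipulated with __set_bit()/test_bit()),
 * bitmap_weight(mci->status, ATH_MCI_MAX_PROFILE) would compute the same
 * value.
 */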

static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
{
        struct ath_hw *ah = sc->sc_ah;
        struct ath_mci_profile_info profile_info;
        struct ath_mci_profile_status profile_status;
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        u8 major, minor, update_scheme = 0;
        u32 seq_num;

        if (ar9003_mci_state(ah, MCI_STATE_NEED_FLUSH_BT_INFO) &&
            ar9003_mci_state(ah, MCI_STATE_ENABLE)) {
                ath_dbg(common, MCI, "(MCI) Need to flush BT profiles\n");
                ath_mci_flush_profile(&sc->btcoex.mci);
                ar9003_mci_state(ah, MCI_STATE_SEND_STATUS_QUERY);
        }

        switch (opcode) {
        case MCI_GPM_COEX_VERSION_QUERY:
                ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION);
                break;
        case MCI_GPM_COEX_VERSION_RESPONSE:
                major = *(rx_payload + MCI_GPM_COEX_B_MAJOR_VERSION);
                minor = *(rx_payload + MCI_GPM_COEX_B_MINOR_VERSION);
                ar9003_mci_set_bt_version(ah, major, minor);
                break;
        case MCI_GPM_COEX_STATUS_QUERY:
                ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_CHANNELS);
                break;
        case MCI_GPM_COEX_BT_PROFILE_INFO:
                memcpy(&profile_info,
                       (rx_payload + MCI_GPM_COEX_B_PROFILE_TYPE), 10);

                if ((profile_info.type == MCI_GPM_COEX_PROFILE_UNKNOWN) ||
                    (profile_info.type >= MCI_GPM_COEX_PROFILE_MAX)) {
                        ath_dbg(common, MCI,
                                "Illegal profile type = %d, state = %d\n",
                                profile_info.type,
                                profile_info.start);
                        break;
                }

                update_scheme += ath_mci_process_profile(sc, &profile_info);
                break;
        case MCI_GPM_COEX_BT_STATUS_UPDATE:
                profile_status.is_link = *(rx_payload +
                                           MCI_GPM_COEX_B_STATUS_TYPE);
                profile_status.conn_handle = *(rx_payload +
                                               MCI_GPM_COEX_B_STATUS_LINKID);
                profile_status.is_critical = *(rx_payload +
                                               MCI_GPM_COEX_B_STATUS_STATE);

                seq_num = *((u32 *)(rx_payload + 12));
                ath_dbg(common, MCI,
                        "BT_Status_Update: is_link=%d, linkId=%d, state=%d, SEQ=%u\n",
                        profile_status.is_link, profile_status.conn_handle,
                        profile_status.is_critical, seq_num);

                update_scheme += ath_mci_process_status(sc, &profile_status);
                break;
        default:
                ath_dbg(common, MCI, "Unknown GPM COEX message = 0x%02x\n", opcode);
                break;
        }
        if (update_scheme)
                ieee80211_queue_work(sc->hw, &sc->mci_work);
}

int ath_mci_setup(struct ath_softc *sc)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_mci_coex *mci = &sc->mci_coex;
        struct ath_mci_buf *buf = &mci->sched_buf;
        int ret;

        buf->bf_addr = dma_alloc_coherent(sc->dev,
                                          ATH_MCI_SCHED_BUF_SIZE +
                                          ATH_MCI_GPM_BUF_SIZE,
                                          &buf->bf_paddr, GFP_KERNEL);

        if (buf->bf_addr == NULL) {
                ath_dbg(common, FATAL, "MCI buffer alloc failed\n");
                return -ENOMEM;
        }

        mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE;

        mci->gpm_buf.bf_len = ATH_MCI_GPM_BUF_SIZE;
        mci->gpm_buf.bf_addr = (u8 *)mci->sched_buf.bf_addr + mci->sched_buf.bf_len;
        mci->gpm_buf.bf_paddr = mci->sched_buf.bf_paddr + mci->sched_buf.bf_len;

        ret = ar9003_mci_setup(sc->sc_ah, mci->gpm_buf.bf_paddr,
                               mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4),
                               mci->sched_buf.bf_paddr);
        if (ret) {
                ath_err(common, "Failed to initialize MCI\n");
                return ret;
        }

        INIT_WORK(&sc->mci_work, ath9k_mci_work);
        ath_dbg(common, MCI, "MCI Initialized\n");

        return 0;
}
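
/*
 * Layout of the single coherent allocation made in ath_mci_setup() (sketch):
 *
 *   sched_buf.bf_paddr              sched_buf.bf_paddr + ATH_MCI_SCHED_BUF_SIZE
 *   |<---------- sched_buf -------->|<---------------- gpm_buf -------------->|
 *        ATH_MCI_SCHED_BUF_SIZE                  ATH_MCI_GPM_BUF_SIZE
 *
 * The GPM buffer length is handed to ar9003_mci_setup() as bf_len >> 4,
 * i.e. as a count of 16-byte entries, which matches the 16-byte GPM
 * messages sent elsewhere in this file.
 */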

void ath_mci_cleanup(struct ath_softc *sc)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);
        struct ath_hw *ah = sc->sc_ah;
        struct ath_mci_coex *mci = &sc->mci_coex;
        struct ath_mci_buf *buf = &mci->sched_buf;

        if (buf->bf_addr)
                dma_free_coherent(sc->dev,
                                  ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE,
                                  buf->bf_addr, buf->bf_paddr);

        ar9003_mci_cleanup(ah);

        ath_dbg(common, MCI, "MCI De-Initialized\n");
}

void ath_mci_intr(struct ath_softc *sc)
{
        struct ath_mci_coex *mci = &sc->mci_coex;
        struct ath_hw *ah = sc->sc_ah;
        struct ath_common *common = ath9k_hw_common(ah);
        struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
        u32 mci_int, mci_int_rxmsg;
        u32 offset, subtype, opcode;
        u32 *pgpm;
        u32 more_data = MCI_GPM_MORE;
        bool skip_gpm = false;

        ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg);

        if (ar9003_mci_state(ah, MCI_STATE_ENABLE) == 0) {
                ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET);
                return;
        }

        if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE) {
                u32 payload[4] = { 0xffffffff, 0xffffffff,
                                   0xffffffff, 0xffffff00};

                /*
                 * The following REMOTE_RESET and SYS_WAKING used to be sent
                 * only when BT woke up. Now they are always sent, as a
                 * recovery mechanism to reset the BT MCI's RX alignment.
                 */
                ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0,
                                        payload, 16, true, false);
                ar9003_mci_send_message(ah, MCI_SYS_WAKING, 0,
                                        NULL, 0, true, false);

                mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE;
                ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE);

                /*
                 * Always do this, for recovery as well as 2G/5G toggling
                 * and LNA_TRANS.
                 */
                ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE);
        }

        if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING) {
                mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING;

                if ((mci_hw->bt_state == MCI_BT_SLEEP) &&
                    (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP) !=
                     MCI_BT_SLEEP))
                        ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE);
        }

        if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) {
                mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING;

                if ((mci_hw->bt_state == MCI_BT_AWAKE) &&
                    (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP) !=
                     MCI_BT_AWAKE))
                        mci_hw->bt_state = MCI_BT_SLEEP;
        }

        if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
            (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
                ar9003_mci_state(ah, MCI_STATE_RECOVER_RX);
                skip_gpm = true;
        }

        if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) {
                mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO;
                offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET);
        }

        if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) {
                mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_GPM;

                while (more_data == MCI_GPM_MORE) {

                        pgpm = mci->gpm_buf.bf_addr;
                        offset = ar9003_mci_get_next_gpm_offset(ah, false,
                                                                &more_data);

                        if (offset == MCI_GPM_INVALID)
                                break;

                        pgpm += (offset >> 2);

                        /*
                         * The first dword is a timestamp; the real data
                         * starts from the second dword.
                         */
                        subtype = MCI_GPM_TYPE(pgpm);
                        opcode = MCI_GPM_OPCODE(pgpm);

                        if (skip_gpm)
                                goto recycle;

                        if (MCI_GPM_IS_CAL_TYPE(subtype)) {
                                ath_mci_cal_msg(sc, subtype, (u8 *)pgpm);
                        } else {
                                switch (subtype) {
                                case MCI_GPM_COEX_AGENT:
                                        ath_mci_msg(sc, opcode, (u8 *)pgpm);
                                        break;
                                default:
                                        break;
                                }
                        }
                recycle:
                        MCI_GPM_RECYCLE(pgpm);
                }
        }
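
        /*
         * Note on the GPM walk above: ar9003_mci_get_next_gpm_offset()
         * returns a byte offset into gpm_buf, so "pgpm += (offset >> 2)"
         * advances a u32 pointer to that entry. Each GPM entry appears to
         * be 16 bytes (matching the bf_len >> 4 entry count passed to
         * ar9003_mci_setup()), with the first dword used as a timestamp
         * and the payload starting at the second dword.
         */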

        if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_HW_MSG_MASK) {
                if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL)
                        mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL;

                if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_INFO)
                        mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_INFO;

                if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) {
                        int value_dbm = MS(mci_hw->cont_status,
                                           AR_MCI_CONT_RSSI_POWER);

                        mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_INFO;

                        ath_dbg(common, MCI,
                                "MCI CONT_INFO: (%s) pri = %d pwr = %d dBm\n",
                                MS(mci_hw->cont_status, AR_MCI_CONT_TXRX) ?
                                "tx" : "rx",
                                MS(mci_hw->cont_status, AR_MCI_CONT_PRIORITY),
                                value_dbm);
                }

                if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK)
                        mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_NACK;

                if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_RST)
                        mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_RST;
        }

        if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
            (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
                mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR |
                             AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT);
                ath_mci_msg(sc, MCI_GPM_COEX_NOOP, NULL);
        }
}

void ath_mci_enable(struct ath_softc *sc)
{
        struct ath_common *common = ath9k_hw_common(sc->sc_ah);

        if (!common->btcoex_enabled)
                return;

        if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
                sc->sc_ah->imask |= ATH9K_INT_MCI;
}