Linux Kernel  3.7.1
 All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Groups Pages
pci.c
Go to the documentation of this file.
1 /******************************************************************************
2  *
3  * Copyright(c) 2009-2012 Realtek Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of version 2 of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12  * more details.
13  *
14  * You should have received a copy of the GNU General Public License along with
15  * this program; if not, write to the Free Software Foundation, Inc.,
16  * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17  *
18  * The full GNU General Public License is included in this distribution in the
19  * file called LICENSE.
20  *
21  * Contact Information:
22  * wlanfae <[email protected]>
23  * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24  * Hsinchu 300, Taiwan.
25  *
26  * Larry Finger <[email protected]>
27  *
28  *****************************************************************************/
29 
30 #include "wifi.h"
31 #include "core.h"
32 #include "pci.h"
33 #include "base.h"
34 #include "ps.h"
35 #include "efuse.h"
36 #include <linux/export.h>
37 #include <linux/kmemleak.h>
38 
39 static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = {
44 };
45 
46 static const u8 ac_to_hwq[] = {
47  VO_QUEUE,
48  VI_QUEUE,
49  BE_QUEUE,
50  BK_QUEUE
51 };
52 
53 static u8 _rtl_mac_to_hwqueue(struct ieee80211_hw *hw,
54  struct sk_buff *skb)
55 {
56  struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
57  __le16 fc = rtl_get_fc(skb);
58  u8 queue_index = skb_get_queue_mapping(skb);
59 
60  if (unlikely(ieee80211_is_beacon(fc)))
61  return BEACON_QUEUE;
62  if (ieee80211_is_mgmt(fc))
63  return MGNT_QUEUE;
64  if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
65  if (ieee80211_is_nullfunc(fc))
66  return HIGH_QUEUE;
67 
68  return ac_to_hwq[queue_index];
69 }
70 
71 /* Update PCI dependent default settings*/
72 static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
73 {
74  struct rtl_priv *rtlpriv = rtl_priv(hw);
75  struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
76  struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
77  struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
78  u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
79  u8 init_aspm;
80 
81  ppsc->reg_rfps_level = 0;
82  ppsc->support_aspm = false;
83 
84  /*Update PCI ASPM setting */
85  ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm;
86  switch (rtlpci->const_pci_aspm) {
87  case 0:
88  /*No ASPM */
89  break;
90 
91  case 1:
92  /*ASPM dynamically enabled/disable. */
94  break;
95 
96  case 2:
97  /*ASPM with Clock Req dynamically enabled/disable. */
100  break;
101 
102  case 3:
103  /*
104  * Always enable ASPM and Clock Req
105  * from initialization to halt.
106  * */
110  break;
111 
112  case 4:
113  /*
114  * Always enable ASPM without Clock Req
115  * from initialization to halt.
116  * */
120  break;
121  }
122 
124 
125  /*Update Radio OFF setting */
126  switch (rtlpci->const_hwsw_rfoff_d3) {
127  case 1:
130  break;
131 
132  case 2:
136  break;
137 
138  case 3:
140  break;
141  }
142 
143  /*Set HW definition to determine if it supports ASPM. */
144  switch (rtlpci->const_support_pciaspm) {
145  case 0:{
146  /*Not support ASPM. */
147  bool support_aspm = false;
148  ppsc->support_aspm = support_aspm;
149  break;
150  }
151  case 1:{
152  /*Support ASPM. */
153  bool support_aspm = true;
154  bool support_backdoor = true;
155  ppsc->support_aspm = support_aspm;
156 
157  /*if (priv->oem_id == RT_CID_TOSHIBA &&
158  !priv->ndis_adapter.amd_l1_patch)
159  support_backdoor = false; */
160 
162 
163  break;
164  }
165  case 2:
166  /*ASPM value set by chipset. */
167  if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) {
168  bool support_aspm = true;
169  ppsc->support_aspm = support_aspm;
170  }
171  break;
172  default:
173  RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
174  "switch case not processed\n");
175  break;
176  }
177 
178  /* toshiba aspm issue, toshiba will set aspm selfly
179  * so we should not set aspm in driver */
180  pci_read_config_byte(rtlpci->pdev, 0x80, &init_aspm);
181  if (rtlpriv->rtlhal.hw_type == HARDWARE_TYPE_RTL8192SE &&
182  init_aspm == 0x43)
183  ppsc->support_aspm = false;
184 }
185 
186 static bool _rtl_pci_platform_switch_device_pci_aspm(
187  struct ieee80211_hw *hw,
188  u8 value)
189 {
190  struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
191  struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
192 
193  if (rtlhal->hw_type != HARDWARE_TYPE_RTL8192SE)
194  value |= 0x40;
195 
196  pci_write_config_byte(rtlpci->pdev, 0x80, value);
197 
198  return false;
199 }
200 
201 /*When we set 0x01 to enable clk request. Set 0x0 to disable clk req.*/
202 static void _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value)
203 {
204  struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
205  struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
206 
207  pci_write_config_byte(rtlpci->pdev, 0x81, value);
208 
209  if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE)
210  udelay(100);
211 }
212 
213 /*Disable RTL8192SE ASPM & Disable Pci Bridge ASPM*/
214 static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
215 {
216  struct rtl_priv *rtlpriv = rtl_priv(hw);
217  struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
218  struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
219  struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
220  u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
221  u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
222  /*Retrieve original configuration settings. */
223  u8 linkctrl_reg = pcipriv->ndis_adapter.linkctrl_reg;
224  u16 pcibridge_linkctrlreg = pcipriv->ndis_adapter.
225  pcibridge_linkctrlreg;
226  u16 aspmlevel = 0;
227  u8 tmp_u1b = 0;
228 
229  if (!ppsc->support_aspm)
230  return;
231 
232  if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
233  RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
234  "PCI(Bridge) UNKNOWN\n");
235 
236  return;
237  }
238 
241  _rtl_pci_switch_clk_req(hw, 0x0);
242  }
243 
244  /*for promising device will in L0 state after an I/O. */
245  pci_read_config_byte(rtlpci->pdev, 0x80, &tmp_u1b);
246 
247  /*Set corresponding value. */
248  aspmlevel |= BIT(0) | BIT(1);
249  linkctrl_reg &= ~aspmlevel;
250  pcibridge_linkctrlreg &= ~(BIT(0) | BIT(1));
251 
252  _rtl_pci_platform_switch_device_pci_aspm(hw, linkctrl_reg);
253  udelay(50);
254 
255  /*4 Disable Pci Bridge ASPM */
256  pci_write_config_byte(rtlpci->pdev, (num4bytes << 2),
257  pcibridge_linkctrlreg);
258 
259  udelay(50);
260 }
261 
262 /*
263  *Enable RTL8192SE ASPM & Enable Pci Bridge ASPM for
264  *power saving We should follow the sequence to enable
265  *RTL8192SE first then enable Pci Bridge ASPM
266  *or the system will show bluescreen.
267  */
268 static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
269 {
270  struct rtl_priv *rtlpriv = rtl_priv(hw);
271  struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
272  struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
273  struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
274  u8 pcibridge_busnum = pcipriv->ndis_adapter.pcibridge_busnum;
275  u8 pcibridge_devnum = pcipriv->ndis_adapter.pcibridge_devnum;
276  u8 pcibridge_funcnum = pcipriv->ndis_adapter.pcibridge_funcnum;
277  u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
278  u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
279  u16 aspmlevel;
280  u8 u_pcibridge_aspmsetting;
281  u8 u_device_aspmsetting;
282 
283  if (!ppsc->support_aspm)
284  return;
285 
286  if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
287  RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
288  "PCI(Bridge) UNKNOWN\n");
289  return;
290  }
291 
292  /*4 Enable Pci Bridge ASPM */
293 
294  u_pcibridge_aspmsetting =
295  pcipriv->ndis_adapter.pcibridge_linkctrlreg |
297 
298  if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL)
299  u_pcibridge_aspmsetting &= ~BIT(0);
300 
301  pci_write_config_byte(rtlpci->pdev, (num4bytes << 2),
302  u_pcibridge_aspmsetting);
303 
304  RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
305  "PlatformEnableASPM():PciBridge busnumber[%x], DevNumbe[%x], funcnumber[%x], Write reg[%x] = %x\n",
306  pcibridge_busnum, pcibridge_devnum, pcibridge_funcnum,
307  (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
308  u_pcibridge_aspmsetting);
309 
310  udelay(50);
311 
312  /*Get ASPM level (with/without Clock Req) */
313  aspmlevel = rtlpci->const_devicepci_aspm_setting;
314  u_device_aspmsetting = pcipriv->ndis_adapter.linkctrl_reg;
315 
316  /*_rtl_pci_platform_switch_device_pci_aspm(dev,*/
317  /*(priv->ndis_adapter.linkctrl_reg | ASPMLevel)); */
318 
319  u_device_aspmsetting |= aspmlevel;
320 
321  _rtl_pci_platform_switch_device_pci_aspm(hw, u_device_aspmsetting);
322 
324  _rtl_pci_switch_clk_req(hw, (ppsc->reg_rfps_level &
325  RT_RF_OFF_LEVL_CLK_REQ) ? 1 : 0);
327  }
328  udelay(100);
329 }
330 
331 static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw)
332 {
333  struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
334 
335  bool status = false;
336  u8 offset_e0;
337  unsigned offset_e4;
338 
339  pci_write_config_byte(rtlpci->pdev, 0xe0, 0xa0);
340 
341  pci_read_config_byte(rtlpci->pdev, 0xe0, &offset_e0);
342 
343  if (offset_e0 == 0xA0) {
344  pci_read_config_dword(rtlpci->pdev, 0xe4, &offset_e4);
345  if (offset_e4 & BIT(23))
346  status = true;
347  }
348 
349  return status;
350 }
351 
352 static void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw)
353 {
354  struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
355  struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
356  u8 capabilityoffset = pcipriv->ndis_adapter.pcibridge_pciehdr_offset;
357  u8 linkctrl_reg;
358  u8 num4bbytes;
359 
360  num4bbytes = (capabilityoffset + 0x10) / 4;
361 
362  /*Read Link Control Register */
363  pci_read_config_byte(rtlpci->pdev, (num4bbytes << 2), &linkctrl_reg);
364 
365  pcipriv->ndis_adapter.pcibridge_linkctrlreg = linkctrl_reg;
366 }
367 
368 static void rtl_pci_parse_configuration(struct pci_dev *pdev,
369  struct ieee80211_hw *hw)
370 {
371  struct rtl_priv *rtlpriv = rtl_priv(hw);
372  struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
373 
374  u8 tmp;
375  u16 linkctrl_reg;
376 
377  /*Link Control Register */
378  pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &linkctrl_reg);
379  pcipriv->ndis_adapter.linkctrl_reg = (u8)linkctrl_reg;
380 
381  RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Link Control Register =%x\n",
382  pcipriv->ndis_adapter.linkctrl_reg);
383 
384  pci_read_config_byte(pdev, 0x98, &tmp);
385  tmp |= BIT(4);
386  pci_write_config_byte(pdev, 0x98, tmp);
387 
388  tmp = 0x17;
389  pci_write_config_byte(pdev, 0x70f, tmp);
390 }
391 
392 static void rtl_pci_init_aspm(struct ieee80211_hw *hw)
393 {
394  struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
395 
396  _rtl_pci_update_default_setting(hw);
397 
399  /*Always enable ASPM & Clock Req. */
400  rtl_pci_enable_aspm(hw);
402  }
403 
404 }
405 
406 static void _rtl_pci_io_handler_init(struct device *dev,
407  struct ieee80211_hw *hw)
408 {
409  struct rtl_priv *rtlpriv = rtl_priv(hw);
410 
411  rtlpriv->io.dev = dev;
412 
413  rtlpriv->io.write8_async = pci_write8_async;
414  rtlpriv->io.write16_async = pci_write16_async;
415  rtlpriv->io.write32_async = pci_write32_async;
416 
417  rtlpriv->io.read8_sync = pci_read8_sync;
418  rtlpriv->io.read16_sync = pci_read16_sync;
419  rtlpriv->io.read32_sync = pci_read32_sync;
420 
421 }
422 
static void _rtl_pci_io_handler_release(struct ieee80211_hw *hw)
{
	/* Intentionally empty: _rtl_pci_io_handler_init() only stores
	 * function pointers and allocates no resources to release. */
}
426 
427 static bool _rtl_update_earlymode_info(struct ieee80211_hw *hw,
428  struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc, u8 tid)
429 {
430  struct rtl_priv *rtlpriv = rtl_priv(hw);
431  struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
432  u8 additionlen = FCS_LEN;
433  struct sk_buff *next_skb;
434 
435  /* here open is 4, wep/tkip is 8, aes is 12*/
436  if (info->control.hw_key)
437  additionlen += info->control.hw_key->icv_len;
438 
439  /* The most skb num is 6 */
440  tcb_desc->empkt_num = 0;
441  spin_lock_bh(&rtlpriv->locks.waitq_lock);
442  skb_queue_walk(&rtlpriv->mac80211.skb_waitq[tid], next_skb) {
443  struct ieee80211_tx_info *next_info;
444 
445  next_info = IEEE80211_SKB_CB(next_skb);
446  if (next_info->flags & IEEE80211_TX_CTL_AMPDU) {
447  tcb_desc->empkt_len[tcb_desc->empkt_num] =
448  next_skb->len + additionlen;
449  tcb_desc->empkt_num++;
450  } else {
451  break;
452  }
453 
454  if (skb_queue_is_last(&rtlpriv->mac80211.skb_waitq[tid],
455  next_skb))
456  break;
457 
458  if (tcb_desc->empkt_num >= 5)
459  break;
460  }
461  spin_unlock_bh(&rtlpriv->locks.waitq_lock);
462 
463  return true;
464 }
465 
466 /* just for early mode now */
467 static void _rtl_pci_tx_chk_waitq(struct ieee80211_hw *hw)
468 {
469  struct rtl_priv *rtlpriv = rtl_priv(hw);
470  struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
471  struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
472  struct sk_buff *skb = NULL;
473  struct ieee80211_tx_info *info = NULL;
474  int tid;
475 
476  if (!rtlpriv->rtlhal.earlymode_enable)
477  return;
478 
479  /* we juse use em for BE/BK/VI/VO */
480  for (tid = 7; tid >= 0; tid--) {
481  u8 hw_queue = ac_to_hwq[rtl_tid_to_ac(tid)];
482  struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
483  while (!mac->act_scanning &&
484  rtlpriv->psc.rfpwr_state == ERFON) {
485  struct rtl_tcb_desc tcb_desc;
486  memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
487 
488  spin_lock_bh(&rtlpriv->locks.waitq_lock);
489  if (!skb_queue_empty(&mac->skb_waitq[tid]) &&
490  (ring->entries - skb_queue_len(&ring->queue) > 5)) {
491  skb = skb_dequeue(&mac->skb_waitq[tid]);
492  } else {
493  spin_unlock_bh(&rtlpriv->locks.waitq_lock);
494  break;
495  }
496  spin_unlock_bh(&rtlpriv->locks.waitq_lock);
497 
498  /* Some macaddr can't do early mode. like
499  * multicast/broadcast/no_qos data */
500  info = IEEE80211_SKB_CB(skb);
501  if (info->flags & IEEE80211_TX_CTL_AMPDU)
502  _rtl_update_earlymode_info(hw, skb,
503  &tcb_desc, tid);
504 
505  rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
506  }
507  }
508 }
509 
510 
511 static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
512 {
513  struct rtl_priv *rtlpriv = rtl_priv(hw);
514  struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
515 
516  struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
517 
518  while (skb_queue_len(&ring->queue)) {
519  struct rtl_tx_desc *entry = &ring->desc[ring->idx];
520  struct sk_buff *skb;
521  struct ieee80211_tx_info *info;
522  __le16 fc;
523  u8 tid;
524 
525  u8 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) entry, true,
526  HW_DESC_OWN);
527 
528  /*
529  *beacon packet will only use the first
530  *descriptor defautly,and the own may not
531  *be cleared by the hardware
532  */
533  if (own)
534  return;
535  ring->idx = (ring->idx + 1) % ring->entries;
536 
537  skb = __skb_dequeue(&ring->queue);
538  pci_unmap_single(rtlpci->pdev,
539  rtlpriv->cfg->ops->
540  get_desc((u8 *) entry, true,
542  skb->len, PCI_DMA_TODEVICE);
543 
544  /* remove early mode header */
545  if (rtlpriv->rtlhal.earlymode_enable)
546  skb_pull(skb, EM_HDR_LEN);
547 
548  RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE,
549  "new ring->idx:%d, free: skb_queue_len:%d, free: seq:%x\n",
550  ring->idx,
551  skb_queue_len(&ring->queue),
552  *(u16 *) (skb->data + 22));
553 
554  if (prio == TXCMD_QUEUE) {
555  dev_kfree_skb(skb);
556  goto tx_status_ok;
557 
558  }
559 
560  /* for sw LPS, just after NULL skb send out, we can
561  * sure AP kown we are sleeped, our we should not let
562  * rf to sleep*/
563  fc = rtl_get_fc(skb);
564  if (ieee80211_is_nullfunc(fc)) {
565  if (ieee80211_has_pm(fc)) {
566  rtlpriv->mac80211.offchan_delay = true;
567  rtlpriv->psc.state_inap = true;
568  } else {
569  rtlpriv->psc.state_inap = false;
570  }
571  }
572 
573  /* update tid tx pkt num */
574  tid = rtl_get_tid(skb);
575  if (tid <= 7)
576  rtlpriv->link_info.tidtx_inperiod[tid]++;
577 
578  info = IEEE80211_SKB_CB(skb);
579  ieee80211_tx_info_clear_status(info);
580 
581  info->flags |= IEEE80211_TX_STAT_ACK;
582  /*info->status.rates[0].count = 1; */
583 
585 
586  if ((ring->entries - skb_queue_len(&ring->queue))
587  == 2) {
588 
589  RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
590  "more desc left, wake skb_queue@%d, ring->idx = %d, skb_queue_len = 0x%d\n",
591  prio, ring->idx,
592  skb_queue_len(&ring->queue));
593 
595  skb_get_queue_mapping
596  (skb));
597  }
598 tx_status_ok:
599  skb = NULL;
600  }
601 
602  if (((rtlpriv->link_info.num_rx_inperiod +
603  rtlpriv->link_info.num_tx_inperiod) > 8) ||
604  (rtlpriv->link_info.num_rx_inperiod > 2)) {
605  schedule_work(&rtlpriv->works.lps_leave_work);
606  }
607 }
608 
609 static void _rtl_receive_one(struct ieee80211_hw *hw, struct sk_buff *skb,
611 {
612  struct rtl_priv *rtlpriv = rtl_priv(hw);
613  struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
614  __le16 fc = rtl_get_fc(skb);
615  bool unicast = false;
616  struct sk_buff *uskb = NULL;
617  u8 *pdata;
618 
619 
620  memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
621 
622  if (is_broadcast_ether_addr(hdr->addr1)) {
623  ;/*TODO*/
624  } else if (is_multicast_ether_addr(hdr->addr1)) {
625  ;/*TODO*/
626  } else {
627  unicast = true;
628  rtlpriv->stats.rxbytesunicast += skb->len;
629  }
630 
631  rtl_is_special_data(hw, skb, false);
632 
633  if (ieee80211_is_data(fc)) {
634  rtlpriv->cfg->ops->led_control(hw, LED_CTL_RX);
635 
636  if (unicast)
637  rtlpriv->link_info.num_rx_inperiod++;
638  }
639 
640  /* for sw lps */
641  rtl_swlps_beacon(hw, (void *)skb->data, skb->len);
642  rtl_recognize_peer(hw, (void *)skb->data, skb->len);
643  if ((rtlpriv->mac80211.opmode == NL80211_IFTYPE_AP) &&
644  (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G) &&
645  (ieee80211_is_beacon(fc) || ieee80211_is_probe_resp(fc)))
646  return;
647 
648  if (unlikely(!rtl_action_proc(hw, skb, false)))
649  return;
650 
651  uskb = dev_alloc_skb(skb->len + 128);
652  if (!uskb)
653  return; /* exit if allocation failed */
654  memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status, sizeof(rx_status));
655  pdata = (u8 *)skb_put(uskb, skb->len);
656  memcpy(pdata, skb->data, skb->len);
657 
658  ieee80211_rx_irqsafe(hw, uskb);
659 }
660 
661 static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
662 {
663  struct rtl_priv *rtlpriv = rtl_priv(hw);
664  struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
665  int rx_queue_idx = RTL_PCI_RX_MPDU_QUEUE;
666 
667  struct ieee80211_rx_status rx_status = { 0 };
668  unsigned int count = rtlpci->rxringcount;
669  u8 own;
670  u8 tmp_one;
671  u32 bufferaddress;
672 
673  struct rtl_stats stats = {
674  .signal = 0,
675  .noise = -98,
676  .rate = 0,
677  };
678  int index = rtlpci->rx_ring[rx_queue_idx].idx;
679 
680  /*RX NORMAL PKT */
681  while (count--) {
682  /*rx descriptor */
683  struct rtl_rx_desc *pdesc = &rtlpci->rx_ring[rx_queue_idx].desc[
684  index];
685  /*rx pkt */
686  struct sk_buff *skb = rtlpci->rx_ring[rx_queue_idx].rx_buf[
687  index];
688  struct sk_buff *new_skb = NULL;
689 
690  own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
691  false, HW_DESC_OWN);
692 
693  /*wait data to be filled by hardware */
694  if (own)
695  break;
696 
697  rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
698  &rx_status,
699  (u8 *) pdesc, skb);
700 
701  if (stats.crc || stats.hwerror)
702  goto done;
703 
704  new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
705  if (unlikely(!new_skb)) {
706  RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV), DBG_DMESG,
707  "can't alloc skb for rx\n");
708  goto done;
709  }
710 
711  pci_unmap_single(rtlpci->pdev,
712  *((dma_addr_t *) skb->cb),
713  rtlpci->rxbuffersize,
715 
716  skb_put(skb, rtlpriv->cfg->ops->get_desc((u8 *) pdesc, false,
718  skb_reserve(skb, stats.rx_drvinfo_size + stats.rx_bufshift);
719 
720  /*
721  * NOTICE This can not be use for mac80211,
722  * this is done in mac80211 code,
723  * if you done here sec DHCP will fail
724  * skb_trim(skb, skb->len - 4);
725  */
726 
727  _rtl_receive_one(hw, skb, rx_status);
728 
729  if (((rtlpriv->link_info.num_rx_inperiod +
730  rtlpriv->link_info.num_tx_inperiod) > 8) ||
731  (rtlpriv->link_info.num_rx_inperiod > 2)) {
732  schedule_work(&rtlpriv->works.lps_leave_work);
733  }
734 
735  dev_kfree_skb_any(skb);
736  skb = new_skb;
737 
738  rtlpci->rx_ring[rx_queue_idx].rx_buf[index] = skb;
739  *((dma_addr_t *) skb->cb) =
740  pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
741  rtlpci->rxbuffersize,
743 
744 done:
745  bufferaddress = (*((dma_addr_t *)skb->cb));
746  tmp_one = 1;
747  rtlpriv->cfg->ops->set_desc((u8 *) pdesc, false,
749  (u8 *)&bufferaddress);
750  rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
752  (u8 *)&rtlpci->rxbuffersize);
753 
754  if (index == rtlpci->rxringcount - 1)
755  rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
757  &tmp_one);
758 
759  rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false, HW_DESC_RXOWN,
760  &tmp_one);
761 
762  index = (index + 1) % rtlpci->rxringcount;
763  }
764 
765  rtlpci->rx_ring[rx_queue_idx].idx = index;
766 }
767 
768 static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
769 {
770  struct ieee80211_hw *hw = dev_id;
771  struct rtl_priv *rtlpriv = rtl_priv(hw);
772  struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
773  unsigned long flags;
774  u32 inta = 0;
775  u32 intb = 0;
777 
778  spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
779 
780  /*read ISR: 4/8bytes */
781  rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb);
782 
783  /*Shared IRQ or HW disappared */
784  if (!inta || inta == 0xffff) {
785  ret = IRQ_NONE;
786  goto done;
787  }
788 
789  /*<1> beacon related */
790  if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) {
791  RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
792  "beacon ok interrupt!\n");
793  }
794 
795  if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TBDER])) {
796  RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
797  "beacon err interrupt!\n");
798  }
799 
800  if (inta & rtlpriv->cfg->maps[RTL_IMR_BDOK]) {
801  RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "beacon interrupt!\n");
802  }
803 
804  if (inta & rtlpriv->cfg->maps[RTL_IMR_BcnInt]) {
805  RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
806  "prepare beacon for interrupt!\n");
807  tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet);
808  }
809 
810  /*<3> Tx related */
811  if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
812  RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "IMR_TXFOVW!\n");
813 
814  if (inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
815  RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
816  "Manage ok interrupt!\n");
817  _rtl_pci_tx_isr(hw, MGNT_QUEUE);
818  }
819 
820  if (inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
821  RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
822  "HIGH_QUEUE ok interrupt!\n");
823  _rtl_pci_tx_isr(hw, HIGH_QUEUE);
824  }
825 
826  if (inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
827  rtlpriv->link_info.num_tx_inperiod++;
828 
829  RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
830  "BK Tx OK interrupt!\n");
831  _rtl_pci_tx_isr(hw, BK_QUEUE);
832  }
833 
834  if (inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
835  rtlpriv->link_info.num_tx_inperiod++;
836 
837  RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
838  "BE TX OK interrupt!\n");
839  _rtl_pci_tx_isr(hw, BE_QUEUE);
840  }
841 
842  if (inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
843  rtlpriv->link_info.num_tx_inperiod++;
844 
845  RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
846  "VI TX OK interrupt!\n");
847  _rtl_pci_tx_isr(hw, VI_QUEUE);
848  }
849 
850  if (inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
851  rtlpriv->link_info.num_tx_inperiod++;
852 
853  RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
854  "Vo TX OK interrupt!\n");
855  _rtl_pci_tx_isr(hw, VO_QUEUE);
856  }
857 
858  if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192SE) {
859  if (inta & rtlpriv->cfg->maps[RTL_IMR_COMDOK]) {
860  rtlpriv->link_info.num_tx_inperiod++;
861 
862  RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
863  "CMD TX OK interrupt!\n");
864  _rtl_pci_tx_isr(hw, TXCMD_QUEUE);
865  }
866  }
867 
868  /*<2> Rx related */
869  if (inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
870  RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, "Rx ok interrupt!\n");
871  _rtl_pci_rx_interrupt(hw);
872  }
873 
874  if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
875  RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
876  "rx descriptor unavailable!\n");
877  _rtl_pci_rx_interrupt(hw);
878  }
879 
880  if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
881  RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, "rx overflow !\n");
882  _rtl_pci_rx_interrupt(hw);
883  }
884 
885  if (rtlpriv->rtlhal.earlymode_enable)
886  tasklet_schedule(&rtlpriv->works.irq_tasklet);
887 
888 done:
889  spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
890  return ret;
891 }
892 
static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
{
	/* Bottom half: flush the early-mode software wait queues. */
	_rtl_pci_tx_chk_waitq(hw);
}
897 
898 static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
899 {
900  struct rtl_priv *rtlpriv = rtl_priv(hw);
901  struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
902  struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
903  struct rtl8192_tx_ring *ring = NULL;
904  struct ieee80211_hdr *hdr = NULL;
905  struct ieee80211_tx_info *info = NULL;
906  struct sk_buff *pskb = NULL;
907  struct rtl_tx_desc *pdesc = NULL;
908  struct rtl_tcb_desc tcb_desc;
909  u8 temp_one = 1;
910 
911  memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
912  ring = &rtlpci->tx_ring[BEACON_QUEUE];
913  pskb = __skb_dequeue(&ring->queue);
914  if (pskb) {
915  struct rtl_tx_desc *entry = &ring->desc[ring->idx];
916  pci_unmap_single(rtlpci->pdev, rtlpriv->cfg->ops->get_desc(
917  (u8 *) entry, true, HW_DESC_TXBUFF_ADDR),
918  pskb->len, PCI_DMA_TODEVICE);
919  kfree_skb(pskb);
920  }
921 
922  /*NB: the beacon data buffer must be 32-bit aligned. */
923  pskb = ieee80211_beacon_get(hw, mac->vif);
924  if (pskb == NULL)
925  return;
926  hdr = rtl_get_hdr(pskb);
927  info = IEEE80211_SKB_CB(pskb);
928  pdesc = &ring->desc[0];
929  rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
930  info, NULL, pskb, BEACON_QUEUE, &tcb_desc);
931 
932  __skb_queue_tail(&ring->queue, pskb);
933 
934  rtlpriv->cfg->ops->set_desc((u8 *) pdesc, true, HW_DESC_OWN,
935  &temp_one);
936 
937  return;
938 }
939 
940 static void rtl_lps_leave_work_callback(struct work_struct *work)
941 {
942  struct rtl_works *rtlworks =
943  container_of(work, struct rtl_works, lps_leave_work);
944  struct ieee80211_hw *hw = rtlworks->hw;
945 
946  rtl_lps_leave(hw);
947 }
948 
949 static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
950 {
951  struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
952  u8 i;
953 
954  for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
955  rtlpci->txringcount[i] = RT_TXDESC_NUM;
956 
957  /*
958  *we just alloc 2 desc for beacon queue,
959  *because we just need first desc in hw beacon.
960  */
961  rtlpci->txringcount[BEACON_QUEUE] = 2;
962 
963  /*
964  *BE queue need more descriptor for performance
965  *consideration or, No more tx desc will happen,
966  *and may cause mac80211 mem leakage.
967  */
969 
970  rtlpci->rxbuffersize = 9100; /*2048/1024; */
971  rtlpci->rxringcount = RTL_PCI_MAX_RX_COUNT; /*64; */
972 }
973 
974 static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
975  struct pci_dev *pdev)
976 {
977  struct rtl_priv *rtlpriv = rtl_priv(hw);
978  struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
979  struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
980  struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
981 
982  rtlpci->up_first_time = true;
983  rtlpci->being_init_adapter = false;
984 
985  rtlhal->hw = hw;
986  rtlpci->pdev = pdev;
987 
988  /*Tx/Rx related var */
989  _rtl_pci_init_trx_var(hw);
990 
991  /*IBSS*/ mac->beacon_interval = 100;
992 
993  /*AMPDU*/
994  mac->min_space_cfg = 0;
995  mac->max_mss_density = 0;
996  /*set sane AMPDU defaults */
997  mac->current_ampdu_density = 7;
998  mac->current_ampdu_factor = 3;
999 
1000  /*QOS*/
1001  rtlpci->acm_method = eAcmWay2_SW;
1002 
1003  /*task */
1004  tasklet_init(&rtlpriv->works.irq_tasklet,
1005  (void (*)(unsigned long))_rtl_pci_irq_tasklet,
1006  (unsigned long)hw);
1007  tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
1008  (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
1009  (unsigned long)hw);
1010  INIT_WORK(&rtlpriv->works.lps_leave_work, rtl_lps_leave_work_callback);
1011 }
1012 
1013 static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
1014  unsigned int prio, unsigned int entries)
1015 {
1016  struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1017  struct rtl_priv *rtlpriv = rtl_priv(hw);
1018  struct rtl_tx_desc *ring;
1019  dma_addr_t dma;
1020  u32 nextdescaddress;
1021  int i;
1022 
1023  ring = pci_alloc_consistent(rtlpci->pdev,
1024  sizeof(*ring) * entries, &dma);
1025 
1026  if (!ring || (unsigned long)ring & 0xFF) {
1027  RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1028  "Cannot allocate TX ring (prio = %d)\n", prio);
1029  return -ENOMEM;
1030  }
1031 
1032  memset(ring, 0, sizeof(*ring) * entries);
1033  rtlpci->tx_ring[prio].desc = ring;
1034  rtlpci->tx_ring[prio].dma = dma;
1035  rtlpci->tx_ring[prio].idx = 0;
1036  rtlpci->tx_ring[prio].entries = entries;
1037  skb_queue_head_init(&rtlpci->tx_ring[prio].queue);
1038 
1039  RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "queue:%d, ring_addr:%p\n",
1040  prio, ring);
1041 
1042  for (i = 0; i < entries; i++) {
1043  nextdescaddress = (u32) dma +
1044  ((i + 1) % entries) *
1045  sizeof(*ring);
1046 
1047  rtlpriv->cfg->ops->set_desc((u8 *)&(ring[i]),
1049  (u8 *)&nextdescaddress);
1050  }
1051 
1052  return 0;
1053 }
1054 
1055 static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
1056 {
1057  struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1058  struct rtl_priv *rtlpriv = rtl_priv(hw);
1059  struct rtl_rx_desc *entry = NULL;
1060  int i, rx_queue_idx;
1061  u8 tmp_one = 1;
1062 
1063  /*
1064  *rx_queue_idx 0:RX_MPDU_QUEUE
1065  *rx_queue_idx 1:RX_CMD_QUEUE
1066  */
1067  for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
1068  rx_queue_idx++) {
1069  rtlpci->rx_ring[rx_queue_idx].desc =
1070  pci_alloc_consistent(rtlpci->pdev,
1071  sizeof(*rtlpci->rx_ring[rx_queue_idx].
1072  desc) * rtlpci->rxringcount,
1073  &rtlpci->rx_ring[rx_queue_idx].dma);
1074 
1075  if (!rtlpci->rx_ring[rx_queue_idx].desc ||
1076  (unsigned long)rtlpci->rx_ring[rx_queue_idx].desc & 0xFF) {
1077  RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1078  "Cannot allocate RX ring\n");
1079  return -ENOMEM;
1080  }
1081 
1082  memset(rtlpci->rx_ring[rx_queue_idx].desc, 0,
1083  sizeof(*rtlpci->rx_ring[rx_queue_idx].desc) *
1084  rtlpci->rxringcount);
1085 
1086  rtlpci->rx_ring[rx_queue_idx].idx = 0;
1087 
1088  /* If amsdu_8k is disabled, set buffersize to 4096. This
1089  * change will reduce memory fragmentation.
1090  */
1091  if (rtlpci->rxbuffersize > 4096 &&
1092  rtlpriv->rtlhal.disable_amsdu_8k)
1093  rtlpci->rxbuffersize = 4096;
1094 
1095  for (i = 0; i < rtlpci->rxringcount; i++) {
1096  struct sk_buff *skb =
1097  dev_alloc_skb(rtlpci->rxbuffersize);
1098  u32 bufferaddress;
1099  if (!skb)
1100  return 0;
1101  kmemleak_not_leak(skb);
1102  entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
1103 
1104  /*skb->dev = dev; */
1105 
1106  rtlpci->rx_ring[rx_queue_idx].rx_buf[i] = skb;
1107 
1108  /*
1109  *just set skb->cb to mapping addr
1110  *for pci_unmap_single use
1111  */
1112  *((dma_addr_t *) skb->cb) =
1113  pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
1114  rtlpci->rxbuffersize,
1116 
1117  bufferaddress = (*((dma_addr_t *)skb->cb));
1118  rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
1120  (u8 *)&bufferaddress);
1121  rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
1123  (u8 *)&rtlpci->
1124  rxbuffersize);
1125  rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
1126  HW_DESC_RXOWN,
1127  &tmp_one);
1128  }
1129 
1130  rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
1131  HW_DESC_RXERO, &tmp_one);
1132  }
1133  return 0;
1134 }
1135 
1136 static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
1137  unsigned int prio)
1138 {
1139  struct rtl_priv *rtlpriv = rtl_priv(hw);
1140  struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1141  struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
1142 
1143  while (skb_queue_len(&ring->queue)) {
1144  struct rtl_tx_desc *entry = &ring->desc[ring->idx];
1145  struct sk_buff *skb = __skb_dequeue(&ring->queue);
1146 
1147  pci_unmap_single(rtlpci->pdev,
1148  rtlpriv->cfg->
1149  ops->get_desc((u8 *) entry, true,
1151  skb->len, PCI_DMA_TODEVICE);
1152  kfree_skb(skb);
1153  ring->idx = (ring->idx + 1) % ring->entries;
1154  }
1155 
1156  if (ring->desc) {
1157  pci_free_consistent(rtlpci->pdev,
1158  sizeof(*ring->desc) * ring->entries,
1159  ring->desc, ring->dma);
1160  ring->desc = NULL;
1161  }
1162 }
1163 
1164 static void _rtl_pci_free_rx_ring(struct rtl_pci *rtlpci)
1165 {
1166  int i, rx_queue_idx;
1167 
1168  /*rx_queue_idx 0:RX_MPDU_QUEUE */
1169  /*rx_queue_idx 1:RX_CMD_QUEUE */
1170  for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
1171  rx_queue_idx++) {
1172  for (i = 0; i < rtlpci->rxringcount; i++) {
1173  struct sk_buff *skb =
1174  rtlpci->rx_ring[rx_queue_idx].rx_buf[i];
1175  if (!skb)
1176  continue;
1177 
1178  pci_unmap_single(rtlpci->pdev,
1179  *((dma_addr_t *) skb->cb),
1180  rtlpci->rxbuffersize,
1182  kfree_skb(skb);
1183  }
1184 
1185  if (rtlpci->rx_ring[rx_queue_idx].desc) {
1186  pci_free_consistent(rtlpci->pdev,
1187  sizeof(*rtlpci->rx_ring[rx_queue_idx].
1188  desc) * rtlpci->rxringcount,
1189  rtlpci->rx_ring[rx_queue_idx].desc,
1190  rtlpci->rx_ring[rx_queue_idx].dma);
1191  rtlpci->rx_ring[rx_queue_idx].desc = NULL;
1192  }
1193  }
1194 }
1195 
1196 static int _rtl_pci_init_trx_ring(struct ieee80211_hw *hw)
1197 {
1198  struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1199  int ret;
1200  int i;
1201 
1202  ret = _rtl_pci_init_rx_ring(hw);
1203  if (ret)
1204  return ret;
1205 
1206  for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
1207  ret = _rtl_pci_init_tx_ring(hw, i,
1208  rtlpci->txringcount[i]);
1209  if (ret)
1210  goto err_free_rings;
1211  }
1212 
1213  return 0;
1214 
1215 err_free_rings:
1216  _rtl_pci_free_rx_ring(rtlpci);
1217 
1218  for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
1219  if (rtlpci->tx_ring[i].desc)
1220  _rtl_pci_free_tx_ring(hw, i);
1221 
1222  return 1;
1223 }
1224 
1225 static int _rtl_pci_deinit_trx_ring(struct ieee80211_hw *hw)
1226 {
1227  struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1228  u32 i;
1229 
1230  /*free rx rings */
1231  _rtl_pci_free_rx_ring(rtlpci);
1232 
1233  /*free tx rings */
1234  for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
1235  _rtl_pci_free_tx_ring(hw, i);
1236 
1237  return 0;
1238 }
1239 
1241 {
1242  struct rtl_priv *rtlpriv = rtl_priv(hw);
1243  struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1244  int i, rx_queue_idx;
1245  unsigned long flags;
1246  u8 tmp_one = 1;
1247 
1248  /*rx_queue_idx 0:RX_MPDU_QUEUE */
1249  /*rx_queue_idx 1:RX_CMD_QUEUE */
1250  for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
1251  rx_queue_idx++) {
1252  /*
1253  *force the rx_ring[RX_MPDU_QUEUE/
1254  *RX_CMD_QUEUE].idx to the first one
1255  */
1256  if (rtlpci->rx_ring[rx_queue_idx].desc) {
1257  struct rtl_rx_desc *entry = NULL;
1258 
1259  for (i = 0; i < rtlpci->rxringcount; i++) {
1260  entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
1261  rtlpriv->cfg->ops->set_desc((u8 *) entry,
1262  false,
1263  HW_DESC_RXOWN,
1264  &tmp_one);
1265  }
1266  rtlpci->rx_ring[rx_queue_idx].idx = 0;
1267  }
1268  }
1269 
1270  /*
1271  *after reset, release previous pending packet,
1272  *and force the tx idx to the first one
1273  */
1274  for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
1275  if (rtlpci->tx_ring[i].desc) {
1276  struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[i];
1277 
1278  while (skb_queue_len(&ring->queue)) {
1279  struct rtl_tx_desc *entry;
1280  struct sk_buff *skb;
1281 
1282  spin_lock_irqsave(&rtlpriv->locks.irq_th_lock,
1283  flags);
1284  entry = &ring->desc[ring->idx];
1285  skb = __skb_dequeue(&ring->queue);
1286  pci_unmap_single(rtlpci->pdev,
1287  rtlpriv->cfg->ops->
1288  get_desc((u8 *)
1289  entry,
1290  true,
1292  skb->len, PCI_DMA_TODEVICE);
1293  ring->idx = (ring->idx + 1) % ring->entries;
1294  spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
1295  flags);
1296  kfree_skb(skb);
1297  }
1298  ring->idx = 0;
1299  }
1300  }
1301 
1302  return 0;
1303 }
1304 
1305 static bool rtl_pci_tx_chk_waitq_insert(struct ieee80211_hw *hw,
1306  struct ieee80211_sta *sta,
1307  struct sk_buff *skb)
1308 {
1309  struct rtl_priv *rtlpriv = rtl_priv(hw);
1310  struct rtl_sta_info *sta_entry = NULL;
1311  u8 tid = rtl_get_tid(skb);
1312 
1313  if (!sta)
1314  return false;
1315  sta_entry = (struct rtl_sta_info *)sta->drv_priv;
1316 
1317  if (!rtlpriv->rtlhal.earlymode_enable)
1318  return false;
1319  if (sta_entry->tids[tid].agg.agg_state != RTL_AGG_OPERATIONAL)
1320  return false;
1321  if (_rtl_mac_to_hwqueue(hw, skb) > VO_QUEUE)
1322  return false;
1323  if (tid > 7)
1324  return false;
1325 
1326  /* maybe every tid should be checked */
1327  if (!rtlpriv->link_info.higher_busytxtraffic[tid])
1328  return false;
1329 
1330  spin_lock_bh(&rtlpriv->locks.waitq_lock);
1331  skb_queue_tail(&rtlpriv->mac80211.skb_waitq[tid], skb);
1332  spin_unlock_bh(&rtlpriv->locks.waitq_lock);
1333 
1334  return true;
1335 }
1336 
1337 static int rtl_pci_tx(struct ieee80211_hw *hw,
1338  struct ieee80211_sta *sta,
1339  struct sk_buff *skb,
1340  struct rtl_tcb_desc *ptcb_desc)
1341 {
1342  struct rtl_priv *rtlpriv = rtl_priv(hw);
1343  struct rtl_sta_info *sta_entry = NULL;
1344  struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1345  struct rtl8192_tx_ring *ring;
1346  struct rtl_tx_desc *pdesc;
1347  u8 idx;
1348  u8 hw_queue = _rtl_mac_to_hwqueue(hw, skb);
1349  unsigned long flags;
1350  struct ieee80211_hdr *hdr = rtl_get_hdr(skb);
1351  __le16 fc = rtl_get_fc(skb);
1352  u8 *pda_addr = hdr->addr1;
1353  struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1354  /*ssn */
1355  u8 tid = 0;
1356  u16 seq_number = 0;
1357  u8 own;
1358  u8 temp_one = 1;
1359 
1360  if (ieee80211_is_auth(fc)) {
1361  RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, "MAC80211_LINKING\n");
1362  rtl_ips_nic_on(hw);
1363  }
1364 
1365  if (rtlpriv->psc.sw_ps_enabled) {
1366  if (ieee80211_is_data(fc) && !ieee80211_is_nullfunc(fc) &&
1367  !ieee80211_has_pm(fc))
1369  }
1370 
1371  rtl_action_proc(hw, skb, true);
1372 
1373  if (is_multicast_ether_addr(pda_addr))
1374  rtlpriv->stats.txbytesmulticast += skb->len;
1375  else if (is_broadcast_ether_addr(pda_addr))
1376  rtlpriv->stats.txbytesbroadcast += skb->len;
1377  else
1378  rtlpriv->stats.txbytesunicast += skb->len;
1379 
1380  spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
1381  ring = &rtlpci->tx_ring[hw_queue];
1382  if (hw_queue != BEACON_QUEUE)
1383  idx = (ring->idx + skb_queue_len(&ring->queue)) %
1384  ring->entries;
1385  else
1386  idx = 0;
1387 
1388  pdesc = &ring->desc[idx];
1389  own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
1390  true, HW_DESC_OWN);
1391 
1392  if ((own == 1) && (hw_queue != BEACON_QUEUE)) {
1393  RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1394  "No more TX desc@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%d\n",
1395  hw_queue, ring->idx, idx,
1396  skb_queue_len(&ring->queue));
1397 
1398  spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
1399  return skb->len;
1400  }
1401 
1402  if (ieee80211_is_data_qos(fc)) {
1403  tid = rtl_get_tid(skb);
1404  if (sta) {
1405  sta_entry = (struct rtl_sta_info *)sta->drv_priv;
1406  seq_number = (le16_to_cpu(hdr->seq_ctrl) &
1407  IEEE80211_SCTL_SEQ) >> 4;
1408  seq_number += 1;
1409 
1410  if (!ieee80211_has_morefrags(hdr->frame_control))
1411  sta_entry->tids[tid].seq_number = seq_number;
1412  }
1413  }
1414 
1415  if (ieee80211_is_data(fc))
1416  rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
1417 
1418  rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *)pdesc,
1419  info, sta, skb, hw_queue, ptcb_desc);
1420 
1421  __skb_queue_tail(&ring->queue, skb);
1422 
1423  rtlpriv->cfg->ops->set_desc((u8 *)pdesc, true,
1424  HW_DESC_OWN, &temp_one);
1425 
1426 
1427  if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
1428  hw_queue != BEACON_QUEUE) {
1429 
1430  RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
1431  "less desc left, stop skb_queue@%d, ring->idx = %d, idx = %d, skb_queue_len = 0x%d\n",
1432  hw_queue, ring->idx, idx,
1433  skb_queue_len(&ring->queue));
1434 
1435  ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
1436  }
1437 
1438  spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
1439 
1440  rtlpriv->cfg->ops->tx_polling(hw, hw_queue);
1441 
1442  return 0;
1443 }
1444 
1445 static void rtl_pci_flush(struct ieee80211_hw *hw, bool drop)
1446 {
1447  struct rtl_priv *rtlpriv = rtl_priv(hw);
1448  struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
1449  struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1450  u16 i = 0;
1451  int queue_id;
1452  struct rtl8192_tx_ring *ring;
1453 
1454  for (queue_id = RTL_PCI_MAX_TX_QUEUE_COUNT - 1; queue_id >= 0;) {
1455  u32 queue_len;
1456  ring = &pcipriv->dev.tx_ring[queue_id];
1457  queue_len = skb_queue_len(&ring->queue);
1458  if (queue_len == 0 || queue_id == BEACON_QUEUE ||
1459  queue_id == TXCMD_QUEUE) {
1460  queue_id--;
1461  continue;
1462  } else {
1463  msleep(20);
1464  i++;
1465  }
1466 
1467  /* we just wait 1s for all queues */
1468  if (rtlpriv->psc.rfpwr_state == ERFOFF ||
1469  is_hal_stop(rtlhal) || i >= 200)
1470  return;
1471  }
1472 }
1473 
/* Tear down PCI-level software state: free the trx rings, make sure
 * no interrupt handler or deferred work is still running, then
 * destroy the driver workqueue.
 */
static void rtl_pci_deinit(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));

	_rtl_pci_deinit_trx_ring(hw);

	/* Wait for a possibly in-flight interrupt handler, then stop
	 * the bottom-half tasklet and any pending LPS-leave work.
	 */
	synchronize_irq(rtlpci->pdev->irq);
	tasklet_kill(&rtlpriv->works.irq_tasklet);
	cancel_work_sync(&rtlpriv->works.lps_leave_work);

	flush_workqueue(rtlpriv->works.rtl_wq);
	destroy_workqueue(rtlpriv->works.rtl_wq);

}
1489 
1490 static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
1491 {
1492  struct rtl_priv *rtlpriv = rtl_priv(hw);
1493  int err;
1494 
1495  _rtl_pci_init_struct(hw, pdev);
1496 
1497  err = _rtl_pci_init_trx_ring(hw);
1498  if (err) {
1499  RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1500  "tx ring initialization failed\n");
1501  return err;
1502  }
1503 
1504  return 0;
1505 }
1506 
1507 static int rtl_pci_start(struct ieee80211_hw *hw)
1508 {
1509  struct rtl_priv *rtlpriv = rtl_priv(hw);
1510  struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1511  struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1512  struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1513 
1514  int err;
1515 
1517 
1518  rtlpci->driver_is_goingto_unload = false;
1519  err = rtlpriv->cfg->ops->hw_init(hw);
1520  if (err) {
1521  RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1522  "Failed to config hardware!\n");
1523  return err;
1524  }
1525 
1526  rtlpriv->cfg->ops->enable_interrupt(hw);
1527  RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, "enable_interrupt OK\n");
1528 
1529  rtl_init_rx_config(hw);
1530 
1531  /*should be after adapter start and interrupt enable. */
1532  set_hal_start(rtlhal);
1533 
1535 
1536  rtlpci->up_first_time = false;
1537 
1538  RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, "OK\n");
1539  return 0;
1540 }
1541 
/* mac80211 ops->stop path: mark the HAL stopped, mask interrupts,
 * wait for any in-flight RF power-state change to finish, then
 * disable the hardware.
 */
static void rtl_pci_stop(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	unsigned long flags;
	u8 RFInProgressTimeOut = 0;

	/*
	 *should be before disable interrupt&adapter
	 *and will do it immediately.
	 */
	set_hal_stop(rtlhal);

	rtlpriv->cfg->ops->disable_interrupt(hw);
	cancel_work_sync(&rtlpriv->works.lps_leave_work);

	/* Poll for a concurrent RF change to complete, dropping the
	 * lock while sleeping; give up after ~100 ms and proceed.
	 */
	spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
	while (ppsc->rfchange_inprogress) {
		spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
		if (RFInProgressTimeOut > 100) {
			spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
			break;
		}
		mdelay(1);
		RFInProgressTimeOut++;
		spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
	}
	/* Claim the RF-change flag ourselves while the hw goes down. */
	ppsc->rfchange_inprogress = true;
	spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);

	rtlpci->driver_is_goingto_unload = true;
	rtlpriv->cfg->ops->hw_disable(hw);
	/* some things are not needed if firmware not available */
	if (!rtlpriv->max_fw_size)
		return;
	/* NOTE(review): the early return above leaves
	 * rfchange_inprogress set - confirm that is intended for the
	 * firmware-less case.
	 */
	rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);

	spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
	ppsc->rfchange_inprogress = false;
	spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);

	rtl_pci_enable_aspm(hw);
}
1587 
1588 static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
1589  struct ieee80211_hw *hw)
1590 {
1591  struct rtl_priv *rtlpriv = rtl_priv(hw);
1592  struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
1593  struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1594  struct pci_dev *bridge_pdev = pdev->bus->self;
1595  u16 venderid;
1596  u16 deviceid;
1597  u8 revisionid;
1598  u16 irqline;
1599  u8 tmp;
1600 
1601  pcipriv->ndis_adapter.pcibridge_vendor = PCI_BRIDGE_VENDOR_UNKNOWN;
1602  venderid = pdev->vendor;
1603  deviceid = pdev->device;
1604  pci_read_config_byte(pdev, 0x8, &revisionid);
1605  pci_read_config_word(pdev, 0x3C, &irqline);
1606 
1607  /* PCI ID 0x10ec:0x8192 occurs for both RTL8192E, which uses
1608  * r8192e_pci, and RTL8192SE, which uses this driver. If the
1609  * revision ID is RTL_PCI_REVISION_ID_8192PCIE (0x01), then
1610  * the correct driver is r8192e_pci, thus this routine should
1611  * return false.
1612  */
1613  if (deviceid == RTL_PCI_8192SE_DID &&
1614  revisionid == RTL_PCI_REVISION_ID_8192PCIE)
1615  return false;
1616 
1617  if (deviceid == RTL_PCI_8192_DID ||
1618  deviceid == RTL_PCI_0044_DID ||
1619  deviceid == RTL_PCI_0047_DID ||
1620  deviceid == RTL_PCI_8192SE_DID ||
1621  deviceid == RTL_PCI_8174_DID ||
1622  deviceid == RTL_PCI_8173_DID ||
1623  deviceid == RTL_PCI_8172_DID ||
1624  deviceid == RTL_PCI_8171_DID) {
1625  switch (revisionid) {
1627  RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1628  "8192 PCI-E is found - vid/did=%x/%x\n",
1629  venderid, deviceid);
1630  rtlhal->hw_type = HARDWARE_TYPE_RTL8192E;
1631  break;
1633  RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1634  "8192SE is found - vid/did=%x/%x\n",
1635  venderid, deviceid);
1636  rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
1637  break;
1638  default:
1639  RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1640  "Err: Unknown device - vid/did=%x/%x\n",
1641  venderid, deviceid);
1642  rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
1643  break;
1644 
1645  }
1646  } else if (deviceid == RTL_PCI_8192CET_DID ||
1647  deviceid == RTL_PCI_8192CE_DID ||
1648  deviceid == RTL_PCI_8191CE_DID ||
1649  deviceid == RTL_PCI_8188CE_DID) {
1650  rtlhal->hw_type = HARDWARE_TYPE_RTL8192CE;
1651  RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1652  "8192C PCI-E is found - vid/did=%x/%x\n",
1653  venderid, deviceid);
1654  } else if (deviceid == RTL_PCI_8192DE_DID ||
1655  deviceid == RTL_PCI_8192DE_DID2) {
1656  rtlhal->hw_type = HARDWARE_TYPE_RTL8192DE;
1657  RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1658  "8192D PCI-E is found - vid/did=%x/%x\n",
1659  venderid, deviceid);
1660  } else {
1661  RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1662  "Err: Unknown device - vid/did=%x/%x\n",
1663  venderid, deviceid);
1664 
1666  }
1667 
1668  if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE) {
1669  if (revisionid == 0 || revisionid == 1) {
1670  if (revisionid == 0) {
1671  RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1672  "Find 92DE MAC0\n");
1673  rtlhal->interfaceindex = 0;
1674  } else if (revisionid == 1) {
1675  RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1676  "Find 92DE MAC1\n");
1677  rtlhal->interfaceindex = 1;
1678  }
1679  } else {
1680  RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1681  "Unknown device - VendorID/DeviceID=%x/%x, Revision=%x\n",
1682  venderid, deviceid, revisionid);
1683  rtlhal->interfaceindex = 0;
1684  }
1685  }
1686  /*find bus info */
1687  pcipriv->ndis_adapter.busnumber = pdev->bus->number;
1688  pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn);
1689  pcipriv->ndis_adapter.funcnumber = PCI_FUNC(pdev->devfn);
1690 
1691  if (bridge_pdev) {
1692  /*find bridge info if available */
1693  pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor;
1694  for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) {
1695  if (bridge_pdev->vendor == pcibridge_vendors[tmp]) {
1696  pcipriv->ndis_adapter.pcibridge_vendor = tmp;
1697  RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1698  "Pci Bridge Vendor is found index: %d\n",
1699  tmp);
1700  break;
1701  }
1702  }
1703  }
1704 
1705  if (pcipriv->ndis_adapter.pcibridge_vendor !=
1707  pcipriv->ndis_adapter.pcibridge_busnum =
1708  bridge_pdev->bus->number;
1709  pcipriv->ndis_adapter.pcibridge_devnum =
1710  PCI_SLOT(bridge_pdev->devfn);
1711  pcipriv->ndis_adapter.pcibridge_funcnum =
1712  PCI_FUNC(bridge_pdev->devfn);
1713  pcipriv->ndis_adapter.pcibridge_pciehdr_offset =
1714  pci_pcie_cap(bridge_pdev);
1715  pcipriv->ndis_adapter.num4bytes =
1716  (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10) / 4;
1717 
1718  rtl_pci_get_linkcontrol_field(hw);
1719 
1720  if (pcipriv->ndis_adapter.pcibridge_vendor ==
1722  pcipriv->ndis_adapter.amd_l1_patch =
1723  rtl_pci_get_amd_l1_patch(hw);
1724  }
1725  }
1726 
1727  RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1728  "pcidev busnumber:devnumber:funcnumber:vendor:link_ctl %d:%d:%d:%x:%x\n",
1729  pcipriv->ndis_adapter.busnumber,
1730  pcipriv->ndis_adapter.devnumber,
1731  pcipriv->ndis_adapter.funcnumber,
1732  pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg);
1733 
1734  RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1735  "pci_bridge busnumber:devnumber:funcnumber:vendor:pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n",
1736  pcipriv->ndis_adapter.pcibridge_busnum,
1737  pcipriv->ndis_adapter.pcibridge_devnum,
1738  pcipriv->ndis_adapter.pcibridge_funcnum,
1739  pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
1740  pcipriv->ndis_adapter.pcibridge_pciehdr_offset,
1741  pcipriv->ndis_adapter.pcibridge_linkctrlreg,
1742  pcipriv->ndis_adapter.amd_l1_patch);
1743 
1744  rtl_pci_parse_configuration(pdev, hw);
1745 
1746  return true;
1747 }
1748 
1750  const struct pci_device_id *id)
1751 {
1752  struct ieee80211_hw *hw = NULL;
1753 
1754  struct rtl_priv *rtlpriv = NULL;
1755  struct rtl_pci_priv *pcipriv = NULL;
1756  struct rtl_pci *rtlpci;
1757  unsigned long pmem_start, pmem_len, pmem_flags;
1758  int err;
1759 
1760  err = pci_enable_device(pdev);
1761  if (err) {
1762  RT_ASSERT(false, "%s : Cannot enable new PCI device\n",
1763  pci_name(pdev));
1764  return err;
1765  }
1766 
1767  if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
1768  if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
1769  RT_ASSERT(false,
1770  "Unable to obtain 32bit DMA for consistent allocations\n");
1771  err = -ENOMEM;
1772  goto fail1;
1773  }
1774  }
1775 
1776  pci_set_master(pdev);
1777 
1778  hw = ieee80211_alloc_hw(sizeof(struct rtl_pci_priv) +
1779  sizeof(struct rtl_priv), &rtl_ops);
1780  if (!hw) {
1781  RT_ASSERT(false,
1782  "%s : ieee80211 alloc failed\n", pci_name(pdev));
1783  err = -ENOMEM;
1784  goto fail1;
1785  }
1786 
1787  SET_IEEE80211_DEV(hw, &pdev->dev);
1788  pci_set_drvdata(pdev, hw);
1789 
1790  rtlpriv = hw->priv;
1791  pcipriv = (void *)rtlpriv->priv;
1792  pcipriv->dev.pdev = pdev;
1793  init_completion(&rtlpriv->firmware_loading_complete);
1794 
1795  /* init cfg & intf_ops */
1796  rtlpriv->rtlhal.interface = INTF_PCI;
1797  rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
1798  rtlpriv->intf_ops = &rtl_pci_ops;
1799 
1800  /*
1801  *init dbgp flags before all
1802  *other functions, because we will
1803  *use it in other funtions like
1804  *RT_TRACE/RT_PRINT/RTL_PRINT_DATA
1805  *you can not use these macro
1806  *before this
1807  */
1808  rtl_dbgp_flag_init(hw);
1809 
1810  /* MEM map */
1811  err = pci_request_regions(pdev, KBUILD_MODNAME);
1812  if (err) {
1813  RT_ASSERT(false, "Can't obtain PCI resources\n");
1814  goto fail1;
1815  }
1816 
1817  pmem_start = pci_resource_start(pdev, rtlpriv->cfg->bar_id);
1818  pmem_len = pci_resource_len(pdev, rtlpriv->cfg->bar_id);
1819  pmem_flags = pci_resource_flags(pdev, rtlpriv->cfg->bar_id);
1820 
1821  /*shared mem start */
1822  rtlpriv->io.pci_mem_start =
1823  (unsigned long)pci_iomap(pdev,
1824  rtlpriv->cfg->bar_id, pmem_len);
1825  if (rtlpriv->io.pci_mem_start == 0) {
1826  RT_ASSERT(false, "Can't map PCI mem\n");
1827  err = -ENOMEM;
1828  goto fail2;
1829  }
1830 
1831  RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1832  "mem mapped space: start: 0x%08lx len:%08lx flags:%08lx, after map:0x%08lx\n",
1833  pmem_start, pmem_len, pmem_flags,
1834  rtlpriv->io.pci_mem_start);
1835 
1836  /* Disable Clk Request */
1837  pci_write_config_byte(pdev, 0x81, 0);
1838  /* leave D3 mode */
1839  pci_write_config_byte(pdev, 0x44, 0);
1840  pci_write_config_byte(pdev, 0x04, 0x06);
1841  pci_write_config_byte(pdev, 0x04, 0x07);
1842 
1843  /* find adapter */
1844  if (!_rtl_pci_find_adapter(pdev, hw)) {
1845  err = -ENODEV;
1846  goto fail3;
1847  }
1848 
1849  /* Init IO handler */
1850  _rtl_pci_io_handler_init(&pdev->dev, hw);
1851 
1852  /*like read eeprom and so on */
1853  rtlpriv->cfg->ops->read_eeprom_info(hw);
1854 
1855  /*aspm */
1856  rtl_pci_init_aspm(hw);
1857 
1858  /* Init mac80211 sw */
1859  err = rtl_init_core(hw);
1860  if (err) {
1861  RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1862  "Can't allocate sw for mac80211\n");
1863  goto fail3;
1864  }
1865 
1866  /* Init PCI sw */
1867  err = rtl_pci_init(hw, pdev);
1868  if (err) {
1869  RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Failed to init PCI\n");
1870  goto fail3;
1871  }
1872 
1873  if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
1874  RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
1875  err = -ENODEV;
1876  goto fail3;
1877  }
1878 
1879  rtlpriv->cfg->ops->init_sw_leds(hw);
1880 
1881  err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group);
1882  if (err) {
1883  RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1884  "failed to create sysfs device attributes\n");
1885  goto fail3;
1886  }
1887 
1888  rtlpci = rtl_pcidev(pcipriv);
1889  err = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
1890  IRQF_SHARED, KBUILD_MODNAME, hw);
1891  if (err) {
1892  RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1893  "%s: failed to register IRQ handler\n",
1894  wiphy_name(hw->wiphy));
1895  goto fail3;
1896  }
1897  rtlpci->irq_alloc = 1;
1898 
1899  return 0;
1900 
1901 fail3:
1902  rtl_deinit_core(hw);
1903  _rtl_pci_io_handler_release(hw);
1904 
1905  if (rtlpriv->io.pci_mem_start != 0)
1906  pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
1907 
1908 fail2:
1909  pci_release_regions(pdev);
1911 
1912 fail1:
1913  if (hw)
1914  ieee80211_free_hw(hw);
1915  pci_set_drvdata(pdev, NULL);
1916  pci_disable_device(pdev);
1917 
1918  return err;
1919 
1920 }
1922 
1923 void rtl_pci_disconnect(struct pci_dev *pdev)
1924 {
1925  struct ieee80211_hw *hw = pci_get_drvdata(pdev);
1926  struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
1927  struct rtl_priv *rtlpriv = rtl_priv(hw);
1928  struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
1929  struct rtl_mac *rtlmac = rtl_mac(rtlpriv);
1930 
1931  /* just in case driver is removed before firmware callback */
1934 
1936 
1937  /*ieee80211_unregister_hw will call ops_stop */
1938  if (rtlmac->mac80211_registered == 1) {
1940  rtlmac->mac80211_registered = 0;
1941  } else {
1943  rtlpriv->intf_ops->adapter_stop(hw);
1944  }
1945  rtlpriv->cfg->ops->disable_interrupt(hw);
1946 
1947  /*deinit rfkill */
1948  rtl_deinit_rfkill(hw);
1949 
1950  rtl_pci_deinit(hw);
1951  rtl_deinit_core(hw);
1952  _rtl_pci_io_handler_release(hw);
1953  rtlpriv->cfg->ops->deinit_sw_vars(hw);
1954 
1955  if (rtlpci->irq_alloc) {
1956  free_irq(rtlpci->pdev->irq, hw);
1957  rtlpci->irq_alloc = 0;
1958  }
1959 
1960  if (rtlpriv->io.pci_mem_start != 0) {
1961  pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
1962  pci_release_regions(pdev);
1963  }
1964 
1965  pci_disable_device(pdev);
1966 
1967  rtl_pci_disable_aspm(hw);
1968 
1969  pci_set_drvdata(pdev, NULL);
1970 
1971  ieee80211_free_hw(hw);
1972 }
1974 
1975 /***************************************
1976 kernel pci power state define:
1977 PCI_D0 ((pci_power_t __force) 0)
1978 PCI_D1 ((pci_power_t __force) 1)
1979 PCI_D2 ((pci_power_t __force) 2)
1980 PCI_D3hot ((pci_power_t __force) 3)
1981 PCI_D3cold ((pci_power_t __force) 4)
1982 PCI_UNKNOWN ((pci_power_t __force) 5)
1983 
1984 This function is called when system
1985 goes into suspend state mac80211 will
1986 call rtl_mac_stop() from the mac80211
1987 suspend function first, So there is
1988 no need to call hw_disable here.
1989 ****************************************/
1991 {
1992  struct pci_dev *pdev = to_pci_dev(dev);
1993  struct ieee80211_hw *hw = pci_get_drvdata(pdev);
1994  struct rtl_priv *rtlpriv = rtl_priv(hw);
1995 
1996  rtlpriv->cfg->ops->hw_suspend(hw);
1997  rtl_deinit_rfkill(hw);
1998 
1999  return 0;
2000 }
2002 
2004 {
2005  struct pci_dev *pdev = to_pci_dev(dev);
2006  struct ieee80211_hw *hw = pci_get_drvdata(pdev);
2007  struct rtl_priv *rtlpriv = rtl_priv(hw);
2008 
2009  rtlpriv->cfg->ops->hw_resume(hw);
2010  rtl_init_rfkill(hw);
2011  return 0;
2012 }
2014 
2016  .read_efuse_byte = read_efuse_byte,
2017  .adapter_start = rtl_pci_start,
2018  .adapter_stop = rtl_pci_stop,
2019  .adapter_tx = rtl_pci_tx,
2020  .flush = rtl_pci_flush,
2021  .reset_trx_ring = rtl_pci_reset_trx_ring,
2022  .waitq_insert = rtl_pci_tx_chk_waitq_insert,
2023 
2024  .disable_aspm = rtl_pci_disable_aspm,
2025  .enable_aspm = rtl_pci_enable_aspm,
2026 };