trans.c - Linux kernel 3.7.1 source listing (drivers/net/wireless/iwlwifi/pcie/trans.c)
1 /******************************************************************************
2  *
3  * This file is provided under a dual BSD/GPLv2 license. When using or
4  * redistributing this file, you may do so under either license.
5  *
6  * GPL LICENSE SUMMARY
7  *
8  * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
9  *
10  * This program is free software; you can redistribute it and/or modify
11  * it under the terms of version 2 of the GNU General Public License as
12  * published by the Free Software Foundation.
13  *
14  * This program is distributed in the hope that it will be useful, but
15  * WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17  * General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22  * USA
23  *
24  * The full GNU General Public License is included in this distribution
25  * in the file called LICENSE.GPL.
26  *
27  * Contact Information:
28  * Intel Linux Wireless <[email protected]>
29  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30  *
31  * BSD LICENSE
32  *
33  * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34  * All rights reserved.
35  *
36  * Redistribution and use in source and binary forms, with or without
37  * modification, are permitted provided that the following conditions
38  * are met:
39  *
40  * * Redistributions of source code must retain the above copyright
41  * notice, this list of conditions and the following disclaimer.
42  * * Redistributions in binary form must reproduce the above copyright
43  * notice, this list of conditions and the following disclaimer in
44  * the documentation and/or other materials provided with the
45  * distribution.
46  * * Neither the name Intel Corporation nor the names of its
47  * contributors may be used to endorse or promote products derived
48  * from this software without specific prior written permission.
49  *
50  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61  *
62  *****************************************************************************/
63 #include <linux/pci.h>
64 #include <linux/pci-aspm.h>
65 #include <linux/interrupt.h>
66 #include <linux/debugfs.h>
67 #include <linux/sched.h>
68 #include <linux/bitops.h>
69 #include <linux/gfp.h>
70 
71 #include "iwl-drv.h"
72 #include "iwl-trans.h"
73 #include "iwl-csr.h"
74 #include "iwl-prph.h"
75 #include "iwl-agn-hw.h"
76 #include "internal.h"
77 /* FIXME: need to abstract out TX command (once we know what it looks like) */
78 #include "dvm/commands.h"
79 
80 #define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie) \
81  (((1<<trans->cfg->base_params->num_of_queues) - 1) &\
82  (~(1<<(trans_pcie)->cmd_queue)))
83 
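The macro above builds a bitmask selecting every TX queue except the command queue: ((1 << n) - 1) sets the low n bits, and ~(1 << cmd) clears the command queue's bit. A minimal user-space sketch of the same arithmetic, assuming illustrative values for num_of_queues and cmd_queue (they are stand-ins for the config fields, not the driver's actual numbers):

    #include <stdio.h>

    /* Illustrative sketch of the SCD_QUEUECHAIN_SEL_ALL mask math. */
    int main(void)
    {
        unsigned num_of_queues = 20;   /* hypothetical base_params->num_of_queues */
        unsigned cmd_queue = 9;        /* hypothetical trans_pcie->cmd_queue */
        unsigned long mask;

        /* ((1 << n) - 1) sets bits 0..n-1; ~(1 << cmd) clears the cmd queue */
        mask = ((1UL << num_of_queues) - 1) & ~(1UL << cmd_queue);
        printf("chain-select mask = 0x%05lx\n", mask);
        return 0;
    }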
84 static int iwl_trans_rx_alloc(struct iwl_trans *trans)
85 {
86  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
87  struct iwl_rx_queue *rxq = &trans_pcie->rxq;
88  struct device *dev = trans->dev;
89 
90  memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
91 
92  spin_lock_init(&rxq->lock);
93 
94  if (WARN_ON(rxq->bd || rxq->rb_stts))
95  return -EINVAL;
96 
97  /* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
98  rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
99  &rxq->bd_dma, GFP_KERNEL);
100  if (!rxq->bd)
101  goto err_bd;
102 
103  /*Allocate the driver's pointer to receive buffer status */
104  rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
105  &rxq->rb_stts_dma, GFP_KERNEL);
106  if (!rxq->rb_stts)
107  goto err_rb_stts;
108 
109  return 0;
110 
111 err_rb_stts:
112  dma_free_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
113  rxq->bd, rxq->bd_dma);
114  memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
115  rxq->bd = NULL;
116 err_bd:
117  return -ENOMEM;
118 }
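iwl_trans_rx_alloc uses the kernel's standard goto-based unwinding: each allocation failure jumps to a label that frees only what was allocated before it. A self-contained user-space sketch of the same shape, with malloc/free standing in for dma_zalloc_coherent/dma_free_coherent (names and sizes are illustrative):

    #include <stdlib.h>

    /* Sketch of the goto-unwind error pattern used in iwl_trans_rx_alloc. */
    struct rxq { void *bd; void *rb_stts; };

    static int rx_alloc(struct rxq *rxq)
    {
        rxq->bd = calloc(256, sizeof(int));   /* stands in for the RBD ring */
        if (!rxq->bd)
            goto err_bd;

        rxq->rb_stts = calloc(1, 64);         /* stands in for rb status */
        if (!rxq->rb_stts)
            goto err_rb_stts;

        return 0;

    err_rb_stts:
        free(rxq->bd);                        /* undo only the earlier step */
        rxq->bd = NULL;
    err_bd:
        return -1;                            /* -ENOMEM in the kernel */
    }

    int main(void) { struct rxq q; return rx_alloc(&q) ? 1 : 0; }

Each label cleans up exactly one layer, so the unwind order is the reverse of the allocation order.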
119 
120 static void iwl_trans_rxq_free_rx_bufs(struct iwl_trans *trans)
121 {
122  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
123  struct iwl_rx_queue *rxq = &trans_pcie->rxq;
124  int i;
125 
126  /* Fill the rx_used queue with _all_ of the Rx buffers */
127  for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
128  /* In the reset function, these buffers may have been allocated
129  * to an SKB, so we need to unmap and free potential storage */
130  if (rxq->pool[i].page != NULL) {
131  dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
132  PAGE_SIZE << trans_pcie->rx_page_order,
133  DMA_FROM_DEVICE);
134  __free_pages(rxq->pool[i].page,
135  trans_pcie->rx_page_order);
136  rxq->pool[i].page = NULL;
137  }
138  list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
139  }
140 }
141 
142 static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
143  struct iwl_rx_queue *rxq)
144 {
145  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
146  u32 rb_size;
147  const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
148  u32 rb_timeout = RX_RB_TIMEOUT; /* FIXME: RX_RB_TIMEOUT for all devices? */
149 
150  if (trans_pcie->rx_buf_size_8k)
151  rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
152  else
153  rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
154 
155  /* Stop Rx DMA */
156  iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
157 
158  /* Reset driver's Rx queue write index */
159  iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
160 
161  /* Tell device where to find RBD circular buffer in DRAM */
162  iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
163  (u32)(rxq->bd_dma >> 8));
164 
165  /* Tell device where in DRAM to update its Rx status */
166  iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
167  rxq->rb_stts_dma >> 4);
168 
169  /* Enable Rx DMA
170  * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
171  * the credit mechanism in 5000 HW RX FIFO
172  * Direct rx interrupts to hosts
173  * Rx buffer size 4 or 8k
174  * RB timeout 0x10
175  * 256 RBDs
176  */
177  iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
178  FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
179  FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
180  FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
181  rb_size|
182  (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
183  (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
184 
185  /* Set interrupt coalescing timer to default (2048 usecs) */
186  iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
187 }
188 
189 static int iwl_rx_init(struct iwl_trans *trans)
190 {
191  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
192  struct iwl_rx_queue *rxq = &trans_pcie->rxq;
193 
194  int i, err;
195  unsigned long flags;
196 
197  if (!rxq->bd) {
198  err = iwl_trans_rx_alloc(trans);
199  if (err)
200  return err;
201  }
202 
203  spin_lock_irqsave(&rxq->lock, flags);
204  INIT_LIST_HEAD(&rxq->rx_free);
205  INIT_LIST_HEAD(&rxq->rx_used);
206 
207  iwl_trans_rxq_free_rx_bufs(trans);
208 
209  for (i = 0; i < RX_QUEUE_SIZE; i++)
210  rxq->queue[i] = NULL;
211 
212  /* Set us so that we have processed and used all buffers, but have
213  * not restocked the Rx queue with fresh buffers */
214  rxq->read = rxq->write = 0;
215  rxq->write_actual = 0;
216  rxq->free_count = 0;
217  spin_unlock_irqrestore(&rxq->lock, flags);
218 
219  iwl_rx_replenish(trans);
220 
221  iwl_trans_rx_hw_init(trans, rxq);
222 
223  spin_lock_irqsave(&trans_pcie->irq_lock, flags);
224  rxq->need_update = 1;
225  iwl_rx_queue_update_write_ptr(trans, rxq);
226  spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
227 
228  return 0;
229 }
230 
231 static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)
232 {
233  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
234  struct iwl_rx_queue *rxq = &trans_pcie->rxq;
235  unsigned long flags;
236 
237  /*if rxq->bd is NULL, it means that nothing has been allocated,
238  * exit now */
239  if (!rxq->bd) {
240  IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
241  return;
242  }
243 
244  spin_lock_irqsave(&rxq->lock, flags);
245  iwl_trans_rxq_free_rx_bufs(trans);
246  spin_unlock_irqrestore(&rxq->lock, flags);
247 
248  dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
249  rxq->bd, rxq->bd_dma);
250  memset(&rxq->bd_dma, 0, sizeof(rxq->bd_dma));
251  rxq->bd = NULL;
252 
253  if (rxq->rb_stts)
254  dma_free_coherent(trans->dev,
255  sizeof(struct iwl_rb_status),
256  rxq->rb_stts, rxq->rb_stts_dma);
257  else
258  IWL_DEBUG_INFO(trans, "Free rxq->rb_stts which is NULL\n");
259  memset(&rxq->rb_stts_dma, 0, sizeof(rxq->rb_stts_dma));
260  rxq->rb_stts = NULL;
261 }
262 
263 static int iwl_trans_rx_stop(struct iwl_trans *trans)
264 {
265 
266  /* stop Rx DMA */
267  iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
268  return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
269  FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
270 }
271 
272 static int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
273  struct iwl_dma_ptr *ptr, size_t size)
274 {
275  if (WARN_ON(ptr->addr))
276  return -EINVAL;
277 
278  ptr->addr = dma_alloc_coherent(trans->dev, size,
279  &ptr->dma, GFP_KERNEL);
280  if (!ptr->addr)
281  return -ENOMEM;
282  ptr->size = size;
283  return 0;
284 }
285 
286 static void iwlagn_free_dma_ptr(struct iwl_trans *trans,
287  struct iwl_dma_ptr *ptr)
288 {
289  if (unlikely(!ptr->addr))
290  return;
291 
292  dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
293  memset(ptr, 0, sizeof(*ptr));
294 }
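iwlagn_alloc_dma_ptr refuses double allocation (WARN_ON(ptr->addr)) and iwlagn_free_dma_ptr zeroes the descriptor after freeing, so a later alloc is legal again and a double free is a no-op. A user-space sketch of that guarded pair, with malloc/free standing in for the coherent DMA calls:

    #include <stdlib.h>
    #include <string.h>
    #include <assert.h>

    /* Sketch of the guarded alloc/free pair, like iwlagn_{alloc,free}_dma_ptr. */
    struct dma_ptr { void *addr; size_t size; };

    static int dma_ptr_alloc(struct dma_ptr *p, size_t size)
    {
        if (p->addr)               /* refuse double allocation */
            return -1;             /* -EINVAL in the kernel */
        p->addr = malloc(size);
        if (!p->addr)
            return -2;             /* -ENOMEM */
        p->size = size;
        return 0;
    }

    static void dma_ptr_free(struct dma_ptr *p)
    {
        if (!p->addr)              /* freeing nothing is a no-op */
            return;
        free(p->addr);
        memset(p, 0, sizeof(*p));  /* zero so a later alloc is legal again */
    }

    int main(void)
    {
        struct dma_ptr p = { 0 };
        assert(dma_ptr_alloc(&p, 128) == 0);
        assert(dma_ptr_alloc(&p, 128) < 0);  /* double alloc rejected */
        dma_ptr_free(&p);
        dma_ptr_free(&p);                    /* idempotent */
        return 0;
    }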
295 
296 static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)
297 {
298  struct iwl_tx_queue *txq = (void *)data;
299  struct iwl_queue *q = &txq->q;
300  struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
301  struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
302  u32 scd_sram_addr = trans_pcie->scd_base_addr +
303  SCD_TX_STTS_MEM_LOWER_BOUND + (16 * txq->q.id);
304  u8 buf[16];
305  int i;
306 
307  spin_lock(&txq->lock);
308  /* check if triggered erroneously */
309  if (txq->q.read_ptr == txq->q.write_ptr) {
310  spin_unlock(&txq->lock);
311  return;
312  }
313  spin_unlock(&txq->lock);
314 
315  IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
316  jiffies_to_msecs(trans_pcie->wd_timeout));
317  IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
318  txq->q.read_ptr, txq->q.write_ptr);
319 
320  iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
321 
322  iwl_print_hex_error(trans, buf, sizeof(buf));
323 
324  for (i = 0; i < FH_TCSR_CHNL_NUM; i++)
325  IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
326  iwl_read_direct32(trans, FH_TX_TRB_REG(i)));
327 
328  for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
329  u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i));
330  u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
331  bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
332  u32 tbl_dw =
333  iwl_read_targ_mem(trans,
334  trans_pcie->scd_base_addr +
335  SCD_TRANS_TBL_OFFSET_QUEUE(i));
336 
337  if (i & 0x1)
338  tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
339  else
340  tbl_dw = tbl_dw & 0x0000FFFF;
341 
342  IWL_ERR(trans,
343  "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
344  i, active ? "" : "in", fifo, tbl_dw,
345  iwl_read_prph(trans,
346  SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
347  iwl_read_prph(trans, SCD_QUEUE_WRPTR(i)));
348  }
349 
350  for (i = q->read_ptr; i != q->write_ptr;
351  i = iwl_queue_inc_wrap(i, q->n_bd)) {
352  struct iwl_tx_cmd *tx_cmd =
353  (struct iwl_tx_cmd *)txq->entries[i].cmd->payload;
354  IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
355  get_unaligned_le32(&tx_cmd->scratch));
356  }
357 
358  iwl_op_mode_nic_error(trans->op_mode);
359 }
360 
361 static int iwl_trans_txq_alloc(struct iwl_trans *trans,
362  struct iwl_tx_queue *txq, int slots_num,
363  u32 txq_id)
364 {
365  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
366  size_t tfd_sz = sizeof(struct iwl_tfd) * TFD_QUEUE_SIZE_MAX;
367  int i;
368 
369  if (WARN_ON(txq->entries || txq->tfds))
370  return -EINVAL;
371 
372  setup_timer(&txq->stuck_timer, iwl_trans_pcie_queue_stuck_timer,
373  (unsigned long)txq);
374  txq->trans_pcie = trans_pcie;
375 
376  txq->q.n_window = slots_num;
377 
378  txq->entries = kcalloc(slots_num,
379  sizeof(struct iwl_pcie_tx_queue_entry),
380  GFP_KERNEL);
381 
382  if (!txq->entries)
383  goto error;
384 
385  if (txq_id == trans_pcie->cmd_queue)
386  for (i = 0; i < slots_num; i++) {
387  txq->entries[i].cmd =
388  kmalloc(sizeof(struct iwl_device_cmd),
389  GFP_KERNEL);
390  if (!txq->entries[i].cmd)
391  goto error;
392  }
393 
394  /* Circular buffer of transmit frame descriptors (TFDs),
395  * shared with device */
396  txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
397  &txq->q.dma_addr, GFP_KERNEL);
398  if (!txq->tfds) {
399  IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
400  goto error;
401  }
402  txq->q.id = txq_id;
403 
404  return 0;
405 error:
406  if (txq->entries && txq_id == trans_pcie->cmd_queue)
407  for (i = 0; i < slots_num; i++)
408  kfree(txq->entries[i].cmd);
409  kfree(txq->entries);
410  txq->entries = NULL;
411 
412  return -ENOMEM;
413 
414 }
415 
416 static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
417  int slots_num, u32 txq_id)
418 {
419  int ret;
420 
421  txq->need_update = 0;
422 
423  /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
424  * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
425  BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
426 
427  /* Initialize queue's high/low-water marks, and head/tail indexes */
428  ret = iwl_queue_init(&txq->q, TFD_QUEUE_SIZE_MAX, slots_num,
429  txq_id);
430  if (ret)
431  return ret;
432 
433  spin_lock_init(&txq->lock);
434 
435  /*
436  * Tell nic where to find circular buffer of Tx Frame Descriptors for
437  * given Tx queue, and enable the DMA channel used for that queue.
438  * Circular buffer (TFD queue in DRAM) physical base address */
439  iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(txq_id),
440  txq->q.dma_addr >> 8);
441 
442  return 0;
443 }
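The BUILD_BUG_ON above enforces that TFD_QUEUE_SIZE_MAX is a power of two, so the wrap helpers can mask with (n - 1) instead of taking a modulo. A self-contained sketch of that invariant; inc_wrap is a simplified stand-in for iwl_queue_inc_wrap:

    #include <stdio.h>

    /* Sketch of power-of-two index wrapping, the invariant BUILD_BUG_ON checks. */
    #define QUEUE_SIZE 256                 /* must be a power of two */

    static int inc_wrap(int i, int n)
    {
        return ++i >= n ? 0 : i;           /* simplified increment-with-wrap */
    }

    int main(void)
    {
        /* with n a power of two, (i + 1) & (n - 1) gives the same result */
        for (int i = 254; i < 258; i++) {
            int a = inc_wrap(i & (QUEUE_SIZE - 1), QUEUE_SIZE);
            int b = (i + 1) & (QUEUE_SIZE - 1);
            printf("i=%d wrap=%d mask=%d\n", i & (QUEUE_SIZE - 1), a, b);
        }
        return 0;
    }

If the size were not a power of two, the mask form would silently produce indexes that the compare form never visits, which is exactly the breakage the comment warns about.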
444 
448 static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
449 {
450  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
451  struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
452  struct iwl_queue *q = &txq->q;
453  enum dma_data_direction dma_dir;
454 
455  if (!q->n_bd)
456  return;
457 
458  /* In the command queue, all the TBs are mapped as BIDI
459  * so unmap them as such.
460  */
461  if (txq_id == trans_pcie->cmd_queue)
462  dma_dir = DMA_BIDIRECTIONAL;
463  else
464  dma_dir = DMA_TO_DEVICE;
465 
466  spin_lock_bh(&txq->lock);
467  while (q->write_ptr != q->read_ptr) {
468  iwl_txq_free_tfd(trans, txq, dma_dir);
469  q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
470  }
471  spin_unlock_bh(&txq->lock);
472 }
473 
482 static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)
483 {
484  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
485  struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
486  struct device *dev = trans->dev;
487  int i;
488 
489  if (WARN_ON(!txq))
490  return;
491 
492  iwl_tx_queue_unmap(trans, txq_id);
493 
494  /* De-alloc array of command/tx buffers */
495  if (txq_id == trans_pcie->cmd_queue)
496  for (i = 0; i < txq->q.n_window; i++) {
497  kfree(txq->entries[i].cmd);
498  kfree(txq->entries[i].copy_cmd);
499  }
500 
501  /* De-alloc circular buffer of TFDs */
502  if (txq->q.n_bd) {
503  dma_free_coherent(dev, sizeof(struct iwl_tfd) *
504  txq->q.n_bd, txq->tfds, txq->q.dma_addr);
505  memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
506  }
507 
508  kfree(txq->entries);
509  txq->entries = NULL;
510 
511  del_timer_sync(&txq->stuck_timer);
512 
513  /* 0-fill queue descriptor structure */
514  memset(txq, 0, sizeof(*txq));
515 }
516 
522 static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)
523 {
524  int txq_id;
525  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
526 
527  /* Tx queues */
528  if (trans_pcie->txq) {
529  for (txq_id = 0;
530  txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
531  iwl_tx_queue_free(trans, txq_id);
532  }
533 
534  kfree(trans_pcie->txq);
535  trans_pcie->txq = NULL;
536 
537  iwlagn_free_dma_ptr(trans, &trans_pcie->kw);
538 
539  iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
540 }
541 
549 static int iwl_trans_tx_alloc(struct iwl_trans *trans)
550 {
551  int ret;
552  int txq_id, slots_num;
553  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
554 
555  u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
556  sizeof(struct iwlagn_scd_bc_tbl);
557 
558  /*It is not allowed to alloc twice, so warn when this happens.
559  * We cannot rely on the previous allocation, so free and fail */
560  if (WARN_ON(trans_pcie->txq)) {
561  ret = -EINVAL;
562  goto error;
563  }
564 
565  ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
566  scd_bc_tbls_size);
567  if (ret) {
568  IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
569  goto error;
570  }
571 
572  /* Alloc keep-warm buffer */
573  ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
574  if (ret) {
575  IWL_ERR(trans, "Keep Warm allocation failed\n");
576  goto error;
577  }
578 
579  trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
580  sizeof(struct iwl_tx_queue), GFP_KERNEL);
581  if (!trans_pcie->txq) {
582  IWL_ERR(trans, "Not enough memory for txq\n");
583  ret = -ENOMEM;
584  goto error;
585  }
586 
587  /* Alloc and init all Tx queues, including the command queue (#4/#9) */
588  for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
589  txq_id++) {
590  slots_num = (txq_id == trans_pcie->cmd_queue) ?
591  TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
592  ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
593  slots_num, txq_id);
594  if (ret) {
595  IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
596  goto error;
597  }
598  }
599 
600  return 0;
601 
602 error:
603  iwl_trans_pcie_tx_free(trans);
604 
605  return ret;
606 }
607 static int iwl_tx_init(struct iwl_trans *trans)
608 {
609  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
610  int ret;
611  int txq_id, slots_num;
612  unsigned long flags;
613  bool alloc = false;
614 
615  if (!trans_pcie->txq) {
616  ret = iwl_trans_tx_alloc(trans);
617  if (ret)
618  goto error;
619  alloc = true;
620  }
621 
622  spin_lock_irqsave(&trans_pcie->irq_lock, flags);
623 
624  /* Turn off all Tx DMA fifos */
625  iwl_write_prph(trans, SCD_TXFACT, 0);
626 
627  /* Tell NIC where to find the "keep warm" buffer */
628  iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
629  trans_pcie->kw.dma >> 4);
630 
631  spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
632 
633  /* Alloc and init all Tx queues, including the command queue (#4/#9) */
634  for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
635  txq_id++) {
636  slots_num = (txq_id == trans_pcie->cmd_queue) ?
637  TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
638  ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
639  slots_num, txq_id);
640  if (ret) {
641  IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
642  goto error;
643  }
644  }
645 
646  return 0;
647 error:
648  /*Upon error, free only if we allocated something */
649  if (alloc)
650  iwl_trans_pcie_tx_free(trans);
651  return ret;
652 }
653 
654 static void iwl_set_pwr_vmain(struct iwl_trans *trans)
655 {
656 /*
657  * (for documentation purposes)
658  * to set power to V_AUX, do:
659 
660  if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
661  iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
662  APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
663  ~APMG_PS_CTRL_MSK_PWR_SRC);
664  */
665 
666  iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
667  APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
668  ~APMG_PS_CTRL_MSK_PWR_SRC);
669 }
670 
671 /* PCI registers */
672 #define PCI_CFG_RETRY_TIMEOUT 0x041
673 #define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
674 #define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
675 
676 static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans)
677 {
678  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
679  u16 pci_lnk_ctl;
680 
681  pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL,
682  &pci_lnk_ctl);
683  return pci_lnk_ctl;
684 }
685 
686 static void iwl_apm_config(struct iwl_trans *trans)
687 {
688  /*
689  * HW bug W/A for instability in PCIe bus L0S->L1 transition.
690  * Check if BIOS (or OS) enabled L1-ASPM on this device.
691  * If so (likely), disable L0S, so device moves directly L0->L1;
692  * costs negligible amount of power savings.
693  * If not (unlikely), enable L0S, so there is at least some
694  * power savings, even without L1.
695  */
696  u16 lctl = iwl_pciexp_link_ctrl(trans);
697 
698  if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
699  PCI_CFG_LINK_CTRL_VAL_L1_EN) {
700  /* L1-ASPM enabled; disable(!) L0S */
701  iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
702  dev_printk(KERN_INFO, trans->dev,
703  "L1 Enabled; Disabling L0S\n");
704  } else {
705  /* L1-ASPM disabled; enable(!) L0S */
706  iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
707  dev_printk(KERN_INFO, trans->dev,
708  "L1 Disabled; Enabling L0S\n");
709  }
710  trans->pm_support = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
711 }
712 
713 /*
714  * Start up NIC's basic functionality after it has been reset
715  * (e.g. after platform boot, or shutdown via iwl_apm_stop())
716  * NOTE: This does not load uCode nor start the embedded processor
717  */
718 static int iwl_apm_init(struct iwl_trans *trans)
719 {
720  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
721  int ret = 0;
722  IWL_DEBUG_INFO(trans, "Init card's basic functions\n");
723 
724  /*
725  * Use "set_bit" below rather than "write", to preserve any hardware
726  * bits already set by default after reset.
727  */
728 
729  /* Disable L0S exit timer (platform NMI Work/Around) */
730  iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
731  CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
732 
733  /*
734  * Disable L0s without affecting L1;
735  * don't wait for ICH L0s (ICH bug W/A)
736  */
737  iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
738  CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
739 
740  /* Set FH wait threshold to maximum (HW error during stress W/A) */
741  iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
742 
743  /*
744  * Enable HAP INTA (interrupt from management bus) to
745  * wake device's PCI Express link L1a -> L0s
746  */
747  iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
748  CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
749 
750  iwl_apm_config(trans);
751 
752  /* Configure analog phase-lock-loop before activating to D0A */
753  if (trans->cfg->base_params->pll_cfg_val)
754  iwl_set_bit(trans, CSR_ANA_PLL_CFG,
755  trans->cfg->base_params->pll_cfg_val);
756 
757  /*
758  * Set "initialization complete" bit to move adapter from
759  * D0U* --> D0A* (powered-up active) state.
760  */
761  iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
762 
763  /*
764  * Wait for clock stabilization; once stabilized, access to
765  * device-internal resources is supported, e.g. iwl_write_prph()
766  * and accesses to uCode SRAM.
767  */
768  ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
769  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
770  CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
771  if (ret < 0) {
772  IWL_DEBUG_INFO(trans, "Failed to init the card\n");
773  goto out;
774  }
775 
776  /*
777  * Enable DMA clock and wait for it to stabilize.
778  *
779  * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
780  * do not disable clocks. This preserves any hardware bits already
781  * set by default in "CLK_CTRL_REG" after reset.
782  */
783  iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
784  udelay(20);
785 
786  /* Disable L1-Active */
787  iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
788  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
789 
790  set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
791 
792 out:
793  return ret;
794 }
795 
796 static int iwl_apm_stop_master(struct iwl_trans *trans)
797 {
798  int ret = 0;
799 
800  /* stop device's busmaster DMA activity */
801  iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
802 
803  ret = iwl_poll_bit(trans, CSR_RESET,
804  CSR_RESET_REG_FLAG_MASTER_DISABLED,
805  CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
806  if (ret)
807  IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
808 
809  IWL_DEBUG_INFO(trans, "stop master\n");
810 
811  return ret;
812 }
813 
814 static void iwl_apm_stop(struct iwl_trans *trans)
815 {
816  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
817  IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");
818 
819  clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
820 
821  /* Stop device's DMA activity */
822  iwl_apm_stop_master(trans);
823 
824  /* Reset the entire device */
825  iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
826 
827  udelay(10);
828 
829  /*
830  * Clear "initialization complete" bit to move adapter from
831  * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
832  */
832  */
833  iwl_clear_bit(trans, CSR_GP_CNTRL,
834  CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
835 }
836 
837 static int iwl_nic_init(struct iwl_trans *trans)
838 {
839  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
840  unsigned long flags;
841 
842  /* nic_init */
843  spin_lock_irqsave(&trans_pcie->irq_lock, flags);
844  iwl_apm_init(trans);
845 
846  /* Set interrupt coalescing calibration timer to default (512 usecs) */
847  iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
848 
849  spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
850 
851  iwl_set_pwr_vmain(trans);
852 
853  iwl_op_mode_nic_config(trans->op_mode);
854 
855  /* Allocate the RX queue, or reset if it is already allocated */
856  iwl_rx_init(trans);
857 
858  /* Allocate or reset and init all Tx and Command queues */
859  if (iwl_tx_init(trans))
860  return -ENOMEM;
861 
862  if (trans->cfg->base_params->shadow_reg_enable) {
863  /* enable shadow regs in HW */
864  iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
865  IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
866  }
867 
868  return 0;
869 }
870 
871 #define HW_READY_TIMEOUT (50)
872 
873 /* Note: returns poll_bit return value, which is >= 0 if success */
874 static int iwl_set_hw_ready(struct iwl_trans *trans)
875 {
876  int ret;
877 
878  iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
879  CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
880 
881  /* See if we got it */
882  ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
883  CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
884  CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
885  HW_READY_TIMEOUT);
886 
887  IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
888  return ret;
889 }
890 
891 /* Note: returns standard 0/-ERROR code */
892 static int iwl_prepare_card_hw(struct iwl_trans *trans)
893 {
894  int ret;
895  int t = 0;
896 
897  IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");
898 
899  ret = iwl_set_hw_ready(trans);
900  /* If the card is ready, exit 0 */
901  if (ret >= 0)
902  return 0;
903 
904  /* If HW is not ready, prepare the conditions to check again */
905  iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
906  CSR_HW_IF_CONFIG_REG_PREPARE);
907 
908  do {
909  ret = iwl_set_hw_ready(trans);
910  if (ret >= 0)
911  return 0;
912 
913  usleep_range(200, 1000);
914  t += 200;
915  } while (t < 150000);
916 
917  return ret;
918 }
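iwl_prepare_card_hw retries the readiness poll in a bounded loop with short sleeps, giving the hardware an overall budget rather than a fixed attempt count. The shape of that retry loop in a self-contained sketch; check_ready is a stand-in for iwl_set_hw_ready and is rigged to succeed on the third attempt just to exercise the loop:

    #include <stdio.h>
    #include <unistd.h>

    /* Sketch of the bounded retry loop in iwl_prepare_card_hw. */
    static int attempts;
    static int check_ready(void)
    {
        return ++attempts >= 3 ? 0 : -1;   /* >= 0 means ready */
    }

    int main(void)
    {
        int t = 0;

        if (check_ready() >= 0)
            return 0;                      /* ready on the first try */

        do {
            if (check_ready() >= 0) {
                printf("ready after %d attempts\n", attempts);
                return 0;
            }
            usleep(200);                   /* kernel uses usleep_range(200, 1000) */
            t += 200;
        } while (t < 150000);              /* overall budget: 150 ms */

        return 1;                          /* timed out */
    }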
919 
920 /*
921  * ucode
922  */
923 static int iwl_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
924  dma_addr_t phy_addr, u32 byte_cnt)
925 {
926  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
927  int ret;
928 
929  trans_pcie->ucode_write_complete = false;
930 
931  iwl_write_direct32(trans,
932  FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
933  FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
934 
935  iwl_write_direct32(trans,
936  FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
937  dst_addr);
938 
939  iwl_write_direct32(trans,
940  FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
941  phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
942 
943  iwl_write_direct32(trans,
944  FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
945  (iwl_get_dma_hi_addr(phy_addr)
946  << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
947 
948  iwl_write_direct32(trans,
949  FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
950  1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
951  1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
952  FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
953 
954  iwl_write_direct32(trans,
955  FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
956  FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
957  FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
958  FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
959 
960  ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
961  trans_pcie->ucode_write_complete, 5 * HZ);
962  if (!ret) {
963  IWL_ERR(trans, "Failed to load firmware chunk!\n");
964  return -ETIMEDOUT;
965  }
966 
967  return 0;
968 }
969 
970 static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
971  const struct fw_desc *section)
972 {
973  u8 *v_addr;
974  dma_addr_t p_addr;
975  u32 offset;
976  int ret = 0;
977 
978  IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
979  section_num);
980 
981  v_addr = dma_alloc_coherent(trans->dev, PAGE_SIZE, &p_addr, GFP_KERNEL);
982  if (!v_addr)
983  return -ENOMEM;
984 
985  for (offset = 0; offset < section->len; offset += PAGE_SIZE) {
986  u32 copy_size;
987 
988  copy_size = min_t(u32, PAGE_SIZE, section->len - offset);
989 
990  memcpy(v_addr, (u8 *)section->data + offset, copy_size);
991  ret = iwl_load_firmware_chunk(trans, section->offset + offset,
992  p_addr, copy_size);
993  if (ret) {
994  IWL_ERR(trans,
995  "Could not load the [%d] uCode section\n",
996  section_num);
997  break;
998  }
999  }
1000 
1001  dma_free_coherent(trans->dev, PAGE_SIZE, v_addr, p_addr);
1002  return ret;
1003 }
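iwl_load_section streams an arbitrarily long firmware section through a single page-sized DMA buffer, copying min(PAGE_SIZE, remaining) bytes per iteration and reusing the same bounce buffer for every chunk. The same chunking logic in a user-space sketch; CHUNK stands in for PAGE_SIZE and the destination address is hypothetical:

    #include <stdio.h>
    #include <string.h>

    /* Sketch of the chunking loop in iwl_load_section. */
    #define CHUNK 4096

    static int load_chunk(const unsigned char *buf, unsigned dst, unsigned len)
    {
        /* stands in for iwl_load_firmware_chunk(); always succeeds here */
        printf("chunk -> dst 0x%x, %u bytes\n", dst, len);
        return 0;
    }

    int main(void)
    {
        static unsigned char section[10000];   /* fake firmware section */
        unsigned char bounce[CHUNK];           /* reused bounce buffer */
        unsigned offset, len = sizeof(section);

        for (offset = 0; offset < len; offset += CHUNK) {
            unsigned copy = len - offset < CHUNK ? len - offset : CHUNK;

            memcpy(bounce, section + offset, copy);
            if (load_chunk(bounce, 0x40000 + offset, copy))
                return 1;
        }
        return 0;
    }

Reusing one coherent buffer keeps the allocation cost constant no matter how large the section is.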
1004 
1005 static int iwl_load_given_ucode(struct iwl_trans *trans,
1006  const struct fw_img *image)
1007 {
1008  int i, ret = 0;
1009 
1010  for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
1011  if (!image->sec[i].data)
1012  break;
1013 
1014  ret = iwl_load_section(trans, i, &image->sec[i]);
1015  if (ret)
1016  return ret;
1017  }
1018 
1019  /* Remove all resets to allow NIC to operate */
1020  iwl_write32(trans, CSR_RESET, 0);
1021 
1022  return 0;
1023 }
1024 
1025 static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
1026  const struct fw_img *fw)
1027 {
1028  int ret;
1029  bool hw_rfkill;
1030 
1031  /* This may fail if AMT took ownership of the device */
1032  if (iwl_prepare_card_hw(trans)) {
1033  IWL_WARN(trans, "Exit HW not ready\n");
1034  return -EIO;
1035  }
1036 
1037  iwl_enable_rfkill_int(trans);
1038 
1039  /* If platform's RF_KILL switch is NOT set to KILL */
1040  hw_rfkill = iwl_is_rfkill_set(trans);
1041  iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
1042  if (hw_rfkill)
1043  return -ERFKILL;
1044 
1045  iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1046 
1047  ret = iwl_nic_init(trans);
1048  if (ret) {
1049  IWL_ERR(trans, "Unable to init nic\n");
1050  return ret;
1051  }
1052 
1053  /* make sure rfkill handshake bits are cleared */
1054  iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1055  iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
1056  CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
1057 
1058  /* clear (again), then enable host interrupts */
1059  iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
1060  iwl_enable_interrupts(trans);
1061 
1062  /* really make sure rfkill handshake bits are cleared */
1063  iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1064  iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
1065 
1066  /* Load the given image to the HW */
1067  return iwl_load_given_ucode(trans, fw);
1068 }
1069 
1070 /*
1071  * Activate/Deactivate Tx DMA/FIFO channels according tx fifos mask
1072  */
1073 static void iwl_trans_txq_set_sched(struct iwl_trans *trans, u32 mask)
1074 {
1075  struct iwl_trans_pcie __maybe_unused *trans_pcie =
1076  IWL_TRANS_GET_PCIE_TRANS(trans);
1077 
1078  iwl_write_prph(trans, SCD_TXFACT, mask);
1079 }
1080 
1081 static void iwl_tx_start(struct iwl_trans *trans)
1082 {
1083  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1084  u32 a;
1085  int chan;
1086  u32 reg_val;
1087 
1088  /* make sure all queue are not stopped/used */
1089  memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
1090  memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
1091 
1092  trans_pcie->scd_base_addr =
1093  iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
1094  a = trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_LOWER_BOUND;
1095  /* reset context data memory */
1096  for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
1097  a += 4)
1098  iwl_write_targ_mem(trans, a, 0);
1099  /* reset tx status memory */
1100  for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
1101  a += 4)
1102  iwl_write_targ_mem(trans, a, 0);
1103  for (; a < trans_pcie->scd_base_addr +
1104  SCD_TRANS_TBL_OFFSET_QUEUE(
1105  trans->cfg->base_params->num_of_queues);
1106  a += 4)
1107  iwl_write_targ_mem(trans, a, 0);
1108 
1109  iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
1110  trans_pcie->scd_bc_tbls.dma >> 10);
1111 
1112  /* The chain extension of the SCD doesn't work well. This feature is
1113  * enabled by default by the HW, so we need to disable it manually.
1114  */
1115  iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
1116 
1117  iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
1118  trans_pcie->cmd_fifo);
1119 
1120  /* Activate all Tx DMA/FIFO channels */
1121  iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));
1122 
1123  /* Enable DMA channel */
1124  for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
1125  iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
1126  FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
1127  FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
1128 
1129  /* Update FH chicken bits */
1130  reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
1131  iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
1132  reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
1133 
1134  /* Enable L1-Active */
1135  iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
1136  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1137 }
1138 
1139 static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)
1140 {
1141  iwl_reset_ict(trans);
1142  iwl_tx_start(trans);
1143 }
1144 
1148 static int iwl_trans_tx_stop(struct iwl_trans *trans)
1149 {
1150  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1151  int ch, txq_id, ret;
1152  unsigned long flags;
1153 
1154  /* Turn off all Tx DMA fifos */
1155  spin_lock_irqsave(&trans_pcie->irq_lock, flags);
1156 
1157  iwl_trans_txq_set_sched(trans, 0);
1158 
1159  /* Stop each Tx DMA channel, and wait for it to be idle */
1160  for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
1161  iwl_write_direct32(trans,
1162  FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
1163  ret = iwl_poll_direct_bit(trans, FH_TSSR_TX_STATUS_REG,
1164  FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000);
1165  if (ret < 0)
1166  IWL_ERR(trans,
1167  "Failing on timeout while stopping DMA channel %d [0x%08x]\n",
1168  ch,
1169  iwl_read_direct32(trans,
1170  FH_TSSR_TX_STATUS_REG));
1171  }
1172  spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
1173 
1174  if (!trans_pcie->txq) {
1175  IWL_WARN(trans,
1176  "Stopping tx queues that aren't allocated...\n");
1177  return 0;
1178  }
1179 
1180  /* Unmap DMA from host system and free skb's */
1181  for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
1182  txq_id++)
1183  iwl_tx_queue_unmap(trans, txq_id);
1184 
1185  return 0;
1186 }
1187 
1188 static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
1189 {
1190  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1191  unsigned long flags;
1192 
1193  /* tell the device to stop sending interrupts */
1194  spin_lock_irqsave(&trans_pcie->irq_lock, flags);
1195  iwl_disable_interrupts(trans);
1196  spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
1197 
1198  /* device going down, Stop using ICT table */
1199  iwl_disable_ict(trans);
1200 
1201  /*
1202  * If a HW restart happens during firmware loading,
1203  * then the firmware loading might call this function
1204  * and later it might be called again due to the
1205  * restart. So don't process again if the device is
1206  * already dead.
1207  */
1208  if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
1209  iwl_trans_tx_stop(trans);
1210  iwl_trans_rx_stop(trans);
1211 
1212  /* Power-down device's busmaster DMA clocks */
1213  iwl_write_prph(trans, APMG_CLK_DIS_REG,
1214  APMG_CLK_VAL_DMA_CLK_RQT);
1215  udelay(5);
1216  }
1217 
1218  /* Make sure (redundant) we've released our request to stay awake */
1219  iwl_clear_bit(trans, CSR_GP_CNTRL,
1220  CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1221 
1222  /* Stop the device, and put it in low power state */
1223  iwl_apm_stop(trans);
1224 
1225  /* Upon stop, the APM issues an interrupt if HW RF kill is set.
1226  * Clean again the interrupt here
1227  */
1228  spin_lock_irqsave(&trans_pcie->irq_lock, flags);
1229  iwl_disable_interrupts(trans);
1230  spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
1231 
1232  iwl_enable_rfkill_int(trans);
1233 
1234  /* wait to make sure we flush pending tasklet */
1235  synchronize_irq(trans_pcie->irq);
1236  tasklet_kill(&trans_pcie->irq_tasklet);
1237 
1238  cancel_work_sync(&trans_pcie->rx_replenish);
1239 
1240  /* stop and reset the on-board processor */
1241  iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
1242 
1243  /* clear all status bits */
1244  clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
1245  clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
1246  clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
1247  clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
1248 }
1249 
1250 static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
1251 {
1252  /* let the ucode operate on its own */
1253  iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
1254  CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
1255 
1256  iwl_disable_interrupts(trans);
1257  iwl_clear_bit(trans, CSR_GP_CNTRL,
1258  CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1259 }
1260 
1261 static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1262  struct iwl_device_cmd *dev_cmd, int txq_id)
1263 {
1264  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1265  struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1266  struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload;
1267  struct iwl_cmd_meta *out_meta;
1268  struct iwl_tx_queue *txq;
1269  struct iwl_queue *q;
1270  dma_addr_t phys_addr = 0;
1271  dma_addr_t txcmd_phys;
1272  dma_addr_t scratch_phys;
1273  u16 len, firstlen, secondlen;
1274  u8 wait_write_ptr = 0;
1275  __le16 fc = hdr->frame_control;
1276  u8 hdr_len = ieee80211_hdrlen(fc);
1277  u16 __maybe_unused wifi_seq;
1278 
1279  txq = &trans_pcie->txq[txq_id];
1280  q = &txq->q;
1281 
1282  if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
1283  WARN_ON_ONCE(1);
1284  return -EINVAL;
1285  }
1286 
1287  spin_lock(&txq->lock);
1288 
1289  /* In AGG mode, the index in the ring must correspond to the WiFi
1290  * sequence number. This is a HW requirement that helps the SCD parse
1291  * the BA.
1292  * Check here that the packets are in the right place on the ring.
1293  */
1294 #ifdef CONFIG_IWLWIFI_DEBUG
1295  wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
1296  WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) &&
1297  ((wifi_seq & 0xff) != q->write_ptr),
1298  "Q: %d WiFi Seq %d tfdNum %d",
1299  txq_id, wifi_seq, q->write_ptr);
1300 #endif
1301 
1302  /* Set up driver data for this TFD */
1303  txq->entries[q->write_ptr].skb = skb;
1304  txq->entries[q->write_ptr].cmd = dev_cmd;
1305 
1306  dev_cmd->hdr.cmd = REPLY_TX;
1307  dev_cmd->hdr.sequence =
1308  cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
1309  INDEX_TO_SEQ(q->write_ptr)));
1310 
1311  /* Set up first empty entry in queue's array of Tx/cmd buffers */
1312  out_meta = &txq->entries[q->write_ptr].meta;
1313 
1314  /*
1315  * Use the first empty entry in this queue's command buffer array
1316  * to contain the Tx command and MAC header concatenated together
1317  * (payload data will be in another buffer).
1318  * Size of this varies, due to varying MAC header length.
1319  * If end is not dword aligned, we'll have 2 extra bytes at the end
1320  * of the MAC header (device reads on dword boundaries).
1321  * We'll tell device about this padding later.
1322  */
1323  len = sizeof(struct iwl_tx_cmd) +
1324  sizeof(struct iwl_cmd_header) + hdr_len;
1325  firstlen = (len + 3) & ~3;
1326 
1327  /* Tell NIC about any 2-byte padding after MAC header */
1328  if (firstlen != len)
1329  tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
1330 
1331  /* Physical address of this Tx command's header (not MAC header!),
1332  * within command buffer array. */
1333  txcmd_phys = dma_map_single(trans->dev,
1334  &dev_cmd->hdr, firstlen,
1335  DMA_BIDIRECTIONAL);
1336  if (unlikely(dma_mapping_error(trans->dev, txcmd_phys)))
1337  goto out_err;
1338  dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
1339  dma_unmap_len_set(out_meta, len, firstlen);
1340 
1341  if (!ieee80211_has_morefrags(fc)) {
1342  txq->need_update = 1;
1343  } else {
1344  wait_write_ptr = 1;
1345  txq->need_update = 0;
1346  }
1347 
1348  /* Set up TFD's 2nd entry to point directly to remainder of skb,
1349  * if any (802.11 null frames have no payload). */
1350  secondlen = skb->len - hdr_len;
1351  if (secondlen > 0) {
1352  phys_addr = dma_map_single(trans->dev, skb->data + hdr_len,
1353  secondlen, DMA_TO_DEVICE);
1354  if (unlikely(dma_mapping_error(trans->dev, phys_addr))) {
1355  dma_unmap_single(trans->dev,
1356  dma_unmap_addr(out_meta, mapping),
1357  dma_unmap_len(out_meta, len),
1358  DMA_BIDIRECTIONAL);
1359  goto out_err;
1360  }
1361  }
1362 
1363  /* Attach buffers to TFD */
1364  iwlagn_txq_attach_buf_to_tfd(trans, txq, txcmd_phys, firstlen, 1);
1365  if (secondlen > 0)
1366  iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
1367  secondlen, 0);
1368 
1369  scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
1370  offsetof(struct iwl_tx_cmd, scratch);
1371 
1372  /* take back ownership of DMA buffer to enable update */
1373  dma_sync_single_for_cpu(trans->dev, txcmd_phys, firstlen,
1374  DMA_BIDIRECTIONAL);
1375  tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1376  tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
1377 
1378  IWL_DEBUG_TX(trans, "sequence nr = 0X%x\n",
1379  le16_to_cpu(dev_cmd->hdr.sequence));
1380  IWL_DEBUG_TX(trans, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
1381 
1382  /* Set up entry for this TFD in Tx byte-count array */
1383  iwl_trans_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len));
1384 
1385  dma_sync_single_for_device(trans->dev, txcmd_phys, firstlen,
1386  DMA_BIDIRECTIONAL);
1387 
1388  trace_iwlwifi_dev_tx(trans->dev,
1389  &txq->tfds[txq->q.write_ptr],
1390  sizeof(struct iwl_tfd),
1391  &dev_cmd->hdr, firstlen,
1392  skb->data + hdr_len, secondlen);
1393 
1394  /* start timer if queue currently empty */
1395  if (txq->need_update && q->read_ptr == q->write_ptr &&
1396  trans_pcie->wd_timeout)
1397  mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
1398 
1399  /* Tell device the write index *just past* this latest filled TFD */
1400  q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
1401  iwl_txq_update_write_ptr(trans, txq);
1402 
1403  /*
1404  * At this point the frame is "transmitted" successfully
1405  * and we will get a TX status notification eventually,
1406  * regardless of the value of ret. "ret" only indicates
1407  * whether or not we should update the write pointer.
1408  */
1409  if (iwl_queue_space(q) < q->high_mark) {
1410  if (wait_write_ptr) {
1411  txq->need_update = 1;
1412  iwl_txq_update_write_ptr(trans, txq);
1413  } else {
1414  iwl_stop_queue(trans, txq);
1415  }
1416  }
1417  spin_unlock(&txq->lock);
1418  return 0;
1419  out_err:
1420  spin_unlock(&txq->lock);
1421  return -1;
1422 }
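The TX path stamps each descriptor with a sequence value that combines the queue id and the ring write pointer (QUEUE_TO_SEQ | INDEX_TO_SEQ), which is what lets a completion be traced back to one slot in one queue. A sketch of that style of packing; the shift and mask below are illustrative, not the driver's actual SEQ layout:

    #include <stdio.h>

    /* Sketch of packing a queue id and ring index into one 16-bit sequence
     * field, in the spirit of QUEUE_TO_SEQ/INDEX_TO_SEQ. The bit layout
     * here is illustrative only. */
    #define SEQ_INDEX_BITS 8
    #define SEQ_INDEX_MSK  ((1u << SEQ_INDEX_BITS) - 1)

    static unsigned short make_seq(unsigned q, unsigned idx)
    {
        return (unsigned short)((q << SEQ_INDEX_BITS) | (idx & SEQ_INDEX_MSK));
    }

    int main(void)
    {
        unsigned short seq = make_seq(9, 130);

        printf("seq=0x%04x queue=%u index=%u\n",
               seq, seq >> SEQ_INDEX_BITS, seq & SEQ_INDEX_MSK);
        return 0;
    }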
1423 
1424 static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
1425 {
1426  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1427  int err;
1428  bool hw_rfkill;
1429 
1430  trans_pcie->inta_mask = CSR_INI_SET_MASK;
1431 
1432  if (!trans_pcie->irq_requested) {
1433  tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
1434  iwl_irq_tasklet, (unsigned long)trans);
1435 
1436  iwl_alloc_isr_ict(trans);
1437 
1438  err = request_irq(trans_pcie->irq, iwl_isr_ict, IRQF_SHARED,
1439  DRV_NAME, trans);
1440  if (err) {
1441  IWL_ERR(trans, "Error allocating IRQ %d\n",
1442  trans_pcie->irq);
1443  goto error;
1444  }
1445 
1446  INIT_WORK(&trans_pcie->rx_replenish, iwl_bg_rx_replenish);
1447  trans_pcie->irq_requested = true;
1448  }
1449 
1450  err = iwl_prepare_card_hw(trans);
1451  if (err) {
1452  IWL_ERR(trans, "Error while preparing HW: %d\n", err);
1453  goto err_free_irq;
1454  }
1455 
1456  iwl_apm_init(trans);
1457 
1458  /* From now on, the op_mode will be kept updated about RF kill state */
1459  iwl_enable_rfkill_int(trans);
1460 
1461  hw_rfkill = iwl_is_rfkill_set(trans);
1462  iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
1463 
1464  return err;
1465 
1466 err_free_irq:
1467  trans_pcie->irq_requested = false;
1468  free_irq(trans_pcie->irq, trans);
1469 error:
1470  iwl_free_isr_ict(trans);
1471  tasklet_kill(&trans_pcie->irq_tasklet);
1472  return err;
1473 }
1474 
1475 static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
1476  bool op_mode_leaving)
1477 {
1478  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1479  bool hw_rfkill;
1480  unsigned long flags;
1481 
1482  spin_lock_irqsave(&trans_pcie->irq_lock, flags);
1483  iwl_disable_interrupts(trans);
1484  spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
1485 
1486  iwl_apm_stop(trans);
1487 
1488  spin_lock_irqsave(&trans_pcie->irq_lock, flags);
1489  iwl_disable_interrupts(trans);
1490  spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
1491 
1492  if (!op_mode_leaving) {
1493  /*
1494  * Even if we stop the HW, we still want the RF kill
1495  * interrupt
1496  */
1497  iwl_enable_rfkill_int(trans);
1498 
1499  /*
1500  * Check again since the RF kill state may have changed while
1501  * all the interrupts were disabled, in this case we couldn't
1502  * receive the RF kill interrupt and update the state in the
1503  * op_mode.
1504  */
1505  hw_rfkill = iwl_is_rfkill_set(trans);
1506  iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
1507  }
1508 }
1509 
1510 static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
1511  struct sk_buff_head *skbs)
1512 {
1513  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1514  struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
1515  /* n_bd is usually 256 => n_bd - 1 = 0xff */
1516  int tfd_num = ssn & (txq->q.n_bd - 1);
1517  int freed = 0;
1518 
1519  spin_lock(&txq->lock);
1520 
1521  if (txq->q.read_ptr != tfd_num) {
1522  IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
1523  txq_id, txq->q.read_ptr, tfd_num, ssn);
1524  freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
1525  if (iwl_queue_space(&txq->q) > txq->q.low_mark)
1526  iwl_wake_queue(trans, txq);
1527  }
1528 
1529  spin_unlock(&txq->lock);
1530 }
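Reclaim converts the firmware's start sequence number (ssn) into a ring index by masking with n_bd - 1 (the comment notes n_bd is usually 256), then walks from read_ptr to that index with wrap-around, freeing each entry. The index arithmetic in isolation:

    #include <stdio.h>

    /* Sketch of the reclaim arithmetic in iwl_trans_pcie_reclaim. */
    #define N_BD 256                          /* ring size, a power of two */

    int main(void)
    {
        int read_ptr = 250;                   /* oldest un-reclaimed entry */
        int ssn = 0x105;                      /* firmware's start sequence number */
        int tfd_num = ssn & (N_BD - 1);       /* 0x105 & 0xff = 5 */
        int freed = 0;

        while (read_ptr != tfd_num) {
            read_ptr = (read_ptr + 1) & (N_BD - 1);   /* increment with wrap */
            freed++;
        }
        printf("tfd_num=%d freed=%d entries\n", tfd_num, freed);
        return 0;
    }

With read_ptr at 250 and tfd_num at 5, the walk wraps through the end of the ring and frees 11 entries.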
1531 
1532 static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
1533 {
1534  writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1535 }
1536 
1537 static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
1538 {
1539  writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1540 }
1541 
1542 static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
1543 {
1544  return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1545 }
1546 
1547 static void iwl_trans_pcie_configure(struct iwl_trans *trans,
1548  const struct iwl_trans_config *trans_cfg)
1549 {
1550  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1551 
1552  trans_pcie->cmd_queue = trans_cfg->cmd_queue;
1553  trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
1554  if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
1555  trans_pcie->n_no_reclaim_cmds = 0;
1556  else
1557  trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
1558  if (trans_pcie->n_no_reclaim_cmds)
1559  memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
1560  trans_pcie->n_no_reclaim_cmds * sizeof(u8));
1561 
1562  trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
1563  if (trans_pcie->rx_buf_size_8k)
1564  trans_pcie->rx_page_order = get_order(8 * 1024);
1565  else
1566  trans_pcie->rx_page_order = get_order(4 * 1024);
1567 
1568  trans_pcie->wd_timeout =
1569  msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);
1570 
1571  trans_pcie->command_names = trans_cfg->command_names;
1572 }
1573 
1574 void iwl_trans_pcie_free(struct iwl_trans *trans)
1575 {
1576  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1577 
1578  iwl_trans_pcie_tx_free(trans);
1579  iwl_trans_pcie_rx_free(trans);
1580 
1581  if (trans_pcie->irq_requested) {
1582  free_irq(trans_pcie->irq, trans);
1583  iwl_free_isr_ict(trans);
1584  }
1585 
1586  pci_disable_msi(trans_pcie->pci_dev);
1587  iounmap(trans_pcie->hw_base);
1588  pci_release_regions(trans_pcie->pci_dev);
1589  pci_disable_device(trans_pcie->pci_dev);
1590  kmem_cache_destroy(trans->dev_cmd_pool);
1591 
1592  kfree(trans);
1593 }
1594 
1595 static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
1596 {
1597  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1598 
1599  if (state)
1600  set_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
1601  else
1602  clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
1603 }
1604 
1605 #ifdef CONFIG_PM_SLEEP
1606 static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
1607 {
1608  return 0;
1609 }
1610 
1611 static int iwl_trans_pcie_resume(struct iwl_trans *trans)
1612 {
1613  bool hw_rfkill;
1614 
1615  iwl_enable_rfkill_int(trans);
1616 
1617  hw_rfkill = iwl_is_rfkill_set(trans);
1618  iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
1619 
1620  if (!hw_rfkill)
1621  iwl_enable_interrupts(trans);
1622 
1623  return 0;
1624 }
1625 #endif /* CONFIG_PM_SLEEP */
1626 
1627 #define IWL_FLUSH_WAIT_MS 2000
1628 
1629 static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)
1630 {
1631  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1632  struct iwl_tx_queue *txq;
1633  struct iwl_queue *q;
1634  int cnt;
1635  unsigned long now = jiffies;
1636  int ret = 0;
1637 
1638  /* waiting for all the tx frames complete might take a while */
1639  for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
1640  if (cnt == trans_pcie->cmd_queue)
1641  continue;
1642  txq = &trans_pcie->txq[cnt];
1643  q = &txq->q;
1644  while (q->read_ptr != q->write_ptr && !time_after(jiffies,
1645  now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
1646  msleep(1);
1647 
1648  if (q->read_ptr != q->write_ptr) {
1649  IWL_ERR(trans, "fail to flush all tx fifo queues\n");
1650  ret = -ETIMEDOUT;
1651  break;
1652  }
1653  }
1654  return ret;
1655 }
1656 
1657 static const char *get_fh_string(int cmd)
1658 {
1659 #define IWL_CMD(x) case x: return #x
1660  switch (cmd) {
1661  IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
1662  IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
1663  IWL_CMD(FH_RSCSR_CHNL0_WPTR);
1664  IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
1665  IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
1666  IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
1667  IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
1668  IWL_CMD(FH_TSSR_TX_STATUS_REG);
1669  IWL_CMD(FH_TSSR_TX_ERROR_REG);
1670  default:
1671  return "UNKNOWN";
1672  }
1673 #undef IWL_CMD
1674 }
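get_fh_string relies on a locally defined IWL_CMD macro: the preprocessor's stringize operator (#x) turns each register constant into both a case label and its printable name, so one line per register covers both. The trick in isolation, with hypothetical REG_* names:

    #include <stdio.h>

    /* Sketch of the IWL_CMD stringify trick: #x turns the macro argument
     * into a string literal. REG_* values are hypothetical. */
    enum { REG_ALPHA = 0x10, REG_BETA = 0x14, REG_GAMMA = 0x18 };

    static const char *reg_name(int reg)
    {
    #define IWL_CMD(x) case x: return #x
        switch (reg) {
        IWL_CMD(REG_ALPHA);
        IWL_CMD(REG_BETA);
        IWL_CMD(REG_GAMMA);
        default:
            return "UNKNOWN";
        }
    #undef IWL_CMD
    }

    int main(void)
    {
        printf("%s %s\n", reg_name(0x14), reg_name(0x99));
        return 0;
    }

Undefining the macro right after the switch keeps it scoped to the one function, which is why both get_fh_string and get_csr_string can define it independently.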
1675 
1676 int iwl_dump_fh(struct iwl_trans *trans, char **buf)
1677 {
1678  int i;
1679  static const u32 fh_tbl[] = {
1680  FH_RSCSR_CHNL0_STTS_WPTR_REG,
1681  FH_RSCSR_CHNL0_RBDCB_BASE_REG,
1682  FH_RSCSR_CHNL0_WPTR,
1683  FH_MEM_RCSR_CHNL0_CONFIG_REG,
1684  FH_MEM_RSSR_SHARED_CTRL_REG,
1685  FH_MEM_RSSR_RX_STATUS_REG,
1686  FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
1687  FH_TSSR_TX_STATUS_REG,
1688  FH_TSSR_TX_ERROR_REG
1689  };
1690 
1691 #ifdef CONFIG_IWLWIFI_DEBUGFS
1692  if (buf) {
1693  int pos = 0;
1694  size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
1695 
1696  *buf = kmalloc(bufsz, GFP_KERNEL);
1697  if (!*buf)
1698  return -ENOMEM;
1699 
1700  pos += scnprintf(*buf + pos, bufsz - pos,
1701  "FH register values:\n");
1702 
1703  for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
1704  pos += scnprintf(*buf + pos, bufsz - pos,
1705  " %34s: 0X%08x\n",
1706  get_fh_string(fh_tbl[i]),
1707  iwl_read_direct32(trans, fh_tbl[i]));
1708 
1709  return pos;
1710  }
1711 #endif
1712 
1713  IWL_ERR(trans, "FH register values:\n");
1714  for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
1715  IWL_ERR(trans, " %34s: 0X%08x\n",
1716  get_fh_string(fh_tbl[i]),
1717  iwl_read_direct32(trans, fh_tbl[i]));
1718 
1719  return 0;
1720 }
1721 
1722 static const char *get_csr_string(int cmd)
1723 {
1724 #define IWL_CMD(x) case x: return #x
1725  switch (cmd) {
1726  IWL_CMD(CSR_HW_IF_CONFIG_REG);
1727  IWL_CMD(CSR_INT_COALESCING);
1728  IWL_CMD(CSR_INT);
1729  IWL_CMD(CSR_INT_MASK);
1730  IWL_CMD(CSR_FH_INT_STATUS);
1731  IWL_CMD(CSR_GPIO_IN);
1732  IWL_CMD(CSR_RESET);
1733  IWL_CMD(CSR_GP_CNTRL);
1734  IWL_CMD(CSR_HW_REV);
1735  IWL_CMD(CSR_EEPROM_REG);
1736  IWL_CMD(CSR_EEPROM_GP);
1737  IWL_CMD(CSR_OTP_GP_REG);
1738  IWL_CMD(CSR_GIO_REG);
1739  IWL_CMD(CSR_GP_UCODE_REG);
1740  IWL_CMD(CSR_GP_DRIVER_REG);
1741  IWL_CMD(CSR_UCODE_DRV_GP1);
1742  IWL_CMD(CSR_UCODE_DRV_GP2);
1743  IWL_CMD(CSR_LED_REG);
1744  IWL_CMD(CSR_DRAM_INT_TBL_REG);
1745  IWL_CMD(CSR_GIO_CHICKEN_BITS);
1746  IWL_CMD(CSR_ANA_PLL_CFG);
1747  IWL_CMD(CSR_HW_REV_WA_REG);
1748  IWL_CMD(CSR_DBG_HPET_MEM_REG);
1749  default:
1750  return "UNKNOWN";
1751  }
1752 #undef IWL_CMD
1753 }
1754 
1755 void iwl_dump_csr(struct iwl_trans *trans)
1756 {
1757  int i;
1758  static const u32 csr_tbl[] = {
1759  CSR_HW_IF_CONFIG_REG,
1760  CSR_INT_COALESCING,
1761  CSR_INT,
1762  CSR_INT_MASK,
1763  CSR_FH_INT_STATUS,
1764  CSR_GPIO_IN,
1765  CSR_RESET,
1766  CSR_GP_CNTRL,
1767  CSR_HW_REV,
1768  CSR_EEPROM_REG,
1769  CSR_EEPROM_GP,
1770  CSR_OTP_GP_REG,
1771  CSR_GIO_REG,
1772  CSR_GP_UCODE_REG,
1773  CSR_GP_DRIVER_REG,
1774  CSR_UCODE_DRV_GP1,
1775  CSR_UCODE_DRV_GP2,
1776  CSR_LED_REG,
1777  CSR_DRAM_INT_TBL_REG,
1778  CSR_GIO_CHICKEN_BITS,
1779  CSR_ANA_PLL_CFG,
1780  CSR_HW_REV_WA_REG,
1781  CSR_DBG_HPET_MEM_REG
1782  };
1783  IWL_ERR(trans, "CSR values:\n");
1784  IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
1785  "CSR_INT_PERIODIC_REG)\n");
1786  for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
1787  IWL_ERR(trans, " %25s: 0X%08x\n",
1788  get_csr_string(csr_tbl[i]),
1789  iwl_read32(trans, csr_tbl[i]));
1790  }
1791 }
1792 
1793 #ifdef CONFIG_IWLWIFI_DEBUGFS
1794 /* create and remove of files */
1795 #define DEBUGFS_ADD_FILE(name, parent, mode) do { \
1796  if (!debugfs_create_file(#name, mode, parent, trans, \
1797  &iwl_dbgfs_##name##_ops)) \
1798  goto err; \
1799 } while (0)
1800 
1801 /* file operation */
1802 #define DEBUGFS_READ_FUNC(name) \
1803 static ssize_t iwl_dbgfs_##name##_read(struct file *file, \
1804  char __user *user_buf, \
1805  size_t count, loff_t *ppos);
1806 
1807 #define DEBUGFS_WRITE_FUNC(name) \
1808 static ssize_t iwl_dbgfs_##name##_write(struct file *file, \
1809  const char __user *user_buf, \
1810  size_t count, loff_t *ppos);
1811 
1812 
1813 #define DEBUGFS_READ_FILE_OPS(name) \
1814  DEBUGFS_READ_FUNC(name); \
1815 static const struct file_operations iwl_dbgfs_##name##_ops = { \
1816  .read = iwl_dbgfs_##name##_read, \
1817  .open = simple_open, \
1818  .llseek = generic_file_llseek, \
1819 };
1820 
1821 #define DEBUGFS_WRITE_FILE_OPS(name) \
1822  DEBUGFS_WRITE_FUNC(name); \
1823 static const struct file_operations iwl_dbgfs_##name##_ops = { \
1824  .write = iwl_dbgfs_##name##_write, \
1825  .open = simple_open, \
1826  .llseek = generic_file_llseek, \
1827 };
1828 
1829 #define DEBUGFS_READ_WRITE_FILE_OPS(name) \
1830  DEBUGFS_READ_FUNC(name); \
1831  DEBUGFS_WRITE_FUNC(name); \
1832 static const struct file_operations iwl_dbgfs_##name##_ops = { \
1833  .write = iwl_dbgfs_##name##_write, \
1834  .read = iwl_dbgfs_##name##_read, \
1835  .open = simple_open, \
1836  .llseek = generic_file_llseek, \
1837 };
1838 
1839 static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
1840  char __user *user_buf,
1841  size_t count, loff_t *ppos)
1842 {
1843  struct iwl_trans *trans = file->private_data;
1844  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1845  struct iwl_tx_queue *txq;
1846  struct iwl_queue *q;
1847  char *buf;
1848  int pos = 0;
1849  int cnt;
1850  int ret;
1851  size_t bufsz;
1852 
1853  bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;
1854 
1855  if (!trans_pcie->txq)
1856  return -EAGAIN;
1857 
1858  buf = kzalloc(bufsz, GFP_KERNEL);
1859  if (!buf)
1860  return -ENOMEM;
1861 
1862  for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
1863  txq = &trans_pcie->txq[cnt];
1864  q = &txq->q;
1865  pos += scnprintf(buf + pos, bufsz - pos,
1866  "hwq %.2d: read=%u write=%u use=%d stop=%d\n",
1867  cnt, q->read_ptr, q->write_ptr,
1868  !!test_bit(cnt, trans_pcie->queue_used),
1869  !!test_bit(cnt, trans_pcie->queue_stopped));
1870  }
1871  ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1872  kfree(buf);
1873  return ret;
1874 }
1875 
1876 static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
1877  char __user *user_buf,
1878  size_t count, loff_t *ppos)
1879 {
1880  struct iwl_trans *trans = file->private_data;
1881  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1882  struct iwl_rx_queue *rxq = &trans_pcie->rxq;
1883  char buf[256];
1884  int pos = 0;
1885  const size_t bufsz = sizeof(buf);
1886 
1887  pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
1888  rxq->read);
1889  pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
1890  rxq->write);
1891  pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
1892  rxq->free_count);
1893  if (rxq->rb_stts) {
1894  pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
1895  le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
1896  } else {
1897  pos += scnprintf(buf + pos, bufsz - pos,
1898  "closed_rb_num: Not Allocated\n");
1899  }
1900  return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1901 }
1902 
1903 static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
1904  char __user *user_buf,
1905  size_t count, loff_t *ppos)
1906 {
1907  struct iwl_trans *trans = file->private_data;
1908  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1909  struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1910 
1911  int pos = 0;
1912  char *buf;
1913  int bufsz = 24 * 64; /* 24 items * 64 char per item */
1914  ssize_t ret;
1915 
1916  buf = kzalloc(bufsz, GFP_KERNEL);
1917  if (!buf)
1918  return -ENOMEM;
1919 
1920  pos += scnprintf(buf + pos, bufsz - pos,
1921  "Interrupt Statistics Report:\n");
1922 
1923  pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
1924  isr_stats->hw);
1925  pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
1926  isr_stats->sw);
1927  if (isr_stats->sw || isr_stats->hw) {
1928  pos += scnprintf(buf + pos, bufsz - pos,
1929  "\tLast Restarting Code: 0x%X\n",
1930  isr_stats->err_code);
1931  }
1932 #ifdef CONFIG_IWLWIFI_DEBUG
1933  pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
1934  isr_stats->sch);
1935  pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
1936  isr_stats->alive);
1937 #endif
1938  pos += scnprintf(buf + pos, bufsz - pos,
1939  "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
1940 
1941  pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
1942  isr_stats->ctkill);
1943 
1944  pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
1945  isr_stats->wakeup);
1946 
1947  pos += scnprintf(buf + pos, bufsz - pos,
1948  "Rx command responses:\t\t %u\n", isr_stats->rx);
1949 
1950  pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
1951  isr_stats->tx);
1952 
1953  pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
1954  isr_stats->unhandled);
1955 
1956  ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1957  kfree(buf);
1958  return ret;
1959 }
1960 
1961 static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
1962  const char __user *user_buf,
1963  size_t count, loff_t *ppos)
1964 {
1965  struct iwl_trans *trans = file->private_data;
1966  struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1967  struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1968 
1969  char buf[8];
1970  int buf_size;
1971  u32 reset_flag;
1972 
1973  memset(buf, 0, sizeof(buf));
1974  buf_size = min(count, sizeof(buf) - 1);
1975  if (copy_from_user(buf, user_buf, buf_size))
1976  return -EFAULT;
1977  if (sscanf(buf, "%x", &reset_flag) != 1)
1978  return -EFAULT;
1979  if (reset_flag == 0)
1980  memset(isr_stats, 0, sizeof(*isr_stats));
1981 
1982  return count;
1983 }
1984 
1985 static ssize_t iwl_dbgfs_csr_write(struct file *file,
1986  const char __user *user_buf,
1987  size_t count, loff_t *ppos)
1988 {
1989  struct iwl_trans *trans = file->private_data;
1990  char buf[8];
1991  int buf_size;
1992  int csr;
1993 
1994  memset(buf, 0, sizeof(buf));
1995  buf_size = min(count, sizeof(buf) - 1);
1996  if (copy_from_user(buf, user_buf, buf_size))
1997  return -EFAULT;
1998  if (sscanf(buf, "%d", &csr) != 1)
1999  return -EFAULT;
2000 
2001  iwl_dump_csr(trans);
2002 
2003  return count;
2004 }
2005 
2006 static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
2007  char __user *user_buf,
2008  size_t count, loff_t *ppos)
2009 {
2010  struct iwl_trans *trans = file->private_data;
2011  char *buf = NULL;
2012  int pos = 0;
2013  ssize_t ret = -EFAULT;
2014 
2015  ret = pos = iwl_dump_fh(trans, &buf);
2016  if (buf) {
2017  ret = simple_read_from_buffer(user_buf,
2018  count, ppos, buf, pos);
2019  kfree(buf);
2020  }
2021 
2022  return ret;
2023 }
2024 
2025 static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
2026  const char __user *user_buf,
2027  size_t count, loff_t *ppos)
2028 {
2029  struct iwl_trans *trans = file->private_data;
2030 
2031  if (!trans->op_mode)
2032  return -EAGAIN;
2033 
2034  local_bh_disable();
2035  iwl_op_mode_nic_error(trans->op_mode);
2036  local_bh_enable();
2037 
2038  return count;
2039 }
2040 
2041 DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
2042 DEBUGFS_READ_FILE_OPS(fh_reg);
2043 DEBUGFS_READ_FILE_OPS(rx_queue);
2044 DEBUGFS_READ_FILE_OPS(tx_queue);
2045 DEBUGFS_WRITE_FILE_OPS(csr);
2046 DEBUGFS_WRITE_FILE_OPS(fw_restart);
2047 
2048 /*
2049  * Create the debugfs files and directories
2050  *
2051  */
2052 static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
2053  struct dentry *dir)
2054 {
2055  DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
2056  DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
2057  DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
2058  DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
2059  DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
2060  DEBUGFS_ADD_FILE(fw_restart, dir, S_IWUSR);
2061  return 0;
2062 
2063 err:
2064  IWL_ERR(trans, "failed to create the trans debugfs entry\n");
2065  return -ENOMEM;
2066 }
2067 #else
2068 static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
2069  struct dentry *dir)
2070 {
2071  return 0;
2072 }
2073 #endif /*CONFIG_IWLWIFI_DEBUGFS */
2074 
2075 static const struct iwl_trans_ops trans_ops_pcie = {
2076  .start_hw = iwl_trans_pcie_start_hw,
2077  .stop_hw = iwl_trans_pcie_stop_hw,
2078  .fw_alive = iwl_trans_pcie_fw_alive,
2079  .start_fw = iwl_trans_pcie_start_fw,
2080  .stop_device = iwl_trans_pcie_stop_device,
2081 
2082  .wowlan_suspend = iwl_trans_pcie_wowlan_suspend,
2083 
2084  .send_cmd = iwl_trans_pcie_send_cmd,
2085 
2086  .tx = iwl_trans_pcie_tx,
2087  .reclaim = iwl_trans_pcie_reclaim,
2088 
2089  .txq_disable = iwl_trans_pcie_txq_disable,
2090  .txq_enable = iwl_trans_pcie_txq_enable,
2091 
2092  .dbgfs_register = iwl_trans_pcie_dbgfs_register,
2093 
2094  .wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty,
2095 
2096 #ifdef CONFIG_PM_SLEEP
2097  .suspend = iwl_trans_pcie_suspend,
2098  .resume = iwl_trans_pcie_resume,
2099 #endif
2100  .write8 = iwl_trans_pcie_write8,
2101  .write32 = iwl_trans_pcie_write32,
2102  .read32 = iwl_trans_pcie_read32,
2103  .configure = iwl_trans_pcie_configure,
2104  .set_pmi = iwl_trans_pcie_set_pmi,
2105 };
2106 
2107 struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2108  const struct pci_device_id *ent,
2109  const struct iwl_cfg *cfg)
2110 {
2111  struct iwl_trans_pcie *trans_pcie;
2112  struct iwl_trans *trans;
2113  u16 pci_cmd;
2114  int err;
2115 
2116  trans = kzalloc(sizeof(struct iwl_trans) +
2117  sizeof(struct iwl_trans_pcie), GFP_KERNEL);
2118 
2119  if (WARN_ON(!trans))
2120  return NULL;
2121 
2122  trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2123 
2124  trans->ops = &trans_ops_pcie;
2125  trans->cfg = cfg;
2126  trans_pcie->trans = trans;
2127  spin_lock_init(&trans_pcie->irq_lock);
2128  init_waitqueue_head(&trans_pcie->ucode_write_waitq);
2129 
2130  /* W/A - seems to solve weird behavior. We need to remove this if we
2131  * don't want to stay in L1 all the time. This wastes a lot of power */
2132  pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
2133  PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
2134 
2135  if (pci_enable_device(pdev)) {
2136  err = -ENODEV;
2137  goto out_no_pci;
2138  }
2139 
2140  pci_set_master(pdev);
2141 
2142  err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
2143  if (!err)
2144  err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
2145  if (err) {
2146  err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2147  if (!err)
2148  err = pci_set_consistent_dma_mask(pdev,
2149  DMA_BIT_MASK(32));
2150  /* both attempts failed: */
2151  if (err) {
2152  dev_printk(KERN_ERR, &pdev->dev,
2153  "No suitable DMA available.\n");
2154  goto out_pci_disable_device;
2155  }
2156  }
2157 
2158  err = pci_request_regions(pdev, DRV_NAME);
2159  if (err) {
2160  dev_printk(KERN_ERR, &pdev->dev,
2161  "pci_request_regions failed\n");
2162  goto out_pci_disable_device;
2163  }
2164 
2165  trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
2166  if (!trans_pcie->hw_base) {
2167  dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed\n");
2168  err = -ENODEV;
2169  goto out_pci_release_regions;
2170  }
2171 
2172  dev_printk(KERN_INFO, &pdev->dev,
2173  "pci_resource_len = 0x%08llx\n",
2174  (unsigned long long) pci_resource_len(pdev, 0));
2175  dev_printk(KERN_INFO, &pdev->dev,
2176  "pci_resource_base = %p\n", trans_pcie->hw_base);
2177 
2178  dev_printk(KERN_INFO, &pdev->dev,
2179  "HW Revision ID = 0x%X\n", pdev->revision);
2180 
2181  /* We disable the RETRY_TIMEOUT register (0x41) to keep
2182  * PCI Tx retries from interfering with C3 CPU state */
2183  pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
2184 
2185  err = pci_enable_msi(pdev);
2186  if (err)
2187  dev_printk(KERN_ERR, &pdev->dev,
2188  "pci_enable_msi failed(0X%x)\n", err);
2189 
2190  trans->dev = &pdev->dev;
2191  trans_pcie->irq = pdev->irq;
2192  trans_pcie->pci_dev = pdev;
2193  trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
2194  trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
2195  snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
2196  "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
2197 
2198  /* TODO: Move this away, not needed if not MSI */
2199  /* enable rfkill interrupt: hw bug w/a */
2200  pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
2201  if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
2202  pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
2203  pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
2204  }
2205 
2206  /* Initialize the wait queue for commands */
2207  init_waitqueue_head(&trans_pcie->wait_command_queue);
2208  spin_lock_init(&trans->reg_lock);
2209 
2210  snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
2211  "iwl_cmd_pool:%s", dev_name(trans->dev));
2212 
2213  trans->dev_cmd_headroom = 0;
2214  trans->dev_cmd_pool =
2215  kmem_cache_create(trans->dev_cmd_pool_name,
2216  sizeof(struct iwl_device_cmd)
2217  + trans->dev_cmd_headroom,
2218  sizeof(void *),
2219  SLAB_HWCACHE_ALIGN,
2220  NULL);
2221 
2222  if (!trans->dev_cmd_pool)
2223  goto out_pci_disable_msi;
2224 
2225  return trans;
2226 
2227 out_pci_disable_msi:
2228  pci_disable_msi(pdev);
2229 out_pci_release_regions:
2230  pci_release_regions(pdev);
2231 out_pci_disable_device:
2232  pci_disable_device(pdev);
2233 out_no_pci:
2234  kfree(trans);
2235  return NULL;
2236 }