#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/bitops.h>
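
/*
 * Builds a bitmask covering every TX queue supported by the hardware
 * while masking out the command queue, so scheduler queue-chain
 * settings can be applied to all data queues at once.
 */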
#define SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie) \
        (((1<<trans->cfg->base_params->num_of_queues) - 1) &\
        (~(1<<(trans_pcie)->cmd_queue)))

static void iwl_trans_rx_hw_init(struct iwl_trans *trans,
                                 struct iwl_rx_queue *rxq)

static int iwl_rx_init(struct iwl_trans *trans)
        err = iwl_trans_rx_alloc(trans);
        iwl_trans_rxq_free_rx_bufs(trans);
        spin_unlock_irqrestore(&rxq->lock, flags);
        iwl_trans_rx_hw_init(trans, rxq);
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

static void iwl_trans_pcie_rx_free(struct iwl_trans *trans)

        iwl_trans_rxq_free_rx_bufs(trans);
        spin_unlock_irqrestore(&rxq->lock, flags);

static int iwl_trans_rx_stop(struct iwl_trans *trans)

static int iwlagn_alloc_dma_ptr(struct iwl_trans *trans,
                                struct iwl_dma_ptr *ptr, size_t size)

static void iwlagn_free_dma_ptr(struct iwl_trans *trans,
                                struct iwl_dma_ptr *ptr)

        memset(ptr, 0, sizeof(*ptr));
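
/*
 * Watchdog callback for a TX queue that has not progressed: if the
 * read and write pointers still differ when the timer fires, the code
 * below dumps queue, scheduler and FH state via IWL_ERR() and then
 * asks the op_mode to restart the NIC (iwl_op_mode_nic_error()).
 */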
static void iwl_trans_pcie_queue_stuck_timer(unsigned long data)

        struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);

        spin_lock(&txq->lock);
        if (txq->q.read_ptr == txq->q.write_ptr) {
                spin_unlock(&txq->lock);
        spin_unlock(&txq->lock);
        IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id,
        IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
                txq->q.read_ptr, txq->q.write_ptr);
        IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i,
        for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
                tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
                tbl_dw = tbl_dw & 0x0000FFFF;
                        "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
                        i, active ? "" : "in", fifo, tbl_dw,
                        SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1),
             i = iwl_queue_inc_wrap(i, q->n_bd)) {
                IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
        iwl_op_mode_nic_error(trans->op_mode);
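
/*
 * TX queue setup is split in two: iwl_trans_txq_alloc() allocates the
 * per-slot entries and the TFD ring with dma_alloc_coherent(), while
 * iwl_trans_txq_init() points the hardware at the ring by writing the
 * ring's DMA address shifted right by 8 bits.
 */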
static int iwl_trans_txq_alloc(struct iwl_trans *trans,
                               struct iwl_tx_queue *txq, int slots_num,
                               u32 txq_id)

        txq->q.n_window = slots_num;
        txq->entries = kcalloc(slots_num,
        for (i = 0; i < slots_num; i++) {
                IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
        for (i = 0; i < slots_num; i++)

static int iwl_trans_txq_init(struct iwl_trans *trans, struct iwl_tx_queue *txq,
                              int slots_num, u32 txq_id)

        BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
                           txq->q.dma_addr >> 8);

static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)

        spin_lock_bh(&txq->lock);
        spin_unlock_bh(&txq->lock);

static void iwl_tx_queue_free(struct iwl_trans *trans, int txq_id)

        iwl_tx_queue_unmap(trans, txq_id);
        for (i = 0; i < txq->q.n_window; i++) {
                          txq->q.n_bd, txq->tfds, txq->q.dma_addr);
        memset(&txq->q.dma_addr, 0, sizeof(txq->q.dma_addr));
        memset(txq, 0, sizeof(*txq));

static void iwl_trans_pcie_tx_free(struct iwl_trans *trans)

        if (trans_pcie->txq) {
                     txq_id < trans->cfg->base_params->num_of_queues; txq_id++)
                        iwl_tx_queue_free(trans, txq_id);
        iwlagn_free_dma_ptr(trans, &trans_pcie->kw);
        iwlagn_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
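
/*
 * The allocation path below reserves the scheduler byte-count tables
 * and the keep-warm page before the txq array itself; the command
 * queue gets its own slot count via the cmd_queue ternary further on.
 */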
static int iwl_trans_tx_alloc(struct iwl_trans *trans)

        int txq_id, slots_num;
        u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *

        ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
                IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
        ret = iwlagn_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
                IWL_ERR(trans, "Keep Warm allocation failed\n");
        trans_pcie->txq = kcalloc(trans->cfg->base_params->num_of_queues,
        if (!trans_pcie->txq) {
                IWL_ERR(trans, "Not enough memory for txq\n");
        for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
                slots_num = (txq_id == trans_pcie->cmd_queue) ?
                ret = iwl_trans_txq_alloc(trans, &trans_pcie->txq[txq_id],
                        IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
        iwl_trans_pcie_tx_free(trans);

static int iwl_tx_init(struct iwl_trans *trans)

        int txq_id, slots_num;

        if (!trans_pcie->txq) {
                ret = iwl_trans_tx_alloc(trans);
                           trans_pcie->kw.dma >> 4);
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
        for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
                slots_num = (txq_id == trans_pcie->cmd_queue) ?
                ret = iwl_trans_txq_init(trans, &trans_pcie->txq[txq_id],
                        IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
        iwl_trans_pcie_tx_free(trans);
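
/*
 * Power and APM bring-up: iwl_apm_config() reads the PCIe link control
 * word and trades off L0S against L1 ASPM states (only one of the two
 * is left enabled), and iwl_apm_init() applies the PLL configuration
 * when the base params provide one.
 */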
static void iwl_set_pwr_vmain(struct iwl_trans *trans)

#define PCI_CFG_RETRY_TIMEOUT           0x041
#define PCI_CFG_LINK_CTRL_VAL_L0S_EN    0x01
#define PCI_CFG_LINK_CTRL_VAL_L1_EN     0x02

static u16 iwl_pciexp_link_ctrl(struct iwl_trans *trans)

static void iwl_apm_config(struct iwl_trans *trans)

        u16 lctl = iwl_pciexp_link_ctrl(trans);
            PCI_CFG_LINK_CTRL_VAL_L1_EN) {
                           "L1 Enabled; Disabling L0S\n");
                           "L1 Disabled; Enabling L0S\n");

static int iwl_apm_init(struct iwl_trans *trans)

        iwl_apm_config(trans);
        if (trans->cfg->base_params->pll_cfg_val)
                            trans->cfg->base_params->pll_cfg_val);

static int iwl_apm_stop_master(struct iwl_trans *trans)

                IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

static void iwl_apm_stop(struct iwl_trans *trans)

        iwl_apm_stop_master(trans);

static int iwl_nic_init(struct iwl_trans *trans)

        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
        iwl_set_pwr_vmain(trans);
        iwl_op_mode_nic_config(trans->op_mode);
        if (iwl_tx_init(trans))
        if (trans->cfg->base_params->shadow_reg_enable) {

#define HW_READY_TIMEOUT (50)

static int iwl_set_hw_ready(struct iwl_trans *trans)

        IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");

static int iwl_prepare_card_hw(struct iwl_trans *trans)

        ret = iwl_set_hw_ready(trans);
                ret = iwl_set_hw_ready(trans);
        } while (t < 150000);
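
/*
 * Firmware download: each uCode section is copied into a bounce buffer
 * one PAGE_SIZE chunk at a time and handed to the FH DMA engine through
 * iwl_load_firmware_chunk(); iwl_load_given_ucode() walks all sections
 * of the fw_img and skips those with no data.
 */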
                   (iwl_get_dma_hi_addr(phy_addr)
                IWL_ERR(trans, "Failed to load firmware chunk!\n");

static int iwl_load_section(struct iwl_trans *trans, u8 section_num,
                            const struct fw_desc *section)

        IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
        for (offset = 0; offset < section->len; offset += PAGE_SIZE) {
                memcpy(v_addr, (u8 *)section->data + offset, copy_size);
                ret = iwl_load_firmware_chunk(trans, section->offset + offset,
                        "Could not load the [%d] uCode section\n",

static int iwl_load_given_ucode(struct iwl_trans *trans,
                                const struct fw_img *image)

                if (!image->sec[i].data)
                ret = iwl_load_section(trans, i, &image->sec[i]);

static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
                                   const struct fw_img *fw)

        if (iwl_prepare_card_hw(trans)) {
                IWL_WARN(trans, "Exit HW not ready\n");
        iwl_enable_rfkill_int(trans);
        hw_rfkill = iwl_is_rfkill_set(trans);
        iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
        iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
        ret = iwl_nic_init(trans);
                IWL_ERR(trans, "Unable to init nic\n");
        iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
        iwl_enable_interrupts(trans);
        return iwl_load_given_ucode(trans, fw);
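
/*
 * After the firmware reports alive, iwl_tx_start() programs the
 * scheduler, enables the command queue and turns on TX scheduling for
 * FIFOs 0-7 (IWL_MASK(0, 7)).
 */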
static void iwl_tx_start(struct iwl_trans *trans)

                       trans->cfg->base_params->num_of_queues);
        iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
        iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7));

static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans)

        iwl_tx_start(trans);

static int iwl_trans_tx_stop(struct iwl_trans *trans)

        int ch, txq_id, ret;
        unsigned long flags;

        iwl_trans_txq_set_sched(trans, 0);
                        "Failing on timeout while stopping DMA channel %d [0x%08x]\n",
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
        if (!trans_pcie->txq) {
                        "Stopping tx queues that aren't allocated...\n");
        for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
                iwl_tx_queue_unmap(trans, txq_id);

static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)

        unsigned long flags;

        iwl_disable_interrupts(trans);
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
        iwl_trans_tx_stop(trans);
        iwl_trans_rx_stop(trans);
        iwl_apm_stop(trans);
        iwl_disable_interrupts(trans);
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
        iwl_enable_rfkill_int(trans);

static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)

        iwl_disable_interrupts(trans);
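
/*
 * The fragments below appear to come from the data TX path
 * (iwl_trans_pcie_tx): the MAC header (firstlen, padded to a 4-byte
 * boundary) and the payload (secondlen) are mapped separately, the TFD
 * is traced, and the queue is stopped when it fills up while waiting
 * for the write pointer to be updated.
 */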
        u16 len, firstlen, secondlen;
        u8 wait_write_ptr = 0;

        txq = &trans_pcie->txq[txq_id];
        spin_lock(&txq->lock);
#ifdef CONFIG_IWLWIFI_DEBUG
                       "Q: %d WiFi Seq %d tfdNum %d",
        dev_cmd->hdr.sequence =
        firstlen = (len + 3) & ~3;
        if (firstlen != len)
                                 &dev_cmd->hdr, firstlen,
        if (!ieee80211_has_morefrags(fc)) {
        if (secondlen > 0) {
        tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
        trace_iwlwifi_dev_tx(trans->dev,
                             &txq->tfds[txq->q.write_ptr],
                             &dev_cmd->hdr, firstlen,
                             skb->data + hdr_len, secondlen);
        if (wait_write_ptr) {
                iwl_stop_queue(trans, txq);
        spin_unlock(&txq->lock);
        spin_unlock(&txq->lock);

static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)

                IWL_ERR(trans, "Error allocating IRQ %d\n",
        err = iwl_prepare_card_hw(trans);
                IWL_ERR(trans, "Error while preparing HW: %d\n", err);
        iwl_apm_init(trans);
        iwl_enable_rfkill_int(trans);
        hw_rfkill = iwl_is_rfkill_set(trans);
        iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
                                   bool op_mode_leaving)

        unsigned long flags;

        iwl_disable_interrupts(trans);
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
        iwl_apm_stop(trans);
        iwl_disable_interrupts(trans);
        spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
        if (!op_mode_leaving) {
                iwl_enable_rfkill_int(trans);
                hw_rfkill = iwl_is_rfkill_set(trans);
                iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
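
/*
 * Reclaim: the op_mode passes the scheduler sequence number (ssn); it
 * is masked with q.n_bd - 1 to get the ring index up to which TFDs can
 * be freed, and the queue is woken again once space is available.
 */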
static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
                                   struct sk_buff_head *skbs)

        int tfd_num = ssn & (txq->q.n_bd - 1);

        spin_lock(&txq->lock);
        if (txq->q.read_ptr != tfd_num) {
                           txq_id, txq->q.read_ptr, tfd_num, ssn);
                iwl_wake_queue(trans, txq);
        spin_unlock(&txq->lock);

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)

static void iwl_trans_pcie_configure(struct iwl_trans *trans,
                                     const struct iwl_trans_config *trans_cfg)

        iwl_trans_pcie_tx_free(trans);
        iwl_trans_pcie_rx_free(trans);

static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)

#ifdef CONFIG_PM_SLEEP
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)

static int iwl_trans_pcie_resume(struct iwl_trans *trans)

        iwl_enable_rfkill_int(trans);
        hw_rfkill = iwl_is_rfkill_set(trans);
        iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
        iwl_enable_interrupts(trans);

#define IWL_FLUSH_WAIT_MS       2000

static int iwl_trans_pcie_wait_tx_queue_empty(struct iwl_trans *trans)

        for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
                txq = &trans_pcie->txq[cnt];
                IWL_ERR(trans, "fail to flush all tx fifo queues\n");

static const char *get_fh_string(int cmd)

#define IWL_CMD(x) case x: return #x

        static const u32 fh_tbl[] = {
#ifdef CONFIG_IWLWIFI_DEBUGFS
                pos += scnprintf(*buf + pos, bufsz - pos,
                                 "FH register values:\n");
                pos += scnprintf(*buf + pos, bufsz - pos,
                                 get_fh_string(fh_tbl[i]),
        IWL_ERR(trans, "FH register values:\n");
                IWL_ERR(trans, " %34s: 0X%08x\n",
                        get_fh_string(fh_tbl[i]),

static const char *get_csr_string(int cmd)

#define IWL_CMD(x) case x: return #x

        static const u32 csr_tbl[] = {

        IWL_ERR(trans, "CSR values:\n");
        IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
                "CSR_INT_PERIODIC_REG)\n");
                IWL_ERR(trans, " %25s: 0X%08x\n",
                        get_csr_string(csr_tbl[i]),
                        iwl_read32(trans, csr_tbl[i]));
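
/*
 * The DEBUGFS_*_FILE_OPS() macros below stamp out the read/write
 * handler prototypes and a matching file_operations structure for each
 * debugfs entry, so every file only needs its iwl_dbgfs_<name>_read()
 * and/or _write() implementation.
 */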
#ifdef CONFIG_IWLWIFI_DEBUGFS

#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
        if (!debugfs_create_file(#name, mode, parent, trans, \
                                 &iwl_dbgfs_##name##_ops)) \

#define DEBUGFS_READ_FUNC(name) \
static ssize_t iwl_dbgfs_##name##_read(struct file *file, \
                                       char __user *user_buf, \
                                       size_t count, loff_t *ppos);

#define DEBUGFS_WRITE_FUNC(name) \
static ssize_t iwl_dbgfs_##name##_write(struct file *file, \
                                        const char __user *user_buf, \
                                        size_t count, loff_t *ppos);

#define DEBUGFS_READ_FILE_OPS(name) \
        DEBUGFS_READ_FUNC(name); \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
        .read = iwl_dbgfs_##name##_read, \
        .open = simple_open, \
        .llseek = generic_file_llseek, \

#define DEBUGFS_WRITE_FILE_OPS(name) \
        DEBUGFS_WRITE_FUNC(name); \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
        .write = iwl_dbgfs_##name##_write, \
        .open = simple_open, \
        .llseek = generic_file_llseek, \

#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
        DEBUGFS_READ_FUNC(name); \
        DEBUGFS_WRITE_FUNC(name); \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
        .write = iwl_dbgfs_##name##_write, \
        .read = iwl_dbgfs_##name##_read, \
        .open = simple_open, \
        .llseek = generic_file_llseek, \

static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
                                       char __user *user_buf,
                                       size_t count, loff_t *ppos)

        bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;
        if (!trans_pcie->txq)
        for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
                txq = &trans_pcie->txq[cnt];
                pos += scnprintf(buf + pos, bufsz - pos,
                                 "hwq %.2d: read=%u write=%u use=%d stop=%d\n",

static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
                                       char __user *user_buf,
                                       size_t count, loff_t *ppos)

        const size_t bufsz = sizeof(buf);

        pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
        pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
        pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
        pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
                pos += scnprintf(buf + pos, bufsz - pos,
                                 "closed_rb_num: Not Allocated\n");

static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
                                        char __user *user_buf,
                                        size_t count, loff_t *ppos)

        int bufsz = 24 * 64;

        pos += scnprintf(buf + pos, bufsz - pos,
                         "Interrupt Statistics Report:\n");
        pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
        pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
        if (isr_stats->sw || isr_stats->hw) {
                pos += scnprintf(buf + pos, bufsz - pos,
                                 "\tLast Restarting Code: 0x%X\n",
#ifdef CONFIG_IWLWIFI_DEBUG
        pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
        pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
        pos += scnprintf(buf + pos, bufsz - pos,
                         "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
        pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
        pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
        pos += scnprintf(buf + pos, bufsz - pos,
                         "Rx command responses:\t\t %u\n", isr_stats->rx);
        pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
        pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
                                         const char __user *user_buf,
                                         size_t count, loff_t *ppos)

        memset(buf, 0, sizeof(buf));
        buf_size = min(count, sizeof(buf) - 1);
        if (sscanf(buf, "%x", &reset_flag) != 1)
        if (reset_flag == 0)
                memset(isr_stats, 0, sizeof(*isr_stats));

static ssize_t iwl_dbgfs_csr_write(struct file *file,
                                   const char __user *user_buf,
                                   size_t count, loff_t *ppos)

        memset(buf, 0, sizeof(buf));
        buf_size = min(count, sizeof(buf) - 1);
        if (sscanf(buf, "%d", &csr) != 1)

static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
                                     char __user *user_buf,
                                     size_t count, loff_t *ppos)

                                            count, ppos, buf, pos);

static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)

        iwl_op_mode_nic_error(trans->op_mode);

static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
                                         struct dentry *dir)

        IWL_ERR(trans, "failed to create the trans debugfs entry\n");

static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
                                         struct dentry *dir)

static const struct iwl_trans_ops trans_ops_pcie = {
        .start_hw = iwl_trans_pcie_start_hw,
        .stop_hw = iwl_trans_pcie_stop_hw,
        .fw_alive = iwl_trans_pcie_fw_alive,
        .start_fw = iwl_trans_pcie_start_fw,
        .stop_device = iwl_trans_pcie_stop_device,

        .wowlan_suspend = iwl_trans_pcie_wowlan_suspend,

        .tx = iwl_trans_pcie_tx,
        .reclaim = iwl_trans_pcie_reclaim,

        .dbgfs_register = iwl_trans_pcie_dbgfs_register,

        .wait_tx_queue_empty = iwl_trans_pcie_wait_tx_queue_empty,

#ifdef CONFIG_PM_SLEEP
        .suspend = iwl_trans_pcie_suspend,
        .resume = iwl_trans_pcie_resume,
#endif

        .write8 = iwl_trans_pcie_write8,
        .write32 = iwl_trans_pcie_write32,
        .read32 = iwl_trans_pcie_read32,
        .configure = iwl_trans_pcie_configure,
        .set_pmi = iwl_trans_pcie_set_pmi,
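
/*
 * The remaining lines are from the probe/alloc path: the transport is
 * allocated together with its PCIe-specific private data, DMA masks
 * are set (36-bit with a fallback), BAR0 is mapped, MSI is enabled,
 * and on hardware where enabling MSI leaves INTX disabled the
 * PCI_COMMAND_INTX_DISABLE bit is cleared again as a workaround.
 */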
        trans = kzalloc(sizeof(struct iwl_trans) +

        trans->ops = &trans_ops_pcie;

        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
                err = pci_set_consistent_dma_mask(pdev,
                        "No suitable DMA available.\n");
                goto out_pci_disable_device;
                        "pci_request_regions failed\n");
                goto out_pci_disable_device;
                dev_printk(KERN_ERR, &pdev->dev, "pci_ioremap_bar failed\n");
                goto out_pci_release_regions;
                "pci_resource_len = 0x%08llx\n",
                "pci_resource_base = %p\n", trans_pcie->hw_base);
                "HW Revision ID = 0x%X\n", pdev->revision);
        err = pci_enable_msi(pdev);
                        "pci_enable_msi failed(0X%x)\n", err);
        trans_pcie->irq = pdev->irq;
        pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
                pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
                pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
                 "iwl_cmd_pool:%s", dev_name(trans->dev));
                goto out_pci_disable_msi;

out_pci_disable_msi:
out_pci_release_regions:
out_pci_disable_device: