34 #include <linux/errno.h>
36 #include <linux/pci.h>
37 #include <linux/slab.h>
/*
 * EQ context fields programmed into the hardware event-queue context.
 * These values occupy the top bits of a 32-bit big-endian word, so the
 * << 28 status codes are given unsigned suffixes: (10 << 28) would set
 * bit 31 of a signed int, which is undefined behavior (C11 6.5.7).
 */
#define MTHCA_EQ_STATUS_OK          ( 0U << 28)
#define MTHCA_EQ_STATUS_OVERFLOW    ( 9U << 28)
#define MTHCA_EQ_STATUS_WRITE_FAIL  (10U << 28)
#define MTHCA_EQ_OWNER_SW           ( 0 << 24)
#define MTHCA_EQ_OWNER_HW           ( 1 << 24)
#define MTHCA_EQ_FLAG_TR            ( 1 << 18)
#define MTHCA_EQ_FLAG_OI            ( 1 << 17)
#define MTHCA_EQ_STATE_ARMED        ( 1 << 8)
#define MTHCA_EQ_STATE_FIRED        ( 2 << 8)
#define MTHCA_EQ_STATE_ALWAYS_ARMED ( 3 << 8)
#define MTHCA_EQ_STATE_ARBEL        ( 8 << 8)

/*
 * Event-type bitmasks passed to the MAP_EQ firmware command: which
 * MTHCA_EVENT_TYPE_* events each of the three EQs (async, SRQ-related,
 * command completion) is subscribed to.  Event types are 6-bit codes,
 * hence the 1ULL shifts.
 */
#define MTHCA_ASYNC_EVENT_MASK ((1ULL << MTHCA_EVENT_TYPE_PATH_MIG)           | \
				(1ULL << MTHCA_EVENT_TYPE_COMM_EST)           | \
				(1ULL << MTHCA_EVENT_TYPE_SQ_DRAINED)         | \
				(1ULL << MTHCA_EVENT_TYPE_CQ_ERROR)           | \
				(1ULL << MTHCA_EVENT_TYPE_WQ_CATAS_ERROR)     | \
				(1ULL << MTHCA_EVENT_TYPE_EEC_CATAS_ERROR)    | \
				(1ULL << MTHCA_EVENT_TYPE_PATH_MIG_FAILED)    | \
				(1ULL << MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
				(1ULL << MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR)    | \
				(1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR)  | \
				(1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE)        | \
				(1ULL << MTHCA_EVENT_TYPE_ECC_DETECT))
#define MTHCA_SRQ_EVENT_MASK   ((1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
				(1ULL << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
				(1ULL << MTHCA_EVENT_TYPE_SRQ_LIMIT))
#define MTHCA_CMD_EVENT_MASK    (1ULL << MTHCA_EVENT_TYPE_CMD)

/*
 * EQ doorbell command codes (Tavor); the command lives in bits 31:24 of
 * the doorbell word, with the EQN/CI in the low bits.
 */
#define MTHCA_EQ_DB_INC_CI     (1 << 24)
#define MTHCA_EQ_DB_REQ_NOT    (2 << 24)
#define MTHCA_EQ_DB_DISARM_CQ  (3 << 24)
#define MTHCA_EQ_DB_SET_CI     (4 << 24)
#define MTHCA_EQ_DB_ALWAYS_ARM (5 << 24)

/*
 * Ownership bit in each EQ entry: bit 7 of the owner byte is set while
 * hardware owns the EQE and cleared once software may consume it.
 */
#define MTHCA_EQ_ENTRY_OWNER_SW (0 << 7)
#define MTHCA_EQ_ENTRY_OWNER_HW (1 << 7)
201 if (mthca_is_memfree(dev))
202 arbel_set_eq_ci(dev, eq, ci);
204 tavor_set_eq_ci(dev, eq, ci);
207 static inline void tavor_eq_req_not(
struct mthca_dev *dev,
int eqn)
214 static inline void arbel_eq_req_not(
struct mthca_dev *dev,
u32 eqn_mask)
219 static inline void disarm_cq(
struct mthca_dev *dev,
int eqn,
int cqn)
221 if (!mthca_is_memfree(dev)) {
241 static inline void set_eqe_hw(
struct mthca_eqe *eqe)
250 mthca_dbg(dev,
"Port change to %s for port %d\n",
251 active ?
"active" :
"down", port);
267 while ((eqe = next_eqe_sw(eq))) {
277 disarm_cq(dev, eq->
eqn, disarm_cqn);
329 eqe->
event.cmd.status,
341 eqe->
event.cq_err.syndrome == 1 ?
342 "overrun" :
"access violation",
357 mthca_warn(dev,
"Unhandled event %02x(%02x) on EQ %d\n",
391 static irqreturn_t mthca_tavor_interrupt(
int irq,
void *dev_ptr)
408 if (ecr & dev->
eq_table.eq[i].eqn_mask) {
409 if (mthca_eq_int(dev, &dev->
eq_table.eq[i]))
410 tavor_set_eq_ci(dev, &dev->
eq_table.eq[i],
412 tavor_eq_req_not(dev, dev->
eq_table.eq[i].eqn);
418 static irqreturn_t mthca_tavor_msi_x_interrupt(
int irq,
void *eq_ptr)
423 mthca_eq_int(dev, eq);
425 tavor_eq_req_not(dev, eq->
eqn);
431 static irqreturn_t mthca_arbel_interrupt(
int irq,
void *dev_ptr)
441 if (mthca_eq_int(dev, &dev->
eq_table.eq[i])) {
443 arbel_set_eq_ci(dev, &dev->
eq_table.eq[i],
447 arbel_eq_req_not(dev, dev->
eq_table.arm_mask);
452 static irqreturn_t mthca_arbel_msi_x_interrupt(
int irq,
void *eq_ptr)
457 mthca_eq_int(dev, eq);
459 arbel_eq_req_not(dev, eq->
eqn_mask);
465 static int mthca_create_eq(
struct mthca_dev *dev,
487 for (i = 0; i < npages; ++
i)
497 eq_context = mailbox->
buf;
499 for (i = 0; i < npages; ++
i) {
503 goto err_out_free_pages;
511 for (i = 0; i < eq->
nent; ++
i)
512 set_eqe_hw(get_eqe(eq, i));
516 goto err_out_free_pages;
525 goto err_out_free_eq;
527 memset(eq_context, 0,
sizeof *eq_context);
532 if (mthca_is_memfree(dev))
536 if (mthca_is_memfree(dev)) {
547 mthca_warn(dev,
"SW2HW_EQ returned %d\n", err);
548 goto err_out_free_mr;
559 mthca_dbg(dev,
"Allocated EQ %d with %d entries\n",
571 for (i = 0; i < npages; ++
i)
588 static void mthca_free_eq(
struct mthca_dev *dev,
603 mthca_warn(dev,
"HW2SW_EQ returned %d\n", err);
613 if ((i + 1) % 4 == 0)
619 for (i = 0; i < npages; ++
i)
628 static void mthca_free_irqs(
struct mthca_dev *dev)
642 static int mthca_map_reg(
struct mthca_dev *dev,
648 *map =
ioremap(base + offset, size);
655 static int mthca_map_eq_regs(
struct mthca_dev *dev)
657 if (mthca_is_memfree(dev)) {
668 mthca_err(dev,
"Couldn't map interrupt clear register, "
678 dev->
fw.
arbel.eq_arm_base) + 4, 4,
680 mthca_err(dev,
"Couldn't map EQ arm register, aborting.\n");
689 mthca_err(dev,
"Couldn't map EQ CI register, aborting.\n");
697 mthca_err(dev,
"Couldn't map interrupt clear register, "
705 mthca_err(dev,
"Couldn't map ecr register, "
716 static void mthca_unmap_eq_regs(
struct mthca_dev *dev)
718 if (mthca_is_memfree(dev)) {
744 if (pci_dma_mapping_error(dev->
pdev, dev->
eq_table.icm_dma)) {
776 dev->
limits.reserved_eqs);
780 err = mthca_map_eq_regs(dev);
790 (dev->
eq_table.inta_pin < 32 ? 4 : 0);
816 static const char *eq_name[] = {
825 "%s@pci:%s", eq_name[i],
826 pci_name(dev->
pdev));
828 mthca_is_memfree(dev) ?
829 mthca_arbel_msi_x_interrupt :
830 mthca_tavor_msi_x_interrupt,
841 mthca_is_memfree(dev) ?
842 mthca_arbel_interrupt :
843 mthca_tavor_interrupt,
853 mthca_warn(dev,
"MAP_EQ for async EQ %d failed (%d)\n",
859 mthca_warn(dev,
"MAP_EQ for cmd EQ %d failed (%d)\n",
863 if (mthca_is_memfree(dev))
864 arbel_eq_req_not(dev, dev->
eq_table.eq[i].eqn_mask);
866 tavor_eq_req_not(dev, dev->
eq_table.eq[i].eqn);
871 mthca_free_irqs(dev);
881 mthca_unmap_eq_regs(dev);
892 mthca_free_irqs(dev);
900 mthca_free_eq(dev, &dev->
eq_table.eq[i]);
902 mthca_unmap_eq_regs(dev);