36 #include <linux/slab.h>
37 #include <linux/export.h>
57 #define MLX4_EQ_STATUS_OK ( 0 << 28)
58 #define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28)
59 #define MLX4_EQ_OWNER_SW ( 0 << 24)
60 #define MLX4_EQ_OWNER_HW ( 1 << 24)
61 #define MLX4_EQ_FLAG_EC ( 1 << 18)
62 #define MLX4_EQ_FLAG_OI ( 1 << 17)
63 #define MLX4_EQ_STATE_ARMED ( 9 << 8)
64 #define MLX4_EQ_STATE_FIRED (10 << 8)
65 #define MLX4_EQ_STATE_ALWAYS_ARMED (11 << 8)
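/*
 * These values appear to encode fields of the EQ context that is handed to
 * the HCA with the SW2HW_EQ command: status and ownership in the top byte,
 * the EC/OI flags, and the arming state. mlx4_create_eq() below initializes
 * the context with MLX4_EQ_STATUS_OK | MLX4_EQ_STATE_ARMED.
 */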
67 #define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG) | \
68 (1ull << MLX4_EVENT_TYPE_COMM_EST) | \
69 (1ull << MLX4_EVENT_TYPE_SQ_DRAINED) | \
70 (1ull << MLX4_EVENT_TYPE_CQ_ERROR) | \
71 (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR) | \
72 (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR) | \
73 (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED) | \
74 (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
75 (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR) | \
76 (1ull << MLX4_EVENT_TYPE_PORT_CHANGE) | \
77 (1ull << MLX4_EVENT_TYPE_ECC_DETECT) | \
78 (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \
79 (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
80 (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \
81 (1ull << MLX4_EVENT_TYPE_CMD) | \
82 (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL) | \
83 (1ull << MLX4_EVENT_TYPE_FLR_EVENT) | \
84 (1ull << MLX4_EVENT_TYPE_FATAL_WARNING))
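/*
 * Mask of asynchronous event types the driver subscribes to; it is passed
 * to the MAP_EQ firmware command when the async EQ is set up (see the
 * mlx4_MAP_EQ() call during EQ table initialization below).
 */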
95 static void eq_set_ci(struct mlx4_eq *eq, int req_not)
120 return (!!(eqe->owner & 0x80) ^
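/*
 * eq_set_ci() writes the 24-bit consumer index to the EQ doorbell, with
 * bit 31 set when req_not asks the HCA for another event notification.
 * The return above is the usual owner-bit test: an EQE is valid when its
 * owner bit, XORed with the wrap state of the consumer index, shows that
 * a new entry has been posted.
 */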
139 for (eqe = next_slave_event_eqe(slave_eq); eqe;
140 eqe = next_slave_event_eqe(slave_eq)) {
146 if (i != dev->caps.function &&
151 "for slave %d\n", i);
155 mlx4_warn(dev, "Failed to generate event "
156 "for slave %d\n", slave);
172 if ((!!(s_eqe->owner & 0x80)) ^
174 mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. "
175 "No free EQE on slave events queue\n", slave);
176 spin_unlock_irqrestore(&slave_eq->event_lock, flags);
188 &priv->mfunc.master.slave_event_work);
189 spin_unlock_irqrestore(&slave_eq->event_lock, flags);
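/*
 * slave_event() copies the EQE into the per-slave software event queue
 * under event_lock (warning if no free entry is left) and queues
 * slave_event_work; that work handler walks the queue with
 * next_slave_event_eqe() and delivers each entry to the slave through the
 * GEN_EQE firmware command, as in the loop at lines 139-156 above.
 */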
192 static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
197 &priv->mfunc.master.slave_state[slave];
204 slave_event(dev, slave, eqe);
217 memset(&eqe, 0, sizeof eqe);
234 memset(&eqe, 0, sizeof eqe);
245 u8 port_subtype_change)
252 memset(&eqe, 0, sizeof eqe);
255 eqe.subtype = port_subtype_change;
258 mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", __func__,
259 port_subtype_change, slave, port);
269 pr_err("%s: Error: asking for slave:%d, port:%d\n",
270 __func__, slave, port);
277 static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port,
284 pr_err("%s: Error: asking for slave:%d, port:%d\n",
285 __func__, slave, port);
293 static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event)
324 pr_err("%s: Error: asking for slave:%d, port:%d\n",
325 __func__, slave, port);
329 ctx = &priv->mfunc.master.slave_state[slave];
335 mlx4_set_slave_port_state(dev, slave, port,
340 mlx4_set_slave_port_state(dev, slave, port,
343 mlx4_set_slave_port_state(dev, slave, port,
350 mlx4_set_slave_port_state(dev, slave, port,
355 mlx4_set_slave_port_state(dev, slave, port,
361 pr_err("%s: BUG!!! UNKNOWN state: "
362 "slave:%d, port:%d\n", __func__, slave, port);
368 spin_unlock_irqrestore(&ctx->lock, flags);
378 memset(&eqe, 0, sizeof eqe);
405 mlx4_dbg(dev, "mlx4_handle_slave_flr\n");
410 mlx4_dbg(dev, "mlx4_handle_slave_flr: "
411 "clean slave: %d\n", i);
415 spin_lock(&priv->mfunc.master.slave_state_lock);
418 spin_unlock(&priv->mfunc.master.slave_state_lock);
424 "FLR done (slave:%d)\n", i);
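/*
 * The FLR work handler: for each slave whose last command was an FLR the
 * master cleans up the slave's resources, returns its state to running
 * under slave_state_lock, and then notifies firmware; the warning above is
 * printed only if that notification fails.
 */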
440 u8 update_slave_state;
444 while ((eqe = next_eqe_sw(eq))) {
466 if (mlx4_is_master(dev)) {
472 if (ret && ret != -ENOENT) {
473 mlx4_dbg(dev, "QP event %02x(%02x) on "
474 "EQ %d at index %u: could "
475 "not get slave id (%d)\n",
481 if (!ret && slave != dev->caps.function) {
482 mlx4_slave_event(dev, slave, eqe);
488 0xffffff, eqe->type);
492 mlx4_warn(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
495 if (mlx4_is_master(dev)) {
502 if (ret && ret != -ENOENT) {
504 "on EQ %d at index %u: could"
505 " not get slave id (%d)\n",
510 mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x,"
511 " event: %02x(%02x)\n", __func__,
516 if (!ret && slave != dev->caps.function) {
518 "%02x(%02x) to slave:%d\n",
521 mlx4_slave_event(dev, slave, eqe);
526 0xffffff, eqe->type);
542 if (!mlx4_is_master(dev))
546 if (i == mlx4_master_func_num(dev))
548 mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN"
549 " to slave: %d, port:%d\n",
551 mlx4_slave_event(dev, i, eqe);
558 if (i == mlx4_master_func_num(dev))
560 mlx4_slave_event(dev, i, eqe);
569 if (!mlx4_is_master(dev))
573 if (i == mlx4_master_func_num(dev))
575 mlx4_slave_event(dev, i, eqe);
588 "overrun" : "access violation",
590 if (mlx4_is_master(dev)) {
595 if (ret && ret != -ENOENT) {
596 mlx4_dbg(dev, "CQ event %02x(%02x) on "
597 "EQ %d at index %u: could "
598 "not get slave id (%d)\n",
604 if (!ret && slave != dev->caps.function) {
605 mlx4_slave_event(dev, slave, eqe);
620 if (!mlx4_is_master(dev)) {
621 mlx4_warn(dev, "Received comm channel event "
622 "for non master device\n");
629 &priv->mfunc.master.comm_work);
634 if (!mlx4_is_master(dev)) {
635 mlx4_warn(dev, "Non-master function received"
640 mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);
644 "Got FLR for unknown function: %d\n",
646 update_slave_state = 0;
648 update_slave_state = 1;
650 spin_lock(&priv->mfunc.master.slave_state_lock);
651 if (update_slave_state) {
652 priv->mfunc.master.slave_state[flr_slave].active = false;
654 priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
656 spin_unlock(&priv->mfunc.master.slave_state_lock);
658 &priv->mfunc.master.slave_flr_event_work);
663 if (mlx4_is_master(dev))
666 "MLX4_FATAL_WARNING_SUBTYPE_WARMING"
667 " to slave: %d\n", __func__, i);
668 if (i == dev->caps.function)
670 mlx4_slave_event(dev, i, eqe);
672 mlx4_err(dev, "Temperature Threshold was reached! "
673 "Threshold: %d celsius degrees; "
674 "Current Temperature: %d\n",
678 mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), "
679 "subtype %02x on EQ %d at index %u. owner=%x, "
680 "nent=0x%x, slave=%x, ownership=%s\n",
684 !!(eqe->owner & 0x80) ^
691 (unsigned long) eqe);
697 mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at "
698 "index %u. owner=%x, nent=0x%x, slave=%x, "
703 !!(eqe->owner & 0x80) ^
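/*
 * mlx4_eq_int() is the common EQ polling loop: it consumes entries while
 * next_eqe_sw() reports software ownership, dispatches each event type
 * (completions, QP/SRQ/CQ errors, port change, comm channel, slave FLR,
 * fatal warnings), forwarding events to slaves where appropriate, and
 * finally rearms the EQ by writing the consumer index with eq_set_ci(eq, 1).
 */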
730 static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
739 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
740 work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);
745 static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
750 mlx4_eq_int(dev, eq);
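/*
 * With MSI-X every EQ has its own vector, so this handler services only
 * the EQ passed in eq_ptr; the legacy INTx handler above must instead
 * poll all completion EQs plus the async EQ.
 */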
764 priv->mfunc.master.slave_state[slave].event_eq;
766 u32 eqn = in_modifier & 0x1FF;
771 if (slave == dev->caps.function)
772 err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
777 if (in_param & (1LL << i))
778 event_eq[i].eqn = in_modifier >> 31 ? -1 : eqn;
786 return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
802 return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num,
807 static int mlx4_num_eq_uar(struct mlx4_dev *dev)
814 return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
815 dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1;
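/*
 * One UAR page carries the doorbells of four EQs, hence the division by
 * four. As a purely illustrative example, with 4 completion vectors,
 * 24 reserved EQs and an empty pool:
 * (4 + 1 + 24 + 0)/4 - 24/4 + 1 = 7 - 6 + 1 = 2 UAR pages.
 */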
823 index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;
825 if (!priv->eq_table.uar_map[index]) {
830 if (!priv->eq_table.uar_map[index]) {
831 mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
840 static void mlx4_unmap_uar(struct mlx4_dev *dev)
845 for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
852 static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
874 for (i = 0; i < npages; ++i)
884 eq_context = mailbox->buf;
886 for (i = 0; i < npages; ++i) {
890 goto err_out_free_pages;
900 goto err_out_free_pages;
902 eq->doorbell = mlx4_get_eq_uar(dev, eq);
905 goto err_out_free_eq;
910 goto err_out_free_eq;
914 goto err_out_free_mtt;
916 memset(eq_context, 0, sizeof *eq_context);
927 err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
929 mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
930 goto err_out_free_mtt;
947 for (i = 0; i < npages; ++i)
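/*
 * mlx4_create_eq() allocates DMA-coherent pages for the EQE ring, maps the
 * EQ doorbell through mlx4_get_eq_uar(), writes the page addresses into an
 * MTT, fills the EQ context (zeroed by the memset above) and hands the
 * queue to hardware with SW2HW_EQ; the err_out_free_* labels unwind these
 * steps in reverse, e.g. the loop at line 947 frees the pages again.
 */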
963 static void mlx4_free_eq(struct mlx4_dev *dev,
976 err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
978 mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);
981 mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
986 if ((i + 1) % 4 == 0)
992 for (i = 0; i < npages; ++i)
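/*
 * Teardown mirrors creation: mlx4_free_eq() reclaims the queue from
 * hardware with HW2SW_EQ (warning on failure), optionally dumps the
 * returned EQ context for debugging, and then releases the MTT and the
 * EQE pages in the loop above.
 */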
1002 static void mlx4_free_irqs(struct mlx4_dev *dev)
1011 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
1012 if (eq_table->eq[i].have_irq) {
1014 eq_table->eq[i].have_irq = 0;
1017 for (i = 0; i < dev->caps.comp_pool; i++) {
1022 if (priv->msix_ctl.pool_bm & 1ULL << i) {
1024 vec = dev->caps.num_comp_vectors + 1 + i;
1034 static int mlx4_map_clr_int(struct mlx4_dev *dev)
1041 mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
1048 static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
1078 priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
1087 dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
1091 for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
1094 if (!mlx4_is_slave(dev)) {
1095 err = mlx4_map_clr_int(dev);
1097 goto err_out_bitmap;
1102 (priv->eq_table.inta_pin < 32 ? 4 : 0);
1107 dev->caps.comp_pool),
1111 goto err_out_bitmap;
1114 for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
1115 err = mlx4_create_eq(dev, dev->caps.num_cqs -
1116 dev->caps.reserved_cqs +
1133 for (i = dev->caps.num_comp_vectors + 1;
1134 i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {
1136 err = mlx4_create_eq(dev, dev->caps.num_cqs -
1137 dev->caps.reserved_cqs +
1149 const char *eq_name;
1151 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
1152 if (i < dev->caps.num_comp_vectors) {
1156 "mlx4-comp-%d@pci:%s", i,
1157 pci_name(dev->pdev));
1162 "mlx4-async@pci:%s",
1163 pci_name(dev->pdev));
1166 eq_name = priv->eq_table.irq_names +
1169 mlx4_msi_x_interrupt, 0, eq_name,
1180 pci_name(dev->pdev));
1189 err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
1192 mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
1193 priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);
1195 for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
1196 eq_set_ci(&priv->eq_table.eq[i], 1);
1201 mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);
1204 i = dev->caps.num_comp_vectors - 1;
1208 mlx4_free_eq(dev, &priv->eq_table.eq[i]);
1211 if (!mlx4_is_slave(dev))
1212 mlx4_unmap_clr_int(dev);
1213 mlx4_free_irqs(dev);
1216 mlx4_unmap_uar(dev);
1230 mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1,
1233 mlx4_free_irqs(dev);
1235 for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
1236 mlx4_free_eq(dev, &priv->eq_table.eq[i]);
1238 if (!mlx4_is_slave(dev))
1239 mlx4_unmap_clr_int(dev);
1241 mlx4_unmap_uar(dev);
1266 for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
1271 err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
1274 mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
1285 mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
1296 int vec = 0, err = 0, i;
1299 for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
1300 if (~priv->msix_ctl.pool_bm & 1ULL << i) {
1302 vec = dev->caps.num_comp_vectors + 1 + i;
1305 MLX4_IRQNAME_SIZE, "%s", name);
1306 #ifdef CONFIG_RFS_ACCEL
1308 err = irq_cpu_rmap_add(rmap,
1311 mlx4_warn(dev, "Failed adding irq rmap\n");
1315 mlx4_msi_x_interrupt, 0,
1325 eq_set_ci(&priv->eq_table.eq[vec], 1);
1334 err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
1344 int i = vec - dev->caps.num_comp_vectors - 1;
1350 if (priv->msix_ctl.pool_bm & 1ULL << i) {
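/*
 * mlx4_assign_eq() above hands out an unused vector from the completion EQ
 * pool: pool_bm marks which pool entries are taken and the returned vector
 * is dev->caps.num_comp_vectors + 1 + i. mlx4_release_eq() inverts that
 * mapping (line 1344), frees the IRQ and clears the bitmap bit again.
 * Consumers such as the mlx4_en netdev driver use this pair to bind a
 * dedicated interrupt vector to a CQ.
 */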