Linux Kernel 3.7.1
eq.c
1 /*
2  * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
3  * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses. You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  * Redistribution and use in source and binary forms, with or
12  * without modification, are permitted provided that the following
13  * conditions are met:
14  *
15  * - Redistributions of source code must retain the above
16  * copyright notice, this list of conditions and the following
17  * disclaimer.
18  *
19  * - Redistributions in binary form must reproduce the above
20  * copyright notice, this list of conditions and the following
21  * disclaimer in the documentation and/or other materials
22  * provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 #include <linux/init.h>
35 #include <linux/interrupt.h>
36 #include <linux/slab.h>
37 #include <linux/export.h>
38 #include <linux/mm.h>
39 #include <linux/dma-mapping.h>
40 
41 #include <linux/mlx4/cmd.h>
42 #include <linux/cpu_rmap.h>
43 
44 #include "mlx4.h"
45 #include "fw.h"
46 
47 enum {
48  MLX4_IRQNAME_SIZE = 32
49 };
50 
51 enum {
52  MLX4_NUM_ASYNC_EQE = 0x100,
53  MLX4_NUM_SPARE_EQE = 0x80,
54  MLX4_EQ_ENTRY_SIZE = 0x20
55 };
56 
57 #define MLX4_EQ_STATUS_OK ( 0 << 28)
58 #define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28)
59 #define MLX4_EQ_OWNER_SW ( 0 << 24)
60 #define MLX4_EQ_OWNER_HW ( 1 << 24)
61 #define MLX4_EQ_FLAG_EC ( 1 << 18)
62 #define MLX4_EQ_FLAG_OI ( 1 << 17)
63 #define MLX4_EQ_STATE_ARMED ( 9 << 8)
64 #define MLX4_EQ_STATE_FIRED (10 << 8)
65 #define MLX4_EQ_STATE_ALWAYS_ARMED (11 << 8)
66 
67 #define MLX4_ASYNC_EVENT_MASK ((1ull << MLX4_EVENT_TYPE_PATH_MIG) | \
68  (1ull << MLX4_EVENT_TYPE_COMM_EST) | \
69  (1ull << MLX4_EVENT_TYPE_SQ_DRAINED) | \
70  (1ull << MLX4_EVENT_TYPE_CQ_ERROR) | \
71  (1ull << MLX4_EVENT_TYPE_WQ_CATAS_ERROR) | \
72  (1ull << MLX4_EVENT_TYPE_EEC_CATAS_ERROR) | \
73  (1ull << MLX4_EVENT_TYPE_PATH_MIG_FAILED) | \
74  (1ull << MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \
75  (1ull << MLX4_EVENT_TYPE_WQ_ACCESS_ERROR) | \
76  (1ull << MLX4_EVENT_TYPE_PORT_CHANGE) | \
77  (1ull << MLX4_EVENT_TYPE_ECC_DETECT) | \
78  (1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \
79  (1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
80  (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \
81  (1ull << MLX4_EVENT_TYPE_CMD) | \
82  (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL) | \
83  (1ull << MLX4_EVENT_TYPE_FLR_EVENT) | \
84  (1ull << MLX4_EVENT_TYPE_FATAL_WARNING))
85 
86 static u64 get_async_ev_mask(struct mlx4_dev *dev)
87 {
88  u64 async_ev_mask = MLX4_ASYNC_EVENT_MASK;
89  if (dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
90  async_ev_mask |= (1ull << MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT);
91 
92  return async_ev_mask;
93 }
94 
95 static void eq_set_ci(struct mlx4_eq *eq, int req_not)
96 {
97  __raw_writel((__force u32) cpu_to_be32((eq->cons_index & 0xffffff) |
98  req_not << 31),
99  eq->doorbell);
100  /* We still want ordering, just not swabbing, so add a barrier */
101  mb();
102 }
103 
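The doorbell write in eq_set_ci() packs two fields into a single 32-bit word. A minimal sketch of that layout, assuming only what the write above shows (the helper name is illustrative, not part of the driver):

static inline u32 eq_doorbell_word(u32 cons_index, int req_not)
{
	/* bits [23:0]: consumer index; bit 31: request another event notification */
	return (cons_index & 0xffffff) | ((u32)req_not << 31);
}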
104 static struct mlx4_eqe *get_eqe(struct mlx4_eq *eq, u32 entry)
105 {
106  unsigned long off = (entry & (eq->nent - 1)) * MLX4_EQ_ENTRY_SIZE;
107  return eq->page_list[off / PAGE_SIZE].buf + off % PAGE_SIZE;
108 }
109 
110 static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
111 {
112  struct mlx4_eqe *eqe = get_eqe(eq, eq->cons_index);
113  return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
114 }
115 
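next_eqe_sw() decides whether the next entry belongs to software by comparing the EQE owner bit with the wrap parity of the consumer index; the entry is software-owned only when the two match. A standalone sketch of that convention, with illustrative names and assuming a power-of-two ring size:

#include <stdbool.h>
#include <stdint.h>

static bool eqe_is_sw_owned(uint8_t owner_byte, uint32_t cons_index,
			    uint32_t nent)
{
	bool hw_parity = !!(owner_byte & 0x80); /* toggled by hardware on each wrap */
	bool sw_parity = !!(cons_index & nent); /* wrap parity of the consumer index */

	return hw_parity == sw_parity;          /* equal: the entry is ours to read */
}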
116 static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq)
117 {
118  struct mlx4_eqe *eqe =
119  &slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)];
120  return (!!(eqe->owner & 0x80) ^
121  !!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ?
122  eqe : NULL;
123 }
124 
125 void mlx4_gen_slave_eqe(struct work_struct *work)
126 {
127  struct mlx4_mfunc_master_ctx *master =
128  container_of(work, struct mlx4_mfunc_master_ctx,
129  slave_event_work);
130  struct mlx4_mfunc *mfunc =
131  container_of(master, struct mlx4_mfunc, master);
132  struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
133  struct mlx4_dev *dev = &priv->dev;
134  struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
135  struct mlx4_eqe *eqe;
136  u8 slave;
137  int i;
138 
139  for (eqe = next_slave_event_eqe(slave_eq); eqe;
140  eqe = next_slave_event_eqe(slave_eq)) {
141  slave = eqe->slave_id;
142 
143  /* All active slaves need to receive the event */
144  if (slave == ALL_SLAVES) {
145  for (i = 0; i < dev->num_slaves; i++) {
146  if (i != dev->caps.function &&
147  master->slave_state[i].active)
148  if (mlx4_GEN_EQE(dev, i, eqe))
149  mlx4_warn(dev, "Failed to "
150  "generate event "
151  "for slave %d\n", i);
152  }
153  } else {
154  if (mlx4_GEN_EQE(dev, slave, eqe))
155  mlx4_warn(dev, "Failed to generate event "
156  "for slave %d\n", slave);
157  }
158  ++slave_eq->cons;
159  }
160 }
161 
162 
163 static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
164 {
165  struct mlx4_priv *priv = mlx4_priv(dev);
166  struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
167  struct mlx4_eqe *s_eqe;
168  unsigned long flags;
169 
170  spin_lock_irqsave(&slave_eq->event_lock, flags);
171  s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
172  if ((!!(s_eqe->owner & 0x80)) ^
173  (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
174  mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. "
175  "No free EQE on slave events queue\n", slave);
176  spin_unlock_irqrestore(&slave_eq->event_lock, flags);
177  return;
178  }
179 
180  memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
181  s_eqe->slave_id = slave;
182  /* ensure all information is written before setting the ownership bit */
183  wmb();
184  s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
185  ++slave_eq->prod;
186 
187  queue_work(priv->mfunc.master.comm_wq,
188  &priv->mfunc.master.slave_event_work);
189  spin_unlock_irqrestore(&slave_eq->event_lock, flags);
190 }
191 
192 static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
193  struct mlx4_eqe *eqe)
194 {
195  struct mlx4_priv *priv = mlx4_priv(dev);
196  struct mlx4_slave_state *s_slave =
197  &priv->mfunc.master.slave_state[slave];
198 
199  if (!s_slave->active) {
200  /*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/
201  return;
202  }
203 
204  slave_event(dev, slave, eqe);
205 }
206 
207 int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
208 {
209  struct mlx4_eqe eqe;
210 
211  struct mlx4_priv *priv = mlx4_priv(dev);
212  struct mlx4_slave_state *s_slave = &priv->mfunc.master.slave_state[slave];
213 
214  if (!s_slave->active)
215  return 0;
216 
217  memset(&eqe, 0, sizeof eqe);
218 
219  eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
220  eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE;
221  eqe.event.port_mgmt_change.port = port;
222 
223  return mlx4_GEN_EQE(dev, slave, &eqe);
224 }
225 EXPORT_SYMBOL(mlx4_gen_pkey_eqe);
226 
227 int mlx4_gen_guid_change_eqe(struct mlx4_dev *dev, int slave, u8 port)
228 {
229  struct mlx4_eqe eqe;
230 
231  /* don't send if we don't have that slave */
232  if (dev->num_vfs < slave)
233  return 0;
234  memset(&eqe, 0, sizeof eqe);
235 
236  eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
237  eqe.subtype = MLX4_DEV_PMC_SUBTYPE_GUID_INFO;
238  eqe.event.port_mgmt_change.port = port;
239 
240  return mlx4_GEN_EQE(dev, slave, &eqe);
241 }
242 EXPORT_SYMBOL(mlx4_gen_guid_change_eqe);
243 
244 int mlx4_gen_port_state_change_eqe(struct mlx4_dev *dev, int slave, u8 port,
245  u8 port_subtype_change)
246 {
247  struct mlx4_eqe eqe;
248 
249  /* don't send if we don't have that slave */
250  if (dev->num_vfs < slave)
251  return 0;
252  memset(&eqe, 0, sizeof eqe);
253 
254  eqe.type = MLX4_EVENT_TYPE_PORT_CHANGE;
255  eqe.subtype = port_subtype_change;
256  eqe.event.port_change.port = cpu_to_be32(port << 28);
257 
258  mlx4_dbg(dev, "%s: sending: %d to slave: %d on port: %d\n", __func__,
259  port_subtype_change, slave, port);
260  return mlx4_GEN_EQE(dev, slave, &eqe);
261 }
262 EXPORT_SYMBOL(mlx4_gen_port_state_change_eqe);
263 
264 enum slave_port_state mlx4_get_slave_port_state(struct mlx4_dev *dev, int slave, u8 port)
265 {
266  struct mlx4_priv *priv = mlx4_priv(dev);
267  struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
268  if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS) {
269  pr_err("%s: Error: asking for slave:%d, port:%d\n",
270  __func__, slave, port);
271  return SLAVE_PORT_DOWN;
272  }
273  return s_state[slave].port_state[port];
274 }
275 EXPORT_SYMBOL(mlx4_get_slave_port_state);
276 
277 static int mlx4_set_slave_port_state(struct mlx4_dev *dev, int slave, u8 port,
278  enum slave_port_state state)
279 {
280  struct mlx4_priv *priv = mlx4_priv(dev);
281  struct mlx4_slave_state *s_state = priv->mfunc.master.slave_state;
282 
283  if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) {
284  pr_err("%s: Error: asking for slave:%d, port:%d\n",
285  __func__, slave, port);
286  return -1;
287  }
288  s_state[slave].port_state[port] = state;
289 
290  return 0;
291 }
292 
293 static void set_all_slave_state(struct mlx4_dev *dev, u8 port, int event)
294 {
295  int i;
296  enum slave_port_gen_event gen_event;
297 
298  for (i = 0; i < dev->num_slaves; i++)
299  set_and_calc_slave_port_state(dev, i, port, event, &gen_event);
300 }
301 /**************************************************************************
302  The function get as input the new event to that port,
303  and according to the prev state change the slave's port state.
304  The events are:
305  MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
306  MLX4_PORT_STATE_DEV_EVENT_PORT_UP
307  MLX4_PORT_STATE_IB_EVENT_GID_VALID
308  MLX4_PORT_STATE_IB_EVENT_GID_INVALID
309 ***************************************************************************/
310 int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
311  u8 port, int event,
312  enum slave_port_gen_event *gen_event)
313 {
314  struct mlx4_priv *priv = mlx4_priv(dev);
315  struct mlx4_slave_state *ctx = NULL;
316  unsigned long flags;
317  int ret = -1;
318  enum slave_port_state cur_state =
319  mlx4_get_slave_port_state(dev, slave, port);
320 
321  *gen_event = SLAVE_PORT_GEN_EVENT_NONE;
322 
323  if (slave >= dev->num_slaves || port > MLX4_MAX_PORTS || port == 0) {
324  pr_err("%s: Error: asking for slave:%d, port:%d\n",
325  __func__, slave, port);
326  return ret;
327  }
328 
329  ctx = &priv->mfunc.master.slave_state[slave];
330  spin_lock_irqsave(&ctx->lock, flags);
331 
332  switch (cur_state) {
333  case SLAVE_PORT_DOWN:
334  if (MLX4_PORT_STATE_DEV_EVENT_PORT_UP == event)
335  mlx4_set_slave_port_state(dev, slave, port,
336  SLAVE_PENDING_UP);
337  break;
338  case SLAVE_PENDING_UP:
339  if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event)
340  mlx4_set_slave_port_state(dev, slave, port,
341  SLAVE_PORT_DOWN);
342  else if (MLX4_PORT_STATE_IB_EVENT_GID_VALID == event) {
343  mlx4_set_slave_port_state(dev, slave, port,
344  SLAVE_PORT_UP);
345  *gen_event = SLAVE_PORT_GEN_EVENT_UP;
346  }
347  break;
348  case SLAVE_PORT_UP:
349  if (MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN == event) {
350  mlx4_set_slave_port_state(dev, slave, port,
351  SLAVE_PORT_DOWN);
352  *gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
353  } else if (MLX4_PORT_STATE_IB_EVENT_GID_INVALID ==
354  event) {
355  mlx4_set_slave_port_state(dev, slave, port,
356  SLAVE_PENDING_UP);
357  *gen_event = SLAVE_PORT_GEN_EVENT_DOWN;
358  }
359  break;
360  default:
361  pr_err("%s: BUG!!! UNKNOWN state: "
362  "slave:%d, port:%d\n", __func__, slave, port);
363  goto out;
364  }
365  ret = mlx4_get_slave_port_state(dev, slave, port);
366 
367 out:
368  spin_unlock_irqrestore(&ctx->lock, flags);
369  return ret;
370 }
371 
372 EXPORT_SYMBOL(set_and_calc_slave_port_state);
373 
374 int mlx4_gen_slaves_port_mgt_ev(struct mlx4_dev *dev, u8 port, int attr)
375 {
376  struct mlx4_eqe eqe;
377 
378  memset(&eqe, 0, sizeof eqe);
379 
380  eqe.type = MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT;
381  eqe.subtype = MLX4_DEV_PMC_SUBTYPE_PORT_INFO;
382  eqe.event.port_mgmt_change.port = port;
383  eqe.event.port_mgmt_change.params.port_info.changed_attr =
384  cpu_to_be32((u32) attr);
385 
386  slave_event(dev, ALL_SLAVES, &eqe);
387  return 0;
388 }
389 EXPORT_SYMBOL(mlx4_gen_slaves_port_mgt_ev);
390 
391 void mlx4_master_handle_slave_flr(struct work_struct *work)
392 {
393  struct mlx4_mfunc_master_ctx *master =
394  container_of(work, struct mlx4_mfunc_master_ctx,
395  slave_flr_event_work);
396  struct mlx4_mfunc *mfunc =
397  container_of(master, struct mlx4_mfunc, master);
398  struct mlx4_priv *priv =
399  container_of(mfunc, struct mlx4_priv, mfunc);
400  struct mlx4_dev *dev = &priv->dev;
401  struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
402  int i;
403  int err;
404 
405  mlx4_dbg(dev, "mlx4_handle_slave_flr\n");
406 
407  for (i = 0 ; i < dev->num_slaves; i++) {
408 
409  if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
410  mlx4_dbg(dev, "mlx4_handle_slave_flr: "
411  "clean slave: %d\n", i);
412 
413  mlx4_delete_all_resources_for_slave(dev, i);
414  /* return the slave to running mode */
415  spin_lock(&priv->mfunc.master.slave_state_lock);
416  slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
417  slave_state[i].is_slave_going_down = 0;
418  spin_unlock(&priv->mfunc.master.slave_state_lock);
419  /*notify the FW:*/
420  err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
421  MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
422  if (err)
423  mlx4_warn(dev, "Failed to notify FW on "
424  "FLR done (slave:%d)\n", i);
425  }
426  }
427 }
428 
429 static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
430 {
431  struct mlx4_priv *priv = mlx4_priv(dev);
432  struct mlx4_eqe *eqe;
433  int cqn;
434  int eqes_found = 0;
435  int set_ci = 0;
436  int port;
437  int slave = 0;
438  int ret;
439  u32 flr_slave;
440  u8 update_slave_state;
441  int i;
442  enum slave_port_gen_event gen_event;
443 
444  while ((eqe = next_eqe_sw(eq))) {
445  /*
446  * Make sure we read EQ entry contents after we've
447  * checked the ownership bit.
448  */
449  rmb();
450 
451  switch (eqe->type) {
452  case MLX4_EVENT_TYPE_COMP:
453  cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;
454  mlx4_cq_completion(dev, cqn);
455  break;
456 
457  case MLX4_EVENT_TYPE_PATH_MIG:
458  case MLX4_EVENT_TYPE_COMM_EST:
459  case MLX4_EVENT_TYPE_SQ_DRAINED:
460  case MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE:
461  case MLX4_EVENT_TYPE_WQ_CATAS_ERROR:
462  case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
463  case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
464  case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
465  mlx4_dbg(dev, "event %d arrived\n", eqe->type);
466  if (mlx4_is_master(dev)) {
467  /* forward only to slave owning the QP */
468  ret = mlx4_get_slave_from_resource_id(dev,
469  RES_QP,
470  be32_to_cpu(eqe->event.qp.qpn)
471  & 0xffffff, &slave);
472  if (ret && ret != -ENOENT) {
473  mlx4_dbg(dev, "QP event %02x(%02x) on "
474  "EQ %d at index %u: could "
475  "not get slave id (%d)\n",
476  eqe->type, eqe->subtype,
477  eq->eqn, eq->cons_index, ret);
478  break;
479  }
480 
481  if (!ret && slave != dev->caps.function) {
482  mlx4_slave_event(dev, slave, eqe);
483  break;
484  }
485 
486  }
487  mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) &
488  0xffffff, eqe->type);
489  break;
490 
491  case MLX4_EVENT_TYPE_SRQ_LIMIT:
492  mlx4_warn(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
493  __func__);
494  case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
495  if (mlx4_is_master(dev)) {
496  /* forward only to slave owning the SRQ */
497  ret = mlx4_get_slave_from_resource_id(dev,
498  RES_SRQ,
499  be32_to_cpu(eqe->event.srq.srqn)
500  & 0xffffff,
501  &slave);
502  if (ret && ret != -ENOENT) {
503  mlx4_warn(dev, "SRQ event %02x(%02x) "
504  "on EQ %d at index %u: could"
505  " not get slave id (%d)\n",
506  eqe->type, eqe->subtype,
507  eq->eqn, eq->cons_index, ret);
508  break;
509  }
510  mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x,"
511  " event: %02x(%02x)\n", __func__,
512  slave,
513  be32_to_cpu(eqe->event.srq.srqn),
514  eqe->type, eqe->subtype);
515 
516  if (!ret && slave != dev->caps.function) {
517  mlx4_warn(dev, "%s: sending event "
518  "%02x(%02x) to slave:%d\n",
519  __func__, eqe->type,
520  eqe->subtype, slave);
521  mlx4_slave_event(dev, slave, eqe);
522  break;
523  }
524  }
525  mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) &
526  0xffffff, eqe->type);
527  break;
528 
529  case MLX4_EVENT_TYPE_CMD:
530  mlx4_cmd_event(dev,
531  be16_to_cpu(eqe->event.cmd.token),
532  eqe->event.cmd.status,
533  be64_to_cpu(eqe->event.cmd.out_param));
534  break;
535 
536  case MLX4_EVENT_TYPE_PORT_CHANGE:
537  port = be32_to_cpu(eqe->event.port_change.port) >> 28;
538  if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
539  mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
540  port);
541  mlx4_priv(dev)->sense.do_sense_port[port] = 1;
542  if (!mlx4_is_master(dev))
543  break;
544  for (i = 0; i < dev->num_slaves; i++) {
545  if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
546  if (i == mlx4_master_func_num(dev))
547  continue;
548  mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN"
549  " to slave: %d, port:%d\n",
550  __func__, i, port);
551  mlx4_slave_event(dev, i, eqe);
552  } else { /* IB port */
553  set_and_calc_slave_port_state(dev, i, port,
554  MLX4_PORT_STATE_DEV_EVENT_PORT_DOWN,
555  &gen_event);
556  /*we can be in pending state, then do not send port_down event*/
557  if (SLAVE_PORT_GEN_EVENT_DOWN == gen_event) {
558  if (i == mlx4_master_func_num(dev))
559  continue;
560  mlx4_slave_event(dev, i, eqe);
561  }
562  }
563  }
564  } else {
565  mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP, port);
566 
567  mlx4_priv(dev)->sense.do_sense_port[port] = 0;
568 
569  if (!mlx4_is_master(dev))
570  break;
571  if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
572  for (i = 0; i < dev->num_slaves; i++) {
573  if (i == mlx4_master_func_num(dev))
574  continue;
575  mlx4_slave_event(dev, i, eqe);
576  }
577  else /* IB port */
578  /* port-up event will be sent to a slave when the
579  * slave's alias-guid is set. This is done in alias_GUID.c
580  */
581  set_all_slave_state(dev, port, MLX4_DEV_EVENT_PORT_UP);
582  }
583  break;
584 
585  case MLX4_EVENT_TYPE_CQ_ERROR:
586  mlx4_warn(dev, "CQ %s on CQN %06x\n",
587  eqe->event.cq_err.syndrome == 1 ?
588  "overrun" : "access violation",
589  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
590  if (mlx4_is_master(dev)) {
591  ret = mlx4_get_slave_from_resource_id(dev,
592  RES_CQ,
593  be32_to_cpu(eqe->event.cq_err.cqn)
594  & 0xffffff, &slave);
595  if (ret && ret != -ENOENT) {
596  mlx4_dbg(dev, "CQ event %02x(%02x) on "
597  "EQ %d at index %u: could "
598  "not get slave id (%d)\n",
599  eqe->type, eqe->subtype,
600  eq->eqn, eq->cons_index, ret);
601  break;
602  }
603 
604  if (!ret && slave != dev->caps.function) {
605  mlx4_slave_event(dev, slave, eqe);
606  break;
607  }
608  }
609  mlx4_cq_event(dev,
610  be32_to_cpu(eqe->event.cq_err.cqn)
611  & 0xffffff,
612  eqe->type);
613  break;
614 
615  case MLX4_EVENT_TYPE_EQ_OVERFLOW:
616  mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
617  break;
618 
619  case MLX4_EVENT_TYPE_COMM_CHANNEL:
620  if (!mlx4_is_master(dev)) {
621  mlx4_warn(dev, "Received comm channel event "
622  "for non master device\n");
623  break;
624  }
625  memcpy(&priv->mfunc.master.comm_arm_bit_vector,
626  eqe->event.comm_channel_arm.bit_vec,
627  sizeof eqe->event.comm_channel_arm.bit_vec);
628  queue_work(priv->mfunc.master.comm_wq,
629  &priv->mfunc.master.comm_work);
630  break;
631 
632  case MLX4_EVENT_TYPE_FLR_EVENT:
633  flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
634  if (!mlx4_is_master(dev)) {
635  mlx4_warn(dev, "Non-master function received "
636  "FLR event\n");
637  break;
638  }
639 
640  mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);
641 
642  if (flr_slave >= dev->num_slaves) {
643  mlx4_warn(dev,
644  "Got FLR for unknown function: %d\n",
645  flr_slave);
646  update_slave_state = 0;
647  } else
648  update_slave_state = 1;
649 
650  spin_lock(&priv->mfunc.master.slave_state_lock);
651  if (update_slave_state) {
652  priv->mfunc.master.slave_state[flr_slave].active = false;
653  priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
654  priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
655  }
656  spin_unlock(&priv->mfunc.master.slave_state_lock);
657  queue_work(priv->mfunc.master.comm_wq,
658  &priv->mfunc.master.slave_flr_event_work);
659  break;
660 
661  case MLX4_EVENT_TYPE_FATAL_WARNING:
662  if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) {
663  if (mlx4_is_master(dev))
664  for (i = 0; i < dev->num_slaves; i++) {
665  mlx4_dbg(dev, "%s: Sending "
666  "MLX4_FATAL_WARNING_SUBTYPE_WARMING"
667  " to slave: %d\n", __func__, i);
668  if (i == dev->caps.function)
669  continue;
670  mlx4_slave_event(dev, i, eqe);
671  }
672  mlx4_err(dev, "Temperature Threshold was reached! "
673  "Threshold: %d celsius degrees; "
674  "Current Temperature: %d\n",
675  be16_to_cpu(eqe->event.warming.warning_threshold),
676  be16_to_cpu(eqe->event.warming.current_temperature));
677  } else
678  mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), "
679  "subtype %02x on EQ %d at index %u. owner=%x, "
680  "nent=0x%x, slave=%x, ownership=%s\n",
681  eqe->type, eqe->subtype, eq->eqn,
682  eq->cons_index, eqe->owner, eq->nent,
683  eqe->slave_id,
684  !!(eqe->owner & 0x80) ^
685  !!(eq->cons_index & eq->nent) ? "HW" : "SW");
686 
687  break;
688 
689  case MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT:
690  mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_MGMT_CHANGE,
691  (unsigned long) eqe);
692  break;
693 
694  case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
695  case MLX4_EVENT_TYPE_ECC_DETECT:
696  default:
697  mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at "
698  "index %u. owner=%x, nent=0x%x, slave=%x, "
699  "ownership=%s\n",
700  eqe->type, eqe->subtype, eq->eqn,
701  eq->cons_index, eqe->owner, eq->nent,
702  eqe->slave_id,
703  !!(eqe->owner & 0x80) ^
704  !!(eq->cons_index & eq->nent) ? "HW" : "SW");
705  break;
706  };
707 
708  ++eq->cons_index;
709  eqes_found = 1;
710  ++set_ci;
711 
712  /*
713  * The HCA will think the queue has overflowed if we
714  * don't tell it we've been processing events. We
715  * create our EQs with MLX4_NUM_SPARE_EQE extra
716  * entries, so we must update our consumer index at
717  * least that often.
718  */
719  if (unlikely(set_ci >= MLX4_NUM_SPARE_EQE)) {
720  eq_set_ci(eq, 0);
721  set_ci = 0;
722  }
723  }
724 
725  eq_set_ci(eq, 1);
726 
727  return eqes_found;
728 }
729 
730 static irqreturn_t mlx4_interrupt(int irq, void *dev_ptr)
731 {
732  struct mlx4_dev *dev = dev_ptr;
733  struct mlx4_priv *priv = mlx4_priv(dev);
734  int work = 0;
735  int i;
736 
737  writel(priv->eq_table.clr_mask, priv->eq_table.clr_int);
738 
739  for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
740  work |= mlx4_eq_int(dev, &priv->eq_table.eq[i]);
741 
742  return IRQ_RETVAL(work);
743 }
744 
745 static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
746 {
747  struct mlx4_eq *eq = eq_ptr;
748  struct mlx4_dev *dev = eq->dev;
749 
750  mlx4_eq_int(dev, eq);
751 
752  /* MSI-X vectors always belong to us */
753  return IRQ_HANDLED;
754 }
755 
756 int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
757  struct mlx4_vhcr *vhcr,
758  struct mlx4_cmd_mailbox *inbox,
759  struct mlx4_cmd_mailbox *outbox,
760  struct mlx4_cmd_info *cmd)
761 {
762  struct mlx4_priv *priv = mlx4_priv(dev);
763  struct mlx4_slave_event_eq_info *event_eq =
764  priv->mfunc.master.slave_state[slave].event_eq;
765  u32 in_modifier = vhcr->in_modifier;
766  u32 eqn = in_modifier & 0x1FF;
767  u64 in_param = vhcr->in_param;
768  int err = 0;
769  int i;
770 
771  if (slave == dev->caps.function)
772  err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
773  0, MLX4_CMD_MAP_EQ,
774  MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
775  if (!err)
776  for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i)
777  if (in_param & (1LL << i))
778  event_eq[i].eqn = in_modifier >> 31 ? -1 : eqn;
779 
780  return err;
781 }
782 
783 static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
784  int eq_num)
785 {
786  return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
787  MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
788  MLX4_CMD_WRAPPED);
789 }
790 
791 static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
792  int eq_num)
793 {
794  return mlx4_cmd(dev, mailbox->dma, eq_num, 0,
795  MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
796  MLX4_CMD_WRAPPED);
797 }
798 
799 static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
800  int eq_num)
801 {
802  return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num,
803  0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A,
804  MLX4_CMD_WRAPPED);
805 }
806 
807 static int mlx4_num_eq_uar(struct mlx4_dev *dev)
808 {
809  /*
810  * Each UAR holds 4 EQ doorbells. To figure out how many UARs
811  * we need to map, take the difference of highest index and
812  * the lowest index we'll use and add 1.
813  */
814  return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
815  dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1;
816 }
817 
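A worked example of the arithmetic above, with hypothetical capability values (reserved_eqs = 8, num_comp_vectors = 4, comp_pool = 0): the highest EQ number in use is 8 + 4 + 1 - 1 = 12, which lives in UAR page 12/4 = 3, while the lowest reserved page is 8/4 = 2, so 3 - 2 + 1 = 2 UAR pages must be mapped. The formula returns the same value:

/* Hypothetical values only; returns (4 + 1 + 8 + 0)/4 - 8/4 + 1 = 2. */
static int example_num_eq_uar(void)
{
	int num_comp_vectors = 4, reserved_eqs = 8, comp_pool = 0;

	return (num_comp_vectors + 1 + reserved_eqs + comp_pool) / 4 -
	       reserved_eqs / 4 + 1;
}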
818 static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
819 {
820  struct mlx4_priv *priv = mlx4_priv(dev);
821  int index;
822 
823  index = eq->eqn / 4 - dev->caps.reserved_eqs / 4;
824 
825  if (!priv->eq_table.uar_map[index]) {
826  priv->eq_table.uar_map[index] =
827  ioremap(pci_resource_start(dev->pdev, 2) +
828  ((eq->eqn / 4) << PAGE_SHIFT),
829  PAGE_SIZE);
830  if (!priv->eq_table.uar_map[index]) {
831  mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
832  eq->eqn);
833  return NULL;
834  }
835  }
836 
837  return priv->eq_table.uar_map[index] + 0x800 + 8 * (eq->eqn % 4);
838 }
839 
840 static void mlx4_unmap_uar(struct mlx4_dev *dev)
841 {
842  struct mlx4_priv *priv = mlx4_priv(dev);
843  int i;
844 
845  for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
846  if (priv->eq_table.uar_map[i]) {
847  iounmap(priv->eq_table.uar_map[i]);
848  priv->eq_table.uar_map[i] = NULL;
849  }
850 }
851 
852 static int mlx4_create_eq(struct mlx4_dev *dev, int nent,
853  u8 intr, struct mlx4_eq *eq)
854 {
855  struct mlx4_priv *priv = mlx4_priv(dev);
856  struct mlx4_cmd_mailbox *mailbox;
857  struct mlx4_eq_context *eq_context;
858  int npages;
859  u64 *dma_list = NULL;
860  dma_addr_t t;
861  u64 mtt_addr;
862  int err = -ENOMEM;
863  int i;
864 
865  eq->dev = dev;
866  eq->nent = roundup_pow_of_two(max(nent, 2));
867  npages = PAGE_ALIGN(eq->nent * MLX4_EQ_ENTRY_SIZE) / PAGE_SIZE;
868 
869  eq->page_list = kmalloc(npages * sizeof *eq->page_list,
870  GFP_KERNEL);
871  if (!eq->page_list)
872  goto err_out;
873 
874  for (i = 0; i < npages; ++i)
875  eq->page_list[i].buf = NULL;
876 
877  dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
878  if (!dma_list)
879  goto err_out_free;
880 
881  mailbox = mlx4_alloc_cmd_mailbox(dev);
882  if (IS_ERR(mailbox))
883  goto err_out_free;
884  eq_context = mailbox->buf;
885 
886  for (i = 0; i < npages; ++i) {
887  eq->page_list[i].buf = dma_alloc_coherent(&dev->pdev->dev,
888  PAGE_SIZE, &t, GFP_KERNEL);
889  if (!eq->page_list[i].buf)
890  goto err_out_free_pages;
891 
892  dma_list[i] = t;
893  eq->page_list[i].map = t;
894 
895  memset(eq->page_list[i].buf, 0, PAGE_SIZE);
896  }
897 
898  eq->eqn = mlx4_bitmap_alloc(&priv->eq_table.bitmap);
899  if (eq->eqn == -1)
900  goto err_out_free_pages;
901 
902  eq->doorbell = mlx4_get_eq_uar(dev, eq);
903  if (!eq->doorbell) {
904  err = -ENOMEM;
905  goto err_out_free_eq;
906  }
907 
908  err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);
909  if (err)
910  goto err_out_free_eq;
911 
912  err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);
913  if (err)
914  goto err_out_free_mtt;
915 
916  memset(eq_context, 0, sizeof *eq_context);
917  eq_context->flags = cpu_to_be32(MLX4_EQ_STATUS_OK |
918  MLX4_EQ_STATE_ARMED);
919  eq_context->log_eq_size = ilog2(eq->nent);
920  eq_context->intr = intr;
921  eq_context->log_page_size = PAGE_SHIFT - MLX4_ICM_PAGE_SHIFT;
922 
923  mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);
924  eq_context->mtt_base_addr_h = mtt_addr >> 32;
925  eq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
926 
927  err = mlx4_SW2HW_EQ(dev, mailbox, eq->eqn);
928  if (err) {
929  mlx4_warn(dev, "SW2HW_EQ failed (%d)\n", err);
930  goto err_out_free_mtt;
931  }
932 
933  kfree(dma_list);
934  mlx4_free_cmd_mailbox(dev, mailbox);
935 
936  eq->cons_index = 0;
937 
938  return err;
939 
940 err_out_free_mtt:
941  mlx4_mtt_cleanup(dev, &eq->mtt);
942 
943 err_out_free_eq:
944  mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
945 
946 err_out_free_pages:
947  for (i = 0; i < npages; ++i)
948  if (eq->page_list[i].buf)
949  dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
950  eq->page_list[i].buf,
951  eq->page_list[i].map);
952 
953  mlx4_free_cmd_mailbox(dev, mailbox);
954 
955 err_out_free:
956  kfree(eq->page_list);
957  kfree(dma_list);
958 
959 err_out:
960  return err;
961 }
962 
963 static void mlx4_free_eq(struct mlx4_dev *dev,
964  struct mlx4_eq *eq)
965 {
966  struct mlx4_priv *priv = mlx4_priv(dev);
967  struct mlx4_cmd_mailbox *mailbox;
968  int err;
969  int npages = PAGE_ALIGN(MLX4_EQ_ENTRY_SIZE * eq->nent) / PAGE_SIZE;
970  int i;
971 
972  mailbox = mlx4_alloc_cmd_mailbox(dev);
973  if (IS_ERR(mailbox))
974  return;
975 
976  err = mlx4_HW2SW_EQ(dev, mailbox, eq->eqn);
977  if (err)
978  mlx4_warn(dev, "HW2SW_EQ failed (%d)\n", err);
979 
980  if (0) {
981  mlx4_dbg(dev, "Dumping EQ context %02x:\n", eq->eqn);
982  for (i = 0; i < sizeof (struct mlx4_eq_context) / 4; ++i) {
983  if (i % 4 == 0)
984  pr_cont("[%02x] ", i * 4);
985  pr_cont(" %08x", be32_to_cpup(mailbox->buf + i * 4));
986  if ((i + 1) % 4 == 0)
987  pr_cont("\n");
988  }
989  }
990 
991  mlx4_mtt_cleanup(dev, &eq->mtt);
992  for (i = 0; i < npages; ++i)
993  dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
994  eq->page_list[i].buf,
995  eq->page_list[i].map);
996 
997  kfree(eq->page_list);
998  mlx4_bitmap_free(&priv->eq_table.bitmap, eq->eqn);
999  mlx4_free_cmd_mailbox(dev, mailbox);
1000 }
1001 
1002 static void mlx4_free_irqs(struct mlx4_dev *dev)
1003 {
1004  struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
1005  struct mlx4_priv *priv = mlx4_priv(dev);
1006  int i, vec;
1007 
1008  if (eq_table->have_irq)
1009  free_irq(dev->pdev->irq, dev);
1010 
1011  for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
1012  if (eq_table->eq[i].have_irq) {
1013  free_irq(eq_table->eq[i].irq, eq_table->eq + i);
1014  eq_table->eq[i].have_irq = 0;
1015  }
1016 
1017  for (i = 0; i < dev->caps.comp_pool; i++) {
1018  /*
1019  * Free the assigned IRQs; all bits should be 0,
1020  * but we need to validate.
1021  */
1022  if (priv->msix_ctl.pool_bm & 1ULL << i) {
1023  /* NO need protecting*/
1024  vec = dev->caps.num_comp_vectors + 1 + i;
1025  free_irq(priv->eq_table.eq[vec].irq,
1026  &priv->eq_table.eq[vec]);
1027  }
1028  }
1029 
1030 
1031  kfree(eq_table->irq_names);
1032 }
1033 
1034 static int mlx4_map_clr_int(struct mlx4_dev *dev)
1035 {
1036  struct mlx4_priv *priv = mlx4_priv(dev);
1037 
1038  priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
1039  priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
1040  if (!priv->clr_base) {
1041  mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
1042  return -ENOMEM;
1043  }
1044 
1045  return 0;
1046 }
1047 
1048 static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
1049 {
1050  struct mlx4_priv *priv = mlx4_priv(dev);
1051 
1052  iounmap(priv->clr_base);
1053 }
1054 
1055 int mlx4_alloc_eq_table(struct mlx4_dev *dev)
1056 {
1057  struct mlx4_priv *priv = mlx4_priv(dev);
1058 
1059  priv->eq_table.eq = kcalloc(dev->caps.num_eqs - dev->caps.reserved_eqs,
1060  sizeof *priv->eq_table.eq, GFP_KERNEL);
1061  if (!priv->eq_table.eq)
1062  return -ENOMEM;
1063 
1064  return 0;
1065 }
1066 
1067 void mlx4_free_eq_table(struct mlx4_dev *dev)
1068 {
1069  kfree(mlx4_priv(dev)->eq_table.eq);
1070 }
1071 
1072 int mlx4_init_eq_table(struct mlx4_dev *dev)
1073 {
1074  struct mlx4_priv *priv = mlx4_priv(dev);
1075  int err;
1076  int i;
1077 
1078  priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
1079  sizeof *priv->eq_table.uar_map,
1080  GFP_KERNEL);
1081  if (!priv->eq_table.uar_map) {
1082  err = -ENOMEM;
1083  goto err_out_free;
1084  }
1085 
1086  err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
1087  dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
1088  if (err)
1089  goto err_out_free;
1090 
1091  for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
1092  priv->eq_table.uar_map[i] = NULL;
1093 
1094  if (!mlx4_is_slave(dev)) {
1095  err = mlx4_map_clr_int(dev);
1096  if (err)
1097  goto err_out_bitmap;
1098 
1099  priv->eq_table.clr_mask =
1100  swab32(1 << (priv->eq_table.inta_pin & 31));
1101  priv->eq_table.clr_int = priv->clr_base +
1102  (priv->eq_table.inta_pin < 32 ? 4 : 0);
1103  }
1104 
1105  priv->eq_table.irq_names =
1106  kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
1107  dev->caps.comp_pool),
1108  GFP_KERNEL);
1109  if (!priv->eq_table.irq_names) {
1110  err = -ENOMEM;
1111  goto err_out_bitmap;
1112  }
1113 
1114  for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
1115  err = mlx4_create_eq(dev, dev->caps.num_cqs -
1116  dev->caps.reserved_cqs +
1117  MLX4_NUM_SPARE_EQE,
1118  (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
1119  &priv->eq_table.eq[i]);
1120  if (err) {
1121  --i;
1122  goto err_out_unmap;
1123  }
1124  }
1125 
1126  err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
1127  (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
1128  &priv->eq_table.eq[dev->caps.num_comp_vectors]);
1129  if (err)
1130  goto err_out_comp;
1131 
1132  /* if the additional completion vector pool size is 0, this loop will not run */
1133  for (i = dev->caps.num_comp_vectors + 1;
1134  i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {
1135 
1136  err = mlx4_create_eq(dev, dev->caps.num_cqs -
1137  dev->caps.reserved_cqs +
1138  MLX4_NUM_SPARE_EQE,
1139  (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
1140  &priv->eq_table.eq[i]);
1141  if (err) {
1142  --i;
1143  goto err_out_unmap;
1144  }
1145  }
1146 
1147 
1148  if (dev->flags & MLX4_FLAG_MSI_X) {
1149  const char *eq_name;
1150 
1151  for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
1152  if (i < dev->caps.num_comp_vectors) {
1153  snprintf(priv->eq_table.irq_names +
1154  i * MLX4_IRQNAME_SIZE,
1155  MLX4_IRQNAME_SIZE,
1156  "mlx4-comp-%d@pci:%s", i,
1157  pci_name(dev->pdev));
1158  } else {
1159  snprintf(priv->eq_table.irq_names +
1160  i * MLX4_IRQNAME_SIZE,
1161  MLX4_IRQNAME_SIZE,
1162  "mlx4-async@pci:%s",
1163  pci_name(dev->pdev));
1164  }
1165 
1166  eq_name = priv->eq_table.irq_names +
1167  i * MLX4_IRQNAME_SIZE;
1168  err = request_irq(priv->eq_table.eq[i].irq,
1169  mlx4_msi_x_interrupt, 0, eq_name,
1170  priv->eq_table.eq + i);
1171  if (err)
1172  goto err_out_async;
1173 
1174  priv->eq_table.eq[i].have_irq = 1;
1175  }
1176  } else {
1177  snprintf(priv->eq_table.irq_names,
1178  MLX4_IRQNAME_SIZE,
1179  DRV_NAME "@pci:%s",
1180  pci_name(dev->pdev));
1181  err = request_irq(dev->pdev->irq, mlx4_interrupt,
1182  IRQF_SHARED, priv->eq_table.irq_names, dev);
1183  if (err)
1184  goto err_out_async;
1185 
1186  priv->eq_table.have_irq = 1;
1187  }
1188 
1189  err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
1190  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
1191  if (err)
1192  mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
1193  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);
1194 
1195  for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
1196  eq_set_ci(&priv->eq_table.eq[i], 1);
1197 
1198  return 0;
1199 
1200 err_out_async:
1201  mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);
1202 
1203 err_out_comp:
1204  i = dev->caps.num_comp_vectors - 1;
1205 
1206 err_out_unmap:
1207  while (i >= 0) {
1208  mlx4_free_eq(dev, &priv->eq_table.eq[i]);
1209  --i;
1210  }
1211  if (!mlx4_is_slave(dev))
1212  mlx4_unmap_clr_int(dev);
1213  mlx4_free_irqs(dev);
1214 
1215 err_out_bitmap:
1216  mlx4_unmap_uar(dev);
1217  mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
1218 
1219 err_out_free:
1220  kfree(priv->eq_table.uar_map);
1221 
1222  return err;
1223 }
1224 
1225 void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
1226 {
1227  struct mlx4_priv *priv = mlx4_priv(dev);
1228  int i;
1229 
1230  mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1,
1231  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
1232 
1233  mlx4_free_irqs(dev);
1234 
1235  for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
1236  mlx4_free_eq(dev, &priv->eq_table.eq[i]);
1237 
1238  if (!mlx4_is_slave(dev))
1239  mlx4_unmap_clr_int(dev);
1240 
1241  mlx4_unmap_uar(dev);
1242  mlx4_bitmap_cleanup(&priv->eq_table.bitmap);
1243 
1244  kfree(priv->eq_table.uar_map);
1245 }
1246 
1247 /* A test that verifies that we can accept interrupts on all
1248  * the irq vectors of the device.
1249  * Interrupts are checked using the NOP command.
1250  */
1251 int mlx4_test_interrupts(struct mlx4_dev *dev)
1252 {
1253  struct mlx4_priv *priv = mlx4_priv(dev);
1254  int i;
1255  int err;
1256 
1257  err = mlx4_NOP(dev);
1258  /* When not in MSI_X, there is only one irq to check */
1259  if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev))
1260  return err;
1261 
1262  /* A loop over all completion vectors, for each vector we will check
1263  * whether it works by mapping command completions to that vector
1264  * and performing a NOP command
1265  */
1266  for(i = 0; !err && (i < dev->caps.num_comp_vectors); ++i) {
1267  /* Temporary use polling for command completions */
1268  mlx4_cmd_use_polling(dev);
1269 
1270  /* Map the new eq to handle all asynchronous events */
1271  err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
1272  priv->eq_table.eq[i].eqn);
1273  if (err) {
1274  mlx4_warn(dev, "Failed mapping eq for interrupt test\n");
1275  mlx4_cmd_use_events(dev);
1276  break;
1277  }
1278 
1279  /* Go back to using events */
1280  mlx4_cmd_use_events(dev);
1281  err = mlx4_NOP(dev);
1282  }
1283 
1284  /* Return to default */
1285  mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
1286  priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
1287  return err;
1288 }
1289 EXPORT_SYMBOL(mlx4_test_interrupts);
1290 
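A hedged usage sketch (the real call site is outside this file): an initialization path would typically run the test once after EQs and interrupts are set up and treat a failure as a warning rather than a fatal error. The wrapper name below is illustrative:

static void example_check_interrupts(struct mlx4_dev *dev)
{
	if (mlx4_test_interrupts(dev))
		mlx4_warn(dev, "Interrupt test failed\n");
}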
1291 int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
1292  int *vector)
1293 {
1294 
1295  struct mlx4_priv *priv = mlx4_priv(dev);
1296  int vec = 0, err = 0, i;
1297 
1298  mutex_lock(&priv->msix_ctl.pool_lock);
1299  for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
1300  if (~priv->msix_ctl.pool_bm & 1ULL << i) {
1301  priv->msix_ctl.pool_bm |= 1ULL << i;
1302  vec = dev->caps.num_comp_vectors + 1 + i;
1303  snprintf(priv->eq_table.irq_names +
1304  vec * MLX4_IRQNAME_SIZE,
1305  MLX4_IRQNAME_SIZE, "%s", name);
1306 #ifdef CONFIG_RFS_ACCEL
1307  if (rmap) {
1308  err = irq_cpu_rmap_add(rmap,
1309  priv->eq_table.eq[vec].irq);
1310  if (err)
1311  mlx4_warn(dev, "Failed adding irq rmap\n");
1312  }
1313 #endif
1314  err = request_irq(priv->eq_table.eq[vec].irq,
1315  mlx4_msi_x_interrupt, 0,
1316  &priv->eq_table.irq_names[vec<<5],
1317  priv->eq_table.eq + vec);
1318  if (err) {
1319  /* zero out the bit by flipping it */
1320  priv->msix_ctl.pool_bm ^= 1ULL << i;
1321  vec = 0;
1322  continue;
1323  /* we don't want to break here */
1324  }
1325  eq_set_ci(&priv->eq_table.eq[vec], 1);
1326  }
1327  }
1328  mutex_unlock(&priv->msix_ctl.pool_lock);
1329 
1330  if (vec) {
1331  *vector = vec;
1332  } else {
1333  *vector = 0;
1334  err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
1335  }
1336  return err;
1337 }
1338 EXPORT_SYMBOL(mlx4_assign_eq);
1339 
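A hedged sketch of how a consumer might use the vector pool together with mlx4_release_eq() below; the wrapper name and error policy are illustrative, not taken from this file:

static int example_grab_vector(struct mlx4_dev *dev, struct cpu_rmap *rmap,
			       int *vector)
{
	int err = mlx4_assign_eq(dev, "example-comp", rmap, vector);

	if (err)                /* e.g. -ENOSPC: pool exhausted, *vector is 0 */
		return err;     /* caller falls back to a legacy vector */

	/* ... attach CQs to *vector ...; on teardown: mlx4_release_eq(dev, *vector); */
	return 0;
}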
1340 void mlx4_release_eq(struct mlx4_dev *dev, int vec)
1341 {
1342  struct mlx4_priv *priv = mlx4_priv(dev);
1343  /*bm index*/
1344  int i = vec - dev->caps.num_comp_vectors - 1;
1345 
1346  if (likely(i >= 0)) {
1347  /* sanity check, making sure we're not trying to free IRQs
1348  * belonging to a legacy EQ */
1349  mutex_lock(&priv->msix_ctl.pool_lock);
1350  if (priv->msix_ctl.pool_bm & 1ULL << i) {
1351  free_irq(priv->eq_table.eq[vec].irq,
1352  &priv->eq_table.eq[vec]);
1353  priv->msix_ctl.pool_bm &= ~(1ULL << i);
1354  }
1355  mutex_unlock(&priv->msix_ctl.pool_lock);
1356  }
1357 
1358 }
1359 EXPORT_SYMBOL(mlx4_release_eq);
1360 