Linux Kernel 3.7.1
ocrdma_hw.c
1 /*******************************************************************
2  * This file is part of the Emulex RoCE Device Driver for *
3  * RoCE (RDMA over Converged Ethernet) CNA Adapters. *
4  * Copyright (C) 2008-2012 Emulex. All rights reserved. *
5  * EMULEX and SLI are trademarks of Emulex. *
6  * www.emulex.com *
7  * *
8  * This program is free software; you can redistribute it and/or *
9  * modify it under the terms of version 2 of the GNU General *
10  * Public License as published by the Free Software Foundation. *
11  * This program is distributed in the hope that it will be useful. *
12  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16  * TO BE LEGALLY INVALID. See the GNU General Public License for *
17  * more details, a copy of which can be found in the file COPYING *
18  * included with this package. *
19  *
20  * Contact Information:
22  *
23  * Emulex
24  * 3333 Susan Street
25  * Costa Mesa, CA 92626
26  *******************************************************************/
27 
28 #include <linux/sched.h>
29 #include <linux/interrupt.h>
30 #include <linux/log2.h>
31 #include <linux/dma-mapping.h>
32 
33 #include <rdma/ib_verbs.h>
34 #include <rdma/ib_user_verbs.h>
35 #include <rdma/ib_addr.h>
36 
37 #include "ocrdma.h"
38 #include "ocrdma_hw.h"
39 #include "ocrdma_verbs.h"
40 #include "ocrdma_ah.h"
41 
42 enum mbx_status {
	/* mailbox command status codes (enumerator values elided from this listing) */
81 };
82 
83 enum additional_status {
	/* additional status codes (enumerator values elided from this listing) */
85 };
86 
87 enum cqe_status {
	/* mailbox CQE status codes (enumerator values elided from this listing) */
93 };
94 
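/* The inline helpers below walk the EQ, mailbox CQ and mailbox SQ rings.
 * Each ring length is a power of two, so the head/tail indices wrap by
 * masking with (ring length - 1).
 */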
95 static inline void *ocrdma_get_eqe(struct ocrdma_eq *eq)
96 {
97  return (u8 *)eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe));
98 }
99 
100 static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq)
101 {
102  eq->q.tail = (eq->q.tail + 1) & (OCRDMA_EQ_LEN - 1);
103 }
104 
105 static inline void *ocrdma_get_mcqe(struct ocrdma_dev *dev)
106 {
107  struct ocrdma_mcqe *cqe = (struct ocrdma_mcqe *)
108  ((u8 *) dev->mq.cq.va +
109  (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe)));
110 
111  if (!(le32_to_cpu(cqe->valid_ae_cmpl_cons) & OCRDMA_MCQE_VALID_MASK))
112  return NULL;
113  return cqe;
114 }
115 
116 static inline void ocrdma_mcq_inc_tail(struct ocrdma_dev *dev)
117 {
118  dev->mq.cq.tail = (dev->mq.cq.tail + 1) & (OCRDMA_MQ_CQ_LEN - 1);
119 }
120 
121 static inline struct ocrdma_mqe *ocrdma_get_mqe(struct ocrdma_dev *dev)
122 {
123  return (struct ocrdma_mqe *)((u8 *) dev->mq.sq.va +
124  (dev->mq.sq.head *
125  sizeof(struct ocrdma_mqe)));
126 }
127 
128 static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev)
129 {
130  dev->mq.sq.head = (dev->mq.sq.head + 1) & (OCRDMA_MQ_LEN - 1);
131  atomic_inc(&dev->mq.sq.used);
132 }
133 
134 static inline void *ocrdma_get_mqe_rsp(struct ocrdma_dev *dev)
135 {
136  return (void *)((u8 *) dev->mq.sq.va +
137  (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe)));
138 }
139 
140 static enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)
141 {
142  switch (qps) {
143  case OCRDMA_QPS_RST:
144  return IB_QPS_RESET;
145  case OCRDMA_QPS_INIT:
146  return IB_QPS_INIT;
147  case OCRDMA_QPS_RTR:
148  return IB_QPS_RTR;
149  case OCRDMA_QPS_RTS:
150  return IB_QPS_RTS;
151  case OCRDMA_QPS_SQD:
153  return IB_QPS_SQD;
154  case OCRDMA_QPS_SQE:
155  return IB_QPS_SQE;
156  case OCRDMA_QPS_ERR:
157  return IB_QPS_ERR;
158  };
159  return IB_QPS_ERR;
160 }
161 
162 static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps)
163 {
164  switch (qps) {
165  case IB_QPS_RESET:
166  return OCRDMA_QPS_RST;
167  case IB_QPS_INIT:
168  return OCRDMA_QPS_INIT;
169  case IB_QPS_RTR:
170  return OCRDMA_QPS_RTR;
171  case IB_QPS_RTS:
172  return OCRDMA_QPS_RTS;
173  case IB_QPS_SQD:
174  return OCRDMA_QPS_SQD;
175  case IB_QPS_SQE:
176  return OCRDMA_QPS_SQE;
177  case IB_QPS_ERR:
178  return OCRDMA_QPS_ERR;
179  };
180  return OCRDMA_QPS_ERR;
181 }
182 
183 static int ocrdma_get_mbx_errno(u32 status)
184 {
185  int err_num = -EFAULT;
186  u8 mbox_status = (status & OCRDMA_MBX_RSP_STATUS_MASK) >>
188  u8 add_status = (status & OCRDMA_MBX_RSP_ASTATUS_MASK) >>
190 
191  switch (mbox_status) {
194  err_num = -EAGAIN;
195  break;
196 
217  err_num = -EINVAL;
218  break;
219 
224  err_num = -EBUSY;
225  break;
226 
236  err_num = -ENOBUFS;
237  break;
238 
240  switch (add_status) {
242  err_num = -EAGAIN;
243  break;
244  }
245  default:
246  err_num = -EFAULT;
247  }
248  return err_num;
249 }
250 
251 static int ocrdma_get_mbx_cqe_errno(u16 cqe_status)
252 {
253  int err_num = -EINVAL;
254 
255  switch (cqe_status) {
257  err_num = -EPERM;
258  break;
260  err_num = -EINVAL;
261  break;
264  err_num = -EAGAIN;
265  break;
267  err_num = -EIO;
268  break;
269  }
270  return err_num;
271 }
272 
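/* Doorbell helpers: each one packs the ring id, the arm/solicit/clear bits
 * and the number of entries consumed into a single 32-bit value and writes
 * it to the corresponding doorbell offset in the device BAR. For example,
 * the driver re-arms the mailbox CQ after draining it with
 * ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, cqe_popped).
 */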
273 void ocrdma_ring_cq_db(struct ocrdma_dev *dev, u16 cq_id, bool armed,
274  bool solicited, u16 cqe_popped)
275 {
277 
278  val |= ((cq_id & OCRDMA_DB_CQ_RING_ID_EXT_MASK) <<
280 
281  if (armed)
282  val |= (1 << OCRDMA_DB_CQ_REARM_SHIFT);
283  if (solicited)
284  val |= (1 << OCRDMA_DB_CQ_SOLICIT_SHIFT);
285  val |= (cqe_popped << OCRDMA_DB_CQ_NUM_POPPED_SHIFT);
286  iowrite32(val, dev->nic_info.db + OCRDMA_DB_CQ_OFFSET);
287 }
288 
289 static void ocrdma_ring_mq_db(struct ocrdma_dev *dev)
290 {
291  u32 val = 0;
292 
293  val |= dev->mq.sq.id & OCRDMA_MQ_ID_MASK;
294  val |= 1 << OCRDMA_MQ_NUM_MQE_SHIFT;
295  iowrite32(val, dev->nic_info.db + OCRDMA_DB_MQ_OFFSET);
296 }
297 
298 static void ocrdma_ring_eq_db(struct ocrdma_dev *dev, u16 eq_id,
299  bool arm, bool clear_int, u16 num_eqe)
300 {
301  u32 val = 0;
302 
303  val |= eq_id & OCRDMA_EQ_ID_MASK;
305  if (arm)
306  val |= (1 << OCRDMA_REARM_SHIFT);
307  if (clear_int)
308  val |= (1 << OCRDMA_EQ_CLR_SHIFT);
309  val |= (1 << OCRDMA_EQ_TYPE_SHIFT);
310  val |= (num_eqe << OCRDMA_NUM_EQE_SHIFT);
311  iowrite32(val, dev->nic_info.db + OCRDMA_DB_EQ_OFFSET);
312 }
313 
314 static void ocrdma_init_mch(struct ocrdma_mbx_hdr *cmd_hdr,
315  u8 opcode, u8 subsys, u32 cmd_len)
316 {
317  cmd_hdr->subsys_op = (opcode | (subsys << OCRDMA_MCH_SUBSYS_SHIFT));
318  cmd_hdr->timeout = 20; /* seconds */
319  cmd_hdr->cmd_len = cmd_len - sizeof(struct ocrdma_mbx_hdr);
320 }
321 
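/* Build an "embedded" mailbox entry: the request payload (and, after
 * completion, the response) lives inside the MQE itself, so a single
 * kzalloc'd struct ocrdma_mqe carries both the MQE header and the command
 * header.
 */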
322 static void *ocrdma_init_emb_mqe(u8 opcode, u32 cmd_len)
323 {
324  struct ocrdma_mqe *mqe;
325 
326  mqe = kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);
327  if (!mqe)
328  return NULL;
329  mqe->hdr.spcl_sge_cnt_emb |=
332  mqe->hdr.pyld_len = cmd_len - sizeof(struct ocrdma_mqe_hdr);
333 
334  ocrdma_init_mch(&mqe->u.emb_req.mch, opcode, OCRDMA_SUBSYS_ROCE,
335  mqe->hdr.pyld_len);
336  return mqe;
337 }
338 
339 static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q)
340 {
341  dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma);
342 }
343 
344 static int ocrdma_alloc_q(struct ocrdma_dev *dev,
345  struct ocrdma_queue_info *q, u16 len, u16 entry_size)
346 {
347  memset(q, 0, sizeof(*q));
348  q->len = len;
349  q->entry_size = entry_size;
350  q->size = len * entry_size;
351  q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size,
352  &q->dma, GFP_KERNEL);
353  if (!q->va)
354  return -ENOMEM;
355  memset(q->va, 0, q->size);
356  return 0;
357 }
358 
359 static void ocrdma_build_q_pages(struct ocrdma_pa *q_pa, int cnt,
360  dma_addr_t host_pa, int hw_page_size)
361 {
362  int i;
363 
364  for (i = 0; i < cnt; i++) {
365  q_pa[i].lo = (u32) (host_pa & 0xffffffff);
366  q_pa[i].hi = (u32) upper_32_bits(host_pa);
367  host_pa += hw_page_size;
368  }
369 }
370 
371 static void ocrdma_assign_eq_vect_gen2(struct ocrdma_dev *dev,
372  struct ocrdma_eq *eq)
373 {
374  /* assign vector and update vector id for next EQ */
375  eq->vector = dev->nic_info.msix.start_vector;
376  dev->nic_info.msix.start_vector += 1;
377 }
378 
379 static void ocrdma_free_eq_vect_gen2(struct ocrdma_dev *dev)
380 {
381  /* this assumes that EQs are freed in exactly the reverse
382  * order of their allocation.
383  */
384  dev->nic_info.msix.start_vector -= 1;
385 }
386 
387 static int ocrdma_mbx_delete_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q,
388  int queue_type)
389 {
390  u8 opcode = 0;
391  int status;
392  struct ocrdma_delete_q_req *cmd = dev->mbx_cmd;
393 
394  switch (queue_type) {
395  case QTYPE_MCCQ:
396  opcode = OCRDMA_CMD_DELETE_MQ;
397  break;
398  case QTYPE_CQ:
399  opcode = OCRDMA_CMD_DELETE_CQ;
400  break;
401  case QTYPE_EQ:
402  opcode = OCRDMA_CMD_DELETE_EQ;
403  break;
404  default:
405  BUG();
406  }
407  memset(cmd, 0, sizeof(*cmd));
408  ocrdma_init_mch(&cmd->req, opcode, OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
409  cmd->id = q->id;
410 
411  status = be_roce_mcc_cmd(dev->nic_info.netdev,
412  cmd, sizeof(*cmd), NULL, NULL);
413  if (!status)
414  q->created = false;
415  return status;
416 }
417 
418 static int ocrdma_mbx_create_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
419 {
420  int status;
421  struct ocrdma_create_eq_req *cmd = dev->mbx_cmd;
422  struct ocrdma_create_eq_rsp *rsp = dev->mbx_cmd;
423 
424  memset(cmd, 0, sizeof(*cmd));
425  ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_EQ, OCRDMA_SUBSYS_COMMON,
426  sizeof(*cmd));
427  if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)
428  cmd->req.rsvd_version = 0;
429  else
430  cmd->req.rsvd_version = 2;
431 
432  cmd->num_pages = 4;
434  cmd->cnt = 4 << OCRDMA_CREATE_EQ_CNT_SHIFT;
435 
436  ocrdma_build_q_pages(&cmd->pa[0], cmd->num_pages, eq->q.dma,
437  PAGE_SIZE_4K);
438  status = be_roce_mcc_cmd(dev->nic_info.netdev, cmd, sizeof(*cmd), NULL,
439  NULL);
440  if (!status) {
441  eq->q.id = rsp->vector_eqid & 0xffff;
442  if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)
443  ocrdma_assign_eq_vect_gen2(dev, eq);
444  else {
445  eq->vector = (rsp->vector_eqid >> 16) & 0xffff;
446  dev->nic_info.msix.start_vector += 1;
447  }
448  eq->q.created = true;
449  }
450  return status;
451 }
452 
453 static int ocrdma_create_eq(struct ocrdma_dev *dev,
454  struct ocrdma_eq *eq, u16 q_len)
455 {
456  int status;
457 
458  status = ocrdma_alloc_q(dev, &eq->q, OCRDMA_EQ_LEN,
459  sizeof(struct ocrdma_eqe));
460  if (status)
461  return status;
462 
463  status = ocrdma_mbx_create_eq(dev, eq);
464  if (status)
465  goto mbx_err;
466  eq->dev = dev;
467  ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
468 
469  return 0;
470 mbx_err:
471  ocrdma_free_q(dev, &eq->q);
472  return status;
473 }
474 
475 static int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
476 {
477  int irq;
478 
479  if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
480  irq = dev->nic_info.pdev->irq;
481  else
482  irq = dev->nic_info.msix.vector_list[eq->vector];
483  return irq;
484 }
485 
486 static void _ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
487 {
488  if (eq->q.created) {
489  ocrdma_mbx_delete_q(dev, &eq->q, QTYPE_EQ);
490  if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)
491  ocrdma_free_eq_vect_gen2(dev);
492  ocrdma_free_q(dev, &eq->q);
493  }
494 }
495 
496 static void ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
497 {
498  int irq;
499 
500  /* disarm the EQ so that interrupts are not generated
501  * while it is being freed and the EQ delete is in progress.
502  */
503  ocrdma_ring_eq_db(dev, eq->q.id, false, false, 0);
504 
505  irq = ocrdma_get_irq(dev, eq);
506  free_irq(irq, eq);
507  _ocrdma_destroy_eq(dev, eq);
508 }
509 
510 static void ocrdma_destroy_qp_eqs(struct ocrdma_dev *dev)
511 {
512  int i;
513 
514  /* deallocate the data path eqs */
515  for (i = 0; i < dev->eq_cnt; i++)
516  ocrdma_destroy_eq(dev, &dev->qp_eq_tbl[i]);
517 }
518 
519 static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev,
520  struct ocrdma_queue_info *cq,
521  struct ocrdma_queue_info *eq)
522 {
523  struct ocrdma_create_cq_cmd *cmd = dev->mbx_cmd;
524  struct ocrdma_create_cq_cmd_rsp *rsp = dev->mbx_cmd;
525  int status;
526 
527  memset(cmd, 0, sizeof(*cmd));
528  ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_CQ,
529  OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
530 
531  cmd->pgsz_pgcnt = PAGES_4K_SPANNED(cq->va, cq->size);
533  cmd->eqn = (eq->id << OCRDMA_CREATE_CQ_EQID_SHIFT);
534 
535  ocrdma_build_q_pages(&cmd->pa[0], cmd->pgsz_pgcnt,
536  cq->dma, PAGE_SIZE_4K);
537  status = be_roce_mcc_cmd(dev->nic_info.netdev,
538  cmd, sizeof(*cmd), NULL, NULL);
539  if (!status) {
541  cq->created = true;
542  }
543  return status;
544 }
545 
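/* Queue lengths are passed to the firmware in encoded form: for a
 * power-of-two length the encoding is log2(len) + 1 (via fls()), and the
 * value 16 wraps back to 0.
 */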
546 static u32 ocrdma_encoded_q_len(int q_len)
547 {
548  u32 len_encoded = fls(q_len); /* log2(len) + 1 */
549 
550  if (len_encoded == 16)
551  len_encoded = 0;
552  return len_encoded;
553 }
554 
555 static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev,
556  struct ocrdma_queue_info *mq,
557  struct ocrdma_queue_info *cq)
558 {
559  int num_pages, status;
560  struct ocrdma_create_mq_req *cmd = dev->mbx_cmd;
561  struct ocrdma_create_mq_rsp *rsp = dev->mbx_cmd;
562  struct ocrdma_pa *pa;
563 
564  memset(cmd, 0, sizeof(*cmd));
565  num_pages = PAGES_4K_SPANNED(mq->va, mq->size);
566 
567  if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
568  ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ,
569  OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
570  cmd->v0.pages = num_pages;
571  cmd->v0.async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID;
572  cmd->v0.async_cqid_valid = (cq->id << 1);
573  cmd->v0.cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
575  cmd->v0.cqid_ringsize |=
577  cmd->v0.valid = OCRDMA_CREATE_MQ_VALID;
578  pa = &cmd->v0.pa[0];
579  } else {
580  ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ_EXT,
581  OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
582  cmd->req.rsvd_version = 1;
583  cmd->v1.cqid_pages = num_pages;
584  cmd->v1.cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT);
585  cmd->v1.async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID;
586  cmd->v1.async_event_bitmap = Bit(20);
587  cmd->v1.async_cqid_ringsize = cq->id;
588  cmd->v1.async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
590  cmd->v1.valid = OCRDMA_CREATE_MQ_VALID;
591  pa = &cmd->v1.pa[0];
592  }
593  ocrdma_build_q_pages(pa, num_pages, mq->dma, PAGE_SIZE_4K);
594  status = be_roce_mcc_cmd(dev->nic_info.netdev,
595  cmd, sizeof(*cmd), NULL, NULL);
596  if (!status) {
597  mq->id = rsp->id;
598  mq->created = true;
599  }
600  return status;
601 }
602 
603 static int ocrdma_create_mq(struct ocrdma_dev *dev)
604 {
605  int status;
606 
607  /* Alloc completion queue for Mailbox queue */
608  status = ocrdma_alloc_q(dev, &dev->mq.cq, OCRDMA_MQ_CQ_LEN,
609  sizeof(struct ocrdma_mcqe));
610  if (status)
611  goto alloc_err;
612 
613  status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->meq.q);
614  if (status)
615  goto mbx_cq_free;
616 
617  memset(&dev->mqe_ctx, 0, sizeof(dev->mqe_ctx));
618  init_waitqueue_head(&dev->mqe_ctx.cmd_wait);
619  mutex_init(&dev->mqe_ctx.lock);
620 
621  /* Alloc Mailbox queue */
622  status = ocrdma_alloc_q(dev, &dev->mq.sq, OCRDMA_MQ_LEN,
623  sizeof(struct ocrdma_mqe));
624  if (status)
625  goto mbx_cq_destroy;
626  status = ocrdma_mbx_create_mq(dev, &dev->mq.sq, &dev->mq.cq);
627  if (status)
628  goto mbx_q_free;
629  ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, 0);
630  return 0;
631 
632 mbx_q_free:
633  ocrdma_free_q(dev, &dev->mq.sq);
634 mbx_cq_destroy:
635  ocrdma_mbx_delete_q(dev, &dev->mq.cq, QTYPE_CQ);
636 mbx_cq_free:
637  ocrdma_free_q(dev, &dev->mq.cq);
638 alloc_err:
639  return status;
640 }
641 
642 static void ocrdma_destroy_mq(struct ocrdma_dev *dev)
643 {
644  struct ocrdma_queue_info *mbxq, *cq;
645 
646  /* mqe_ctx lock synchronizes with any other pending cmds. */
647  mutex_lock(&dev->mqe_ctx.lock);
648  mbxq = &dev->mq.sq;
649  if (mbxq->created) {
650  ocrdma_mbx_delete_q(dev, mbxq, QTYPE_MCCQ);
651  ocrdma_free_q(dev, mbxq);
652  }
653  mutex_unlock(&dev->mqe_ctx.lock);
654 
655  cq = &dev->mq.cq;
656  if (cq->created) {
657  ocrdma_mbx_delete_q(dev, cq, QTYPE_CQ);
658  ocrdma_free_q(dev, cq);
659  }
660 }
661 
662 static void ocrdma_process_qpcat_error(struct ocrdma_dev *dev,
663  struct ocrdma_qp *qp)
664 {
665  enum ib_qp_state new_ib_qps = IB_QPS_ERR;
666  enum ib_qp_state old_ib_qps;
667 
668  if (qp == NULL)
669  BUG();
670  ocrdma_qp_state_machine(qp, new_ib_qps, &old_ib_qps);
671 }
672 
673 static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
674  struct ocrdma_ae_mcqe *cqe)
675 {
676  struct ocrdma_qp *qp = NULL;
677  struct ocrdma_cq *cq = NULL;
678  struct ib_event ib_evt;
679  int cq_event = 0;
680  int qp_event = 1;
681  int srq_event = 0;
682  int dev_event = 0;
683  int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
684  OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT;
685 
686  if (cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPVALID)
687  qp = dev->qp_tbl[cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPID_MASK];
688  if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID)
689  cq = dev->cq_tbl[cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK];
690 
691  ib_evt.device = &dev->ibdev;
692 
693  switch (type) {
694  case OCRDMA_CQ_ERROR:
695  ib_evt.element.cq = &cq->ibcq;
696  ib_evt.event = IB_EVENT_CQ_ERR;
697  cq_event = 1;
698  qp_event = 0;
699  break;
701  ib_evt.element.cq = &cq->ibcq;
702  ib_evt.event = IB_EVENT_CQ_ERR;
703  break;
705  ib_evt.element.qp = &qp->ibqp;
706  ib_evt.event = IB_EVENT_QP_FATAL;
707  ocrdma_process_qpcat_error(dev, qp);
708  break;
710  ib_evt.element.qp = &qp->ibqp;
711  ib_evt.event = IB_EVENT_QP_ACCESS_ERR;
712  break;
714  ib_evt.element.qp = &qp->ibqp;
715  ib_evt.event = IB_EVENT_COMM_EST;
716  break;
718  ib_evt.element.qp = &qp->ibqp;
719  ib_evt.event = IB_EVENT_SQ_DRAINED;
720  break;
722  ib_evt.element.port_num = 1;
723  ib_evt.event = IB_EVENT_DEVICE_FATAL;
724  qp_event = 0;
725  dev_event = 1;
726  break;
727  case OCRDMA_SRQCAT_ERROR:
728  ib_evt.element.srq = &qp->srq->ibsrq;
729  ib_evt.event = IB_EVENT_SRQ_ERR;
730  srq_event = 1;
731  qp_event = 0;
732  break;
734  ib_evt.element.srq = &qp->srq->ibsrq;
735  ib_evt.event = IB_EVENT_SRQ_LIMIT_REACHED;
736  srq_event = 1;
737  qp_event = 0;
738  break;
740  ib_evt.element.qp = &qp->ibqp;
741  ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED;
742  break;
743  default:
744  cq_event = 0;
745  qp_event = 0;
746  srq_event = 0;
747  dev_event = 0;
748  ocrdma_err("%s() unknown type=0x%x\n", __func__, type);
749  break;
750  }
751 
752  if (qp_event) {
753  if (qp->ibqp.event_handler)
754  qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context);
755  } else if (cq_event) {
756  if (cq->ibcq.event_handler)
757  cq->ibcq.event_handler(&ib_evt, cq->ibcq.cq_context);
758  } else if (srq_event) {
759  if (qp->srq->ibsrq.event_handler)
760  qp->srq->ibsrq.event_handler(&ib_evt,
761  qp->srq->ibsrq.
762  srq_context);
763  } else if (dev_event)
764  ib_dispatch_event(&ib_evt);
765 
766 }
767 
768 static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
769 {
770  /* async CQE processing */
771  struct ocrdma_ae_mcqe *cqe = ae_cqe;
772  u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >>
774 
775  if (evt_code == OCRDMA_ASYNC_EVE_CODE)
776  ocrdma_dispatch_ibevent(dev, cqe);
777  else
778  ocrdma_err("%s(%d) invalid evt code=0x%x\n",
779  __func__, dev->id, evt_code);
780 }
781 
782 static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe)
783 {
784  if (dev->mqe_ctx.tag == cqe->tag_lo && dev->mqe_ctx.cmd_done == false) {
785  dev->mqe_ctx.cqe_status = (cqe->status &
787  dev->mqe_ctx.ext_status =
790  dev->mqe_ctx.cmd_done = true;
791  wake_up(&dev->mqe_ctx.cmd_wait);
792  } else
793  ocrdma_err("%s() cqe for invalid tag0x%x.expected=0x%x\n",
794  __func__, cqe->tag_lo, dev->mqe_ctx.tag);
795 }
796 
797 static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
798 {
799  u16 cqe_popped = 0;
800  struct ocrdma_mcqe *cqe;
801 
802  while (1) {
803  cqe = ocrdma_get_mcqe(dev);
804  if (cqe == NULL)
805  break;
806  ocrdma_le32_to_cpu(cqe, sizeof(*cqe));
807  cqe_popped += 1;
808  if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_AE_MASK)
809  ocrdma_process_acqe(dev, cqe);
810  else if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_CMPL_MASK)
811  ocrdma_process_mcqe(dev, cqe);
812  else
813  ocrdma_err("%s() cqe->compl is not set.\n", __func__);
814  memset(cqe, 0, sizeof(struct ocrdma_mcqe));
815  ocrdma_mcq_inc_tail(dev);
816  }
817  ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, cqe_popped);
818  return 0;
819 }
820 
821 static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
822  struct ocrdma_cq *cq)
823 {
824  unsigned long flags;
825  struct ocrdma_qp *qp;
826  bool buddy_cq_found = false;
827  /* Go through the list of QPs in error state that are using this CQ
828  * and invoke their callback handlers to trigger CQE processing for
829  * error/flushed CQEs. It is rare to find more than a few entries in
830  * this list, as most consumers stop after getting an error CQE.
831  * The list is traversed only until a matching buddy CQ is found for a QP.
832  */
833  spin_lock_irqsave(&dev->flush_q_lock, flags);
834  list_for_each_entry(qp, &cq->sq_head, sq_entry) {
835  if (qp->srq)
836  continue;
837  /* if the wq and rq share the same cq, then the comp_handler
838  * has already been invoked.
839  */
840  if (qp->sq_cq == qp->rq_cq)
841  continue;
842  /* if completion came on sq, rq's cq is buddy cq.
843  * if completion came on rq, sq's cq is buddy cq.
844  */
845  if (qp->sq_cq == cq)
846  cq = qp->rq_cq;
847  else
848  cq = qp->sq_cq;
849  buddy_cq_found = true;
850  break;
851  }
852  spin_unlock_irqrestore(&dev->flush_q_lock, flags);
853  if (buddy_cq_found == false)
854  return;
855  if (cq->ibcq.comp_handler) {
856  spin_lock_irqsave(&cq->comp_handler_lock, flags);
857  (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
858  spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
859  }
860 }
861 
862 static void ocrdma_qp_cq_handler(struct ocrdma_dev *dev, u16 cq_idx)
863 {
864  unsigned long flags;
865  struct ocrdma_cq *cq;
866 
867  if (cq_idx >= OCRDMA_MAX_CQ)
868  BUG();
869 
870  cq = dev->cq_tbl[cq_idx];
871  if (cq == NULL) {
872  ocrdma_err("%s%d invalid id=0x%x\n", __func__, dev->id, cq_idx);
873  return;
874  }
875  spin_lock_irqsave(&cq->cq_lock, flags);
876  cq->armed = false;
877  cq->solicited = false;
878  spin_unlock_irqrestore(&cq->cq_lock, flags);
879 
880  ocrdma_ring_cq_db(dev, cq->id, false, false, 0);
881 
882  if (cq->ibcq.comp_handler) {
883  spin_lock_irqsave(&cq->comp_handler_lock, flags);
884  (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
885  spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
886  }
887  ocrdma_qp_buddy_cq_handler(dev, cq);
888 }
889 
890 static void ocrdma_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
891 {
892  /* process the MQ-CQE. */
893  if (cq_id == dev->mq.cq.id)
894  ocrdma_mq_cq_handler(dev, cq_id);
895  else
896  ocrdma_qp_cq_handler(dev, cq_id);
897 }
898 
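/* Interrupt handler: drain all valid EQEs from the event queue, route each
 * EQE that references a CQ to either the mailbox CQ handler or a QP CQ
 * handler, then re-arm the EQ with the number of EQEs consumed (and once
 * more with 0 in INTx mode).
 */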
899 static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
900 {
901  struct ocrdma_eq *eq = handle;
902  struct ocrdma_dev *dev = eq->dev;
903  struct ocrdma_eqe eqe;
904  struct ocrdma_eqe *ptr;
905  u16 eqe_popped = 0;
906  u16 cq_id;
907  while (1) {
908  ptr = ocrdma_get_eqe(eq);
909  eqe = *ptr;
910  ocrdma_le32_to_cpu(&eqe, sizeof(eqe));
911  if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0)
912  break;
913  eqe_popped += 1;
914  ptr->id_valid = 0;
915  /* check whether it is a CQE or not. */
916  if ((eqe.id_valid & OCRDMA_EQE_FOR_CQE_MASK) == 0) {
917  cq_id = eqe.id_valid >> OCRDMA_EQE_RESOURCE_ID_SHIFT;
918  ocrdma_cq_handler(dev, cq_id);
919  }
920  ocrdma_eq_inc_tail(eq);
921  }
922  ocrdma_ring_eq_db(dev, eq->q.id, true, true, eqe_popped);
923  /* Ring the EQ doorbell with num_popped set to 0 to enable interrupts again. */
924  if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
925  ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
926  return IRQ_HANDLED;
927 }
928 
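/* Mailbox command flow: ocrdma_post_mqe() copies the command into the next
 * mailbox queue slot, tags it with the current head index and rings the MQ
 * doorbell; ocrdma_wait_mqe_cmpl() then sleeps until the completion for
 * that tag is seen by ocrdma_process_mcqe() or 30 seconds elapse.
 */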
929 static void ocrdma_post_mqe(struct ocrdma_dev *dev, struct ocrdma_mqe *cmd)
930 {
931  struct ocrdma_mqe *mqe;
932 
933  dev->mqe_ctx.tag = dev->mq.sq.head;
934  dev->mqe_ctx.cmd_done = false;
935  mqe = ocrdma_get_mqe(dev);
936  cmd->hdr.tag_lo = dev->mq.sq.head;
937  ocrdma_copy_cpu_to_le32(mqe, cmd, sizeof(*mqe));
938  /* make sure descriptor is written before ringing doorbell */
939  wmb();
940  ocrdma_mq_inc_head(dev);
941  ocrdma_ring_mq_db(dev);
942 }
943 
944 static int ocrdma_wait_mqe_cmpl(struct ocrdma_dev *dev)
945 {
946  long status;
947  /* 30 sec timeout */
948  status = wait_event_timeout(dev->mqe_ctx.cmd_wait,
949  (dev->mqe_ctx.cmd_done != false),
950  msecs_to_jiffies(30000));
951  if (status)
952  return 0;
953  else
954  return -1;
955 }
956 
957 /* issue a mailbox command on the MQ */
958 static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
959 {
960  int status = 0;
961  u16 cqe_status, ext_status;
962  struct ocrdma_mqe *rsp;
963 
964  mutex_lock(&dev->mqe_ctx.lock);
965  ocrdma_post_mqe(dev, mqe);
966  status = ocrdma_wait_mqe_cmpl(dev);
967  if (status)
968  goto mbx_err;
969  cqe_status = dev->mqe_ctx.cqe_status;
970  ext_status = dev->mqe_ctx.ext_status;
971  rsp = ocrdma_get_mqe_rsp(dev);
972  ocrdma_copy_le32_to_cpu(mqe, rsp, (sizeof(*mqe)));
973  if (cqe_status || ext_status) {
974  ocrdma_err
975  ("%s() opcode=0x%x, cqe_status=0x%x, ext_status=0x%x\n",
976  __func__,
977  (rsp->u.rsp.subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
978  OCRDMA_MBX_RSP_OPCODE_SHIFT, cqe_status, ext_status);
979  status = ocrdma_get_mbx_cqe_errno(cqe_status);
980  goto mbx_err;
981  }
982  if (mqe->u.rsp.status & OCRDMA_MBX_RSP_STATUS_MASK)
983  status = ocrdma_get_mbx_errno(mqe->u.rsp.status);
984 mbx_err:
985  mutex_unlock(&dev->mqe_ctx.lock);
986  return status;
987 }
988 
989 static void ocrdma_get_attr(struct ocrdma_dev *dev,
990  struct ocrdma_dev_attr *attr,
991  struct ocrdma_mbx_query_config *rsp)
992 {
993  attr->max_pd =
996  attr->max_qp =
999  attr->max_send_sge = ((rsp->max_write_send_sge &
1002  attr->max_recv_sge = (rsp->max_write_send_sge &
1005  attr->max_srq_sge = (rsp->max_srq_rqe_sge &
1008  attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
1011  attr->max_ird_per_qp = (rsp->max_ird_ord_per_qp &
1014  attr->cq_overflow_detect = (rsp->qp_srq_cq_ird_ord &
1017  attr->srq_supported = (rsp->qp_srq_cq_ird_ord &
1020  attr->local_ca_ack_delay = (rsp->max_pd_ca_ack_delay &
1023  attr->max_mr = rsp->max_mr;
1024  attr->max_mr_size = ~0ull;
1025  attr->max_fmr = 0;
1027  attr->max_num_mr_pbl = rsp->max_num_mr_pbl;
1028  attr->max_cqe = rsp->max_cq_cqes_per_cq &
1030  attr->wqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
1034  attr->rqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
1038  attr->max_inline_data =
1039  attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) +
1041  if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
1042  attr->ird = 1;
1045  }
1046  dev->attr.max_wqe = rsp->max_wqes_rqes_per_q >>
1048  dev->attr.max_rqe = rsp->max_wqes_rqes_per_q &
1050 }
1051 
1052 static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
1053  struct ocrdma_fw_conf_rsp *conf)
1054 {
1055  u32 fn_mode;
1056 
1057  fn_mode = conf->fn_mode & OCRDMA_FN_MODE_RDMA;
1058  if (fn_mode != OCRDMA_FN_MODE_RDMA)
1059  return -EINVAL;
1060  dev->base_eqid = conf->base_eqid;
1061  dev->max_eq = conf->max_eq;
1062  dev->attr.max_cq = OCRDMA_MAX_CQ - 1;
1063  return 0;
1064 }
1065 
1066 /* can be issued only during init time. */
1067 static int ocrdma_mbx_query_fw_ver(struct ocrdma_dev *dev)
1068 {
1069  int status = -ENOMEM;
1070  struct ocrdma_mqe *cmd;
1071  struct ocrdma_fw_ver_rsp *rsp;
1072 
1073  cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_VER, sizeof(*cmd));
1074  if (!cmd)
1075  return -ENOMEM;
1076  ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
1078  OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
1079 
1080  status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1081  if (status)
1082  goto mbx_err;
1083  rsp = (struct ocrdma_fw_ver_rsp *)cmd;
1084  memset(&dev->attr.fw_ver[0], 0, sizeof(dev->attr.fw_ver));
1085  memcpy(&dev->attr.fw_ver[0], &rsp->running_ver[0],
1086  sizeof(rsp->running_ver));
1087  ocrdma_le32_to_cpu(dev->attr.fw_ver, sizeof(rsp->running_ver));
1088 mbx_err:
1089  kfree(cmd);
1090  return status;
1091 }
1092 
1093 /* can be issued only during init time. */
1094 static int ocrdma_mbx_query_fw_config(struct ocrdma_dev *dev)
1095 {
1096  int status = -ENOMEM;
1097  struct ocrdma_mqe *cmd;
1098  struct ocrdma_fw_conf_rsp *rsp;
1099 
1100  cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_CONFIG, sizeof(*cmd));
1101  if (!cmd)
1102  return -ENOMEM;
1103  ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
1105  OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
1106  status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1107  if (status)
1108  goto mbx_err;
1109  rsp = (struct ocrdma_fw_conf_rsp *)cmd;
1110  status = ocrdma_check_fw_config(dev, rsp);
1111 mbx_err:
1112  kfree(cmd);
1113  return status;
1114 }
1115 
1116 static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev)
1117 {
1118  int status = -ENOMEM;
1119  struct ocrdma_mbx_query_config *rsp;
1120  struct ocrdma_mqe *cmd;
1121 
1122  cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_CONFIG, sizeof(*cmd));
1123  if (!cmd)
1124  return status;
1125  status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1126  if (status)
1127  goto mbx_err;
1128  rsp = (struct ocrdma_mbx_query_config *)cmd;
1129  ocrdma_get_attr(dev, &dev->attr, rsp);
1130 mbx_err:
1131  kfree(cmd);
1132  return status;
1133 }
1134 
1135 int ocrdma_mbx_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
1136 {
1137  int status = -ENOMEM;
1138  struct ocrdma_alloc_pd *cmd;
1139  struct ocrdma_alloc_pd_rsp *rsp;
1140 
1141  cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD, sizeof(*cmd));
1142  if (!cmd)
1143  return status;
1144  if (pd->dpp_enabled)
1146  status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1147  if (status)
1148  goto mbx_err;
1149  rsp = (struct ocrdma_alloc_pd_rsp *)cmd;
1152  pd->dpp_enabled = true;
1153  pd->dpp_page = rsp->dpp_page_pdid >>
1155  } else {
1156  pd->dpp_enabled = false;
1157  pd->num_dpp_qp = 0;
1158  }
1159 mbx_err:
1160  kfree(cmd);
1161  return status;
1162 }
1163 
1164 int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
1165 {
1166  int status = -ENOMEM;
1167  struct ocrdma_dealloc_pd *cmd;
1168 
1169  cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD, sizeof(*cmd));
1170  if (!cmd)
1171  return status;
1172  cmd->id = pd->id;
1173  status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1174  kfree(cmd);
1175  return status;
1176 }
1177 
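/* Round the requested number of entries up to a power of two, then pick the
 * smallest supported queue page size that can hold the resulting memory;
 * the page count and final entry count are derived from that choice.
 */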
1178 static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
1179  int *num_pages, int *page_size)
1180 {
1181  int i;
1182  int mem_size;
1183 
1184  *num_entries = roundup_pow_of_two(*num_entries);
1185  mem_size = *num_entries * entry_size;
1186  /* find the lowest possible multiplier */
1187  for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {
1188  if (mem_size <= (OCRDMA_Q_PAGE_BASE_SIZE << i))
1189  break;
1190  }
1191  if (i >= OCRDMA_MAX_Q_PAGE_SIZE_CNT)
1192  return -EINVAL;
1193  mem_size = roundup(mem_size,
1195  *num_pages =
1196  mem_size / ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES);
1197  *page_size = ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES);
1198  *num_entries = mem_size / entry_size;
1199  return 0;
1200 }
1201 
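/* Create the address handle (AH) table: a single PBL page holds the
 * physical addresses of the AV table pages, and the firmware response
 * returns the "ahid" used to reference this table later.
 */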
1202 static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev)
1203 {
1204  int i ;
1205  int status = 0;
1206  int max_ah;
1207  struct ocrdma_create_ah_tbl *cmd;
1208  struct ocrdma_create_ah_tbl_rsp *rsp;
1209  struct pci_dev *pdev = dev->nic_info.pdev;
1210  dma_addr_t pa;
1211  struct ocrdma_pbe *pbes;
1212 
1213  cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_AH_TBL, sizeof(*cmd));
1214  if (!cmd)
1215  return status;
1216 
1217  max_ah = OCRDMA_MAX_AH;
1218  dev->av_tbl.size = sizeof(struct ocrdma_av) * max_ah;
1219 
1220  /* number of PBEs in PBL */
1221  cmd->ah_conf = (OCRDMA_AH_TBL_PAGES <<
1224 
1225  /* page size */
1226  for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {
1227  if (PAGE_SIZE == (OCRDMA_MIN_Q_PAGE_SIZE << i))
1228  break;
1229  }
1230  cmd->ah_conf |= (i << OCRDMA_CREATE_AH_PAGE_SIZE_SHIFT) &
1232 
1233  /* ah_entry size */
1234  cmd->ah_conf |= (sizeof(struct ocrdma_av) <<
1237 
1238  dev->av_tbl.pbl.va = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
1239  &dev->av_tbl.pbl.pa,
1240  GFP_KERNEL);
1241  if (dev->av_tbl.pbl.va == NULL)
1242  goto mem_err;
1243 
1244  dev->av_tbl.va = dma_alloc_coherent(&pdev->dev, dev->av_tbl.size,
1245  &pa, GFP_KERNEL);
1246  if (dev->av_tbl.va == NULL)
1247  goto mem_err_ah;
1248  dev->av_tbl.pa = pa;
1249  dev->av_tbl.num_ah = max_ah;
1250  memset(dev->av_tbl.va, 0, dev->av_tbl.size);
1251 
1252  pbes = (struct ocrdma_pbe *)dev->av_tbl.pbl.va;
1253  for (i = 0; i < dev->av_tbl.size / OCRDMA_MIN_Q_PAGE_SIZE; i++) {
1254  pbes[i].pa_lo = (u32) (pa & 0xffffffff);
1255  pbes[i].pa_hi = (u32) upper_32_bits(pa);
1256  pa += PAGE_SIZE;
1257  }
1258  cmd->tbl_addr[0].lo = (u32)(dev->av_tbl.pbl.pa & 0xFFFFFFFF);
1259  cmd->tbl_addr[0].hi = (u32)upper_32_bits(dev->av_tbl.pbl.pa);
1260  status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1261  if (status)
1262  goto mbx_err;
1263  rsp = (struct ocrdma_create_ah_tbl_rsp *)cmd;
1264  dev->av_tbl.ahid = rsp->ahid & 0xFFFF;
1265  kfree(cmd);
1266  return 0;
1267 
1268 mbx_err:
1269  dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
1270  dev->av_tbl.pa);
1271  dev->av_tbl.va = NULL;
1272 mem_err_ah:
1273  dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
1274  dev->av_tbl.pbl.pa);
1275  dev->av_tbl.pbl.va = NULL;
1276  dev->av_tbl.size = 0;
1277 mem_err:
1278  kfree(cmd);
1279  return status;
1280 }
1281 
1282 static void ocrdma_mbx_delete_ah_tbl(struct ocrdma_dev *dev)
1283 {
1284  struct ocrdma_delete_ah_tbl *cmd;
1285  struct pci_dev *pdev = dev->nic_info.pdev;
1286 
1287  if (dev->av_tbl.va == NULL)
1288  return;
1289 
1290  cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_AH_TBL, sizeof(*cmd));
1291  if (!cmd)
1292  return;
1293  cmd->ahid = dev->av_tbl.ahid;
1294 
1295  ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1296  dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
1297  dev->av_tbl.pa);
1298  dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
1299  dev->av_tbl.pbl.pa);
1300  kfree(cmd);
1301 }
1302 
1303 /* Multiple CQs use the same EQ. This routine returns the least used
1304  * EQ to associate with a CQ. This distributes the interrupt
1305  * processing and CPU load across the EQs, their vectors and hence the CPUs.
1306  */
1307 static u16 ocrdma_bind_eq(struct ocrdma_dev *dev)
1308 {
1309  int i, selected_eq = 0, cq_cnt = 0;
1310  u16 eq_id;
1311 
1312  mutex_lock(&dev->dev_lock);
1313  cq_cnt = dev->qp_eq_tbl[0].cq_cnt;
1314  eq_id = dev->qp_eq_tbl[0].q.id;
1315  /* find the EQ which has the least number of
1316  * CQs associated with it.
1317  */
1318  for (i = 0; i < dev->eq_cnt; i++) {
1319  if (dev->qp_eq_tbl[i].cq_cnt < cq_cnt) {
1320  cq_cnt = dev->qp_eq_tbl[i].cq_cnt;
1321  eq_id = dev->qp_eq_tbl[i].q.id;
1322  selected_eq = i;
1323  }
1324  }
1325  dev->qp_eq_tbl[selected_eq].cq_cnt += 1;
1326  mutex_unlock(&dev->dev_lock);
1327  return eq_id;
1328 }
1329 
1330 static void ocrdma_unbind_eq(struct ocrdma_dev *dev, u16 eq_id)
1331 {
1332  int i;
1333 
1334  mutex_lock(&dev->dev_lock);
1335  for (i = 0; i < dev->eq_cnt; i++) {
1336  if (dev->qp_eq_tbl[i].q.id != eq_id)
1337  continue;
1338  dev->qp_eq_tbl[i].cq_cnt -= 1;
1339  break;
1340  }
1341  mutex_unlock(&dev->dev_lock);
1342 }
1343 
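/* Create a completion queue via mailbox. The CQ is bound to the least
 * loaded EQ by ocrdma_bind_eq(), the CQE count is encoded in the "cnt"
 * field (0/1/2 for 256/512/1024 entries, 3 for anything larger), and GEN2
 * devices program an explicit cqe_count without phase change, while other
 * devices use auto-valid CQEs with phase change enabled.
 */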
1344 int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
1345  int entries, int dpp_cq)
1346 {
1347  int status = -ENOMEM; int max_hw_cqe;
1348  struct pci_dev *pdev = dev->nic_info.pdev;
1349  struct ocrdma_create_cq *cmd;
1350  struct ocrdma_create_cq_rsp *rsp;
1351  u32 hw_pages, cqe_size, page_size, cqe_count;
1352 
1353  if (dpp_cq)
1354  return -EINVAL;
1355  if (entries > dev->attr.max_cqe) {
1356  ocrdma_err("%s(%d) max_cqe=0x%x, requester_cqe=0x%x\n",
1357  __func__, dev->id, dev->attr.max_cqe, entries);
1358  return -EINVAL;
1359  }
1360  if (dpp_cq && (dev->nic_info.dev_family != OCRDMA_GEN2_FAMILY))
1361  return -EINVAL;
1362 
1363  if (dpp_cq) {
1364  cq->max_hw_cqe = 1;
1365  max_hw_cqe = 1;
1366  cqe_size = OCRDMA_DPP_CQE_SIZE;
1367  hw_pages = 1;
1368  } else {
1369  cq->max_hw_cqe = dev->attr.max_cqe;
1370  max_hw_cqe = dev->attr.max_cqe;
1371  cqe_size = sizeof(struct ocrdma_cqe);
1372  hw_pages = OCRDMA_CREATE_CQ_MAX_PAGES;
1373  }
1374 
1375  cq->len = roundup(max_hw_cqe * cqe_size, OCRDMA_MIN_Q_PAGE_SIZE);
1376 
1377  cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_CQ, sizeof(*cmd));
1378  if (!cmd)
1379  return -ENOMEM;
1380  ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
1381  OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
1382  cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
1383  if (!cq->va) {
1384  status = -ENOMEM;
1385  goto mem_err;
1386  }
1387  memset(cq->va, 0, cq->len);
1388  page_size = cq->len / hw_pages;
1389  cmd->cmd.pgsz_pgcnt = (page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
1391  cmd->cmd.pgsz_pgcnt |= hw_pages;
1392  cmd->cmd.ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
1393 
1394  if (dev->eq_cnt < 0)
1395  goto eq_err;
1396  cq->eqn = ocrdma_bind_eq(dev);
1397  cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER2;
1398  cqe_count = cq->len / cqe_size;
1399  if (cqe_count > 1024)
1400  /* Set cnt to 3 to indicate more than 1024 cq entries */
1401  cmd->cmd.ev_cnt_flags |= (0x3 << OCRDMA_CREATE_CQ_CNT_SHIFT);
1402  else {
1403  u8 count = 0;
1404  switch (cqe_count) {
1405  case 256:
1406  count = 0;
1407  break;
1408  case 512:
1409  count = 1;
1410  break;
1411  case 1024:
1412  count = 2;
1413  break;
1414  default:
1415  goto mbx_err;
1416  }
1417  cmd->cmd.ev_cnt_flags |= (count << OCRDMA_CREATE_CQ_CNT_SHIFT);
1418  }
1419  /* the EQ is shared among all the consumer CQs. */
1420  cmd->cmd.eqn = cq->eqn;
1421  if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
1422  if (dpp_cq)
1423  cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP <<
1425  cq->phase_change = false;
1426  cmd->cmd.cqe_count = (cq->len / cqe_size);
1427  } else {
1428  cmd->cmd.cqe_count = (cq->len / cqe_size) - 1;
1429  cmd->cmd.ev_cnt_flags |= OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID;
1430  cq->phase_change = true;
1431  }
1432 
1433  ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size);
1434  status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1435  if (status)
1436  goto mbx_err;
1437 
1438  rsp = (struct ocrdma_create_cq_rsp *)cmd;
1439  cq->id = (u16) (rsp->rsp.cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
1440  kfree(cmd);
1441  return 0;
1442 mbx_err:
1443  ocrdma_unbind_eq(dev, cq->eqn);
1444 eq_err:
1445  dma_free_coherent(&pdev->dev, cq->len, cq->va, cq->pa);
1446 mem_err:
1447  kfree(cmd);
1448  return status;
1449 }
1450 
1451 int ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq)
1452 {
1453  int status = -ENOMEM;
1454  struct ocrdma_destroy_cq *cmd;
1455 
1456  cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_CQ, sizeof(*cmd));
1457  if (!cmd)
1458  return status;
1459  ocrdma_init_mch(&cmd->req, OCRDMA_CMD_DELETE_CQ,
1460  OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
1461 
1462  cmd->bypass_flush_qid |=
1463  (cq->id << OCRDMA_DESTROY_CQ_QID_SHIFT) &
1465 
1466  ocrdma_unbind_eq(dev, cq->eqn);
1467  status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1468  if (status)
1469  goto mbx_err;
1470  dma_free_coherent(&dev->nic_info.pdev->dev, cq->len, cq->va, cq->pa);
1471 mbx_err:
1472  kfree(cmd);
1473  return status;
1474 }
1475 
1476 int ocrdma_mbx_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
1477  u32 pdid, int addr_check)
1478 {
1479  int status = -ENOMEM;
1480  struct ocrdma_alloc_lkey *cmd;
1481  struct ocrdma_alloc_lkey_rsp *rsp;
1482 
1483  cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_LKEY, sizeof(*cmd));
1484  if (!cmd)
1485  return status;
1486  cmd->pdid = pdid;
1487  cmd->pbl_sz_flags |= addr_check;
1488  cmd->pbl_sz_flags |= (hwmr->fr_mr << OCRDMA_ALLOC_LKEY_FMR_SHIFT);
1489  cmd->pbl_sz_flags |=
1491  cmd->pbl_sz_flags |=
1493  cmd->pbl_sz_flags |=
1495  cmd->pbl_sz_flags |=
1497  cmd->pbl_sz_flags |=
1499 
1500  status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1501  if (status)
1502  goto mbx_err;
1503  rsp = (struct ocrdma_alloc_lkey_rsp *)cmd;
1504  hwmr->lkey = rsp->lrkey;
1505 mbx_err:
1506  kfree(cmd);
1507  return status;
1508 }
1509 
1510 int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *dev, int fr_mr, u32 lkey)
1511 {
1512  int status = -ENOMEM;
1513  struct ocrdma_dealloc_lkey *cmd;
1514 
1515  cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_LKEY, sizeof(*cmd));
1516  if (!cmd)
1517  return -ENOMEM;
1518  cmd->lkey = lkey;
1519  cmd->rsvd_frmr = fr_mr ? 1 : 0;
1520  status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1521  if (status)
1522  goto mbx_err;
1523 mbx_err:
1524  kfree(cmd);
1525  return status;
1526 }
1527 
1528 static int ocrdma_mbx_reg_mr(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
1529  u32 pdid, u32 pbl_cnt, u32 pbe_size, u32 last)
1530 {
1531  int status = -ENOMEM;
1532  int i;
1533  struct ocrdma_reg_nsmr *cmd;
1534  struct ocrdma_reg_nsmr_rsp *rsp;
1535 
1536  cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR, sizeof(*cmd));
1537  if (!cmd)
1538  return -ENOMEM;
1539  cmd->num_pbl_pdid =
1540  pdid | (hwmr->num_pbls << OCRDMA_REG_NSMR_NUM_PBL_SHIFT);
1541 
1542  cmd->flags_hpage_pbe_sz |= (hwmr->remote_wr <<
1544  cmd->flags_hpage_pbe_sz |= (hwmr->remote_rd <<
1546  cmd->flags_hpage_pbe_sz |= (hwmr->local_wr <<
1548  cmd->flags_hpage_pbe_sz |= (hwmr->remote_atomic <<
1550  cmd->flags_hpage_pbe_sz |= (hwmr->mw_bind <<
1553 
1557  cmd->totlen_low = hwmr->len;
1558  cmd->totlen_high = upper_32_bits(hwmr->len);
1559  cmd->fbo_low = (u32) (hwmr->fbo & 0xffffffff);
1560  cmd->fbo_high = (u32) upper_32_bits(hwmr->fbo);
1561  cmd->va_loaddr = (u32) hwmr->va;
1562  cmd->va_hiaddr = (u32) upper_32_bits(hwmr->va);
1563 
1564  for (i = 0; i < pbl_cnt; i++) {
1565  cmd->pbl[i].lo = (u32) (hwmr->pbl_table[i].pa & 0xffffffff);
1566  cmd->pbl[i].hi = upper_32_bits(hwmr->pbl_table[i].pa);
1567  }
1568  status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1569  if (status)
1570  goto mbx_err;
1571  rsp = (struct ocrdma_reg_nsmr_rsp *)cmd;
1572  hwmr->lkey = rsp->lrkey;
1573 mbx_err:
1574  kfree(cmd);
1575  return status;
1576 }
1577 
1578 static int ocrdma_mbx_reg_mr_cont(struct ocrdma_dev *dev,
1579  struct ocrdma_hw_mr *hwmr, u32 pbl_cnt,
1580  u32 pbl_offset, u32 last)
1581 {
1582  int status = -ENOMEM;
1583  int i;
1584  struct ocrdma_reg_nsmr_cont *cmd;
1585 
1586  cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR_CONT, sizeof(*cmd));
1587  if (!cmd)
1588  return -ENOMEM;
1589  cmd->lrkey = hwmr->lkey;
1591  (pbl_offset & OCRDMA_REG_NSMR_CONT_PBL_SHIFT_MASK);
1592  cmd->last = last << OCRDMA_REG_NSMR_CONT_LAST_SHIFT;
1593 
1594  for (i = 0; i < pbl_cnt; i++) {
1595  cmd->pbl[i].lo =
1596  (u32) (hwmr->pbl_table[i + pbl_offset].pa & 0xffffffff);
1597  cmd->pbl[i].hi =
1598  upper_32_bits(hwmr->pbl_table[i + pbl_offset].pa);
1599  }
1600  status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
1601  if (status)
1602  goto mbx_err;
1603 mbx_err:
1604  kfree(cmd);
1605  return status;
1606 }
1607 
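/* Register a memory region in chunks: the initial REGISTER_NSMR command
 * carries up to MAX_OCRDMA_NSMR_PBL PBL addresses, and any remaining PBLs
 * are sent with REGISTER_NSMR_CONT commands until the chunk that sets the
 * "last" flag.
 */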
1608 int ocrdma_reg_mr(struct ocrdma_dev *dev,
1609  struct ocrdma_hw_mr *hwmr, u32 pdid, int acc)
1610 {
1611  int status;
1612  u32 last = 0;
1613  u32 cur_pbl_cnt, pbl_offset;
1614  u32 pending_pbl_cnt = hwmr->num_pbls;
1615 
1616  pbl_offset = 0;
1617  cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
1618  if (cur_pbl_cnt == pending_pbl_cnt)
1619  last = 1;
1620 
1621  status = ocrdma_mbx_reg_mr(dev, hwmr, pdid,
1622  cur_pbl_cnt, hwmr->pbe_size, last);
1623  if (status) {
1624  ocrdma_err("%s() status=%d\n", __func__, status);
1625  return status;
1626  }
1627  /* if there are no more PBLs to register, then exit. */
1628  if (last)
1629  return 0;
1630 
1631  while (!last) {
1632  pbl_offset += cur_pbl_cnt;
1633  pending_pbl_cnt -= cur_pbl_cnt;
1634  cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
1635  /* if we reach the end of the PBLs, we need to set the last
1636  * bit, indicating there are no more PBLs to register for this memory key.
1637  */
1638  if (cur_pbl_cnt == pending_pbl_cnt)
1639  last = 1;
1640 
1641  status = ocrdma_mbx_reg_mr_cont(dev, hwmr, cur_pbl_cnt,
1642  pbl_offset, last);
1643  if (status)
1644  break;
1645  }
1646  if (status)
1647  ocrdma_err("%s() err. status=%d\n", __func__, status);
1648 
1649  return status;
1650 }
1651 
1652 static bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
1653 {
1654  struct ocrdma_qp *tmp;
1655  bool found = false;
1656  list_for_each_entry(tmp, &cq->sq_head, sq_entry) {
1657  if (qp == tmp) {
1658  found = true;
1659  break;
1660  }
1661  }
1662  return found;
1663 }
1664 
1665 static bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
1666 {
1667  struct ocrdma_qp *tmp;
1668  bool found = false;
1669  list_for_each_entry(tmp, &cq->rq_head, rq_entry) {
1670  if (qp == tmp) {
1671  found = true;
1672  break;
1673  }
1674  }
1675  return found;
1676 }
1677 
1678 void ocrdma_flush_qp(struct ocrdma_qp *qp)
1679 {
1680  bool found;
1681  unsigned long flags;
1682 
1683  spin_lock_irqsave(&qp->dev->flush_q_lock, flags);
1684  found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
1685  if (!found)
1686  list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head);
1687  if (!qp->srq) {
1688  found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
1689  if (!found)
1690  list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head);
1691  }
1692  spin_unlock_irqrestore(&qp->dev->flush_q_lock, flags);
1693 }
1694 
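/* Validate and apply a QP state transition while holding q_lock. Returns 0
 * on a legal transition (moving into ERR also flushes the QP), 1 if the QP
 * is already in the requested state, and -EINVAL for an illegal transition.
 */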
1695 int ocrdma_qp_state_machine(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
1696  enum ib_qp_state *old_ib_state)
1697 {
1698  unsigned long flags;
1699  int status = 0;
1700  enum ocrdma_qp_state new_state;
1701  new_state = get_ocrdma_qp_state(new_ib_state);
1702 
1703  /* sync with wqe and rqe posting */
1704  spin_lock_irqsave(&qp->q_lock, flags);
1705 
1706  if (old_ib_state)
1707  *old_ib_state = get_ibqp_state(qp->state);
1708  if (new_state == qp->state) {
1709  spin_unlock_irqrestore(&qp->q_lock, flags);
1710  return 1;
1711  }
1712 
1713  switch (qp->state) {
1714  case OCRDMA_QPS_RST:
1715  switch (new_state) {
1716  case OCRDMA_QPS_RST:
1717  case OCRDMA_QPS_INIT:
1718  break;
1719  default:
1720  status = -EINVAL;
1721  break;
1722  };
1723  break;
1724  case OCRDMA_QPS_INIT:
1725  /* qps: INIT->XXX */
1726  switch (new_state) {
1727  case OCRDMA_QPS_INIT:
1728  case OCRDMA_QPS_RTR:
1729  break;
1730  case OCRDMA_QPS_ERR:
1731  ocrdma_flush_qp(qp);
1732  break;
1733  default:
1734  status = -EINVAL;
1735  break;
1736  };
1737  break;
1738  case OCRDMA_QPS_RTR:
1739  /* qps: RTR->XXX */
1740  switch (new_state) {
1741  case OCRDMA_QPS_RTS:
1742  break;
1743  case OCRDMA_QPS_ERR:
1744  ocrdma_flush_qp(qp);
1745  break;
1746  default:
1747  status = -EINVAL;
1748  break;
1749  };
1750  break;
1751  case OCRDMA_QPS_RTS:
1752  /* qps: RTS->XXX */
1753  switch (new_state) {
1754  case OCRDMA_QPS_SQD:
1755  case OCRDMA_QPS_SQE:
1756  break;
1757  case OCRDMA_QPS_ERR:
1758  ocrdma_flush_qp(qp);
1759  break;
1760  default:
1761  status = -EINVAL;
1762  break;
1763  };
1764  break;
1765  case OCRDMA_QPS_SQD:
1766  /* qps: SQD->XXX */
1767  switch (new_state) {
1768  case OCRDMA_QPS_RTS:
1769  case OCRDMA_QPS_SQE:
1770  case OCRDMA_QPS_ERR:
1771  break;
1772  default:
1773  status = -EINVAL;
1774  break;
1775  };
1776  break;
1777  case OCRDMA_QPS_SQE:
1778  switch (new_state) {
1779  case OCRDMA_QPS_RTS:
1780  case OCRDMA_QPS_ERR:
1781  break;
1782  default:
1783  status = -EINVAL;
1784  break;
1785  };
1786  break;
1787  case OCRDMA_QPS_ERR:
1788  /* qps: ERR->XXX */
1789  switch (new_state) {
1790  case OCRDMA_QPS_RST:
1791  break;
1792  default:
1793  status = -EINVAL;
1794  break;
1795  };
1796  break;
1797  default:
1798  status = -EINVAL;
1799  break;
1800  };
1801  if (!status)
1802  qp->state = new_state;
1803 
1804  spin_unlock_irqrestore(&qp->q_lock, flags);
1805  return status;
1806 }
1807 
1808 static u32 ocrdma_set_create_qp_mbx_access_flags(struct ocrdma_qp *qp)
1809 {
1810  u32 flags = 0;
1811  if (qp->cap_flags & OCRDMA_QP_INB_RD)
1813  if (qp->cap_flags & OCRDMA_QP_INB_WR)
1815  if (qp->cap_flags & OCRDMA_QP_MW_BIND)
1817  if (qp->cap_flags & OCRDMA_QP_LKEY0)
1819  if (qp->cap_flags & OCRDMA_QP_FAST_REG)
1821  return flags;
1822 }
1823 
1824 static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
1825  struct ib_qp_init_attr *attrs,
1826  struct ocrdma_qp *qp)
1827 {
1828  int status;
1829  u32 len, hw_pages, hw_page_size;
1830  dma_addr_t pa;
1831  struct ocrdma_dev *dev = qp->dev;
1832  struct pci_dev *pdev = dev->nic_info.pdev;
1833  u32 max_wqe_allocated;
1834  u32 max_sges = attrs->cap.max_send_sge;
1835 
1836  max_wqe_allocated = attrs->cap.max_send_wr;
1837  /* need to allocate one extra WQE for the GEN1 family */
1838  if (dev->nic_info.dev_family != OCRDMA_GEN2_FAMILY)
1839  max_wqe_allocated += 1;
1840 
1841  status = ocrdma_build_q_conf(&max_wqe_allocated,
1842  dev->attr.wqe_size, &hw_pages, &hw_page_size);
1843  if (status) {
1844  ocrdma_err("%s() req. max_send_wr=0x%x\n", __func__,
1845  max_wqe_allocated);
1846  return -EINVAL;
1847  }
1848  qp->sq.max_cnt = max_wqe_allocated;
1849  len = (hw_pages * hw_page_size);
1850 
1851  qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
1852  if (!qp->sq.va)
1853  return -EINVAL;
1854  memset(qp->sq.va, 0, len);
1855  qp->sq.len = len;
1856  qp->sq.pa = pa;
1857  qp->sq.entry_size = dev->attr.wqe_size;
1858  ocrdma_build_q_pages(&cmd->wq_addr[0], hw_pages, pa, hw_page_size);
1859 
1860  cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)
1862  cmd->num_wq_rq_pages |= (hw_pages <<
1865  cmd->max_sge_send_write |= (max_sges <<
1868  cmd->max_sge_send_write |= (max_sges <<
1871  cmd->max_wqe_rqe |= (ilog2(qp->sq.max_cnt) <<
1874  cmd->wqe_rqe_size |= (dev->attr.wqe_size <<
1877  return 0;
1878 }
1879 
1880 static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
1881  struct ib_qp_init_attr *attrs,
1882  struct ocrdma_qp *qp)
1883 {
1884  int status;
1885  u32 len, hw_pages, hw_page_size;
1886  dma_addr_t pa = 0;
1887  struct ocrdma_dev *dev = qp->dev;
1888  struct pci_dev *pdev = dev->nic_info.pdev;
1889  u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1;
1890 
1891  status = ocrdma_build_q_conf(&max_rqe_allocated, dev->attr.rqe_size,
1892  &hw_pages, &hw_page_size);
1893  if (status) {
1894  ocrdma_err("%s() req. max_recv_wr=0x%x\n", __func__,
1895  attrs->cap.max_recv_wr + 1);
1896  return status;
1897  }
1898  qp->rq.max_cnt = max_rqe_allocated;
1899  len = (hw_pages * hw_page_size);
1900 
1901  qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
1902  if (!qp->rq.va)
1903  return status;
1904  memset(qp->rq.va, 0, len);
1905  qp->rq.pa = pa;
1906  qp->rq.len = len;
1907  qp->rq.entry_size = dev->attr.rqe_size;
1908 
1909  ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);
1910  cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
1912  cmd->num_wq_rq_pages |=
1915  cmd->max_sge_recv_flags |= (attrs->cap.max_recv_sge <<
1918  cmd->max_wqe_rqe |= (ilog2(qp->rq.max_cnt) <<
1921  cmd->wqe_rqe_size |= (dev->attr.rqe_size <<
1924  return 0;
1925 }
1926 
1927 static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd,
1928  struct ocrdma_pd *pd,
1929  struct ocrdma_qp *qp,
1930  u8 enable_dpp_cq, u16 dpp_cq_id)
1931 {
1932  pd->num_dpp_qp--;
1933  qp->dpp_enabled = true;
1935  if (!enable_dpp_cq)
1936  return;
1938  cmd->dpp_credits_cqid = dpp_cq_id;
1941 }
1942 
1943 static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
1944  struct ocrdma_qp *qp)
1945 {
1946  struct ocrdma_dev *dev = qp->dev;
1947  struct pci_dev *pdev = dev->nic_info.pdev;
1948  dma_addr_t pa = 0;
1949  int ird_page_size = dev->attr.ird_page_size;
1950  int ird_q_len = dev->attr.num_ird_pages * ird_page_size;
1951 
1952  if (dev->attr.ird == 0)
1953  return 0;
1954 
1955  qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len,
1956  &pa, GFP_KERNEL);
1957  if (!qp->ird_q_va)
1958  return -ENOMEM;
1959  memset(qp->ird_q_va, 0, ird_q_len);
1960  ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
1961  pa, ird_page_size);
1962  return 0;
1963 }
1964 
1965 static void ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp,
1966  struct ocrdma_qp *qp,
1967  struct ib_qp_init_attr *attrs,
1968  u16 *dpp_offset, u16 *dpp_credit_lmt)
1969 {
1970  u32 max_wqe_allocated, max_rqe_allocated;
1972  qp->rq.dbid = rsp->sq_rq_id & OCRDMA_CREATE_QP_RSP_RQ_ID_MASK;
1973  qp->sq.dbid = rsp->sq_rq_id >> OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT;
1976  qp->dpp_enabled = false;
1978  qp->dpp_enabled = true;
1979  *dpp_credit_lmt = (rsp->dpp_response &
1982  *dpp_offset = (rsp->dpp_response &
1985  }
1986  max_wqe_allocated =
1988  max_wqe_allocated = 1 << max_wqe_allocated;
1989  max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe);
1990 
1991  qp->sq.max_cnt = max_wqe_allocated;
1992  qp->sq.max_wqe_idx = max_wqe_allocated - 1;
1993 
1994  if (!attrs->srq) {
1995  qp->rq.max_cnt = max_rqe_allocated;
1996  qp->rq.max_wqe_idx = max_rqe_allocated - 1;
1997  }
1998 }
1999 
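/* Create a QP via mailbox: build the SQ and either an RQ or a reference to
 * the caller's SRQ, allocate the IRD queue when the device reports ird
 * support, optionally enable DPP when the inline data fits the device
 * limit, then parse the response for doorbell ids, ring sizes and DPP
 * credits.
 */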
2000 int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
2001  u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset,
2002  u16 *dpp_credit_lmt)
2003 {
2004  int status = -ENOMEM;
2005  u32 flags = 0;
2006  struct ocrdma_dev *dev = qp->dev;
2007  struct ocrdma_pd *pd = qp->pd;
2008  struct pci_dev *pdev = dev->nic_info.pdev;
2009  struct ocrdma_cq *cq;
2010  struct ocrdma_create_qp_req *cmd;
2011  struct ocrdma_create_qp_rsp *rsp;
2012  int qptype;
2013 
2014  switch (attrs->qp_type) {
2015  case IB_QPT_GSI:
2016  qptype = OCRDMA_QPT_GSI;
2017  break;
2018  case IB_QPT_RC:
2019  qptype = OCRDMA_QPT_RC;
2020  break;
2021  case IB_QPT_UD:
2022  qptype = OCRDMA_QPT_UD;
2023  break;
2024  default:
2025  return -EINVAL;
2026  };
2027 
2028  cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd));
2029  if (!cmd)
2030  return status;
2031  cmd->type_pgsz_pdn |= (qptype << OCRDMA_CREATE_QP_REQ_QPT_SHIFT) &
2033  status = ocrdma_set_create_qp_sq_cmd(cmd, attrs, qp);
2034  if (status)
2035  goto sq_err;
2036 
2037  if (attrs->srq) {
2038  struct ocrdma_srq *srq = get_ocrdma_srq(attrs->srq);
2040  cmd->rq_addr[0].lo = srq->id;
2041  qp->srq = srq;
2042  } else {
2043  status = ocrdma_set_create_qp_rq_cmd(cmd, attrs, qp);
2044  if (status)
2045  goto rq_err;
2046  }
2047 
2048  status = ocrdma_set_create_qp_ird_cmd(cmd, qp);
2049  if (status)
2050  goto mbx_err;
2051 
2054 
2055  flags = ocrdma_set_create_qp_mbx_access_flags(qp);
2056 
2057  cmd->max_sge_recv_flags |= flags;
2058  cmd->max_ord_ird |= (dev->attr.max_ord_per_qp <<
2061  cmd->max_ord_ird |= (dev->attr.max_ird_per_qp <<
2064  cq = get_ocrdma_cq(attrs->send_cq);
2067  qp->sq_cq = cq;
2068  cq = get_ocrdma_cq(attrs->recv_cq);
2071  qp->rq_cq = cq;
2072 
2073  if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp &&
2074  (attrs->cap.max_inline_data <= dev->attr.max_inline_data))
2075  ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq,
2076  dpp_cq_id);
2077 
2078  status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2079  if (status)
2080  goto mbx_err;
2081  rsp = (struct ocrdma_create_qp_rsp *)cmd;
2082  ocrdma_get_create_qp_rsp(rsp, qp, attrs, dpp_offset, dpp_credit_lmt);
2083  qp->state = OCRDMA_QPS_RST;
2084  kfree(cmd);
2085  return 0;
2086 mbx_err:
2087  if (qp->rq.va)
2088  dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
2089 rq_err:
2090  ocrdma_err("%s(%d) rq_err\n", __func__, dev->id);
2091  dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
2092 sq_err:
2093  ocrdma_err("%s(%d) sq_err\n", __func__, dev->id);
2094  kfree(cmd);
2095  return status;
2096 }
2097 
2098 int ocrdma_mbx_query_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
2099  struct ocrdma_qp_params *param)
2100 {
2101  int status = -ENOMEM;
2102  struct ocrdma_query_qp *cmd;
2103  struct ocrdma_query_qp_rsp *rsp;
2104 
2105  cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*cmd));
2106  if (!cmd)
2107  return status;
2108  cmd->qp_id = qp->id;
2109  status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2110  if (status)
2111  goto mbx_err;
2112  rsp = (struct ocrdma_query_qp_rsp *)cmd;
2113  memcpy(param, &rsp->params, sizeof(struct ocrdma_qp_params));
2114 mbx_err:
2115  kfree(cmd);
2116  return status;
2117 }
2118 
2119 int ocrdma_resolve_dgid(struct ocrdma_dev *dev, union ib_gid *dgid,
2120  u8 *mac_addr)
2121 {
2122  struct in6_addr in6;
2123 
2124  memcpy(&in6, dgid, sizeof in6);
2125  if (rdma_is_multicast_addr(&in6))
2126  rdma_get_mcast_mac(&in6, mac_addr);
2127  else if (rdma_link_local_addr(&in6))
2128  rdma_get_ll_mac(&in6, mac_addr);
2129  else {
2130  ocrdma_err("%s() fail to resolve mac_addr.\n", __func__);
2131  return -EINVAL;
2132  }
2133  return 0;
2134 }
2135 
2136 static void ocrdma_set_av_params(struct ocrdma_qp *qp,
2137  struct ocrdma_modify_qp *cmd,
2138  struct ib_qp_attr *attrs)
2139 {
2140  struct ib_ah_attr *ah_attr = &attrs->ah_attr;
2141  union ib_gid sgid;
2142  u32 vlan_id;
2143  u8 mac_addr[6];
2144  if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
2145  return;
2146  cmd->params.tclass_sq_psn |=
2147  (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
2148  cmd->params.rnt_rc_sl_fl |=
2149  (ah_attr->grh.flow_label & OCRDMA_QP_PARAMS_FLOW_LABEL_MASK);
2150  cmd->params.hop_lmt_rq_psn |=
2151  (ah_attr->grh.hop_limit << OCRDMA_QP_PARAMS_HOP_LMT_SHIFT);
2152  cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
2153  memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
2154  sizeof(cmd->params.dgid));
2155  ocrdma_query_gid(&qp->dev->ibdev, 1,
2156  ah_attr->grh.sgid_index, &sgid);
2157  qp->sgid_idx = ah_attr->grh.sgid_index;
2158  memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
2159  ocrdma_resolve_dgid(qp->dev, &ah_attr->grh.dgid, &mac_addr[0]);
2160  cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
2161  (mac_addr[2] << 16) | (mac_addr[3] << 24);
2162  /* convert them to LE format. */
2163  ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid));
2164  ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
2165  cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
2166  vlan_id = rdma_get_vlan_id(&sgid);
2167  if (vlan_id && (vlan_id < 0x1000)) {
2168  cmd->params.vlan_dmac_b4_to_b5 |=
2169  vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
2170  cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
2171  }
2172 }
2173 
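/* Translate the ib_qp_attr fields selected by attr_mask into MODIFY_QP
 * command parameters, setting the matching "valid" flag for each one.
 */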
2174 static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
2175  struct ocrdma_modify_qp *cmd,
2176  struct ib_qp_attr *attrs, int attr_mask,
2177  enum ib_qp_state old_qps)
2178 {
2179  int status = 0;
2180  struct net_device *netdev = qp->dev->nic_info.netdev;
2181  int eth_mtu = iboe_get_mtu(netdev->mtu);
2182 
2183  if (attr_mask & IB_QP_PKEY_INDEX) {
2184  cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
2185  OCRDMA_QP_PARAMS_PKEY_INDEX_MASK);
2186  cmd->flags |= OCRDMA_QP_PARA_PKEY_VALID;
2187  }
2188  if (attr_mask & IB_QP_QKEY) {
2189  qp->qkey = attrs->qkey;
2190  cmd->params.qkey = attrs->qkey;
2191  cmd->flags |= OCRDMA_QP_PARA_QKEY_VALID;
2192  }
2193  if (attr_mask & IB_QP_AV)
2194  ocrdma_set_av_params(qp, cmd, attrs);
2195  else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
2196  /* set the default mac address for UD, GSI QPs */
2197  cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] |
2198  (qp->dev->nic_info.mac_addr[1] << 8) |
2199  (qp->dev->nic_info.mac_addr[2] << 16) |
2200  (qp->dev->nic_info.mac_addr[3] << 24);
2201  cmd->params.vlan_dmac_b4_to_b5 = qp->dev->nic_info.mac_addr[4] |
2202  (qp->dev->nic_info.mac_addr[5] << 8);
2203  }
2204  if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) &&
2205  attrs->en_sqd_async_notify) {
2206  cmd->params.max_sge_recv_flags |=
2207  OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC;
2208  cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
2209  }
2210  if (attr_mask & IB_QP_DEST_QPN) {
2211  cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->dest_qp_num &
2212  OCRDMA_QP_PARAMS_DEST_QPN_MASK);
2213  cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
2214  }
2215  if (attr_mask & IB_QP_PATH_MTU) {
2216  if (ib_mtu_enum_to_int(eth_mtu) <
2217  ib_mtu_enum_to_int(attrs->path_mtu)) {
2218  status = -EINVAL;
2219  goto pmtu_err;
2220  }
2221  cmd->params.path_mtu_pkey_indx |=
2222  (ib_mtu_enum_to_int(attrs->path_mtu) <<
2223  OCRDMA_QP_PARAMS_PATH_MTU_SHIFT) &
2224  OCRDMA_QP_PARAMS_PATH_MTU_MASK;
2225  cmd->flags |= OCRDMA_QP_PARA_PMTU_VALID;
2226  }
2227  if (attr_mask & IB_QP_TIMEOUT) {
2228  cmd->params.ack_to_rnr_rtc_dest_qpn |= attrs->timeout <<
2229  OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
2230  cmd->flags |= OCRDMA_QP_PARA_ACK_TO_VALID;
2231  }
2232  if (attr_mask & IB_QP_RETRY_CNT) {
2233  cmd->params.rnt_rc_sl_fl |= (attrs->retry_cnt <<
2234  OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT) &
2235  OCRDMA_QP_PARAMS_RETRY_CNT_MASK;
2236  cmd->flags |= OCRDMA_QP_PARA_RETRY_CNT_VALID;
2237  }
2238  if (attr_mask & IB_QP_MIN_RNR_TIMER) {
2239  cmd->params.rnt_rc_sl_fl |= (attrs->min_rnr_timer <<
2240  OCRDMA_QP_PARAMS_RNR_NAK_TIMER_SHIFT) &
2241  OCRDMA_QP_PARAMS_RNR_NAK_TIMER_MASK;
2242  cmd->flags |= OCRDMA_QP_PARA_RNT_VALID;
2243  }
2244  if (attr_mask & IB_QP_RNR_RETRY) {
2245  cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->rnr_retry <<
2246  OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT)
2247  & OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK;
2248  cmd->flags |= OCRDMA_QP_PARA_RRC_VALID;
2249  }
2250  if (attr_mask & IB_QP_SQ_PSN) {
2251  cmd->params.tclass_sq_psn |= (attrs->sq_psn & 0x00ffffff);
2252  cmd->flags |= OCRDMA_QP_PARA_SQPSN_VALID;
2253  }
2254  if (attr_mask & IB_QP_RQ_PSN) {
2255  cmd->params.hop_lmt_rq_psn |= (attrs->rq_psn & 0x00ffffff);
2256  cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID;
2257  }
2258  if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
2259  if (attrs->max_rd_atomic > qp->dev->attr.max_ord_per_qp) {
2260  status = -EINVAL;
2261  goto pmtu_err;
2262  }
2263  qp->max_ord = attrs->max_rd_atomic;
2264  cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID;
2265  }
2266  if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
2267  if (attrs->max_dest_rd_atomic > qp->dev->attr.max_ird_per_qp) {
2268  status = -EINVAL;
2269  goto pmtu_err;
2270  }
2271  qp->max_ird = attrs->max_dest_rd_atomic;
2272  cmd->flags |= OCRDMA_QP_PARA_MAX_IRD_VALID;
2273  }
2274  cmd->params.max_ord_ird = (qp->max_ord <<
2275  OCRDMA_QP_PARAMS_MAX_ORD_SHIFT) |
2276  (qp->max_ird);
2277 pmtu_err:
2278  return status;
2279 }
2280 
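/* Build and issue a MODIFY_QP mailbox command for the requested attribute
 * mask; when IB_QP_STATE is not set, the QP's current state is carried over.
 */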
2281 int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
2282  struct ib_qp_attr *attrs, int attr_mask,
2283  enum ib_qp_state old_qps)
2284 {
2285  int status = -ENOMEM;
2286  struct ocrdma_modify_qp *cmd;
2287 
2288  cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_QP, sizeof(*cmd));
2289  if (!cmd)
2290  return status;
2291 
2292  cmd->params.id = qp->id;
2293  cmd->flags = 0;
2294  if (attr_mask & IB_QP_STATE) {
2295  cmd->params.max_sge_recv_flags |=
2296  (get_ocrdma_qp_state(attrs->qp_state) <<
2297  OCRDMA_QP_PARAMS_STATE_SHIFT) &
2298  OCRDMA_QP_PARAMS_STATE_MASK;
2299  cmd->flags |= OCRDMA_QP_PARA_QPS_VALID;
2300  } else
2301  cmd->params.max_sge_recv_flags |=
2302  (qp->state << OCRDMA_QP_PARAMS_STATE_SHIFT) &
2303  OCRDMA_QP_PARAMS_STATE_MASK;
2304  status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask, old_qps);
2305  if (status)
2306  goto mbx_err;
2307  status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2308  if (status)
2309  goto mbx_err;
2310 
2311 mbx_err:
2312  kfree(cmd);
2313  return status;
2314 }
2315 
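/* Issue a DELETE_QP mailbox command and release the SQ/RQ DMA buffers;
 * the RQ buffer is skipped when the QP is attached to an SRQ.
 */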
2316 int ocrdma_mbx_destroy_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
2317 {
2318  int status = -ENOMEM;
2319  struct ocrdma_destroy_qp *cmd;
2320  struct pci_dev *pdev = dev->nic_info.pdev;
2321 
2322  cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_QP, sizeof(*cmd));
2323  if (!cmd)
2324  return status;
2325  cmd->qp_id = qp->id;
2326  status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2327  if (status)
2328  goto mbx_err;
2329 
2330 mbx_err:
2331  kfree(cmd);
2332  if (qp->sq.va)
2333  dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
2334  if (!qp->srq && qp->rq.va)
2335  dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
2336  if (qp->dpp_enabled)
2337  qp->pd->num_dpp_qp++;
2338  return status;
2339 }
2340 
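/* Allocate the SRQ receive-queue DMA buffer, issue a CREATE_SRQ mailbox
 * command and record the limits the firmware actually granted.
 */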
2341 int ocrdma_mbx_create_srq(struct ocrdma_srq *srq,
2342  struct ib_srq_init_attr *srq_attr,
2343  struct ocrdma_pd *pd)
2344 {
2345  int status = -ENOMEM;
2346  int hw_pages, hw_page_size;
2347  int len;
2348  struct ocrdma_create_srq_rsp *rsp;
2349  struct ocrdma_create_srq *cmd;
2350  dma_addr_t pa;
2351  struct ocrdma_dev *dev = srq->dev;
2352  struct pci_dev *pdev = dev->nic_info.pdev;
2353  u32 max_rqe_allocated;
2354 
2355  cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd));
2356  if (!cmd)
2357  return status;
2358 
2359  cmd->pgsz_pdid = pd->id & OCRDMA_CREATE_SRQ_PD_ID_MASK;
2360  max_rqe_allocated = srq_attr->attr.max_wr + 1;
2361  status = ocrdma_build_q_conf(&max_rqe_allocated,
2362  dev->attr.rqe_size,
2363  &hw_pages, &hw_page_size);
2364  if (status) {
2365  ocrdma_err("%s() req. max_wr=0x%x\n", __func__,
2366  srq_attr->attr.max_wr);
2367  status = -EINVAL;
2368  goto ret;
2369  }
2370  len = hw_pages * hw_page_size;
2371  srq->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
2372  if (!srq->rq.va) {
2373  status = -ENOMEM;
2374  goto ret;
2375  }
2376  ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);
2377 
2378  srq->rq.entry_size = dev->attr.rqe_size;
2379  srq->rq.pa = pa;
2380  srq->rq.len = len;
2381  srq->rq.max_cnt = max_rqe_allocated;
2382 
2383  cmd->max_sge_rqe = ilog2(max_rqe_allocated);
2384  cmd->max_sge_rqe |= srq_attr->attr.max_sge <<
2385  OCRDMA_CREATE_SRQ_MAX_SGE_RECV_SHIFT;
2386 
2387  cmd->pgsz_pdid |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)
2388  << OCRDMA_CREATE_SRQ_PG_SZ_SHIFT);
2389  cmd->pages_rqe_sz |= (dev->attr.rqe_size
2390  << OCRDMA_CREATE_SRQ_RQE_SIZE_SHIFT)
2391  & OCRDMA_CREATE_SRQ_RQE_SIZE_MASK;
2392  cmd->pages_rqe_sz |= hw_pages << OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_SHIFT;
2393 
2394  status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
2395  if (status)
2396  goto mbx_err;
2397  rsp = (struct ocrdma_create_srq_rsp *)cmd;
2398  srq->id = rsp->id;
2399  srq->rq.dbid = rsp->id;
2400  max_rqe_allocated = ((rsp->max_sge_rqe_allocated &
2401  OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_MASK) >>
2402  OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_SHIFT);
2403  max_rqe_allocated = (1 << max_rqe_allocated);
2404  srq->rq.max_cnt = max_rqe_allocated;
2405  srq->rq.max_wqe_idx = max_rqe_allocated - 1;
2406  srq->rq.max_sges = (rsp->max_sge_rqe_allocated &
2407  OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_MASK) >>
2408  OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_SHIFT;
2409  goto ret;
2410 mbx_err:
2411  dma_free_coherent(&pdev->dev, srq->rq.len, srq->rq.va, pa);
2412 ret:
2413  kfree(cmd);
2414  return status;
2415 }
2416 
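/* Program a new SRQ limit via an SRQ-modify mailbox command. */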
2417 int ocrdma_mbx_modify_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
2418 {
2419  int status = -ENOMEM;
2420  struct ocrdma_modify_srq *cmd;
2421  cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_SRQ, sizeof(*cmd));
2422  if (!cmd)
2423  return status;
2424  cmd->id = srq->id;
2425  cmd->limit_max_rqe |= srq_attr->srq_limit <<
2426  OCRDMA_MODIFY_SRQ_LIMIT_SHIFT;
2427  status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd);
2428  kfree(cmd);
2429  return status;
2430 }
2431 
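/* Query the SRQ and report max_sge, max_wr and the current srq_limit. */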
2432 int ocrdma_mbx_query_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
2433 {
2434  int status = -ENOMEM;
2435  struct ocrdma_query_srq *cmd;
2436  cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_SRQ, sizeof(*cmd));
2437  if (!cmd)
2438  return status;
2439  cmd->id = srq->rq.dbid;
2440  status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd);
2441  if (status == 0) {
2442  struct ocrdma_query_srq_rsp *rsp =
2443  (struct ocrdma_query_srq_rsp *)cmd;
2444  srq_attr->max_sge =
2445  rsp->srq_lmt_max_sge &
2446  OCRDMA_QUERY_SRQ_RSP_MAX_SGE_RECV_MASK;
2447  srq_attr->max_wr =
2448  rsp->max_rqe_pdid >> OCRDMA_QUERY_SRQ_RSP_MAX_RQE_SHIFT;
2449  srq_attr->srq_limit = rsp->srq_lmt_max_sge >>
2450  OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_SHIFT;
2451  }
2452  kfree(cmd);
2453  return status;
2454 }
2455 
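/* Issue a DELETE_SRQ mailbox command and free the receive-queue buffer. */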
2456 int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
2457 {
2458  int status = -ENOMEM;
2459  struct ocrdma_destroy_srq *cmd;
2460  struct pci_dev *pdev = dev->nic_info.pdev;
2461  cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_SRQ, sizeof(*cmd));
2462  if (!cmd)
2463  return status;
2464  cmd->id = srq->id;
2465  status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd);
2466  if (srq->rq.va)
2467  dma_free_coherent(&pdev->dev, srq->rq.len,
2468  srq->rq.va, srq->rq.pa);
2469  kfree(cmd);
2470  return status;
2471 }
2472 
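/* Claim the first free entry in the device's address-handle (AV) table;
 * returns -EAGAIN when the table is full.
 */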
2473 int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
2474 {
2475  int i;
2476  int status = -EINVAL;
2477  struct ocrdma_av *av;
2478  unsigned long flags;
2479 
2480  av = dev->av_tbl.va;
2481  spin_lock_irqsave(&dev->av_tbl.lock, flags);
2482  for (i = 0; i < dev->av_tbl.num_ah; i++) {
2483  if (av->valid == 0) {
2484  av->valid = OCRDMA_AV_VALID;
2485  ah->av = av;
2486  ah->id = i;
2487  status = 0;
2488  break;
2489  }
2490  av++;
2491  }
2492  if (i == dev->av_tbl.num_ah)
2493  status = -EAGAIN;
2494  spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
2495  return status;
2496 }
2497 
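/* Mark the AV table entry backing this address handle as free. */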
2498 int ocrdma_free_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
2499 {
2500  unsigned long flags;
2501  spin_lock_irqsave(&dev->av_tbl.lock, flags);
2502  ah->av->valid = 0;
2503  spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
2504  return 0;
2505 }
2506 
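/* Create the control-path (mailbox) event queue and hook up its IRQ. */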
2507 static int ocrdma_create_mq_eq(struct ocrdma_dev *dev)
2508 {
2509  int status;
2510  int irq;
2511  unsigned long flags = 0;
2512  int num_eq = 0;
2513 
2514  if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
2515  flags = IRQF_SHARED;
2516  else {
2517  num_eq = dev->nic_info.msix.num_vectors -
2518  dev->nic_info.msix.start_vector;
2519  /* minimum two vectors/eq are required for rdma to work.
2520  * one for control path and one for data path.
2521  */
2522  if (num_eq < 2)
2523  return -EBUSY;
2524  }
2525 
2526  status = ocrdma_create_eq(dev, &dev->meq, OCRDMA_EQ_LEN);
2527  if (status)
2528  return status;
2529  sprintf(dev->meq.irq_name, "ocrdma_mq%d", dev->id);
2530  irq = ocrdma_get_irq(dev, &dev->meq);
2531  status = request_irq(irq, ocrdma_irq_handler, flags, dev->meq.irq_name,
2532  &dev->meq);
2533  if (status)
2534  _ocrdma_destroy_eq(dev, &dev->meq);
2535  return status;
2536 }
2537 
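/* Create one data-path event queue per available MSI-X vector (a single
 * shared EQ in INTx mode) and request an IRQ for each.
 */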
2538 static int ocrdma_create_qp_eqs(struct ocrdma_dev *dev)
2539 {
2540  int num_eq, i, status = 0;
2541  int irq;
2542  unsigned long flags = 0;
2543 
2544  num_eq = dev->nic_info.msix.num_vectors -
2545  dev->nic_info.msix.start_vector;
2546  if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) {
2547  num_eq = 1;
2548  flags = IRQF_SHARED;
2549  } else
2550  num_eq = min_t(u32, num_eq, num_online_cpus());
2551  dev->qp_eq_tbl = kzalloc(sizeof(struct ocrdma_eq) * num_eq, GFP_KERNEL);
2552  if (!dev->qp_eq_tbl)
2553  return -ENOMEM;
2554 
2555  for (i = 0; i < num_eq; i++) {
2556  status = ocrdma_create_eq(dev, &dev->qp_eq_tbl[i],
2557  OCRDMA_EQ_LEN);
2558  if (status) {
2559  status = -EINVAL;
2560  break;
2561  }
2562  sprintf(dev->qp_eq_tbl[i].irq_name, "ocrdma_qp%d-%d",
2563  dev->id, i);
2564  irq = ocrdma_get_irq(dev, &dev->qp_eq_tbl[i]);
2565  status = request_irq(irq, ocrdma_irq_handler, flags,
2566  dev->qp_eq_tbl[i].irq_name,
2567  &dev->qp_eq_tbl[i]);
2568  if (status) {
2569  _ocrdma_destroy_eq(dev, &dev->qp_eq_tbl[i]);
2570  status = -EINVAL;
2571  break;
2572  }
2573  dev->eq_cnt += 1;
2574  }
2575  /* one eq is sufficient for data path to work */
2576  if (dev->eq_cnt >= 1)
2577  return 0;
2578  if (status)
2579  ocrdma_destroy_qp_eqs(dev);
2580  return status;
2581 }
2582 
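/* Bring-up sequence: control-path EQ, data-path EQs, mailbox queue, then
 * firmware/device queries and the address-handle table.
 */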
2583 int ocrdma_init_hw(struct ocrdma_dev *dev)
2584 {
2585  int status;
2586  /* set up control path eq */
2587  status = ocrdma_create_mq_eq(dev);
2588  if (status)
2589  return status;
2590  /* set up data path eq */
2591  status = ocrdma_create_qp_eqs(dev);
2592  if (status)
2593  goto qpeq_err;
2594  status = ocrdma_create_mq(dev);
2595  if (status)
2596  goto mq_err;
2597  status = ocrdma_mbx_query_fw_config(dev);
2598  if (status)
2599  goto conf_err;
2600  status = ocrdma_mbx_query_dev(dev);
2601  if (status)
2602  goto conf_err;
2603  status = ocrdma_mbx_query_fw_ver(dev);
2604  if (status)
2605  goto conf_err;
2606  status = ocrdma_mbx_create_ah_tbl(dev);
2607  if (status)
2608  goto conf_err;
2609  return 0;
2610 
2611 conf_err:
2612  ocrdma_destroy_mq(dev);
2613 mq_err:
2614  ocrdma_destroy_qp_eqs(dev);
2615 qpeq_err:
2616  ocrdma_destroy_eq(dev, &dev->meq);
2617  ocrdma_err("%s() status=%d\n", __func__, status);
2618  return status;
2619 }
2620 
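/* Undo ocrdma_init_hw(): drop the AH table, the data-path EQs, the mailbox
 * queue and finally the control-path EQ.
 */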
2621 void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
2622 {
2623  ocrdma_mbx_delete_ah_tbl(dev);
2624 
2625  /* cleanup the data path eqs */
2626  ocrdma_destroy_qp_eqs(dev);
2627 
2628  /* cleanup the control path */
2629  ocrdma_destroy_mq(dev);
2630  ocrdma_destroy_eq(dev, &dev->meq);
2631 }