Linux Kernel 3.7.1
ipath_qp.c
/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "ipath_verbs.h"
#include "ipath_kernel.h"

#define BITS_PER_PAGE		(PAGE_SIZE*BITS_PER_BYTE)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)
#define mk_qpn(qpt, map, off)	(((map) - (qpt)->map) * BITS_PER_PAGE + \
				 (off))
#define find_next_offset(map, off) find_next_zero_bit((map)->page, \
						       BITS_PER_PAGE, off)

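/*
 * Illustrative note (not from the original source): with 4 KB pages,
 * BITS_PER_PAGE is 4096 * 8 = 32768, so each qpn_map page tracks 32768
 * QPNs.  mk_qpn() inverts the (map, offset) decomposition: QPN 70000
 * lives in qpt->map[2] at bit offset 70000 - 2 * 32768 = 4464, and
 * mk_qpn(qpt, &qpt->map[2], 4464) recovers 70000.
 */
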
/*
 * Convert the AETH credit code into the number of credits.
 */
static u32 credit_table[31] = {
	0,			/* 0 */
	1,			/* 1 */
	2,			/* 2 */
	3,			/* 3 */
	4,			/* 4 */
	6,			/* 5 */
	8,			/* 6 */
	12,			/* 7 */
	16,			/* 8 */
	24,			/* 9 */
	32,			/* A */
	48,			/* B */
	64,			/* C */
	96,			/* D */
	128,			/* E */
	192,			/* F */
	256,			/* 10 */
	384,			/* 11 */
	512,			/* 12 */
	768,			/* 13 */
	1024,			/* 14 */
	1536,			/* 15 */
	2048,			/* 16 */
	3072,			/* 17 */
	4096,			/* 18 */
	6144,			/* 19 */
	8192,			/* 1A */
	12288,			/* 1B */
	16384,			/* 1C */
	24576,			/* 1D */
	32768			/* 1E */
};
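
/*
 * Illustrative note (not from the original source): the table above
 * interleaves powers of two with 1.5x multiples (4, 6, 8, 12, ...), so
 * each five-bit AETH credit code covers roughly a 1.5x range; code 0xD
 * grants 96 credits and code 0x14 grants 1024.  ipath_compute_aeth()
 * below picks the largest entry that does not exceed the actual number
 * of available RWQEs.
 */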

static void get_map_page(struct ipath_qp_table *qpt, struct qpn_map *map)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);
	unsigned long flags;

	/*
	 * Free the page if someone raced with us installing it.
	 */

	spin_lock_irqsave(&qpt->lock, flags);
	if (map->page)
		free_page(page);
	else
		map->page = (void *)page;
	spin_unlock_irqrestore(&qpt->lock, flags);
}
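
/*
 * Illustrative note (not from the original source): get_map_page() uses
 * the classic "allocate outside the lock, install under the lock"
 * pattern.  Two CPUs may both see map->page == NULL and both call
 * get_zeroed_page(); only the first to take qpt->lock installs its page
 * and the loser frees the duplicate.  This keeps the GFP_KERNEL
 * allocation, which may sleep, out of the spinlock.
 */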

static int alloc_qpn(struct ipath_qp_table *qpt, enum ib_qp_type type)
{
	u32 i, offset, max_scan, qpn;
	struct qpn_map *map;
	u32 ret = -1;

	if (type == IB_QPT_SMI)
		ret = 0;
	else if (type == IB_QPT_GSI)
		ret = 1;

	if (ret != -1) {
		map = &qpt->map[0];
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page)) {
				ret = -ENOMEM;
				goto bail;
			}
		}
		if (!test_and_set_bit(ret, map->page))
			atomic_dec(&map->n_free);
		else
			ret = -EBUSY;
		goto bail;
	}

	qpn = qpt->last + 1;
	if (qpn >= QPN_MAX)
		qpn = 2;
	offset = qpn & BITS_PER_PAGE_MASK;
	map = &qpt->map[qpn / BITS_PER_PAGE];
	max_scan = qpt->nmaps - !offset;
	for (i = 0;;) {
		if (unlikely(!map->page)) {
			get_map_page(qpt, map);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->n_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->n_free);
					qpt->last = qpn;
					ret = qpn;
					goto bail;
				}
				offset = find_next_offset(map, offset);
				qpn = mk_qpn(qpt, map, offset);
				/*
				 * This test differs from alloc_pidmap().
				 * If find_next_offset() does find a zero
				 * bit, we don't need to check for QPN
				 * wrapping around past our starting QPN.
				 * We just need to be sure we don't loop
				 * forever.
				 */
			} while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
		}
		/*
		 * In order to keep the number of pages allocated to a
		 * minimum, we scan all the existing pages before increasing
		 * the size of the bitmap table.
		 */
		if (++i > max_scan) {
			if (qpt->nmaps == QPNMAP_ENTRIES)
				break;
			map = &qpt->map[qpt->nmaps++];
			offset = 0;
		} else if (map < &qpt->map[qpt->nmaps]) {
			++map;
			offset = 0;
		} else {
			map = &qpt->map[0];
			offset = 2;
		}
		qpn = mk_qpn(qpt, map, offset);
	}

	ret = -ENOMEM;

bail:
	return ret;
}
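
/*
 * Illustrative note (not from the original source): QPN 0 is reserved
 * for the SMI QP and QPN 1 for the GSI QP, both in map[0]; ordinary QPs
 * are numbered from 2 upward, which is why the scan above restarts at
 * QPN 2 (offset 2 in map[0]) rather than 0 when it wraps.
 */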

static void free_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
	struct qpn_map *map;

	map = qpt->map + qpn / BITS_PER_PAGE;
	if (map->page)
		clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
	atomic_inc(&map->n_free);
}

/**
 * ipath_alloc_qpn - allocate a QP number
 * @qpt: the QP table
 * @qp: the QP
 * @type: the QP type (IB_QPT_SMI and IB_QPT_GSI are special)
 *
 * Allocate the next available QPN and put the QP into the hash table.
 * The hash table holds a reference to the QP.
 */
static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
			   enum ib_qp_type type)
{
	unsigned long flags;
	int ret;

	ret = alloc_qpn(qpt, type);
	if (ret < 0)
		goto bail;
	qp->ibqp.qp_num = ret;

	/* Add the QP to the hash table. */
	spin_lock_irqsave(&qpt->lock, flags);

	ret %= qpt->max;
	qp->next = qpt->table[ret];
	qpt->table[ret] = qp;
	atomic_inc(&qp->refcount);

	spin_unlock_irqrestore(&qpt->lock, flags);
	ret = 0;

bail:
	return ret;
}
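
/*
 * Illustrative note (not from the original source): the QP table is a
 * simple chained hash indexed by qp_num % qpt->max, so a lookup walks
 * the qp->next chain of a single bucket.  The table holds its own
 * reference (the atomic_inc() above), dropped by ipath_free_qp().
 */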

/**
 * ipath_free_qp - remove a QP from the QP table
 * @qpt: the QP table
 * @qp: the QP to remove
 *
 * Remove the QP from the table so it can't be found asynchronously by
 * the receive interrupt routine.
 */
static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
{
	struct ipath_qp *q, **qpp;
	unsigned long flags;

	spin_lock_irqsave(&qpt->lock, flags);

	/* Remove QP from the hash table. */
	qpp = &qpt->table[qp->ibqp.qp_num % qpt->max];
	for (; (q = *qpp) != NULL; qpp = &q->next) {
		if (q == qp) {
			*qpp = qp->next;
			qp->next = NULL;
			atomic_dec(&qp->refcount);
			break;
		}
	}

	spin_unlock_irqrestore(&qpt->lock, flags);
}

/**
 * ipath_free_all_qps - check for QPs still in use
 * @qpt: the QP table to empty
 *
 * There should not be any QPs still in use.
 * Free memory for table.
 */
unsigned ipath_free_all_qps(struct ipath_qp_table *qpt)
{
	unsigned long flags;
	struct ipath_qp *qp;
	u32 n, qp_inuse = 0;

	spin_lock_irqsave(&qpt->lock, flags);
	for (n = 0; n < qpt->max; n++) {
		qp = qpt->table[n];
		qpt->table[n] = NULL;

		for (; qp; qp = qp->next)
			qp_inuse++;
	}
	spin_unlock_irqrestore(&qpt->lock, flags);

	for (n = 0; n < ARRAY_SIZE(qpt->map); n++)
		if (qpt->map[n].page)
			free_page((unsigned long) qpt->map[n].page);
	return qp_inuse;
}

/**
 * ipath_lookup_qpn - return the QP with the given QPN
 * @qpt: the QP table
 * @qpn: the QP number to look up
 *
 * The caller is responsible for decrementing the QP reference count
 * when done.
 */
struct ipath_qp *ipath_lookup_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
	unsigned long flags;
	struct ipath_qp *qp;

	spin_lock_irqsave(&qpt->lock, flags);

	for (qp = qpt->table[qpn % qpt->max]; qp; qp = qp->next) {
		if (qp->ibqp.qp_num == qpn) {
			atomic_inc(&qp->refcount);
			break;
		}
	}

	spin_unlock_irqrestore(&qpt->lock, flags);
	return qp;
}
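
/*
 * A minimal usage sketch (not part of the driver): callers of
 * ipath_lookup_qpn() must drop the reference when done.  The helper
 * below is hypothetical and only illustrates the expected pattern.
 */
static inline void example_use_qpn(struct ipath_qp_table *qpt, u32 qpn)
{
	struct ipath_qp *qp = ipath_lookup_qpn(qpt, qpn);	/* takes a ref */

	if (!qp)
		return;
	/* ... use qp while the reference is held ... */
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);	/* lets ipath_destroy_qp() proceed */
}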

/**
 * ipath_reset_qp - initialize the QP state to the reset state
 * @qp: the QP to reset
 * @type: the QP type
 */
static void ipath_reset_qp(struct ipath_qp *qp, enum ib_qp_type type)
{
	qp->remote_qpn = 0;
	qp->qkey = 0;
	qp->qp_access_flags = 0;
	atomic_set(&qp->s_dma_busy, 0);
	qp->s_flags &= IPATH_S_SIGNAL_REQ_WR;
	qp->s_hdrwords = 0;
	qp->s_wqe = NULL;
	qp->s_pkt_delay = 0;
	qp->s_draining = 0;
	qp->s_psn = 0;
	qp->r_psn = 0;
	qp->r_msn = 0;
	if (type == IB_QPT_RC) {
		qp->s_state = IB_OPCODE_RC_SEND_LAST;
		qp->r_state = IB_OPCODE_RC_SEND_LAST;
	} else {
		qp->s_state = IB_OPCODE_UC_SEND_LAST;
		qp->r_state = IB_OPCODE_UC_SEND_LAST;
	}
	qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
	qp->r_nak_state = 0;
	qp->r_aflags = 0;
	qp->r_flags = 0;
	qp->s_rnr_timeout = 0;
	qp->s_head = 0;
	qp->s_tail = 0;
	qp->s_cur = 0;
	qp->s_last = 0;
	qp->s_ssn = 1;
	qp->s_lsn = 0;
	memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
	qp->r_head_ack_queue = 0;
	qp->s_tail_ack_queue = 0;
	qp->s_num_rd_atomic = 0;
	if (qp->r_rq.wq) {
		qp->r_rq.wq->head = 0;
		qp->r_rq.wq->tail = 0;
	}
}

/**
 * ipath_error_qp - put a QP into the error state
 * @qp: the QP to put into the error state
 * @err: the receive completion error to signal if a RWQE is active
 *
 * Flushes both send and receive work queues.
 * Returns true if last WQE event should be generated.
 * The QP s_lock should be held and interrupts disabled.
 * If we are already in error state, just return.
 */

int ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	int ret = 0;

	if (qp->state == IB_QPS_ERR)
		goto bail;

	qp->state = IB_QPS_ERR;

	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	if (!list_empty(&qp->piowait))
		list_del_init(&qp->piowait);
	spin_unlock(&dev->pending_lock);

	/* Schedule the sending tasklet to drain the send work queue. */
	if (qp->s_last != qp->s_head)
		ipath_schedule_send(qp);

	memset(&wc, 0, sizeof(wc));
	wc.qp = &qp->ibqp;
	wc.opcode = IB_WC_RECV;

	if (test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags)) {
		wc.wr_id = qp->r_wr_id;
		wc.status = err;
		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
	}
	wc.status = IB_WC_WR_FLUSH_ERR;

	if (qp->r_rq.wq) {
		struct ipath_rwq *wq;
		u32 head;
		u32 tail;

		spin_lock(&qp->r_rq.lock);

		/* sanity check pointers before trusting them */
		wq = qp->r_rq.wq;
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		while (tail != head) {
			wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id;
			if (++tail >= qp->r_rq.size)
				tail = 0;
			ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
		}
		wq->tail = tail;

		spin_unlock(&qp->r_rq.lock);
	} else if (qp->ibqp.event_handler)
		ret = 1;

bail:
	return ret;
}
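
/*
 * Illustrative note (not from the original source): flushing the receive
 * queue above completes every posted-but-unconsumed RWQE with
 * IB_WC_WR_FLUSH_ERR, as the IB spec requires when a QP enters the
 * error state; the send side is drained asynchronously by the send
 * tasklet scheduled above.
 */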

/**
 * ipath_modify_qp - modify the attributes of a queue pair
 * @ibqp: the queue pair whose attributes we're modifying
 * @attr: the new attributes
 * @attr_mask: the mask of attributes to modify
 * @udata: user data for ipathverbs.so
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		    int attr_mask, struct ib_udata *udata)
{
	struct ipath_ibdev *dev = to_idev(ibqp->device);
	struct ipath_qp *qp = to_iqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int lastwqe = 0;
	int ret;

	spin_lock_irq(&qp->s_lock);

	cur_state = attr_mask & IB_QP_CUR_STATE ?
		attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
				attr_mask))
		goto inval;

	if (attr_mask & IB_QP_AV) {
		if (attr->ah_attr.dlid == 0 ||
		    attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
			goto inval;

		if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
		    (attr->ah_attr.grh.sgid_index > 1))
			goto inval;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
			goto inval;

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		if (attr->min_rnr_timer > 31)
			goto inval;

	if (attr_mask & IB_QP_PORT)
		if (attr->port_num == 0 ||
		    attr->port_num > ibqp->device->phys_port_cnt)
			goto inval;
	/*
	 * Don't allow invalid path MTU values or MTUs greater than 2048
	 * unless we are configured for a 4KB MTU.
	 */
	if ((attr_mask & IB_QP_PATH_MTU) &&
	    (ib_mtu_enum_to_int(attr->path_mtu) == -1 ||
	     (attr->path_mtu > IB_MTU_2048 && !ipath_mtu4096)))
		goto inval;

	if (attr_mask & IB_QP_PATH_MIG_STATE)
		if (attr->path_mig_state != IB_MIG_MIGRATED &&
		    attr->path_mig_state != IB_MIG_REARM)
			goto inval;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC)
			goto inval;

	switch (new_state) {
	case IB_QPS_RESET:
		if (qp->state != IB_QPS_RESET) {
			qp->state = IB_QPS_RESET;
			spin_lock(&dev->pending_lock);
			if (!list_empty(&qp->timerwait))
				list_del_init(&qp->timerwait);
			if (!list_empty(&qp->piowait))
				list_del_init(&qp->piowait);
			spin_unlock(&dev->pending_lock);
			qp->s_flags &= ~IPATH_S_ANY_WAIT;
			spin_unlock_irq(&qp->s_lock);
			/* Stop the sending tasklet */
			tasklet_kill(&qp->s_task);
			wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
			spin_lock_irq(&qp->s_lock);
		}
		ipath_reset_qp(qp, ibqp->qp_type);
		break;

	case IB_QPS_SQD:
		qp->s_draining = qp->s_last != qp->s_cur;
		qp->state = new_state;
		break;

	case IB_QPS_SQE:
		if (qp->ibqp.qp_type == IB_QPT_RC)
			goto inval;
		qp->state = new_state;
		break;

	case IB_QPS_ERR:
		lastwqe = ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
		break;

	default:
		qp->state = new_state;
		break;
	}

	if (attr_mask & IB_QP_PKEY_INDEX)
		qp->s_pkey_index = attr->pkey_index;

	if (attr_mask & IB_QP_DEST_QPN)
		qp->remote_qpn = attr->dest_qp_num;

	if (attr_mask & IB_QP_SQ_PSN) {
		qp->s_psn = qp->s_next_psn = attr->sq_psn;
		qp->s_last_psn = qp->s_next_psn - 1;
	}

	if (attr_mask & IB_QP_RQ_PSN)
		qp->r_psn = attr->rq_psn;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->qp_access_flags = attr->qp_access_flags;

	if (attr_mask & IB_QP_AV) {
		qp->remote_ah_attr = attr->ah_attr;
		qp->s_dmult = ipath_ib_rate_to_mult(attr->ah_attr.static_rate);
	}

	if (attr_mask & IB_QP_PATH_MTU)
		qp->path_mtu = attr->path_mtu;

	if (attr_mask & IB_QP_RETRY_CNT)
		qp->s_retry = qp->s_retry_cnt = attr->retry_cnt;

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp->s_rnr_retry = attr->rnr_retry;
		if (qp->s_rnr_retry > 7)
			qp->s_rnr_retry = 7;
		qp->s_rnr_retry_cnt = qp->s_rnr_retry;
	}

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		qp->r_min_rnr_timer = attr->min_rnr_timer;

	if (attr_mask & IB_QP_TIMEOUT)
		qp->timeout = attr->timeout;

	if (attr_mask & IB_QP_QKEY)
		qp->qkey = attr->qkey;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->r_max_rd_atomic = attr->max_dest_rd_atomic;

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
		qp->s_max_rd_atomic = attr->max_rd_atomic;

	spin_unlock_irq(&qp->s_lock);

	if (lastwqe) {
		struct ib_event ev;

		ev.device = qp->ibqp.device;
		ev.element.qp = &qp->ibqp;
		ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
		qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
	}
	ret = 0;
	goto bail;

inval:
	spin_unlock_irq(&qp->s_lock);
	ret = -EINVAL;

bail:
	return ret;
}
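
/*
 * Illustrative note (not from the original source): a ULP typically
 * drives a QP through RESET -> INIT -> RTR -> RTS with successive
 * ib_modify_qp() calls.  ib_modify_qp_is_ok() rejects illegal
 * transitions and attribute masks, and the rest of the function runs
 * under qp->s_lock so the state and attributes change atomically with
 * respect to the send engine.
 */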

int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		   int attr_mask, struct ib_qp_init_attr *init_attr)
{
	struct ipath_qp *qp = to_iqp(ibqp);

	attr->qp_state = qp->state;
	attr->cur_qp_state = attr->qp_state;
	attr->path_mtu = qp->path_mtu;
	attr->path_mig_state = 0;
	attr->qkey = qp->qkey;
	attr->rq_psn = qp->r_psn;
	attr->sq_psn = qp->s_next_psn;
	attr->dest_qp_num = qp->remote_qpn;
	attr->qp_access_flags = qp->qp_access_flags;
	attr->cap.max_send_wr = qp->s_size - 1;
	attr->cap.max_recv_wr = qp->ibqp.srq ? 0 : qp->r_rq.size - 1;
	attr->cap.max_send_sge = qp->s_max_sge;
	attr->cap.max_recv_sge = qp->r_rq.max_sge;
	attr->cap.max_inline_data = 0;
	attr->ah_attr = qp->remote_ah_attr;
	memset(&attr->alt_ah_attr, 0, sizeof(attr->alt_ah_attr));
	attr->pkey_index = qp->s_pkey_index;
	attr->alt_pkey_index = 0;
	attr->en_sqd_async_notify = 0;
	attr->sq_draining = qp->s_draining;
	attr->max_rd_atomic = qp->s_max_rd_atomic;
	attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
	attr->min_rnr_timer = qp->r_min_rnr_timer;
	attr->port_num = 1;
	attr->timeout = qp->timeout;
	attr->retry_cnt = qp->s_retry_cnt;
	attr->rnr_retry = qp->s_rnr_retry_cnt;
	attr->alt_port_num = 0;
	attr->alt_timeout = 0;

	init_attr->event_handler = qp->ibqp.event_handler;
	init_attr->qp_context = qp->ibqp.qp_context;
	init_attr->send_cq = qp->ibqp.send_cq;
	init_attr->recv_cq = qp->ibqp.recv_cq;
	init_attr->srq = qp->ibqp.srq;
	init_attr->cap = attr->cap;
	if (qp->s_flags & IPATH_S_SIGNAL_REQ_WR)
		init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
	else
		init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->qp_type = qp->ibqp.qp_type;
	init_attr->port_num = 1;
	return 0;
}

/**
 * ipath_compute_aeth - compute the AETH (syndrome + MSN)
 * @qp: the queue pair to compute the AETH for
 *
 * Returns the AETH.
 */
__be32 ipath_compute_aeth(struct ipath_qp *qp)
{
	u32 aeth = qp->r_msn & IPATH_MSN_MASK;

	if (qp->ibqp.srq) {
		/*
		 * Shared receive queues don't generate credits.
		 * Set the credit field to the invalid value.
		 */
		aeth |= IPATH_AETH_CREDIT_INVAL << IPATH_AETH_CREDIT_SHIFT;
	} else {
		u32 min, max, x;
		u32 credits;
		struct ipath_rwq *wq = qp->r_rq.wq;
		u32 head;
		u32 tail;

		/* sanity check pointers before trusting them */
		head = wq->head;
		if (head >= qp->r_rq.size)
			head = 0;
		tail = wq->tail;
		if (tail >= qp->r_rq.size)
			tail = 0;
		/*
		 * Compute the number of credits available (RWQEs).
		 * XXX Not holding the r_rq.lock here so there is a small
		 * chance that the pair of reads are not atomic.
		 */
		credits = head - tail;
		if ((int) credits < 0)
			credits += qp->r_rq.size;
		/*
		 * Binary search the credit table to find the code to
		 * use.
		 */
		min = 0;
		max = 31;
		for (;;) {
			x = (min + max) / 2;
			if (credit_table[x] == credits)
				break;
			if (credit_table[x] > credits)
				max = x;
			else if (min == x)
				break;
			else
				min = x;
		}
		aeth |= x << IPATH_AETH_CREDIT_SHIFT;
	}
	return cpu_to_be32(aeth);
}
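
/*
 * Worked example (not from the original source): with 100 RWQEs
 * available, the search above converges on index 0xD (96 credits), the
 * largest table entry not exceeding 100, so the returned AETH carries
 * credit code 0xD in the credit field above the 24-bit MSN.
 */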

/**
 * ipath_create_qp - create a queue pair for a device
 * @ibpd: the protection domain whose device we create the queue pair for
 * @init_attr: the attributes of the queue pair
 * @udata: user data for ipathverbs.so
 *
 * Returns the queue pair on success, otherwise returns an error.
 *
 * Called by the ib_create_qp() core verbs function.
 */
struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
			      struct ib_qp_init_attr *init_attr,
			      struct ib_udata *udata)
{
	struct ipath_qp *qp;
	int err;
	struct ipath_swqe *swq = NULL;
	struct ipath_ibdev *dev;
	size_t sz;
	size_t sg_list_sz;
	struct ib_qp *ret;

	if (init_attr->create_flags) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
	    init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	/* Check receive queue parameters if no SRQ is specified. */
	if (!init_attr->srq) {
		if (init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
		    init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
		if (init_attr->cap.max_send_sge +
		    init_attr->cap.max_send_wr +
		    init_attr->cap.max_recv_sge +
		    init_attr->cap.max_recv_wr == 0) {
			ret = ERR_PTR(-EINVAL);
			goto bail;
		}
	}

	switch (init_attr->qp_type) {
	case IB_QPT_UC:
	case IB_QPT_RC:
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		sz = sizeof(struct ipath_sge) *
			init_attr->cap.max_send_sge +
			sizeof(struct ipath_swqe);
		swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
		if (swq == NULL) {
			ret = ERR_PTR(-ENOMEM);
			goto bail;
		}
		sz = sizeof(*qp);
		sg_list_sz = 0;
		if (init_attr->srq) {
			struct ipath_srq *srq = to_isrq(init_attr->srq);

			if (srq->rq.max_sge > 1)
				sg_list_sz = sizeof(*qp->r_sg_list) *
					(srq->rq.max_sge - 1);
		} else if (init_attr->cap.max_recv_sge > 1)
			sg_list_sz = sizeof(*qp->r_sg_list) *
				(init_attr->cap.max_recv_sge - 1);
		qp = kmalloc(sz + sg_list_sz, GFP_KERNEL);
		if (!qp) {
			ret = ERR_PTR(-ENOMEM);
			goto bail_swq;
		}
		if (sg_list_sz && (init_attr->qp_type == IB_QPT_UD ||
		    init_attr->qp_type == IB_QPT_SMI ||
		    init_attr->qp_type == IB_QPT_GSI)) {
			qp->r_ud_sg_list = kmalloc(sg_list_sz, GFP_KERNEL);
			if (!qp->r_ud_sg_list) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_qp;
			}
		} else
			qp->r_ud_sg_list = NULL;
		if (init_attr->srq) {
			sz = 0;
			qp->r_rq.size = 0;
			qp->r_rq.max_sge = 0;
			qp->r_rq.wq = NULL;
			init_attr->cap.max_recv_wr = 0;
			init_attr->cap.max_recv_sge = 0;
		} else {
			qp->r_rq.size = init_attr->cap.max_recv_wr + 1;
			qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
			sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
				sizeof(struct ipath_rwqe);
			qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) +
						   qp->r_rq.size * sz);
			if (!qp->r_rq.wq) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_sg_list;
			}
		}

		/*
		 * ib_create_qp() will initialize qp->ibqp
		 * except for qp->ibqp.qp_num.
		 */
		spin_lock_init(&qp->s_lock);
		spin_lock_init(&qp->r_rq.lock);
		atomic_set(&qp->refcount, 0);
		init_waitqueue_head(&qp->wait);
		init_waitqueue_head(&qp->wait_dma);
		tasklet_init(&qp->s_task, ipath_do_send, (unsigned long)qp);
		INIT_LIST_HEAD(&qp->piowait);
		INIT_LIST_HEAD(&qp->timerwait);
		qp->state = IB_QPS_RESET;
		qp->s_wq = swq;
		qp->s_size = init_attr->cap.max_send_wr + 1;
		qp->s_max_sge = init_attr->cap.max_send_sge;
		if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
			qp->s_flags = IPATH_S_SIGNAL_REQ_WR;
		else
			qp->s_flags = 0;
		dev = to_idev(ibpd->device);
		err = ipath_alloc_qpn(&dev->qp_table, qp,
				      init_attr->qp_type);
		if (err) {
			ret = ERR_PTR(err);
			vfree(qp->r_rq.wq);
			goto bail_sg_list;
		}
		qp->ip = NULL;
		qp->s_tx = NULL;
		ipath_reset_qp(qp, init_attr->qp_type);
		break;

	default:
		/* Don't support raw QPs */
		ret = ERR_PTR(-ENOSYS);
		goto bail;
	}

	init_attr->cap.max_inline_data = 0;

	/*
	 * Return the address of the RWQ as the offset to mmap.
	 * See ipath_mmap() for details.
	 */
	if (udata && udata->outlen >= sizeof(__u64)) {
		if (!qp->r_rq.wq) {
			__u64 offset = 0;

			err = ib_copy_to_udata(udata, &offset,
					       sizeof(offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		} else {
			u32 s = sizeof(struct ipath_rwq) +
				qp->r_rq.size * sz;

			qp->ip =
			    ipath_create_mmap_info(dev, s,
						   ibpd->uobject->context,
						   qp->r_rq.wq);
			if (!qp->ip) {
				ret = ERR_PTR(-ENOMEM);
				goto bail_ip;
			}

			err = ib_copy_to_udata(udata, &(qp->ip->offset),
					       sizeof(qp->ip->offset));
			if (err) {
				ret = ERR_PTR(err);
				goto bail_ip;
			}
		}
	}

	spin_lock(&dev->n_qps_lock);
	if (dev->n_qps_allocated == ib_ipath_max_qps) {
		spin_unlock(&dev->n_qps_lock);
		ret = ERR_PTR(-ENOMEM);
		goto bail_ip;
	}

	dev->n_qps_allocated++;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip) {
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->ip->pending_mmaps, &dev->pending_mmaps);
		spin_unlock_irq(&dev->pending_lock);
	}

	ret = &qp->ibqp;
	goto bail;

bail_ip:
	if (qp->ip)
		kref_put(&qp->ip->ref, ipath_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	ipath_free_qp(&dev->qp_table, qp);
	free_qpn(&dev->qp_table, qp->ibqp.qp_num);
bail_sg_list:
	kfree(qp->r_ud_sg_list);
bail_qp:
	kfree(qp);
bail_swq:
	vfree(swq);
bail:
	return ret;
}
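
/*
 * Illustrative note (not from the original source): for userspace QPs
 * the receive queue lives in vmalloc_user() memory, and the offset
 * copied into udata above is later passed back by the userspace verbs
 * library to mmap() (see ipath_mmap()), so head/tail updates are shared
 * directly between kernel and user without a system call per receive.
 */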

/**
 * ipath_destroy_qp - destroy a queue pair
 * @ibqp: the queue pair to destroy
 *
 * Returns 0 on success.
 *
 * Note that this can be called while the QP is actively sending or
 * receiving!
 */
int ipath_destroy_qp(struct ib_qp *ibqp)
{
	struct ipath_qp *qp = to_iqp(ibqp);
	struct ipath_ibdev *dev = to_idev(ibqp->device);

	/* Make sure HW and driver activity is stopped. */
	spin_lock_irq(&qp->s_lock);
	if (qp->state != IB_QPS_RESET) {
		qp->state = IB_QPS_RESET;
		spin_lock(&dev->pending_lock);
		if (!list_empty(&qp->timerwait))
			list_del_init(&qp->timerwait);
		if (!list_empty(&qp->piowait))
			list_del_init(&qp->piowait);
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~IPATH_S_ANY_WAIT;
		spin_unlock_irq(&qp->s_lock);
		/* Stop the sending tasklet */
		tasklet_kill(&qp->s_task);
		wait_event(qp->wait_dma, !atomic_read(&qp->s_dma_busy));
	} else
		spin_unlock_irq(&qp->s_lock);

	ipath_free_qp(&dev->qp_table, qp);

	if (qp->s_tx) {
		atomic_dec(&qp->refcount);
		if (qp->s_tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF)
			kfree(qp->s_tx->txreq.map_addr);
		spin_lock_irq(&dev->pending_lock);
		list_add(&qp->s_tx->txreq.list, &dev->txreq_free);
		spin_unlock_irq(&dev->pending_lock);
		qp->s_tx = NULL;
	}

	wait_event(qp->wait, !atomic_read(&qp->refcount));

	/* all users cleaned up, mark it available */
	free_qpn(&dev->qp_table, qp->ibqp.qp_num);
	spin_lock(&dev->n_qps_lock);
	dev->n_qps_allocated--;
	spin_unlock(&dev->n_qps_lock);

	if (qp->ip)
		kref_put(&qp->ip->ref, ipath_release_mmap_info);
	else
		vfree(qp->r_rq.wq);
	kfree(qp->r_ud_sg_list);
	vfree(qp->s_wq);
	kfree(qp);
	return 0;
}
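
/*
 * Illustrative note (not from the original source): the wait_event()
 * above is what makes the reference counting safe -- destroy blocks
 * until every ipath_lookup_qpn() caller has dropped its reference, so
 * the final kfree(qp) cannot race with a receive interrupt still
 * walking a hash bucket chain.
 */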

/**
 * ipath_init_qp_table - initialize the QP table for a device
 * @idev: the device whose QP table we're initializing
 * @size: the size of the QP table
 *
 * Returns 0 on success, otherwise returns an errno.
 */
int ipath_init_qp_table(struct ipath_ibdev *idev, int size)
{
	int i;
	int ret;

	idev->qp_table.last = 1;	/* QPN 0 and 1 are special. */
	idev->qp_table.max = size;
	idev->qp_table.nmaps = 1;
	idev->qp_table.table = kzalloc(size * sizeof(*idev->qp_table.table),
				       GFP_KERNEL);
	if (idev->qp_table.table == NULL) {
		ret = -ENOMEM;
		goto bail;
	}

	for (i = 0; i < ARRAY_SIZE(idev->qp_table.map); i++) {
		atomic_set(&idev->qp_table.map[i].n_free, BITS_PER_PAGE);
		idev->qp_table.map[i].page = NULL;
	}

	ret = 0;

bail:
	return ret;
}

/**
 * ipath_get_credit - flush the send work queue of a QP
 * @qp: the qp whose send work queue to flush
 * @aeth: the Acknowledge Extended Transport Header
 *
 * The QP s_lock should be held.
 */
void ipath_get_credit(struct ipath_qp *qp, u32 aeth)
{
	u32 credit = (aeth >> IPATH_AETH_CREDIT_SHIFT) & IPATH_AETH_CREDIT_MASK;

	/*
	 * If the credit is invalid, we can send
	 * as many packets as we like.  Otherwise, we have to
	 * honor the credit field.
	 */
	if (credit == IPATH_AETH_CREDIT_INVAL)
		qp->s_lsn = (u32) -1;
	else if (qp->s_lsn != (u32) -1) {
		/* Compute new LSN (i.e., MSN + credit) */
		credit = (aeth + credit_table[credit]) & IPATH_MSN_MASK;
		if (ipath_cmp24(credit, qp->s_lsn) > 0)
			qp->s_lsn = credit;
	}

	/* Restart sending if it was blocked due to lack of credits. */
	if ((qp->s_flags & IPATH_S_WAIT_SSN_CREDIT) &&
	    qp->s_cur != qp->s_head &&
	    (qp->s_lsn == (u32) -1 ||
	     ipath_cmp24(get_swqe_ptr(qp, qp->s_cur)->ssn,
			 qp->s_lsn + 1) <= 0))
		ipath_schedule_send(qp);
}
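
/*
 * Worked example (not from the original source): an incoming AETH whose
 * credit field holds 0x10 grants credit_table[0x10] = 256 credits, so
 * the new limit SSN becomes (MSN + 256) masked to 24 bits; the invalid
 * code (IPATH_AETH_CREDIT_INVAL) lifts the limit entirely by setting
 * s_lsn to ~0.
 */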