Linux Kernel 3.7.1
iwch_qp.c
1 /*
2  * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses. You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  * Redistribution and use in source and binary forms, with or
11  * without modification, are permitted provided that the following
12  * conditions are met:
13  *
14  * - Redistributions of source code must retain the above
15  * copyright notice, this list of conditions and the following
16  * disclaimer.
17  *
18  * - Redistributions in binary form must reproduce the above
19  * copyright notice, this list of conditions and the following
20  * disclaimer in the documentation and/or other materials
21  * provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include <linux/sched.h>
33 #include <linux/gfp.h>
34 #include "iwch_provider.h"
35 #include "iwch.h"
36 #include "iwch_cm.h"
37 #include "cxio_hal.h"
38 #include "cxio_resource.h"
39 
40 #define NO_SUPPORT -1
41 
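/*
 * The build_* helpers below translate an ib_send_wr into the matching T3
 * work request image. WQE sizes are expressed in "flits" (8-byte words),
 * which is why each helper returns a flit count such as
 * 4 + 2 * num_sge or sizeof(struct t3_rdma_read_wr) >> 3.
 */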
42 static int build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
43  u8 * flit_cnt)
44 {
45  int i;
46  u32 plen;
47 
48  switch (wr->opcode) {
49  case IB_WR_SEND:
50  if (wr->send_flags & IB_SEND_SOLICITED)
51  wqe->send.rdmaop = T3_SEND_WITH_SE;
52  else
53  wqe->send.rdmaop = T3_SEND;
54  wqe->send.rem_stag = 0;
55  break;
56  case IB_WR_SEND_WITH_INV:
57  if (wr->send_flags & IB_SEND_SOLICITED)
58  wqe->send.rdmaop = T3_SEND_WITH_SE_INV;
59  else
60  wqe->send.rdmaop = T3_SEND_WITH_INV;
61  wqe->send.rem_stag = cpu_to_be32(wr->ex.invalidate_rkey);
62  break;
63  default:
64  return -EINVAL;
65  }
66  if (wr->num_sge > T3_MAX_SGE)
67  return -EINVAL;
68  wqe->send.reserved[0] = 0;
69  wqe->send.reserved[1] = 0;
70  wqe->send.reserved[2] = 0;
71  plen = 0;
72  for (i = 0; i < wr->num_sge; i++) {
73  if ((plen + wr->sg_list[i].length) < plen)
74  return -EMSGSIZE;
75 
76  plen += wr->sg_list[i].length;
77  wqe->send.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
78  wqe->send.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
79  wqe->send.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
80  }
81  wqe->send.num_sgle = cpu_to_be32(wr->num_sge);
82  *flit_cnt = 4 + ((wr->num_sge) << 1);
83  wqe->send.plen = cpu_to_be32(plen);
84  return 0;
85 }
86 
87 static int build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
88  u8 *flit_cnt)
89 {
90  int i;
91  u32 plen;
92  if (wr->num_sge > T3_MAX_SGE)
93  return -EINVAL;
94  wqe->write.rdmaop = T3_RDMA_WRITE;
95  wqe->write.reserved[0] = 0;
96  wqe->write.reserved[1] = 0;
97  wqe->write.reserved[2] = 0;
98  wqe->write.stag_sink = cpu_to_be32(wr->wr.rdma.rkey);
99  wqe->write.to_sink = cpu_to_be64(wr->wr.rdma.remote_addr);
100 
101  if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
102  plen = 4;
103  wqe->write.sgl[0].stag = wr->ex.imm_data;
104  wqe->write.sgl[0].len = cpu_to_be32(0);
105  wqe->write.num_sgle = cpu_to_be32(0);
106  *flit_cnt = 6;
107  } else {
108  plen = 0;
109  for (i = 0; i < wr->num_sge; i++) {
110  if ((plen + wr->sg_list[i].length) < plen) {
111  return -EMSGSIZE;
112  }
113  plen += wr->sg_list[i].length;
114  wqe->write.sgl[i].stag =
115  cpu_to_be32(wr->sg_list[i].lkey);
116  wqe->write.sgl[i].len =
117  cpu_to_be32(wr->sg_list[i].length);
118  wqe->write.sgl[i].to =
119  cpu_to_be64(wr->sg_list[i].addr);
120  }
121  wqe->write.num_sgle = cpu_to_be32(wr->num_sge);
122  *flit_cnt = 5 + ((wr->num_sge) << 1);
123  }
124  wqe->write.plen = cpu_to_be32(plen);
125  return 0;
126 }
127 
128 static int build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
129  u8 *flit_cnt)
130 {
131  if (wr->num_sge > 1)
132  return -EINVAL;
133  wqe->read.rdmaop = T3_READ_REQ;
134  if (wr->opcode == IB_WR_RDMA_READ_WITH_INV)
135  wqe->read.local_inv = 1;
136  else
137  wqe->read.local_inv = 0;
138  wqe->read.reserved[0] = 0;
139  wqe->read.reserved[1] = 0;
140  wqe->read.rem_stag = cpu_to_be32(wr->wr.rdma.rkey);
141  wqe->read.rem_to = cpu_to_be64(wr->wr.rdma.remote_addr);
142  wqe->read.local_stag = cpu_to_be32(wr->sg_list[0].lkey);
143  wqe->read.local_len = cpu_to_be32(wr->sg_list[0].length);
144  wqe->read.local_to = cpu_to_be64(wr->sg_list[0].addr);
145  *flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;
146  return 0;
147 }
148 
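/*
 * A fast-register WR carries its page list inline in the WQE. When
 * page_list_len exceeds T3_MAX_FASTREG_FRAG, the remaining PBL entries
 * spill into a second WQE slot (a T3_WR_FASTREG fragment), which is why
 * build_fastreg can report wr_cnt == 2 and caps *flit_cnt at 15.
 */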
149 static int build_fastreg(union t3_wr *wqe, struct ib_send_wr *wr,
150  u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq)
151 {
152  int i;
153  __be64 *p;
154 
155  if (wr->wr.fast_reg.page_list_len > T3_MAX_FASTREG_DEPTH)
156  return -EINVAL;
157  *wr_cnt = 1;
158  wqe->fastreg.stag = cpu_to_be32(wr->wr.fast_reg.rkey);
159  wqe->fastreg.len = cpu_to_be32(wr->wr.fast_reg.length);
160  wqe->fastreg.va_base_hi = cpu_to_be32(wr->wr.fast_reg.iova_start >> 32);
161  wqe->fastreg.va_base_lo_fbo =
162  cpu_to_be32(wr->wr.fast_reg.iova_start & 0xffffffff);
163  wqe->fastreg.page_type_perms = cpu_to_be32(
164  V_FR_PAGE_COUNT(wr->wr.fast_reg.page_list_len) |
165  V_FR_PAGE_SIZE(wr->wr.fast_reg.page_shift-12) |
166  V_FR_TYPE(TPT_VATO) |
167  V_FR_PERMS(iwch_ib_to_tpt_access(wr->wr.fast_reg.access_flags)));
168  p = &wqe->fastreg.pbl_addrs[0];
169  for (i = 0; i < wr->wr.fast_reg.page_list_len; i++, p++) {
170 
171  /* If we need a 2nd WR, then set it up */
172  if (i == T3_MAX_FASTREG_FRAG) {
173  *wr_cnt = 2;
174  wqe = (union t3_wr *)(wq->queue +
175  Q_PTR2IDX((wq->wptr+1), wq->size_log2));
176  build_fw_riwrh((void *)wqe, T3_WR_FASTREG, 0,
177  Q_GENBIT(wq->wptr + 1, wq->size_log2),
178  0, 1 + wr->wr.fast_reg.page_list_len - T3_MAX_FASTREG_FRAG,
179  T3_EOP);
180 
181  p = &wqe->pbl_frag.pbl_addrs[0];
182  }
183  *p = cpu_to_be64((u64)wr->wr.fast_reg.page_list->page_list[i]);
184  }
185  *flit_cnt = 5 + wr->wr.fast_reg.page_list_len;
186  if (*flit_cnt > 15)
187  *flit_cnt = 15;
188  return 0;
189 }
190 
191 static int build_inv_stag(union t3_wr *wqe, struct ib_send_wr *wr,
192  u8 *flit_cnt)
193 {
194  wqe->local_inv.stag = cpu_to_be32(wr->ex.invalidate_rkey);
195  wqe->local_inv.reserved = 0;
196  *flit_cnt = sizeof(struct t3_local_inv_wr) >> 3;
197  return 0;
198 }
199 
200 static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
201  u32 num_sgle, u32 * pbl_addr, u8 * page_size)
202 {
203  int i;
204  struct iwch_mr *mhp;
205  u64 offset;
206  for (i = 0; i < num_sgle; i++) {
207 
208  mhp = get_mhp(rhp, (sg_list[i].lkey) >> 8);
209  if (!mhp) {
210  PDBG("%s %d\n", __func__, __LINE__);
211  return -EIO;
212  }
213  if (!mhp->attr.state) {
214  PDBG("%s %d\n", __func__, __LINE__);
215  return -EIO;
216  }
217  if (mhp->attr.zbva) {
218  PDBG("%s %d\n", __func__, __LINE__);
219  return -EIO;
220  }
221 
222  if (sg_list[i].addr < mhp->attr.va_fbo) {
223  PDBG("%s %d\n", __func__, __LINE__);
224  return -EINVAL;
225  }
226  if (sg_list[i].addr + ((u64) sg_list[i].length) <
227  sg_list[i].addr) {
228  PDBG("%s %d\n", __func__, __LINE__);
229  return -EINVAL;
230  }
231  if (sg_list[i].addr + ((u64) sg_list[i].length) >
232  mhp->attr.va_fbo + ((u64) mhp->attr.len)) {
233  PDBG("%s %d\n", __func__, __LINE__);
234  return -EINVAL;
235  }
236  offset = sg_list[i].addr - mhp->attr.va_fbo;
237  offset += mhp->attr.va_fbo &
238  ((1UL << (12 + mhp->attr.page_size)) - 1);
239  pbl_addr[i] = ((mhp->attr.pbl_addr -
240  rhp->rdev.rnic_info.pbl_base) >> 3) +
241  (offset >> (12 + mhp->attr.page_size));
242  page_size[i] = mhp->attr.page_size;
243  }
244  return 0;
245 }
246 
247 static int build_rdma_recv(struct iwch_qp *qhp, union t3_wr *wqe,
248  struct ib_recv_wr *wr)
249 {
250  int i, err = 0;
251  u32 pbl_addr[T3_MAX_SGE];
252  u8 page_size[T3_MAX_SGE];
253 
254  err = iwch_sgl2pbl_map(qhp->rhp, wr->sg_list, wr->num_sge, pbl_addr,
255  page_size);
256  if (err)
257  return err;
258  wqe->recv.pagesz[0] = page_size[0];
259  wqe->recv.pagesz[1] = page_size[1];
260  wqe->recv.pagesz[2] = page_size[2];
261  wqe->recv.pagesz[3] = page_size[3];
262  wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
263  for (i = 0; i < wr->num_sge; i++) {
264  wqe->recv.sgl[i].stag = cpu_to_be32(wr->sg_list[i].lkey);
265  wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
266 
267  /* 'to' in the WQE is the offset into the page */
268  wqe->recv.sgl[i].to = cpu_to_be64(((u32)wr->sg_list[i].addr) &
269  ((1UL << (12 + page_size[i])) - 1));
270 
271  /* pbl_addr is the adapter's address in the PBL */
272  wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_addr[i]);
273  }
274  for (; i < T3_MAX_SGE; i++) {
275  wqe->recv.sgl[i].stag = 0;
276  wqe->recv.sgl[i].len = 0;
277  wqe->recv.sgl[i].to = 0;
278  wqe->recv.pbl_addr[i] = 0;
279  }
280  qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
281  qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
282  qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
283  qhp->wq.rq_size_log2)].pbl_addr = 0;
284  return 0;
285 }
286 
287 static int build_zero_stag_recv(struct iwch_qp *qhp, union t3_wr *wqe,
288  struct ib_recv_wr *wr)
289 {
290  int i;
291  u32 pbl_addr;
292  u32 pbl_offset;
293 
294 
295  /*
296  * The T3 HW requires the PBL in the HW recv descriptor to reference
297  * a PBL entry. So we allocate the max needed PBL memory here and pass
298  * it to the uP in the recv WR. The uP will build the PBL and setup
299  * the HW recv descriptor.
300  */
301  pbl_addr = cxio_hal_pblpool_alloc(&qhp->rhp->rdev, T3_STAG0_PBL_SIZE);
302  if (!pbl_addr)
303  return -ENOMEM;
304 
305  /*
306  * Compute the 8B aligned offset.
307  */
308  pbl_offset = (pbl_addr - qhp->rhp->rdev.rnic_info.pbl_base) >> 3;
309 
310  wqe->recv.num_sgle = cpu_to_be32(wr->num_sge);
311 
312  for (i = 0; i < wr->num_sge; i++) {
313 
314  /*
315  * Use a 128MB page size. This and an imposed 128MB
316  * sge length limit allows us to require only a 2-entry HW
317  * PBL for each SGE. This restriction is acceptable since
318  * it is not possible to allocate 128MB of contiguous
319  * DMA coherent memory!
320  */
321  if (wr->sg_list[i].length > T3_STAG0_MAX_PBE_LEN)
322  return -EINVAL;
323  wqe->recv.pagesz[i] = T3_STAG0_PAGE_SHIFT;
324 
325  /*
326  * T3 restricts a recv to all zero-stag or all non-zero-stag.
327  */
328  if (wr->sg_list[i].lkey != 0)
329  return -EINVAL;
330  wqe->recv.sgl[i].stag = 0;
331  wqe->recv.sgl[i].len = cpu_to_be32(wr->sg_list[i].length);
332  wqe->recv.sgl[i].to = cpu_to_be64(wr->sg_list[i].addr);
333  wqe->recv.pbl_addr[i] = cpu_to_be32(pbl_offset);
334  pbl_offset += 2;
335  }
336  for (; i < T3_MAX_SGE; i++) {
337  wqe->recv.pagesz[i] = 0;
338  wqe->recv.sgl[i].stag = 0;
339  wqe->recv.sgl[i].len = 0;
340  wqe->recv.sgl[i].to = 0;
341  wqe->recv.pbl_addr[i] = 0;
342  }
343  qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
344  qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
345  qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
346  qhp->wq.rq_size_log2)].pbl_addr = pbl_addr;
347  return 0;
348 }
349 
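/*
 * Post send work requests onto the QP's software send queue. Under the QP
 * lock, each ib_send_wr is mapped to a T3 opcode, a WQE is built in the
 * next queue slot, and the firmware header is written with build_fw_riwrh().
 * The doorbell is rung once, after the lock is dropped.
 */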
350 int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
351  struct ib_send_wr **bad_wr)
352 {
353  int err = 0;
354  u8 uninitialized_var(t3_wr_flit_cnt);
355  enum t3_wr_opcode t3_wr_opcode = 0;
356  enum t3_wr_flags t3_wr_flags;
357  struct iwch_qp *qhp;
358  u32 idx;
359  union t3_wr *wqe;
360  u32 num_wrs;
361  unsigned long flag;
362  struct t3_swsq *sqp;
363  int wr_cnt = 1;
364 
365  qhp = to_iwch_qp(ibqp);
366  spin_lock_irqsave(&qhp->lock, flag);
367  if (qhp->attr.state > IWCH_QP_STATE_RTS) {
368  spin_unlock_irqrestore(&qhp->lock, flag);
369  err = -EINVAL;
370  goto out;
371  }
372  num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
373  qhp->wq.sq_size_log2);
374  if (num_wrs == 0) {
375  spin_unlock_irqrestore(&qhp->lock, flag);
376  err = -ENOMEM;
377  goto out;
378  }
379  while (wr) {
380  if (num_wrs == 0) {
381  err = -ENOMEM;
382  break;
383  }
384  idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
385  wqe = (union t3_wr *) (qhp->wq.queue + idx);
386  t3_wr_flags = 0;
387  if (wr->send_flags & IB_SEND_SOLICITED)
388  t3_wr_flags |= T3_SOLICITED_EVENT_FLAG;
389  if (wr->send_flags & IB_SEND_SIGNALED)
390  t3_wr_flags |= T3_COMPLETION_FLAG;
391  sqp = qhp->wq.sq +
392  Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
393  switch (wr->opcode) {
394  case IB_WR_SEND:
395  case IB_WR_SEND_WITH_INV:
396  if (wr->send_flags & IB_SEND_FENCE)
397  t3_wr_flags |= T3_READ_FENCE_FLAG;
398  t3_wr_opcode = T3_WR_SEND;
399  err = build_rdma_send(wqe, wr, &t3_wr_flit_cnt);
400  break;
401  case IB_WR_RDMA_WRITE:
402  case IB_WR_RDMA_WRITE_WITH_IMM:
403  t3_wr_opcode = T3_WR_WRITE;
404  err = build_rdma_write(wqe, wr, &t3_wr_flit_cnt);
405  break;
406  case IB_WR_RDMA_READ:
407  case IB_WR_RDMA_READ_WITH_INV:
408  t3_wr_opcode = T3_WR_READ;
409  t3_wr_flags = 0; /* T3 reads are always signaled */
410  err = build_rdma_read(wqe, wr, &t3_wr_flit_cnt);
411  if (err)
412  break;
413  sqp->read_len = wqe->read.local_len;
414  if (!qhp->wq.oldest_read)
415  qhp->wq.oldest_read = sqp;
416  break;
417  case IB_WR_FAST_REG_MR:
418  t3_wr_opcode = T3_WR_FASTREG;
419  err = build_fastreg(wqe, wr, &t3_wr_flit_cnt,
420  &wr_cnt, &qhp->wq);
421  break;
422  case IB_WR_LOCAL_INV:
423  if (wr->send_flags & IB_SEND_FENCE)
424  t3_wr_flags |= T3_LOCAL_FENCE_FLAG;
425  t3_wr_opcode = T3_WR_INV_STAG;
426  err = build_inv_stag(wqe, wr, &t3_wr_flit_cnt);
427  break;
428  default:
429  PDBG("%s post of type=%d TBD!\n", __func__,
430  wr->opcode);
431  err = -EINVAL;
432  }
433  if (err)
434  break;
435  wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
436  sqp->wr_id = wr->wr_id;
437  sqp->opcode = wr2opcode(t3_wr_opcode);
438  sqp->sq_wptr = qhp->wq.sq_wptr;
439  sqp->complete = 0;
440  sqp->signaled = (wr->send_flags & IB_SEND_SIGNALED);
441 
442  build_fw_riwrh((void *) wqe, t3_wr_opcode, t3_wr_flags,
443  Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
444  0, t3_wr_flit_cnt,
445  (wr_cnt == 1) ? T3_SOPEOP : T3_SOP);
446  PDBG("%s cookie 0x%llx wq idx 0x%x swsq idx %ld opcode %d\n",
447  __func__, (unsigned long long) wr->wr_id, idx,
448  Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2),
449  sqp->opcode);
450  wr = wr->next;
451  num_wrs--;
452  qhp->wq.wptr += wr_cnt;
453  ++(qhp->wq.sq_wptr);
454  }
455  spin_unlock_irqrestore(&qhp->lock, flag);
456  if (cxio_wq_db_enabled(&qhp->wq))
457  ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
458 
459 out:
460  if (err)
461  *bad_wr = wr;
462  return err;
463 }
464 
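/*
 * Post receive work requests. SGEs with a non-zero lkey are mapped to
 * adapter PBL entries via build_rdma_recv(); zero-stag SGEs take the
 * build_zero_stag_recv() path, which allocates PBL space for the uP.
 * Note that one RQ slot is held back (num_wrs is computed minus one).
 */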
465 int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
466  struct ib_recv_wr **bad_wr)
467 {
468  int err = 0;
469  struct iwch_qp *qhp;
470  u32 idx;
471  union t3_wr *wqe;
472  u32 num_wrs;
473  unsigned long flag;
474 
475  qhp = to_iwch_qp(ibqp);
476  spin_lock_irqsave(&qhp->lock, flag);
477  if (qhp->attr.state > IWCH_QP_STATE_RTS) {
478  spin_unlock_irqrestore(&qhp->lock, flag);
479  err = -EINVAL;
480  goto out;
481  }
482  num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr,
483  qhp->wq.rq_size_log2) - 1;
484  if (!wr) {
485  spin_unlock_irqrestore(&qhp->lock, flag);
486  err = -ENOMEM;
487  goto out;
488  }
489  while (wr) {
490  if (wr->num_sge > T3_MAX_SGE) {
491  err = -EINVAL;
492  break;
493  }
494  idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
495  wqe = (union t3_wr *) (qhp->wq.queue + idx);
496  if (num_wrs)
497  if (wr->sg_list[0].lkey)
498  err = build_rdma_recv(qhp, wqe, wr);
499  else
500  err = build_zero_stag_recv(qhp, wqe, wr);
501  else
502  err = -ENOMEM;
503 
504  if (err)
505  break;
506 
507  build_fw_riwrh((void *) wqe, T3_WR_RCV, T3_COMPLETION_FLAG,
508  Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
509  0, sizeof(struct t3_receive_wr) >> 3, T3_SOPEOP);
510  PDBG("%s cookie 0x%llx idx 0x%x rq_wptr 0x%x rq_rptr 0x%x "
511  "wqe %p \n", __func__, (unsigned long long) wr->wr_id,
512  idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
513  ++(qhp->wq.rq_wptr);
514  ++(qhp->wq.wptr);
515  wr = wr->next;
516  num_wrs--;
517  }
518  spin_unlock_irqrestore(&qhp->lock, flag);
519  if (cxio_wq_db_enabled(&qhp->wq))
520  ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
521 
522 out:
523  if (err)
524  *bad_wr = wr;
525  return err;
526 }
527 
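/*
 * Bind a memory window by posting a T3_WR_BIND WQE on the send queue.
 * The single-entry SGL describing the underlying MR is translated to a
 * PBL address with iwch_sgl2pbl_map() before the WQE is completed.
 */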
528 int iwch_bind_mw(struct ib_qp *qp,
529  struct ib_mw *mw,
530  struct ib_mw_bind *mw_bind)
531 {
532  struct iwch_dev *rhp;
533  struct iwch_mw *mhp;
534  struct iwch_qp *qhp;
535  union t3_wr *wqe;
536  u32 pbl_addr;
537  u8 page_size;
538  u32 num_wrs;
539  unsigned long flag;
540  struct ib_sge sgl;
541  int err=0;
542  enum t3_wr_flags t3_wr_flags;
543  u32 idx;
544  struct t3_swsq *sqp;
545 
546  qhp = to_iwch_qp(qp);
547  mhp = to_iwch_mw(mw);
548  rhp = qhp->rhp;
549 
550  spin_lock_irqsave(&qhp->lock, flag);
551  if (qhp->attr.state > IWCH_QP_STATE_RTS) {
552  spin_unlock_irqrestore(&qhp->lock, flag);
553  return -EINVAL;
554  }
555  num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
556  qhp->wq.sq_size_log2);
557  if (num_wrs == 0) {
558  spin_unlock_irqrestore(&qhp->lock, flag);
559  return -ENOMEM;
560  }
561  idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
562  PDBG("%s: idx 0x%0x, mw 0x%p, mw_bind 0x%p\n", __func__, idx,
563  mw, mw_bind);
564  wqe = (union t3_wr *) (qhp->wq.queue + idx);
565 
566  t3_wr_flags = 0;
567  if (mw_bind->send_flags & IB_SEND_SIGNALED)
568  t3_wr_flags = T3_COMPLETION_FLAG;
569 
570  sgl.addr = mw_bind->addr;
571  sgl.lkey = mw_bind->mr->lkey;
572  sgl.length = mw_bind->length;
573  wqe->bind.reserved = 0;
574  wqe->bind.type = TPT_VATO;
575 
576  /* TBD: check perms */
577  wqe->bind.perms = iwch_ib_to_tpt_bind_access(mw_bind->mw_access_flags);
578  wqe->bind.mr_stag = cpu_to_be32(mw_bind->mr->lkey);
579  wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
580  wqe->bind.mw_len = cpu_to_be32(mw_bind->length);
581  wqe->bind.mw_va = cpu_to_be64(mw_bind->addr);
582  err = iwch_sgl2pbl_map(rhp, &sgl, 1, &pbl_addr, &page_size);
583  if (err) {
584  spin_unlock_irqrestore(&qhp->lock, flag);
585  return err;
586  }
587  wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
588  sqp = qhp->wq.sq + Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
589  sqp->wr_id = mw_bind->wr_id;
590  sqp->opcode = T3_BIND_MW;
591  sqp->sq_wptr = qhp->wq.sq_wptr;
592  sqp->complete = 0;
593  sqp->signaled = (mw_bind->send_flags & IB_SEND_SIGNALED);
594  wqe->bind.mr_pbl_addr = cpu_to_be32(pbl_addr);
595  wqe->bind.mr_pagesz = page_size;
596  build_fw_riwrh((void *)wqe, T3_WR_BIND, t3_wr_flags,
597  Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2), 0,
598  sizeof(struct t3_bind_mw_wr) >> 3, T3_SOPEOP);
599  ++(qhp->wq.wptr);
600  ++(qhp->wq.sq_wptr);
601  spin_unlock_irqrestore(&qhp->lock, flag);
602 
603  if (cxio_wq_db_enabled(&qhp->wq))
604  ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
605 
606  return err;
607 }
608 
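/*
 * Map a CQE error status to the TERMINATE message layer/etype and error
 * code fields defined by the RDMAP/DDP/MPA specs. A NULL rsp_msg leaves
 * status at TPT_ERR_INTERNAL_ERR, which reports a local catastrophic error.
 */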
609 static inline void build_term_codes(struct respQ_msg_t *rsp_msg,
610  u8 *layer_type, u8 *ecode)
611 {
612  int status = TPT_ERR_INTERNAL_ERR;
613  int tagged = 0;
614  int opcode = -1;
615  int rqtype = 0;
616  int send_inv = 0;
617 
618  if (rsp_msg) {
619  status = CQE_STATUS(rsp_msg->cqe);
620  opcode = CQE_OPCODE(rsp_msg->cqe);
621  rqtype = RQ_TYPE(rsp_msg->cqe);
622  send_inv = (opcode == T3_SEND_WITH_INV) ||
623  (opcode == T3_SEND_WITH_SE_INV);
624  tagged = (opcode == T3_RDMA_WRITE) ||
625  (rqtype && (opcode == T3_READ_RESP));
626  }
627 
628  switch (status) {
629  case TPT_ERR_STAG:
630  if (send_inv) {
631  *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
632  *ecode = RDMAP_CANT_INV_STAG;
633  } else {
634  *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
635  *ecode = RDMAP_INV_STAG;
636  }
637  break;
638  case TPT_ERR_PDID:
639  *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
640  if ((opcode == T3_SEND_WITH_INV) ||
641  (opcode == T3_SEND_WITH_SE_INV))
642  *ecode = RDMAP_CANT_INV_STAG;
643  else
644  *ecode = RDMAP_STAG_NOT_ASSOC;
645  break;
646  case TPT_ERR_QPID:
647  *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
648  *ecode = RDMAP_STAG_NOT_ASSOC;
649  break;
650  case TPT_ERR_ACCESS:
651  *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
652  *ecode = RDMAP_ACC_VIOL;
653  break;
654  case TPT_ERR_WRAP:
655  *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
656  *ecode = RDMAP_TO_WRAP;
657  break;
658  case TPT_ERR_BOUND:
659  if (tagged) {
660  *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
661  *ecode = DDPT_BASE_BOUNDS;
662  } else {
663  *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
664  *ecode = RDMAP_BASE_BOUNDS;
665  }
666  break;
667  case TPT_ERR_INVALIDATE_SHARED_MR:
668  case TPT_ERR_INVALIDATE_MR_WITH_MW_BOUND:
669  *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
670  *ecode = RDMAP_CANT_INV_STAG;
671  break;
672  case TPT_ERR_ECC:
673  case TPT_ERR_ECC_PSTAG:
674  case TPT_ERR_INTERNAL_ERR:
675  *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
676  *ecode = 0;
677  break;
678  case TPT_ERR_OUT_OF_RQE:
679  *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
680  *ecode = DDPU_INV_MSN_NOBUF;
681  break;
682  case TPT_ERR_PBL_ADDR_BOUND:
683  *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
684  *ecode = DDPT_BASE_BOUNDS;
685  break;
686  case TPT_ERR_CRC:
687  *layer_type = LAYER_MPA|DDP_LLP;
688  *ecode = MPA_CRC_ERR;
689  break;
690  case TPT_ERR_MARKER:
691  *layer_type = LAYER_MPA|DDP_LLP;
692  *ecode = MPA_MARKER_ERR;
693  break;
694  case TPT_ERR_PDU_LEN_ERR:
695  *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
696  *ecode = DDPU_MSG_TOOBIG;
697  break;
698  case TPT_ERR_DDP_VERSION:
699  if (tagged) {
700  *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
701  *ecode = DDPT_INV_VERS;
702  } else {
703  *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
704  *ecode = DDPU_INV_VERS;
705  }
706  break;
707  case TPT_ERR_RDMA_VERSION:
708  *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
709  *ecode = RDMAP_INV_VERS;
710  break;
711  case TPT_ERR_OPCODE:
712  *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
713  *ecode = RDMAP_INV_OPCODE;
714  break;
715  case TPT_ERR_DDP_QUEUE_NUM:
716  *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
717  *ecode = DDPU_INV_QN;
718  break;
719  case TPT_ERR_MSN:
720  case TPT_ERR_MSN_GAP:
721  case TPT_ERR_MSN_RANGE:
722  case TPT_ERR_IRD_OVERFLOW:
723  *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
724  *ecode = DDPU_INV_MSN_RANGE;
725  break;
726  case TPT_ERR_TBIT:
727  *layer_type = LAYER_DDP|DDP_LOCAL_CATA;
728  *ecode = 0;
729  break;
730  case TPT_ERR_MO:
731  *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
732  *ecode = DDPU_INV_MO;
733  break;
734  default:
735  *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
736  *ecode = 0;
737  break;
738  }
739 }
740 
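/*
 * Post a zero-byte RDMA READ. The WQE is sent to the uP over the offload
 * control path (iwch_cxgb3_ofld_send) rather than through the QP's send
 * queue, using dummy stag/to values and a zero local length.
 */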
741 int iwch_post_zb_read(struct iwch_ep *ep)
742 {
743  union t3_wr *wqe;
744  struct sk_buff *skb;
745  u8 flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;
746 
747  PDBG("%s enter\n", __func__);
748  skb = alloc_skb(40, GFP_KERNEL);
749  if (!skb) {
750  printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
751  return -ENOMEM;
752  }
753  wqe = (union t3_wr *)skb_put(skb, sizeof(struct t3_rdma_read_wr));
754  memset(wqe, 0, sizeof(struct t3_rdma_read_wr));
755  wqe->read.rdmaop = T3_READ_REQ;
756  wqe->read.reserved[0] = 0;
757  wqe->read.reserved[1] = 0;
758  wqe->read.rem_stag = cpu_to_be32(1);
759  wqe->read.rem_to = cpu_to_be64(1);
760  wqe->read.local_stag = cpu_to_be32(1);
761  wqe->read.local_len = cpu_to_be32(0);
762  wqe->read.local_to = cpu_to_be64(1);
763  wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_READ));
764  wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(ep->hwtid)|
765  V_FW_RIWR_LEN(flit_cnt));
766  skb->priority = CPL_PRIORITY_DATA;
767  return iwch_cxgb3_ofld_send(ep->com.qp->rhp->rdev.t3cdev_p, skb);
768 }
769 
770 /*
771  * This posts a TERMINATE with layer=RDMA, type=catastrophic.
772  */
773 int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
774 {
775  union t3_wr *wqe;
776  struct terminate_message *term;
777  struct sk_buff *skb;
778 
779  PDBG("%s %d\n", __func__, __LINE__);
780  skb = alloc_skb(40, GFP_ATOMIC);
781  if (!skb) {
782  printk(KERN_ERR "%s cannot send TERMINATE!\n", __func__);
783  return -ENOMEM;
784  }
785  wqe = (union t3_wr *)skb_put(skb, 40);
786  memset(wqe, 0, 40);
787  wqe->send.rdmaop = T3_TERMINATE;
788 
789  /* immediate data length */
790  wqe->send.plen = htonl(4);
791 
792  /* immediate data starts here. */
793  term = (struct terminate_message *)wqe->send.sgl;
794  build_term_codes(rsp_msg, &term->layer_etype, &term->ecode);
795  wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_SEND) |
796  V_FW_RIWR_COMPL(0) | V_FW_RIWR_TID(qhp->ep->hwtid));
797  wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_LEN(40 >> 3));
798  skb->priority = CPL_PRIORITY_DATA;
799  return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
800 }
801 
802 /*
803  * Assumes qhp lock is held.
804  */
805 static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
806  struct iwch_cq *schp)
807 {
808  int count;
809  int flushed;
810 
811 
812  PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
813  /* take a ref on the qhp since we must release the lock */
814  atomic_inc(&qhp->refcnt);
815  spin_unlock(&qhp->lock);
816 
817  /* locking hierarchy: cq lock first, then qp lock. */
818  spin_lock(&rchp->lock);
819  spin_lock(&qhp->lock);
820  cxio_flush_hw_cq(&rchp->cq);
821  cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
822  flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
823  spin_unlock(&qhp->lock);
824  spin_unlock(&rchp->lock);
825  if (flushed) {
826  spin_lock(&rchp->comp_handler_lock);
827  (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
828  spin_unlock(&rchp->comp_handler_lock);
829  }
830 
831  /* locking hierarchy: cq lock first, then qp lock. */
832  spin_lock(&schp->lock);
833  spin_lock(&qhp->lock);
834  cxio_flush_hw_cq(&schp->cq);
835  cxio_count_scqes(&schp->cq, &qhp->wq, &count);
836  flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
837  spin_unlock(&qhp->lock);
838  spin_unlock(&schp->lock);
839  if (flushed) {
840  spin_lock(&schp->comp_handler_lock);
841  (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
842  spin_unlock(&schp->comp_handler_lock);
843  }
844 
845  /* deref */
846  if (atomic_dec_and_test(&qhp->refcnt))
847  wake_up(&qhp->wait);
848 
849  spin_lock(&qhp->lock);
850 }
851 
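/*
 * Flush a QP. For userspace QPs the WQ and CQs are simply marked in error
 * and the completion handlers are invoked; kernel QPs go through
 * __flush_qp(), which drains the hardware CQs and synthesizes flush
 * completions for any outstanding RQ and SQ work requests.
 */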
852 static void flush_qp(struct iwch_qp *qhp)
853 {
854  struct iwch_cq *rchp, *schp;
855 
856  rchp = get_chp(qhp->rhp, qhp->attr.rcq);
857  schp = get_chp(qhp->rhp, qhp->attr.scq);
858 
859  if (qhp->ibqp.uobject) {
860  cxio_set_wq_in_error(&qhp->wq);
861  cxio_set_cq_in_error(&rchp->cq);
862  spin_lock(&rchp->comp_handler_lock);
863  (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
864  spin_unlock(&rchp->comp_handler_lock);
865  if (schp != rchp) {
866  cxio_set_cq_in_error(&schp->cq);
867  spin_lock(&schp->comp_handler_lock);
868  (*schp->ibcq.comp_handler)(&schp->ibcq,
869  schp->ibcq.cq_context);
870  spin_unlock(&schp->comp_handler_lock);
871  }
872  return;
873  }
874  __flush_qp(qhp, rchp, schp);
875 }
876 
877 
878 /*
879  * Return count of RECV WRs posted
880  */
881 u16 iwch_rqes_posted(struct iwch_qp *qhp)
882 {
883  union t3_wr *wqe = qhp->wq.queue;
884  u16 count = 0;
885  while ((count+1) != 0 && fw_riwrh_opcode((struct fw_riwrh *)wqe) == T3_WR_RCV) {
886  count++;
887  wqe++;
888  }
889  PDBG("%s qhp %p count %u\n", __func__, qhp, count);
890  return count;
891 }
892 
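/*
 * Gather the QP and MPA attributes into a t3_rdma_init_attr block and hand
 * it to the firmware via cxio_rdma_init(). The RQE count is taken from the
 * RECV WRs already posted, and peer2peer mode forces a read-based RTR
 * exchange with at least one ORD/IRD credit.
 */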
893 static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
894  enum iwch_qp_attr_mask mask,
895  struct iwch_qp_attributes *attrs)
896 {
897  struct t3_rdma_init_attr init_attr;
898  int ret;
899 
900  init_attr.tid = qhp->ep->hwtid;
901  init_attr.qpid = qhp->wq.qpid;
902  init_attr.pdid = qhp->attr.pd;
903  init_attr.scqid = qhp->attr.scq;
904  init_attr.rcqid = qhp->attr.rcq;
905  init_attr.rq_addr = qhp->wq.rq_addr;
906  init_attr.rq_size = 1 << qhp->wq.rq_size_log2;
907  init_attr.mpaattrs = uP_RI_MPA_IETF_ENABLE |
908  qhp->attr.mpa_attr.recv_marker_enabled |
909  (qhp->attr.mpa_attr.xmit_marker_enabled << 1) |
910  (qhp->attr.mpa_attr.crc_enabled << 2);
911 
912  init_attr.qpcaps = uP_RI_QP_RDMA_READ_ENABLE |
913  uP_RI_QP_RDMA_WRITE_ENABLE |
914  uP_RI_QP_BIND_ENABLE;
915  if (!qhp->ibqp.uobject)
916  init_attr.qpcaps |= uP_RI_QP_STAG0_ENABLE |
917  uP_RI_QP_FAST_REGISTER_ENABLE;
918 
919  init_attr.tcp_emss = qhp->ep->emss;
920  init_attr.ord = qhp->attr.max_ord;
921  init_attr.ird = qhp->attr.max_ird;
922  init_attr.qp_dma_addr = qhp->wq.dma_addr;
923  init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
924  init_attr.rqe_count = iwch_rqes_posted(qhp);
925  init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0;
926  init_attr.chan = qhp->ep->l2t->smt_idx;
927  if (peer2peer) {
928  init_attr.rtr_type = RTR_READ;
929  if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
930  init_attr.ord = 1;
931  if (init_attr.ird == 0 && !qhp->attr.mpa_attr.initiator)
932  init_attr.ird = 1;
933  } else
934  init_attr.rtr_type = 0;
935  init_attr.irs = qhp->ep->rcv_seq;
936  PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d "
937  "flags 0x%x qpcaps 0x%x\n", __func__,
938  init_attr.rq_addr, init_attr.rq_size,
939  init_attr.flags, init_attr.qpcaps);
940  ret = cxio_rdma_init(&rhp->rdev, &init_attr);
941  PDBG("%s ret %d\n", __func__, ret);
942  return ret;
943 }
944 
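/*
 * Drive the iWARP QP state machine. Attribute-only changes are accepted in
 * IDLE; IDLE->RTS runs rdma_init(), RTS->CLOSING/TERMINATE/ERROR schedule a
 * disconnect, a TERMINATE message, or a flush, and the error path
 * disassociates the endpoint from the QP before waking any waiters.
 */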
945 int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
946  enum iwch_qp_attr_mask mask,
947  struct iwch_qp_attributes *attrs,
948  int internal)
949 {
950  int ret = 0;
951  struct iwch_qp_attributes newattr = qhp->attr;
952  unsigned long flag;
953  int disconnect = 0;
954  int terminate = 0;
955  int abort = 0;
956  int free = 0;
957  struct iwch_ep *ep = NULL;
958 
959  PDBG("%s qhp %p qpid 0x%x ep %p state %d -> %d\n", __func__,
960  qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state,
961  (mask & IWCH_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
962 
963  spin_lock_irqsave(&qhp->lock, flag);
964 
965  /* Process attr changes if in IDLE */
966  if (mask & IWCH_QP_ATTR_VALID_MODIFY) {
967  if (qhp->attr.state != IWCH_QP_STATE_IDLE) {
968  ret = -EIO;
969  goto out;
970  }
971  if (mask & IWCH_QP_ATTR_ENABLE_RDMA_READ)
972  newattr.enable_rdma_read = attrs->enable_rdma_read;
973  if (mask & IWCH_QP_ATTR_ENABLE_RDMA_WRITE)
974  newattr.enable_rdma_write = attrs->enable_rdma_write;
975  if (mask & IWCH_QP_ATTR_ENABLE_RDMA_BIND)
976  newattr.enable_bind = attrs->enable_bind;
977  if (mask & IWCH_QP_ATTR_MAX_ORD) {
978  if (attrs->max_ord >
979  rhp->attr.max_rdma_read_qp_depth) {
980  ret = -EINVAL;
981  goto out;
982  }
983  newattr.max_ord = attrs->max_ord;
984  }
985  if (mask & IWCH_QP_ATTR_MAX_IRD) {
986  if (attrs->max_ird >
987  rhp->attr.max_rdma_reads_per_qp) {
988  ret = -EINVAL;
989  goto out;
990  }
991  newattr.max_ird = attrs->max_ird;
992  }
993  qhp->attr = newattr;
994  }
995 
996  if (!(mask & IWCH_QP_ATTR_NEXT_STATE))
997  goto out;
998  if (qhp->attr.state == attrs->next_state)
999  goto out;
1000 
1001  switch (qhp->attr.state) {
1002  case IWCH_QP_STATE_IDLE:
1003  switch (attrs->next_state) {
1004  case IWCH_QP_STATE_RTS:
1005  if (!(mask & IWCH_QP_ATTR_LLP_STREAM_HANDLE)) {
1006  ret = -EINVAL;
1007  goto out;
1008  }
1009  if (!(mask & IWCH_QP_ATTR_MPA_ATTR)) {
1010  ret = -EINVAL;
1011  goto out;
1012  }
1013  qhp->attr.mpa_attr = attrs->mpa_attr;
1014  qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
1015  qhp->ep = qhp->attr.llp_stream_handle;
1016  qhp->attr.state = IWCH_QP_STATE_RTS;
1017 
1018  /*
1019  * Ref the endpoint here and deref when we
1020  * disassociate the endpoint from the QP. This
1021  * happens in CLOSING->IDLE transition or *->ERROR
1022  * transition.
1023  */
1024  get_ep(&qhp->ep->com);
1025  spin_unlock_irqrestore(&qhp->lock, flag);
1026  ret = rdma_init(rhp, qhp, mask, attrs);
1027  spin_lock_irqsave(&qhp->lock, flag);
1028  if (ret)
1029  goto err;
1030  break;
1031  case IWCH_QP_STATE_ERROR:
1032  qhp->attr.state = IWCH_QP_STATE_ERROR;
1033  flush_qp(qhp);
1034  break;
1035  default:
1036  ret = -EINVAL;
1037  goto out;
1038  }
1039  break;
1040  case IWCH_QP_STATE_RTS:
1041  switch (attrs->next_state) {
1042  case IWCH_QP_STATE_CLOSING:
1043  BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
1044  qhp->attr.state = IWCH_QP_STATE_CLOSING;
1045  if (!internal) {
1046  abort=0;
1047  disconnect = 1;
1048  ep = qhp->ep;
1049  get_ep(&ep->com);
1050  }
1051  break;
1052  case IWCH_QP_STATE_TERMINATE:
1053  qhp->attr.state = IWCH_QP_STATE_TERMINATE;
1054  if (qhp->ibqp.uobject)
1055  cxio_set_wq_in_error(&qhp->wq);
1056  if (!internal)
1057  terminate = 1;
1058  break;
1059  case IWCH_QP_STATE_ERROR:
1060  qhp->attr.state = IWCH_QP_STATE_ERROR;
1061  if (!internal) {
1062  abort=1;
1063  disconnect = 1;
1064  ep = qhp->ep;
1065  get_ep(&ep->com);
1066  }
1067  goto err;
1068  break;
1069  default:
1070  ret = -EINVAL;
1071  goto out;
1072  }
1073  break;
1074  case IWCH_QP_STATE_CLOSING:
1075  if (!internal) {
1076  ret = -EINVAL;
1077  goto out;
1078  }
1079  switch (attrs->next_state) {
1080  case IWCH_QP_STATE_IDLE:
1081  flush_qp(qhp);
1082  qhp->attr.state = IWCH_QP_STATE_IDLE;
1083  qhp->attr.llp_stream_handle = NULL;
1084  put_ep(&qhp->ep->com);
1085  qhp->ep = NULL;
1086  wake_up(&qhp->wait);
1087  break;
1088  case IWCH_QP_STATE_ERROR:
1089  goto err;
1090  default:
1091  ret = -EINVAL;
1092  goto err;
1093  }
1094  break;
1095  case IWCH_QP_STATE_ERROR:
1096  if (attrs->next_state != IWCH_QP_STATE_IDLE) {
1097  ret = -EINVAL;
1098  goto out;
1099  }
1100 
1101  if (!Q_EMPTY(qhp->wq.sq_rptr, qhp->wq.sq_wptr) ||
1102  !Q_EMPTY(qhp->wq.rq_rptr, qhp->wq.rq_wptr)) {
1103  ret = -EINVAL;
1104  goto out;
1105  }
1106  qhp->attr.state = IWCH_QP_STATE_IDLE;
1107  break;
1108  case IWCH_QP_STATE_TERMINATE:
1109  if (!internal) {
1110  ret = -EINVAL;
1111  goto out;
1112  }
1113  goto err;
1114  break;
1115  default:
1116  printk(KERN_ERR "%s in a bad state %d\n",
1117  __func__, qhp->attr.state);
1118  ret = -EINVAL;
1119  goto err;
1120  break;
1121  }
1122  goto out;
1123 err:
1124  PDBG("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
1125  qhp->wq.qpid);
1126 
1127  /* disassociate the LLP connection */
1128  qhp->attr.llp_stream_handle = NULL;
1129  ep = qhp->ep;
1130  qhp->ep = NULL;
1131  qhp->attr.state = IWCH_QP_STATE_ERROR;
1132  free=1;
1133  wake_up(&qhp->wait);
1134  BUG_ON(!ep);
1135  flush_qp(qhp);
1136 out:
1137  spin_unlock_irqrestore(&qhp->lock, flag);
1138 
1139  if (terminate)
1140  iwch_post_terminate(qhp, NULL);
1141 
1142  /*
1143  * If disconnect is 1, then we need to initiate a disconnect
1144  * on the EP. This can be a normal close (RTS->CLOSING) or
1145  * an abnormal close (RTS/CLOSING->ERROR).
1146  */
1147  if (disconnect) {
1148  iwch_ep_disconnect(ep, abort, GFP_KERNEL);
1149  put_ep(&ep->com);
1150  }
1151 
1152  /*
1153  * If free is 1, then we've disassociated the EP from the QP
1154  * and we need to dereference the EP.
1155  */
1156  if (free)
1157  put_ep(&ep->com);
1158 
1159  PDBG("%s exit state %d\n", __func__, qhp->attr.state);
1160  return ret;
1161 }