Linux Kernel 3.7.1
ipath_ud.c
/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <rdma/ib_smi.h>

#include "ipath_verbs.h"
#include "ipath_kernel.h"

/**
 * ipath_ud_loopback - handle send on loopback QPs
 * @sqp: the sending QP
 * @swqe: the send work request
 *
 * This is called from ipath_make_ud_req() to forward a WQE addressed
 * to the same HCA.
 * Note that the receive interrupt handler may be calling ipath_ud_rcv()
 * while this is being called.
 */
static void ipath_ud_loopback(struct ipath_qp *sqp, struct ipath_swqe *swqe)
{
        struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
        struct ipath_qp *qp;
        struct ib_ah_attr *ah_attr;
        unsigned long flags;
        struct ipath_rq *rq;
        struct ipath_srq *srq;
        struct ipath_sge_state rsge;
        struct ipath_sge *sge;
        struct ipath_rwq *wq;
        struct ipath_rwqe *wqe;
        void (*handler)(struct ib_event *, void *);
        struct ib_wc wc;
        u32 tail;
        u32 rlen;
        u32 length;

        qp = ipath_lookup_qpn(&dev->qp_table, swqe->wr.wr.ud.remote_qpn);
        if (!qp || !(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_RECV_OK)) {
                dev->n_pkt_drops++;
                goto done;
        }
73 
74  /*
75  * Check that the qkey matches (except for QP0, see 9.6.1.4.1).
76  * Qkeys with the high order bit set mean use the
77  * qkey from the QP context instead of the WR (see 10.2.5).
78  */
79  if (unlikely(qp->ibqp.qp_num &&
80  ((int) swqe->wr.wr.ud.remote_qkey < 0 ?
81  sqp->qkey : swqe->wr.wr.ud.remote_qkey) != qp->qkey)) {
82  /* XXX OK to lose a count once in a while. */
83  dev->qkey_violations++;
84  dev->n_pkt_drops++;
85  goto drop;
86  }
87 
88  /*
89  * A GRH is expected to precede the data even if not
90  * present on the wire.
91  */
92  length = swqe->length;
93  memset(&wc, 0, sizeof wc);
94  wc.byte_len = length + sizeof(struct ib_grh);
95 
96  if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
97  wc.wc_flags = IB_WC_WITH_IMM;
98  wc.ex.imm_data = swqe->wr.ex.imm_data;
99  }
100 
101  /*
102  * This would be a lot simpler if we could call ipath_get_rwqe()
103  * but that uses state that the receive interrupt handler uses
104  * so we would need to lock out receive interrupts while doing
105  * local loopback.
106  */
107  if (qp->ibqp.srq) {
108  srq = to_isrq(qp->ibqp.srq);
109  handler = srq->ibsrq.event_handler;
110  rq = &srq->rq;
111  } else {
112  srq = NULL;
113  handler = NULL;
114  rq = &qp->r_rq;
115  }
116 
117  /*
118  * Get the next work request entry to find where to put the data.
119  * Note that it is safe to drop the lock after changing rq->tail
120  * since ipath_post_receive() won't fill the empty slot.
121  */
122  spin_lock_irqsave(&rq->lock, flags);
123  wq = rq->wq;
124  tail = wq->tail;
125  /* Validate tail before using it since it is user writable. */
126  if (tail >= rq->size)
127  tail = 0;
128  if (unlikely(tail == wq->head)) {
129  spin_unlock_irqrestore(&rq->lock, flags);
130  dev->n_pkt_drops++;
131  goto drop;
132  }
133  wqe = get_rwqe_ptr(rq, tail);
134  rsge.sg_list = qp->r_ud_sg_list;
135  if (!ipath_init_sge(qp, wqe, &rlen, &rsge)) {
136  spin_unlock_irqrestore(&rq->lock, flags);
137  dev->n_pkt_drops++;
138  goto drop;
139  }
140  /* Silently drop packets which are too big. */
141  if (wc.byte_len > rlen) {
142  spin_unlock_irqrestore(&rq->lock, flags);
143  dev->n_pkt_drops++;
144  goto drop;
145  }
146  if (++tail >= rq->size)
147  tail = 0;
148  wq->tail = tail;
149  wc.wr_id = wqe->wr_id;
150  if (handler) {
151  u32 n;
152 
153  /*
154  * validate head pointer value and compute
155  * the number of remaining WQEs.
156  */
157  n = wq->head;
158  if (n >= rq->size)
159  n = 0;
160  if (n < tail)
161  n += rq->size - tail;
162  else
163  n -= tail;
164  if (n < srq->limit) {
165  struct ib_event ev;
166 
167  srq->limit = 0;
168  spin_unlock_irqrestore(&rq->lock, flags);
169  ev.device = qp->ibqp.device;
170  ev.element.srq = qp->ibqp.srq;
171  ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
172  handler(&ev, srq->ibsrq.srq_context);
173  } else
174  spin_unlock_irqrestore(&rq->lock, flags);
175  } else
176  spin_unlock_irqrestore(&rq->lock, flags);
177 
178  ah_attr = &to_iah(swqe->wr.wr.ud.ah)->attr;
179  if (ah_attr->ah_flags & IB_AH_GRH) {
180  ipath_copy_sge(&rsge, &ah_attr->grh, sizeof(struct ib_grh));
181  wc.wc_flags |= IB_WC_GRH;
182  } else
183  ipath_skip_sge(&rsge, sizeof(struct ib_grh));
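        /*
         * Copy the payload from the sender's scatter/gather list into the
         * receiver's SGE state, advancing through each memory-region
         * segment (IPATH_SEGSZ entries per map) as it is consumed.
         */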
        sge = swqe->sg_list;
        while (length) {
                u32 len = sge->length;

                if (len > length)
                        len = length;
                if (len > sge->sge_length)
                        len = sge->sge_length;
                BUG_ON(len == 0);
                ipath_copy_sge(&rsge, sge->vaddr, len);
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (--swqe->wr.num_sge)
                                sge++;
                } else if (sge->length == 0 && sge->mr != NULL) {
                        if (++sge->n >= IPATH_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr =
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length =
                                sge->mr->map[sge->m]->segs[sge->n].length;
                }
                length -= len;
        }
        wc.status = IB_WC_SUCCESS;
        wc.opcode = IB_WC_RECV;
        wc.qp = &qp->ibqp;
        wc.src_qp = sqp->ibqp.qp_num;
        /* XXX do we know which pkey matched? Only needed for GSI. */
        wc.pkey_index = 0;
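        /*
         * Report the source LID the receiver would have seen on the wire:
         * the local base LID with the sender's path bits (the low LMC
         * bits from the address handle) folded in.
         */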
        wc.slid = dev->dd->ipath_lid |
                (ah_attr->src_path_bits &
                 ((1 << dev->dd->ipath_lmc) - 1));
        wc.sl = ah_attr->sl;
        wc.dlid_path_bits =
                ah_attr->dlid & ((1 << dev->dd->ipath_lmc) - 1);
        wc.port_num = 1;
        /* Signal completion event if the solicited bit is set. */
        ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
                       swqe->wr.send_flags & IB_SEND_SOLICITED);
drop:
        if (atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);
done:;
}

/**
 * ipath_make_ud_req - construct a UD request packet
 * @qp: the QP
 *
 * Return 1 if constructed; otherwise, return 0.
 */
int ipath_make_ud_req(struct ipath_qp *qp)
{
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        struct ipath_other_headers *ohdr;
        struct ib_ah_attr *ah_attr;
        struct ipath_swqe *wqe;
        unsigned long flags;
        u32 nwords;
        u32 extra_bytes;
        u32 bth0;
        u16 lrh0;
        u16 lid;
        int ret = 0;
        int next_cur;

        spin_lock_irqsave(&qp->s_lock, flags);

        if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_NEXT_SEND_OK)) {
                if (!(ib_ipath_state_ops[qp->state] & IPATH_FLUSH_SEND))
                        goto bail;
                /* We are in the error state, flush the work request. */
                if (qp->s_last == qp->s_head)
                        goto bail;
                /* If DMAs are in progress, we can't flush immediately. */
                if (atomic_read(&qp->s_dma_busy)) {
                        qp->s_flags |= IPATH_S_WAIT_DMA;
                        goto bail;
                }
                wqe = get_swqe_ptr(qp, qp->s_last);
                ipath_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
                goto done;
        }

        if (qp->s_cur == qp->s_head)
                goto bail;

        wqe = get_swqe_ptr(qp, qp->s_cur);
        next_cur = qp->s_cur + 1;
        if (next_cur >= qp->s_size)
                next_cur = 0;

        /* Construct the header. */
        ah_attr = &to_iah(wqe->wr.wr.ud.ah)->attr;
        if (ah_attr->dlid >= IPATH_MULTICAST_LID_BASE) {
                if (ah_attr->dlid != IPATH_PERMISSIVE_LID)
                        dev->n_multicast_xmit++;
                else
                        dev->n_unicast_xmit++;
        } else {
                dev->n_unicast_xmit++;
                lid = ah_attr->dlid & ~((1 << dev->dd->ipath_lmc) - 1);
                if (unlikely(lid == dev->dd->ipath_lid)) {
                        /*
                         * If DMAs are in progress, we can't generate
                         * a completion for the loopback packet since
                         * it would be out of order.
                         * XXX Instead of waiting, we could queue a
                         * zero length descriptor so we get a callback.
                         */
                        if (atomic_read(&qp->s_dma_busy)) {
                                qp->s_flags |= IPATH_S_WAIT_DMA;
                                goto bail;
                        }
                        qp->s_cur = next_cur;
                        spin_unlock_irqrestore(&qp->s_lock, flags);
                        ipath_ud_loopback(qp, wqe);
                        spin_lock_irqsave(&qp->s_lock, flags);
                        ipath_send_complete(qp, wqe, IB_WC_SUCCESS);
                        goto done;
                }
        }

        qp->s_cur = next_cur;
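        /*
         * Payloads are sent as a whole number of 32-bit words: extra_bytes
         * is the 0-3 bytes of padding needed (e.g. a 13-byte payload gets
         * 3 pad bytes) and nwords is the padded length in words.
         */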
        extra_bytes = -wqe->length & 3;
        nwords = (wqe->length + extra_bytes) >> 2;

        /* header size in 32-bit words LRH+BTH+DETH = (8+12+8)/4. */
        qp->s_hdrwords = 7;
        qp->s_cur_size = wqe->length;
        qp->s_cur_sge = &qp->s_sge;
        qp->s_dmult = ah_attr->static_rate;
        qp->s_wqe = wqe;
        qp->s_sge.sge = wqe->sg_list[0];
        qp->s_sge.sg_list = wqe->sg_list + 1;
        qp->s_sge.num_sge = wqe->wr.num_sge;

        if (ah_attr->ah_flags & IB_AH_GRH) {
                /* Header size in 32-bit words. */
                qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh,
                                                 &ah_attr->grh,
                                                 qp->s_hdrwords, nwords);
                lrh0 = IPATH_LRH_GRH;
                ohdr = &qp->s_hdr.u.l.oth;
                /*
                 * Don't worry about sending to locally attached multicast
                 * QPs.  It is unspecified by the spec. what happens.
                 */
        } else {
                /* Header size in 32-bit words. */
                lrh0 = IPATH_LRH_BTH;
                ohdr = &qp->s_hdr.u.oth;
        }
        if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
                qp->s_hdrwords++;
                ohdr->u.ud.imm_data = wqe->wr.ex.imm_data;
                bth0 = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE << 24;
        } else
                bth0 = IB_OPCODE_UD_SEND_ONLY << 24;
        lrh0 |= ah_attr->sl << 4;
        if (qp->ibqp.qp_type == IB_QPT_SMI)
                lrh0 |= 0xF000; /* Set VL (see ch. 13.5.3.1) */
        qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
        qp->s_hdr.lrh[1] = cpu_to_be16(ah_attr->dlid);  /* DEST LID */
        qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
                                       SIZE_OF_CRC);
        lid = dev->dd->ipath_lid;
        if (lid) {
                lid |= ah_attr->src_path_bits &
                        ((1 << dev->dd->ipath_lmc) - 1);
                qp->s_hdr.lrh[3] = cpu_to_be16(lid);
        } else
                qp->s_hdr.lrh[3] = IB_LID_PERMISSIVE;
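        /*
         * Assemble BTH dword 0: the opcode (set above) occupies bits 31:24,
         * the solicited event bit is bit 23, the pad count sits in bits
         * 21:20, and the P_Key fills the low 16 bits.
         */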
        if (wqe->wr.send_flags & IB_SEND_SOLICITED)
                bth0 |= 1 << 23;
        bth0 |= extra_bytes << 20;
        bth0 |= qp->ibqp.qp_type == IB_QPT_SMI ? IPATH_DEFAULT_P_KEY :
                ipath_get_pkey(dev->dd, qp->s_pkey_index);
        ohdr->bth[0] = cpu_to_be32(bth0);
        /*
         * Use the multicast QP if the destination LID is a multicast LID.
         */
        ohdr->bth[1] = ah_attr->dlid >= IPATH_MULTICAST_LID_BASE &&
                ah_attr->dlid != IPATH_PERMISSIVE_LID ?
                cpu_to_be32(IPATH_MULTICAST_QPN) :
                cpu_to_be32(wqe->wr.wr.ud.remote_qpn);
        ohdr->bth[2] = cpu_to_be32(qp->s_next_psn++ & IPATH_PSN_MASK);
        /*
         * Qkeys with the high order bit set mean use the
         * qkey from the QP context instead of the WR (see 10.2.5).
         */
        ohdr->u.ud.deth[0] = cpu_to_be32((int)wqe->wr.wr.ud.remote_qkey < 0 ?
                                         qp->qkey : wqe->wr.wr.ud.remote_qkey);
        ohdr->u.ud.deth[1] = cpu_to_be32(qp->ibqp.qp_num);

done:
        ret = 1;
        goto unlock;

bail:
        qp->s_flags &= ~IPATH_S_BUSY;
unlock:
        spin_unlock_irqrestore(&qp->s_lock, flags);
        return ret;
}

/**
 * ipath_ud_rcv - receive an incoming UD packet
 * @dev: the device the packet came in on
 * @hdr: the packet header
 * @has_grh: true if the packet has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP the packet came on
 *
 * This is called from ipath_qp_rcv() to process an incoming UD packet
 * for the given QP.
 * Called at interrupt level.
 */
void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
                  int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
{
        struct ipath_other_headers *ohdr;
        int opcode;
        u32 hdrsize;
        u32 pad;
        struct ib_wc wc;
        u32 qkey;
        u32 src_qp;
        u16 dlid;
        int header_in_data;

        /* Check for GRH */
        if (!has_grh) {
                ohdr = &hdr->u.oth;
                hdrsize = 8 + 12 + 8;   /* LRH + BTH + DETH */
                qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
                src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
                header_in_data = 0;
        } else {
                ohdr = &hdr->u.l.oth;
                hdrsize = 8 + 40 + 12 + 8;      /* LRH + GRH + BTH + DETH */
                /*
                 * The header with GRH is 68 bytes and the core driver sets
                 * the eager header buffer size to 56 bytes so the last 12
                 * bytes of the IB header is in the data buffer.
                 */
                header_in_data = dev->dd->ipath_rcvhdrentsize == 16;
                if (header_in_data) {
                        qkey = be32_to_cpu(((__be32 *) data)[1]);
                        src_qp = be32_to_cpu(((__be32 *) data)[2]);
                        data += 12;
                } else {
                        qkey = be32_to_cpu(ohdr->u.ud.deth[0]);
                        src_qp = be32_to_cpu(ohdr->u.ud.deth[1]);
                }
        }
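        /* QPNs are 24 bits on the wire; mask off the reserved top byte
         * of DETH dword 1. */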
        src_qp &= IPATH_QPN_MASK;

        /*
         * Check that the permissive LID is only used on QP0
         * and the QKEY matches (see 9.6.1.4.1 and 9.6.1.5.1).
         */
        if (qp->ibqp.qp_num) {
                if (unlikely(hdr->lrh[1] == IB_LID_PERMISSIVE ||
                             hdr->lrh[3] == IB_LID_PERMISSIVE)) {
                        dev->n_pkt_drops++;
                        goto bail;
                }
                if (unlikely(qkey != qp->qkey)) {
                        /* XXX OK to lose a count once in a while. */
                        dev->qkey_violations++;
                        dev->n_pkt_drops++;
                        goto bail;
                }
        } else if (hdr->lrh[1] == IB_LID_PERMISSIVE ||
                   hdr->lrh[3] == IB_LID_PERMISSIVE) {
                struct ib_smp *smp = (struct ib_smp *) data;

                if (smp->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
                        dev->n_pkt_drops++;
                        goto bail;
                }
        }

        /*
         * The opcode is in the low byte when it's in network order
         * (top byte when in host order).
         */
        opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
        if (qp->ibqp.qp_num > 1 &&
            opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
                if (header_in_data) {
                        wc.ex.imm_data = *(__be32 *) data;
                        data += sizeof(__be32);
                } else
                        wc.ex.imm_data = ohdr->u.ud.imm_data;
                wc.wc_flags = IB_WC_WITH_IMM;
                hdrsize += sizeof(u32);
        } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
                wc.ex.imm_data = 0;
                wc.wc_flags = 0;
        } else {
                dev->n_pkt_drops++;
                goto bail;
        }

        /* Get the number of bytes the message was padded by. */
        pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
        if (unlikely(tlen < (hdrsize + pad + 4))) {
                /* Drop incomplete packets. */
                dev->n_pkt_drops++;
                goto bail;
        }
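        /* Trim the header, the pad bytes, and the 4-byte ICRC to get the
         * payload length. */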
        tlen -= hdrsize + pad + 4;

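        /*
         * MADs are exactly 256 bytes of payload; SMPs to QP0 must arrive
         * on VL15 (the top nibble of LRH dword 0), while GSI MADs to QP1
         * must not.
         */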
        /* Drop invalid MAD packets (see 13.5.3.1). */
        if (unlikely((qp->ibqp.qp_num == 0 &&
                      (tlen != 256 ||
                       (be16_to_cpu(hdr->lrh[0]) >> 12) != 15)) ||
                     (qp->ibqp.qp_num == 1 &&
                      (tlen != 256 ||
                       (be16_to_cpu(hdr->lrh[0]) >> 12) == 15)))) {
                dev->n_pkt_drops++;
                goto bail;
        }

        /*
         * A GRH is expected to precede the data even if not
         * present on the wire.
         */
        wc.byte_len = tlen + sizeof(struct ib_grh);

        /*
         * Get the next work request entry to find where to put the data.
         */
        if (qp->r_flags & IPATH_R_REUSE_SGE)
                qp->r_flags &= ~IPATH_R_REUSE_SGE;
        else if (!ipath_get_rwqe(qp, 0)) {
                /*
                 * Count VL15 packets dropped due to no receive buffer.
                 * Otherwise, count them as buffer overruns since usually,
                 * the HW will be able to receive packets even if there are
                 * no QPs with posted receive buffers.
                 */
                if (qp->ibqp.qp_num == 0)
                        dev->n_vl15_dropped++;
                else
                        dev->rcv_errors++;
                goto bail;
        }
        /* Silently drop packets which are too big. */
        if (wc.byte_len > qp->r_len) {
                qp->r_flags |= IPATH_R_REUSE_SGE;
                dev->n_pkt_drops++;
                goto bail;
        }
        if (has_grh) {
                ipath_copy_sge(&qp->r_sge, &hdr->u.l.grh,
                               sizeof(struct ib_grh));
                wc.wc_flags |= IB_WC_GRH;
        } else
                ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh));
        ipath_copy_sge(&qp->r_sge, data,
                       wc.byte_len - sizeof(struct ib_grh));
        if (!test_and_clear_bit(IPATH_R_WRID_VALID, &qp->r_aflags))
                goto bail;
        wc.wr_id = qp->r_wr_id;
        wc.status = IB_WC_SUCCESS;
        wc.opcode = IB_WC_RECV;
        wc.vendor_err = 0;
        wc.qp = &qp->ibqp;
        wc.src_qp = src_qp;
        /* XXX do we know which pkey matched? Only needed for GSI. */
        wc.pkey_index = 0;
        wc.slid = be16_to_cpu(hdr->lrh[3]);
        wc.sl = (be16_to_cpu(hdr->lrh[0]) >> 4) & 0xF;
        dlid = be16_to_cpu(hdr->lrh[1]);
        /*
         * Save the LMC lower bits if the destination LID is a unicast LID.
         */
        wc.dlid_path_bits = dlid >= IPATH_MULTICAST_LID_BASE ? 0 :
                dlid & ((1 << dev->dd->ipath_lmc) - 1);
        wc.port_num = 1;
        /* Signal completion event if the solicited bit is set. */
        ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
                       (ohdr->bth[0] &
                        cpu_to_be32(1 << 23)) != 0);

bail:;
}