Linux Kernel 3.7.1
mthca_srq.c
/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched.h>

#include <asm/io.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

enum {
	MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
};
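
/*
 * Note: queues up to MTHCA_MAX_DIRECT_SRQ_SIZE are expected to be
 * allocated as one physically contiguous ("direct") buffer, and
 * larger queues as a list of pages; that is why get_wqe() below has
 * two addressing paths.
 */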

struct mthca_tavor_srq_context {
	__be64 wqe_base_ds;	/* low 6 bits is descriptor size */
	__be32 state_pd;
	__be32 lkey;
	__be32 uar;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u32    reserved[2];
};

struct mthca_arbel_srq_context {
	__be32 state_logsize_srqn;
	__be32 lkey;
	__be32 db_index;
	__be32 logstride_usrpage;
	__be64 wqe_base;
	__be32 eq_pd;
	__be16 limit_watermark;
	__be16 wqe_cnt;
	u16    reserved1;
	__be16 wqe_counter;
	u32    reserved2[3];
};

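/*
 * Illustrative example (assuming 4K pages, i.e. PAGE_SHIFT == 12):
 * with wqe_shift == 6 (64-byte WQEs), WQE n starts at byte offset
 * n << 6, so entry 70 lives in page (70 << 6) >> 12 == 1 of the page
 * list, at offset (70 << 6) & (PAGE_SIZE - 1) == 384.
 */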
static void *get_wqe(struct mthca_srq *srq, int n)
{
	if (srq->is_direct)
		return srq->queue.direct.buf + (n << srq->wqe_shift);
	else
		return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
			((n << srq->wqe_shift) & (PAGE_SIZE - 1));
}

/*
 * Return a pointer to the location within a WQE that we're using as a
 * link when the WQE is in the free list.  We use the imm field
 * because in the Tavor case, posting a WQE may overwrite the next
 * segment of the previous WQE, but a receive WQE will never touch the
 * imm field.  This avoids corrupting our free list if the previous
 * WQE has already completed and been put on the free list when we
 * post the next WQE.
 */
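/*
 * For example, right after mthca_alloc_srq_buf() runs on a 4-entry
 * SRQ, the links read 1, 2, 3, -1: the free list is 0 -> 1 -> 2 -> 3
 * with first_free == 0 and last_free == 3.
 */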
static inline int *wqe_to_link(void *wqe)
{
	return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
}

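/*
 * The two context builders below pack the same information in
 * HCA-specific formats: Tavor stores the descriptor size in 16-byte
 * units in the low 6 bits of wqe_base_ds, while Arbel (mem-free mode)
 * packs log2 of the queue size into state_logsize_srqn and log2 of
 * the stride into the top bits of logstride_usrpage.
 */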
static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_tavor_srq_context *context)
{
	memset(context, 0, sizeof *context);

	context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
	context->state_pd    = cpu_to_be32(pd->pd_num);
	context->lkey        = cpu_to_be32(srq->mr.ibmr.lkey);

	if (pd->ibpd.uobject)
		context->uar =
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->uar = cpu_to_be32(dev->driver_uar.index);
}

static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
					 struct mthca_pd *pd,
					 struct mthca_srq *srq,
					 struct mthca_arbel_srq_context *context)
{
	int logsize, max;

	memset(context, 0, sizeof *context);

	/*
	 * Put max in a temporary variable to work around gcc bug
	 * triggered by ilog2() on sparc64.
	 */
	max = srq->max;
	logsize = ilog2(max);
	context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
	context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
	context->db_index = cpu_to_be32(srq->db_index);
	context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
	if (pd->ibpd.uobject)
		context->logstride_usrpage |=
			cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
	else
		context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
	context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
}

static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
{
	mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
		       srq->is_direct, &srq->mr);
	kfree(srq->wrid);
}

static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
			       struct mthca_srq *srq)
{
	struct mthca_data_seg *scatter;
	void *wqe;
	int err;
	int i;

	if (pd->ibpd.uobject)
		return 0;

	srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
	if (!srq->wrid)
		return -ENOMEM;

	err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
			      MTHCA_MAX_DIRECT_SRQ_SIZE,
			      &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
	if (err) {
		kfree(srq->wrid);
		return err;
	}

	/*
	 * Now initialize the SRQ buffer so that all of the WQEs are
	 * linked into the list of free WQEs.  In addition, set the
	 * scatter list L_Keys to the sentry value of 0x100.
	 */
	for (i = 0; i < srq->max; ++i) {
		struct mthca_next_seg *next;

		next = wqe = get_wqe(srq, i);

		if (i < srq->max - 1) {
			*wqe_to_link(wqe) = i + 1;
			next->nda_op = htonl(((i + 1) << srq->wqe_shift) | 1);
		} else {
			*wqe_to_link(wqe) = -1;
			next->nda_op = 0;
		}

		for (scatter = wqe + sizeof (struct mthca_next_seg);
		     (void *) scatter < wqe + (1 << srq->wqe_shift);
		     ++scatter)
			scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
	}

	srq->last = get_wqe(srq, srq->max - 1);

	return 0;
}

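/*
 * Descriptor sizing example (illustrative, using the 16-byte
 * mthca_next_seg/mthca_data_seg layouts from mthca_wqe.h): for
 * max_sge == 4, a WQE needs 16 + 4 * 16 == 80 bytes, which
 * roundup_pow_of_two() turns into ds == 128, so wqe_shift == 7.
 */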
int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
		    struct ib_srq_attr *attr, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	int ds;
	int err;

	/* Sanity check SRQ size before proceeding */
	if (attr->max_wr > dev->limits.max_srq_wqes ||
	    attr->max_sge > dev->limits.max_srq_sge)
		return -EINVAL;

	srq->max     = attr->max_wr;
	srq->max_gs  = attr->max_sge;
	srq->counter = 0;

	if (mthca_is_memfree(dev))
		srq->max = roundup_pow_of_two(srq->max + 1);
	else
		srq->max = srq->max + 1;

	ds = max(64UL,
		 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
				    srq->max_gs * sizeof (struct mthca_data_seg)));

	if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz))
		return -EINVAL;

	srq->wqe_shift = ilog2(ds);

	srq->srqn = mthca_alloc(&dev->srq_table.alloc);
	if (srq->srqn == -1)
		return -ENOMEM;

	if (mthca_is_memfree(dev)) {
		err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
		if (err)
			goto err_out;

		if (!pd->ibpd.uobject) {
			srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
						       srq->srqn, &srq->db);
			if (srq->db_index < 0) {
				err = -ENOMEM;
				goto err_out_icm;
			}
		}
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_out_db;
	}

	err = mthca_alloc_srq_buf(dev, pd, srq);
	if (err)
		goto err_out_mailbox;

	spin_lock_init(&srq->lock);
	srq->refcount = 1;
	init_waitqueue_head(&srq->wait);
	mutex_init(&srq->mutex);

	if (mthca_is_memfree(dev))
		mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
	else
		mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);

	err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn);

	if (err) {
		mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
		goto err_out_free_buf;
	}

	spin_lock_irq(&dev->srq_table.lock);
	if (mthca_array_set(&dev->srq_table.srq,
			    srq->srqn & (dev->limits.num_srqs - 1),
			    srq)) {
		spin_unlock_irq(&dev->srq_table.lock);
		goto err_out_free_srq;
	}
	spin_unlock_irq(&dev->srq_table.lock);

	mthca_free_mailbox(dev, mailbox);

	srq->first_free = 0;
	srq->last_free  = srq->max - 1;

	attr->max_wr    = srq->max - 1;
	attr->max_sge   = srq->max_gs;

	return 0;

err_out_free_srq:
	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);

err_out_free_buf:
	if (!pd->ibpd.uobject)
		mthca_free_srq_buf(dev, srq);

err_out_mailbox:
	mthca_free_mailbox(dev, mailbox);

err_out_db:
	if (!pd->ibpd.uobject && mthca_is_memfree(dev))
		mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);

err_out_icm:
	mthca_table_put(dev, dev->srq_table.table, srq->srqn);

err_out:
	mthca_free(&dev->srq_table.alloc, srq->srqn);

	return err;
}

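/*
 * SRQ lifetime is tracked with a plain counter protected by
 * srq_table.lock: mthca_srq_event() holds a reference across the
 * event handler, and mthca_free_srq() drops the initial reference
 * and then sleeps until the count reaches zero.
 */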
static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq)
{
	int c;

	spin_lock_irq(&dev->srq_table.lock);
	c = srq->refcount;
	spin_unlock_irq(&dev->srq_table.lock);

	return c;
}

void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
{
	struct mthca_mailbox *mailbox;
	int err;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
		return;
	}

	err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn);
	if (err)
		mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);

	spin_lock_irq(&dev->srq_table.lock);
	mthca_array_clear(&dev->srq_table.srq,
			  srq->srqn & (dev->limits.num_srqs - 1));
	--srq->refcount;
	spin_unlock_irq(&dev->srq_table.lock);

	wait_event(srq->wait, !get_srq_refcount(dev, srq));

	if (!srq->ibsrq.uobject) {
		mthca_free_srq_buf(dev, srq);
		if (mthca_is_memfree(dev))
			mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
	}

	mthca_table_put(dev, dev->srq_table.table, srq->srqn);
	mthca_free(&dev->srq_table.alloc, srq->srqn);
	mthca_free_mailbox(dev, mailbox);
}

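/*
 * Arming the limit below asks the HCA to generate an SRQ limit
 * asynchronous event (delivered via mthca_srq_event() below) once
 * the number of outstanding receives drops under srq_limit
 * (standard IB SRQ semantics).
 */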
int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		     enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	int ret = 0;

	/* We don't support resizing SRQs (yet?) */
	if (attr_mask & IB_SRQ_MAX_WR)
		return -EINVAL;

	if (attr_mask & IB_SRQ_LIMIT) {
		u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max;
		if (attr->srq_limit > max_wr)
			return -EINVAL;

		mutex_lock(&srq->mutex);
		ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit);
		mutex_unlock(&srq->mutex);
	}

	return ret;
}

int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	struct mthca_mailbox *mailbox;
	struct mthca_arbel_srq_context *arbel_ctx;
	struct mthca_tavor_srq_context *tavor_ctx;
	int err;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox);
	if (err)
		goto out;

	if (mthca_is_memfree(dev)) {
		arbel_ctx = mailbox->buf;
		srq_attr->srq_limit = be16_to_cpu(arbel_ctx->limit_watermark);
	} else {
		tavor_ctx = mailbox->buf;
		srq_attr->srq_limit = be16_to_cpu(tavor_ctx->limit_watermark);
	}

	srq_attr->max_wr  = srq->max - 1;
	srq_attr->max_sge = srq->max_gs;

out:
	mthca_free_mailbox(dev, mailbox);

	return err;
}

void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
		     enum ib_event_type event_type)
{
	struct mthca_srq *srq;
	struct ib_event event;

	spin_lock(&dev->srq_table.lock);
	srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
	if (srq)
		++srq->refcount;
	spin_unlock(&dev->srq_table.lock);

	if (!srq) {
		mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	if (!srq->ibsrq.event_handler)
		goto out;

	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.srq = &srq->ibsrq;
	srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);

out:
	spin_lock(&dev->srq_table.lock);
	if (!--srq->refcount)
		wake_up(&srq->wait);
	spin_unlock(&dev->srq_table.lock);
}

/*
 * This function must be called with IRQs disabled.
 */
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
{
	int ind;
	struct mthca_next_seg *last_free;

	ind = wqe_addr >> srq->wqe_shift;

	spin_lock(&srq->lock);

	last_free = get_wqe(srq, srq->last_free);
	*wqe_to_link(last_free) = ind;
	last_free->nda_op = htonl((ind << srq->wqe_shift) | 1);
	*wqe_to_link(get_wqe(srq, ind)) = -1;
	srq->last_free = ind;

	spin_unlock(&srq->lock);
}

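/*
 * Tavor posting path: WQEs come off the free list, get chained to the
 * previous descriptor, and are handed to the HCA via an MMIO doorbell.
 * Doorbells are batched, but are rung at least once every
 * MTHCA_TAVOR_MAX_WQES_PER_RECV_DB requests, presumably so that the
 * request count keeps fitting in the low byte of the doorbell word.
 */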
int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int first_ind;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&srq->lock, flags);

	first_ind = srq->first_free;

	for (nreq = 0; wr; wr = wr->next) {
		ind      = srq->first_free;
		wqe      = get_wqe(srq, ind);
		next_ind = *wqe_to_link(wqe);

		if (unlikely(next_ind < 0)) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		prev_wqe  = srq->last;
		srq->last = wqe;

		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			srq->last = prev_wqe;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs)
			mthca_set_data_seg_inval(wqe);

		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;

		++nreq;
		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
			nreq = 0;

			/*
			 * Make sure that descriptors are written
			 * before doorbell is rung.
			 */
			wmb();

			mthca_write64(first_ind << srq->wqe_shift, srq->srqn << 8,
				      dev->kar + MTHCA_RECEIVE_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

			first_ind = srq->first_free;
		}
	}

	if (likely(nreq)) {
		/*
		 * Make sure that descriptors are written before
		 * doorbell is rung.
		 */
		wmb();

		mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	/*
	 * Make sure doorbells don't leak out of SRQ spinlock and
	 * reach the HCA out of order:
	 */
	mmiowb();

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}

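/*
 * The Arbel (mem-free) posting path needs no MMIO doorbell: the HCA
 * reads a doorbell record in host memory, so after the descriptors
 * are written we only bump srq->counter and store it to *srq->db.
 */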
int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
			      struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibsrq->device);
	struct mthca_srq *srq = to_msrq(ibsrq);
	unsigned long flags;
	int err = 0;
	int ind;
	int next_ind;
	int nreq;
	int i;
	void *wqe;

	spin_lock_irqsave(&srq->lock, flags);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		ind      = srq->first_free;
		wqe      = get_wqe(srq, ind);
		next_ind = *wqe_to_link(wqe);

		if (unlikely(next_ind < 0)) {
			mthca_err(dev, "SRQ %06x full\n", srq->srqn);
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}

		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		/* flags field will always remain 0 */

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > srq->max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
		}

		if (i < srq->max_gs)
			mthca_set_data_seg_inval(wqe);

		srq->wrid[ind]  = wr->wr_id;
		srq->first_free = next_ind;
	}

	if (likely(nreq)) {
		srq->counter += nreq;

		/*
		 * Make sure that descriptors are written before
		 * we write doorbell record.
		 */
		wmb();
		*srq->db = cpu_to_be32(srq->counter);
	}

	spin_unlock_irqrestore(&srq->lock, flags);
	return err;
}

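/*
 * Worked example (illustrative): with a Tavor limits.max_desc_sz of
 * 512, the largest power-of-2 descriptor is 512 bytes, allowing
 * (512 - 16) / 16 == 31 scatter entries (still capped by max_sg).
 */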
int mthca_max_srq_sge(struct mthca_dev *dev)
{
	if (mthca_is_memfree(dev))
		return dev->limits.max_sg;

	/*
	 * SRQ allocations are based on powers of 2 for Tavor,
	 * (although they only need to be multiples of 16 bytes).
	 *
	 * Therefore, we need to base the max number of sg entries on
	 * the largest power of 2 descriptor size that is <= to the
	 * actual max WQE descriptor size, rather than return the
	 * max_sg value given by the firmware (which is based on WQE
	 * sizes as multiples of 16, not powers of 2).
	 *
	 * If SRQ implementation is changed for Tavor to be based on
	 * multiples of 16, the calculation below can be deleted and
	 * the FW max_sg value returned.
	 */
	return min_t(int, dev->limits.max_sg,
		     ((1 << (fls(dev->limits.max_desc_sz) - 1)) -
		      sizeof (struct mthca_next_seg)) /
		     sizeof (struct mthca_data_seg));
}

int mthca_init_srq_table(struct mthca_dev *dev)
{
	int err;

	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return 0;

	spin_lock_init(&dev->srq_table.lock);

	err = mthca_alloc_init(&dev->srq_table.alloc,
			       dev->limits.num_srqs,
			       dev->limits.num_srqs - 1,
			       dev->limits.reserved_srqs);
	if (err)
		return err;

	err = mthca_array_init(&dev->srq_table.srq,
			       dev->limits.num_srqs);
	if (err)
		mthca_alloc_cleanup(&dev->srq_table.alloc);

	return err;
}

void mthca_cleanup_srq_table(struct mthca_dev *dev)
{
	if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
		return;

	mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
	mthca_alloc_cleanup(&dev->srq_table.alloc);
}