device.c
/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>

#include <rdma/ib_verbs.h>

#include "iw_cxgb4.h"

#define DRV_VERSION "0.1"

MODULE_AUTHOR("Steve Wise");
MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

struct uld_ctx {
	struct list_head entry;
	struct cxgb4_lld_info lldi;
	struct c4iw_dev *dev;
};

static LIST_HEAD(uld_ctx_list);
static DEFINE_MUTEX(dev_mutex);

static struct dentry *c4iw_debugfs_root;

struct c4iw_debugfs_data {
	struct c4iw_dev *devp;
	char *buf;
	int bufsize;
	int pos;
};

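/*
 * The "qps" and "stags" debugfs files below share one snapshot pattern:
 * open() counts the idr entries under the device lock, sizes a buffer
 * to match, and renders every object into it; read() then serves the
 * pre-rendered buffer via simple_read_from_buffer().
 */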
static int count_idrs(int id, void *p, void *data)
{
	int *countp = data;

	*countp = *countp + 1;
	return 0;
}

static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct c4iw_debugfs_data *d = file->private_data;

	return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
}

static int dump_qp(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;
	struct c4iw_debugfs_data *qpd = data;
	int space;
	int cc;

	if (id != qp->wq.sq.qid)
		return 0;

	space = qpd->bufsize - qpd->pos - 1;
	if (space == 0)
		return 1;

	if (qp->ep)
		cc = snprintf(qpd->buf + qpd->pos, space,
			      "qp sq id %u rq id %u state %u onchip %u "
			      "ep tid %u state %u %pI4:%u->%pI4:%u\n",
			      qp->wq.sq.qid, qp->wq.rq.qid, (int)qp->attr.state,
			      qp->wq.sq.flags & T4_SQ_ONCHIP,
			      qp->ep->hwtid, (int)qp->ep->com.state,
			      &qp->ep->com.local_addr.sin_addr.s_addr,
			      ntohs(qp->ep->com.local_addr.sin_port),
			      &qp->ep->com.remote_addr.sin_addr.s_addr,
			      ntohs(qp->ep->com.remote_addr.sin_port));
	else
		cc = snprintf(qpd->buf + qpd->pos, space,
			      "qp sq id %u rq id %u state %u onchip %u\n",
			      qp->wq.sq.qid, qp->wq.rq.qid,
			      (int)qp->attr.state,
			      qp->wq.sq.flags & T4_SQ_ONCHIP);
	if (cc < space)
		qpd->pos += cc;
	return 0;
}

static int qp_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd = file->private_data;
	if (!qpd) {
		printk(KERN_INFO "%s null qpd?\n", __func__);
		return 0;
	}
	vfree(qpd->buf);
	kfree(qpd);
	return 0;
}

static int qp_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd;
	int ret = 0;
	int count = 1;

	qpd = kmalloc(sizeof *qpd, GFP_KERNEL);
	if (!qpd) {
		ret = -ENOMEM;
		goto out;
	}
	qpd->devp = inode->i_private;
	qpd->pos = 0;

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->bufsize = count * 128;
	qpd->buf = vmalloc(qpd->bufsize);
	if (!qpd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
	spin_unlock_irq(&qpd->devp->lock);

	qpd->buf[qpd->pos++] = 0;
	file->private_data = qpd;
	goto out;
err1:
	kfree(qpd);
out:
	return ret;
}

static const struct file_operations qp_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = qp_open,
	.release = qp_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};

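/*
 * Each "stags" entry is the idr id shifted left by 8 bits, which
 * recovers the full hardware STAG value.
 */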
static int dump_stag(int id, void *p, void *data)
{
	struct c4iw_debugfs_data *stagd = data;
	int space;
	int cc;

	space = stagd->bufsize - stagd->pos - 1;
	if (space == 0)
		return 1;

	cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id<<8);
	if (cc < space)
		stagd->pos += cc;
	return 0;
}

static int stag_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd = file->private_data;
	if (!stagd) {
		printk(KERN_INFO "%s null stagd?\n", __func__);
		return 0;
	}
	kfree(stagd->buf);
	kfree(stagd);
	return 0;
}

static int stag_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd;
	int ret = 0;
	int count = 1;

	stagd = kmalloc(sizeof *stagd, GFP_KERNEL);
	if (!stagd) {
		ret = -ENOMEM;
		goto out;
	}
	stagd->devp = inode->i_private;
	stagd->pos = 0;

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->bufsize = count * sizeof("0x12345678\n");
	stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL);
	if (!stagd->buf) {
		ret = -ENOMEM;
		goto err1;
	}

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->buf[stagd->pos++] = 0;
	file->private_data = stagd;
	goto out;
err1:
	kfree(stagd);
out:
	return ret;
}

static const struct file_operations stag_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stag_open,
	.release = stag_release,
	.read    = debugfs_read,
	.llseek  = default_llseek,
};

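/*
 * Doorbell (DB) flow-control state, driven by the CXGB4_CONTROL_DB_*
 * callbacks from the LLD: NORMAL -> FLOW_CONTROL when the hardware DB
 * FIFO fills, and RECOVERY once doorbells have actually been dropped.
 */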
static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY"};

static int stats_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;

	seq_printf(seq, " Object: %10s %10s %10s %10s\n", "Total", "Current",
		   "Max", "Fail");
	seq_printf(seq, " PDID: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur,
		   dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail);
	seq_printf(seq, " QID: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
		   dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
	seq_printf(seq, " TPTMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
		   dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
	seq_printf(seq, " PBLMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur,
		   dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail);
	seq_printf(seq, " RQTMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur,
		   dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail);
	seq_printf(seq, " OCQPMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur,
		   dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail);
	seq_printf(seq, " DB FULL: %10llu\n", dev->rdev.stats.db_full);
	seq_printf(seq, " DB EMPTY: %10llu\n", dev->rdev.stats.db_empty);
	seq_printf(seq, " DB DROP: %10llu\n", dev->rdev.stats.db_drop);
	seq_printf(seq, " DB State: %s Transitions %llu\n",
		   db_state_str[dev->db_state],
		   dev->rdev.stats.db_state_transitions);
	return 0;
}

static int stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, stats_show, inode->i_private);
}

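/*
 * Writing anything to the "stats" file resets the high-water (max),
 * failure, and doorbell counters; the total and cur values are kept.
 */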
static ssize_t stats_clear(struct file *file, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;

	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.pd.max = 0;
	dev->rdev.stats.pd.fail = 0;
	dev->rdev.stats.qid.max = 0;
	dev->rdev.stats.qid.fail = 0;
	dev->rdev.stats.stag.max = 0;
	dev->rdev.stats.stag.fail = 0;
	dev->rdev.stats.pbl.max = 0;
	dev->rdev.stats.pbl.fail = 0;
	dev->rdev.stats.rqt.max = 0;
	dev->rdev.stats.rqt.fail = 0;
	dev->rdev.stats.ocqp.max = 0;
	dev->rdev.stats.ocqp.fail = 0;
	dev->rdev.stats.db_full = 0;
	dev->rdev.stats.db_empty = 0;
	dev->rdev.stats.db_drop = 0;
	dev->rdev.stats.db_state_transitions = 0;
	mutex_unlock(&dev->rdev.stats.lock);
	return count;
}

static const struct file_operations stats_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stats_open,
	.release = single_release,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.write   = stats_clear,
};

static int setup_debugfs(struct c4iw_dev *devp)
{
	struct dentry *de;

	if (!devp->debugfs_root)
		return -1;

	de = debugfs_create_file("qps", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &qp_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("stags", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stag_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	de = debugfs_create_file("stats", S_IWUSR, devp->debugfs_root,
				 (void *)devp, &stats_debugfs_fops);
	if (de && de->d_inode)
		de->d_inode->i_size = 4096;

	return 0;
}

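/*
 * Hand the qids cached in a released user context back to the shared
 * qid table.  Qids are allocated in blocks of (qpmask + 1) (see
 * c4iw_rdev_open), so only the block-aligned qid is returned to the
 * table, hence the qpmask test below.
 */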
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx)
{
	struct list_head *pos, *nxt;
	struct c4iw_qid_list *entry;

	mutex_lock(&uctx->lock);
	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		if (!(entry->qid & rdev->qpmask)) {
			c4iw_put_resource(&rdev->resource.qid_table,
					  entry->qid);
			mutex_lock(&rdev->stats.lock);
			rdev->stats.qid.cur -= rdev->qpmask + 1;
			mutex_unlock(&rdev->stats.lock);
		}
		kfree(entry);
	}

	list_for_each_safe(pos, nxt, &uctx->qpids) {
		entry = list_entry(pos, struct c4iw_qid_list, entry);
		list_del_init(&entry->entry);
		kfree(entry);
	}
	mutex_unlock(&uctx->lock);
}

void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx)
{
	INIT_LIST_HEAD(&uctx->qpids);
	INIT_LIST_HEAD(&uctx->cqids);
	mutex_init(&uctx->lock);
}

/* Caller takes care of locking if needed */
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
	int err;

	c4iw_init_dev_ucontext(rdev, &rdev->uctx);

	/*
	 * qpshift is the number of bits to shift the qpid left in order
	 * to get the correct address of the doorbell for that qp.
	 */
	rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density);
	rdev->qpmask = rdev->lldi.udb_density - 1;
	rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density);
	rdev->cqmask = rdev->lldi.ucq_density - 1;
	PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
	     "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x "
	     "qp qid start %u size %u cq qid start %u size %u\n",
	     __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
	     rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
	     rdev->lldi.vr->pbl.start,
	     rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
	     rdev->lldi.vr->rq.size,
	     rdev->lldi.vr->qp.start,
	     rdev->lldi.vr->qp.size,
	     rdev->lldi.vr->cq.start,
	     rdev->lldi.vr->cq.size);
	PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
	     "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
	     (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
	     (void *)pci_resource_start(rdev->lldi.pdev, 2),
	     rdev->lldi.db_reg,
	     rdev->lldi.gts_reg,
	     rdev->qpshift, rdev->qpmask,
	     rdev->cqshift, rdev->cqmask);

	if (c4iw_num_stags(rdev) == 0) {
		err = -EINVAL;
		goto err1;
	}

	rdev->stats.pd.total = T4_MAX_NUM_PD;
	rdev->stats.stag.total = rdev->lldi.vr->stag.size;
	rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
	rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
	rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
	rdev->stats.qid.total = rdev->lldi.vr->qp.size;

	err = c4iw_init_resource(rdev, c4iw_num_stags(rdev), T4_MAX_NUM_PD);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing resources\n", err);
		goto err1;
	}
	err = c4iw_pblpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing pbl pool\n", err);
		goto err2;
	}
	err = c4iw_rqtpool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing rqt pool\n", err);
		goto err3;
	}
	err = c4iw_ocqp_pool_create(rdev);
	if (err) {
		printk(KERN_ERR MOD "error %d initializing ocqp pool\n", err);
		goto err4;
	}
	return 0;
err4:
	c4iw_rqtpool_destroy(rdev);
err3:
	c4iw_pblpool_destroy(rdev);
err2:
	c4iw_destroy_resource(&rdev->resource);
err1:
	return err;
}

static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
	c4iw_pblpool_destroy(rdev);
	c4iw_rqtpool_destroy(rdev);
	c4iw_destroy_resource(&rdev->resource);
}

static void c4iw_dealloc(struct uld_ctx *ctx)
{
	c4iw_rdev_close(&ctx->dev->rdev);
	idr_destroy(&ctx->dev->cqidr);
	idr_destroy(&ctx->dev->qpidr);
	idr_destroy(&ctx->dev->mmidr);
	iounmap(ctx->dev->rdev.oc_mw_kva);
	ib_dealloc_device(&ctx->dev->ibdev);
	ctx->dev = NULL;
}

static void c4iw_remove(struct uld_ctx *ctx)
{
	PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
	c4iw_unregister_device(ctx->dev);
	c4iw_dealloc(ctx);
}

static int rdma_supported(const struct cxgb4_lld_info *infop)
{
	return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
	       infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
	       infop->vr->cq.size > 0 && infop->vr->ocq.size > 0;
}

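/*
 * The on-chip queue (OCQ) memory window sits at the top of the
 * device's BAR2 region; c4iw_alloc computes its bus address and maps
 * it write-combined for use by on-chip SQs.
 */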
static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
{
	struct c4iw_dev *devp;
	int ret;

	if (!rdma_supported(infop)) {
		printk(KERN_INFO MOD "%s: RDMA not supported on this device.\n",
		       pci_name(infop->pdev));
		return ERR_PTR(-ENOSYS);
	}
	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
	if (!devp) {
		printk(KERN_ERR MOD "Cannot allocate ib device\n");
		return ERR_PTR(-ENOMEM);
	}
	devp->rdev.lldi = *infop;

	devp->rdev.oc_mw_pa = pci_resource_start(devp->rdev.lldi.pdev, 2) +
		(pci_resource_len(devp->rdev.lldi.pdev, 2) -
		 roundup_pow_of_two(devp->rdev.lldi.vr->ocq.size));
	devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
					  devp->rdev.lldi.vr->ocq.size);

	PDBG(KERN_INFO MOD "ocq memory: "
	     "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
	     devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
	     devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);

	ret = c4iw_rdev_open(&devp->rdev);
	if (ret) {
		printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
		ib_dealloc_device(&devp->ibdev);
		return ERR_PTR(ret);
	}

	idr_init(&devp->cqidr);
	idr_init(&devp->qpidr);
	idr_init(&devp->mmidr);
	spin_lock_init(&devp->lock);
	mutex_init(&devp->rdev.stats.lock);
	mutex_init(&devp->db_mutex);

	if (c4iw_debugfs_root) {
		devp->debugfs_root = debugfs_create_dir(
					pci_name(devp->rdev.lldi.pdev),
					c4iw_debugfs_root);
		setup_debugfs(devp);
	}
	return devp;
}

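/*
 * Called once per adapter when cxgb4 attaches this ULD.  Only the LLD
 * context is captured here; the RDMA device itself is created later,
 * when the CXGB4_STATE_UP state change arrives.
 */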
static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
{
	struct uld_ctx *ctx;
	static int vers_printed;
	int i;

	if (!vers_printed++)
		printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n",
		       DRV_VERSION);

	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
	if (!ctx) {
		ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	ctx->lldi = *infop;

	PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
	     __func__, pci_name(ctx->lldi.pdev),
	     ctx->lldi.nchan, ctx->lldi.nrxq,
	     ctx->lldi.ntxq, ctx->lldi.nports);

	mutex_lock(&dev_mutex);
	list_add_tail(&ctx->entry, &uld_ctx_list);
	mutex_unlock(&dev_mutex);

	for (i = 0; i < ctx->lldi.nrxq; i++)
		PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
out:
	return ctx;
}

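/*
 * Ingress dispatch.  Three delivery paths: a response with no gather
 * list is copied straight out of the descriptor into a fresh skb, an
 * async notification (CXGB4_MSG_AN) is routed to the event handler by
 * qid, and anything else is built into an skb from the gather list and
 * dispatched to the CPL handler registered for its opcode.
 */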
static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
			       const struct pkt_gl *gl)
{
	struct uld_ctx *ctx = handle;
	struct c4iw_dev *dev = ctx->dev;
	struct sk_buff *skb;
	const struct cpl_act_establish *rpl;
	unsigned int opcode;

	if (gl == NULL) {
		/* omit RSS and rsp_ctrl at end of descriptor */
		unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

		skb = alloc_skb(256, GFP_ATOMIC);
		if (!skb)
			goto nomem;
		__skb_put(skb, len);
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else if (gl == CXGB4_MSG_AN) {
		const struct rsp_ctrl *rc = (void *)rsp;

		u32 qid = be32_to_cpu(rc->pldbuflen_qid);
		c4iw_ev_handler(dev, qid);
		return 0;
	} else {
		skb = cxgb4_pktgl_to_skb(gl, 128, 128);
		if (unlikely(!skb))
			goto nomem;
	}

	rpl = cplhdr(skb);
	opcode = rpl->ot.opcode;

	if (c4iw_handlers[opcode])
		c4iw_handlers[opcode](dev, skb);
	else
		printk(KERN_INFO "%s no handler opcode 0x%x...\n", __func__,
		       opcode);

	return 0;
nomem:
	return -1;
}

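/*
 * LLD state changes drive the device life cycle: the RDMA device is
 * allocated and registered on UP, torn down on DOWN and DETACH, and on
 * START_RECOVERY a fatal event is dispatched to consumers before the
 * device is removed.
 */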
static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct uld_ctx *ctx = handle;

	PDBG("%s new_state %u\n", __func__, new_state);
	switch (new_state) {
	case CXGB4_STATE_UP:
		printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
		if (!ctx->dev) {
			int ret;

			ctx->dev = c4iw_alloc(&ctx->lldi);
			if (IS_ERR(ctx->dev)) {
				printk(KERN_ERR MOD
				       "%s: initialization failed: %ld\n",
				       pci_name(ctx->lldi.pdev),
				       PTR_ERR(ctx->dev));
				ctx->dev = NULL;
				break;
			}
			ret = c4iw_register_device(ctx->dev);
			if (ret) {
				printk(KERN_ERR MOD
				       "%s: RDMA registration failed: %d\n",
				       pci_name(ctx->lldi.pdev), ret);
				c4iw_dealloc(ctx);
			}
		}
		break;
	case CXGB4_STATE_DOWN:
		printk(KERN_INFO MOD "%s: Down\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	case CXGB4_STATE_START_RECOVERY:
		printk(KERN_INFO MOD "%s: Fatal Error\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev) {
			struct ib_event event;

			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
			memset(&event, 0, sizeof event);
			event.event = IB_EVENT_DEVICE_FATAL;
			event.device = &ctx->dev->ibdev;
			ib_dispatch_event(&event);
			c4iw_remove(ctx);
		}
		break;
	case CXGB4_STATE_DETACH:
		printk(KERN_INFO MOD "%s: Detach\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	}
	return 0;
}

static int disable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_disable_wq_db(&qp->wq);
	return 0;
}

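/*
 * Flow control: when the LLD reports the DB FIFO is full, stop ringing
 * doorbells on every QP; once it empties (and the QP count is back
 * under db_fc_threshold), re-enable them.
 */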
static void stop_queues(struct uld_ctx *ctx)
{
	spin_lock_irq(&ctx->dev->lock);
	if (ctx->dev->db_state == NORMAL) {
		ctx->dev->rdev.stats.db_state_transitions++;
		ctx->dev->db_state = FLOW_CONTROL;
		idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
	}
	spin_unlock_irq(&ctx->dev->lock);
}

static int enable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_enable_wq_db(&qp->wq);
	return 0;
}

static void resume_queues(struct uld_ctx *ctx)
{
	spin_lock_irq(&ctx->dev->lock);
	if (ctx->dev->qpcnt <= db_fc_threshold &&
	    ctx->dev->db_state == FLOW_CONTROL) {
		ctx->dev->db_state = NORMAL;
		ctx->dev->rdev.stats.db_state_transitions++;
		idr_for_each(&ctx->dev->qpidr, enable_qp_db, NULL);
	}
	spin_unlock_irq(&ctx->dev->lock);
}

struct qp_list {
	unsigned idx;
	struct c4iw_qp **qps;
};

static int add_and_ref_qp(int id, void *p, void *data)
{
	struct qp_list *qp_listp = data;
	struct c4iw_qp *qp = p;

	c4iw_qp_add_ref(&qp->ibqp);
	qp_listp->qps[qp_listp->idx++] = qp;
	return 0;
}

static int count_qps(int id, void *p, void *data)
{
	unsigned *countp = data;
	(*countp)++;
	return 0;
}

static void deref_qps(struct qp_list qp_list)
{
	int idx;

	for (idx = 0; idx < qp_list.idx; idx++)
		c4iw_qp_rem_ref(&qp_list.qps[idx]->ibqp);
}

static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
{
	int idx;
	int ret;

	for (idx = 0; idx < qp_list->idx; idx++) {
		struct c4iw_qp *qp = qp_list->qps[idx];

		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.sq.qid,
					  t4_sq_host_wq_pidx(&qp->wq),
					  t4_sq_wq_size(&qp->wq));
		if (ret) {
			printk(KERN_ERR MOD "%s: Fatal error - "
			       "DB overflow recovery failed - "
			       "error syncing SQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
			return;
		}

		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.rq.qid,
					  t4_rq_host_wq_pidx(&qp->wq),
					  t4_rq_wq_size(&qp->wq));

		if (ret) {
			printk(KERN_ERR MOD "%s: Fatal error - "
			       "DB overflow recovery failed - "
			       "error syncing RQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
			return;
		}

		/* Wait for the dbfifo to drain */
		while (cxgb4_dbfifo_count(qp->rhp->rdev.lldi.ports[0], 1) > 0) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout(usecs_to_jiffies(10));
		}
	}
}

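/*
 * Full recovery after dropped doorbells: lock out kernel DB ringers,
 * disable all QP doorbells, let the DB FIFO drain, flush the SGE EQ
 * context cache, then replay the host producer index of every SQ and
 * RQ before re-enabling doorbells.
 */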
static void recover_queues(struct uld_ctx *ctx)
{
	int count = 0;
	struct qp_list qp_list;
	int ret;

	/* lock out kernel db ringers */
	mutex_lock(&ctx->dev->db_mutex);

	/* put all queues into recovery mode */
	spin_lock_irq(&ctx->dev->lock);
	ctx->dev->db_state = RECOVERY;
	ctx->dev->rdev.stats.db_state_transitions++;
	idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
	spin_unlock_irq(&ctx->dev->lock);

	/* slow everybody down */
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(usecs_to_jiffies(1000));

	/* Wait for the dbfifo to completely drain. */
	while (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1) > 0) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(10));
	}

	/* flush the SGE contexts */
	ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
	if (ret) {
		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		goto out;
	}

	/* Count active queues so we can build a list of queues to recover */
	spin_lock_irq(&ctx->dev->lock);
	idr_for_each(&ctx->dev->qpidr, count_qps, &count);

	qp_list.qps = kzalloc(count * sizeof *qp_list.qps, GFP_ATOMIC);
	if (!qp_list.qps) {
		printk(KERN_ERR MOD "%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		spin_unlock_irq(&ctx->dev->lock);
		goto out;
	}
	qp_list.idx = 0;

	/* add and ref each qp so it doesn't get freed */
	idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list);

	spin_unlock_irq(&ctx->dev->lock);

	/* now traverse the list in a safe context to recover the db state */
	recover_lost_dbs(ctx, &qp_list);

	/* we're almost done!  deref the qps and clean up */
	deref_qps(qp_list);
	kfree(qp_list.qps);

	/* Wait for the dbfifo to completely drain again */
	while (cxgb4_dbfifo_count(ctx->dev->rdev.lldi.ports[0], 1) > 0) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(10));
	}

	/* resume the queues */
	spin_lock_irq(&ctx->dev->lock);
	if (ctx->dev->qpcnt > db_fc_threshold)
		ctx->dev->db_state = FLOW_CONTROL;
	else {
		ctx->dev->db_state = NORMAL;
		idr_for_each(&ctx->dev->qpidr, enable_qp_db, NULL);
	}
	ctx->dev->rdev.stats.db_state_transitions++;
	spin_unlock_irq(&ctx->dev->lock);

out:
	/* start up kernel db ringers again */
	mutex_unlock(&ctx->dev->db_mutex);
}

static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
{
	struct uld_ctx *ctx = handle;

	switch (control) {
	case CXGB4_CONTROL_DB_FULL:
		stop_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_full++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	case CXGB4_CONTROL_DB_EMPTY:
		resume_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_empty++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	case CXGB4_CONTROL_DB_DROP:
		recover_queues(ctx);
		mutex_lock(&ctx->dev->rdev.stats.lock);
		ctx->dev->rdev.stats.db_drop++;
		mutex_unlock(&ctx->dev->rdev.stats.lock);
		break;
	default:
		printk(KERN_WARNING MOD "%s: unknown control cmd %u\n",
		       pci_name(ctx->lldi.pdev), control);
		break;
	}
	return 0;
}

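/*
 * Hook this driver into cxgb4 as an Upper Layer Driver (ULD); cxgb4
 * calls back through these ops for device add, ingress traffic, state
 * changes, and doorbell control events.
 */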
static struct cxgb4_uld_info c4iw_uld_info = {
	.name = DRV_NAME,
	.add = c4iw_uld_add,
	.rx_handler = c4iw_uld_rx_handler,
	.state_change = c4iw_uld_state_change,
	.control = c4iw_uld_control,
};

static int __init c4iw_init_module(void)
{
	int err;

	err = c4iw_cm_init();
	if (err)
		return err;

	c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
	if (!c4iw_debugfs_root)
		printk(KERN_WARNING MOD
		       "could not create debugfs entry, continuing\n");

	cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);

	return 0;
}

static void __exit c4iw_exit_module(void)
{
	struct uld_ctx *ctx, *tmp;

	mutex_lock(&dev_mutex);
	list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
		if (ctx->dev)
			c4iw_remove(ctx);
		kfree(ctx);
	}
	mutex_unlock(&dev_mutex);
	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
	c4iw_cm_term();
	debugfs_remove_recursive(c4iw_debugfs_root);
}

module_init(c4iw_init_module);
module_exit(c4iw_exit_module);