#include <linux/module.h>

#define DRV_VERSION "0.1"

static struct dentry *c4iw_debugfs_root;
static int count_idrs(int id, void *p, void *data)
{
	int *countp = data;

	*countp = *countp + 1;
	return 0;
}
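/*
 * The debugfs dump files below share one pattern: walk an idr twice under
 * the device lock, first with count_idrs() to size a text buffer, then with
 * a dump_*() callback that snprintf()s one line per object, and finally let
 * debugfs_read() hand the finished buffer back to userspace.
 */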
static int dump_qp(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;
	struct c4iw_debugfs_data *qpd = data;
	int space;
	int cc;

	if (id != qp->wq.sq.qid)
		return 0;

	space = qpd->bufsize - qpd->pos - 1;
	if (space == 0)
		return 1;

	if (qp->ep)
		cc = snprintf(qpd->buf + qpd->pos, space,
			      "qp sq id %u rq id %u state %u onchip %u "
			      "ep tid %u state %u %pI4:%u->%pI4:%u\n",
			      qp->wq.sq.qid, qp->wq.rq.qid, (int)qp->attr.state,
			      qp->wq.sq.flags & T4_SQ_ONCHIP,
			      qp->ep->hwtid, (int)qp->ep->com.state,
			      &qp->ep->com.local_addr.sin_addr.s_addr,
			      ntohs(qp->ep->com.local_addr.sin_port),
			      &qp->ep->com.remote_addr.sin_addr.s_addr,
			      ntohs(qp->ep->com.remote_addr.sin_port));
	else
		cc = snprintf(qpd->buf + qpd->pos, space,
			      "qp sq id %u rq id %u state %u onchip %u\n",
			      qp->wq.sq.qid, qp->wq.rq.qid,
			      (int)qp->attr.state,
			      qp->wq.sq.flags & T4_SQ_ONCHIP);
	if (cc < space)
		qpd->pos += cc;
	return 0;
}
static int qp_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *qpd;
	int count = 1;

	/* ... allocate qpd, set qpd->devp from inode->i_private, zero qpd->pos ... */

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, count_idrs, &count);
	spin_unlock_irq(&qpd->devp->lock);

	/* ... size and allocate qpd->buf from count ... */

	spin_lock_irq(&qpd->devp->lock);
	idr_for_each(&qpd->devp->qpidr, dump_qp, qpd);
	spin_unlock_irq(&qpd->devp->lock);

	/* ... NUL-terminate qpd->buf and stash qpd in file->private_data ... */
	return 0;
}

static const struct file_operations qp_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = qp_open,
	.release = qp_release,
	.read    = debugfs_read,
};
static int dump_stag(int id, void *p, void *data)
{
	struct c4iw_debugfs_data *stagd = data;

	/* ... snprintf() the stag index as one "0x%x\n" line into stagd->buf ... */
	return 0;
}

static int stag_release(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd = file->private_data;

	/* ... free stagd->buf and stagd ... */
	return 0;
}

static int stag_open(struct inode *inode, struct file *file)
{
	struct c4iw_debugfs_data *stagd;
	int count = 1;

	/* ... allocate stagd, set stagd->devp from inode->i_private, zero stagd->pos ... */

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->bufsize = count * sizeof("0x12345678\n");
	/* ... allocate stagd->buf of stagd->bufsize bytes ... */

	spin_lock_irq(&stagd->devp->lock);
	idr_for_each(&stagd->devp->mmidr, dump_stag, stagd);
	spin_unlock_irq(&stagd->devp->lock);

	stagd->buf[stagd->pos++] = 0;
	/* ... store stagd in file->private_data ... */
	return 0;
}

static const struct file_operations stag_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stag_open,
	.release = stag_release,
	.read    = debugfs_read,
};
static char *db_state_str[] = {"NORMAL", "FLOW_CONTROL", "RECOVERY"};
static int stats_show(struct seq_file *seq, void *v)
{
	struct c4iw_dev *dev = seq->private;

	seq_printf(seq, " Object: %10s %10s %10s %10s\n", "Total", "Current",
		   "Max", "Fail");
	seq_printf(seq, "     PDID: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.pd.total, dev->rdev.stats.pd.cur,
		   dev->rdev.stats.pd.max, dev->rdev.stats.pd.fail);
	seq_printf(seq, "      QID: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.qid.total, dev->rdev.stats.qid.cur,
		   dev->rdev.stats.qid.max, dev->rdev.stats.qid.fail);
	seq_printf(seq, "   TPTMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,
		   dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);
	seq_printf(seq, "   PBLMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.pbl.total, dev->rdev.stats.pbl.cur,
		   dev->rdev.stats.pbl.max, dev->rdev.stats.pbl.fail);
	seq_printf(seq, "   RQTMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.rqt.total, dev->rdev.stats.rqt.cur,
		   dev->rdev.stats.rqt.max, dev->rdev.stats.rqt.fail);
	seq_printf(seq, "  OCQPMEM: %10llu %10llu %10llu %10llu\n",
		   dev->rdev.stats.ocqp.total, dev->rdev.stats.ocqp.cur,
		   dev->rdev.stats.ocqp.max, dev->rdev.stats.ocqp.fail);
	seq_printf(seq, " DB State: %s Transitions %llu\n",
		   db_state_str[dev->db_state],
		   dev->rdev.stats.db_state_transitions);
	return 0;
}
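/*
 * The "DB State:" row above names the current doorbell flow-control state
 * via db_state_str[] (NORMAL, FLOW_CONTROL or RECOVERY) and counts how many
 * times the device has moved between those states.
 */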
static int stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, stats_show, inode->i_private);
}
static ssize_t stats_clear(struct file *file, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;

	mutex_lock(&dev->rdev.stats.lock);
	dev->rdev.stats.pd.max = 0;
	dev->rdev.stats.pd.fail = 0;
	dev->rdev.stats.qid.max = 0;
	dev->rdev.stats.qid.fail = 0;
	dev->rdev.stats.stag.max = 0;
	dev->rdev.stats.stag.fail = 0;
	dev->rdev.stats.pbl.max = 0;
	dev->rdev.stats.pbl.fail = 0;
	dev->rdev.stats.rqt.max = 0;
	dev->rdev.stats.rqt.fail = 0;
	dev->rdev.stats.ocqp.max = 0;
	dev->rdev.stats.ocqp.fail = 0;
	dev->rdev.stats.db_full = 0;
	dev->rdev.stats.db_empty = 0;
	dev->rdev.stats.db_drop = 0;
	dev->rdev.stats.db_state_transitions = 0;
	mutex_unlock(&dev->rdev.stats.lock);
	return count;
}

static const struct file_operations stats_debugfs_fops = {
	.owner   = THIS_MODULE,
	.open    = stats_open,
	.release = single_release,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.write   = stats_clear,
};
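/*
 * Writing anything to the "stats" debugfs file runs stats_clear(), which
 * resets the max/fail counters and the doorbell event counts; reads go
 * through the normal seq_file path via stats_show().
 */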
static int setup_debugfs(struct c4iw_dev *devp)
{
	if (!devp->debugfs_root)
		return -1;

	debugfs_create_file("qps", S_IWUSR, devp->debugfs_root,
			    (void *)devp, &qp_debugfs_fops);
	debugfs_create_file("stags", S_IWUSR, devp->debugfs_root,
			    (void *)devp, &stag_debugfs_fops);
	debugfs_create_file("stats", S_IWUSR, devp->debugfs_root,
			    (void *)devp, &stats_debugfs_fops);
	return 0;
}
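/*
 * setup_debugfs() is invoked from c4iw_alloc() once a per-adapter directory
 * (named after the PCI device) has been created under the driver's debugfs
 * root, so every adapter exposes its own "qps", "stags" and "stats" files.
 */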
void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
			       struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;

	/* ... for each entry still queued on uctx->qpids and uctx->cqids: */
		list_del_init(&entry->entry);
	/* ... return qpids to the qid table and kfree() each entry ... */
}

void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
			    struct c4iw_dev_ucontext *uctx)
{
	INIT_LIST_HEAD(&uctx->qpids);
	INIT_LIST_HEAD(&uctx->cqids);
	mutex_init(&uctx->lock);
}
static int c4iw_rdev_open(struct c4iw_rdev *rdev)
{
	/* ... set up the default ucontext and derive the qp/cq doorbell
	 * shift/mask values from the LLD info ... */

	PDBG("%s dev %s stag start 0x%0x size 0x%0x num stags %d "
	     "pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x "
	     "qp qid start %u size %u cq qid start %u size %u\n",
	     __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
	     rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
	     rdev->lldi.vr->pbl.start,
	     rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
	     rdev->lldi.vr->rq.size,
	     rdev->lldi.vr->qp.start,
	     rdev->lldi.vr->qp.size,
	     rdev->lldi.vr->cq.start,
	     rdev->lldi.vr->cq.size);
	PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
	     "qpmask 0x%x cqshift %lu cqmask 0x%x\n"
	     /* ... user doorbell BAR length/base, db/gts register pointers,
	      * and the qp/cq shift/mask values computed above ... */);

	if (c4iw_num_stags(rdev) == 0) {
		/* No stag memory was exported to us: nothing to drive. */
		return -EINVAL;
	}

	rdev->stats.stag.total = rdev->lldi.vr->stag.size;
	rdev->stats.pbl.total = rdev->lldi.vr->pbl.size;
	rdev->stats.rqt.total = rdev->lldi.vr->rq.size;
	rdev->stats.ocqp.total = rdev->lldi.vr->ocq.size;
	rdev->stats.qid.total = rdev->lldi.vr->qp.size;

	/* ... init the resource tables and the pbl/rqt/ocqp pools ... */
	return 0;
}
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
	/* ... destroy the pbl/rqt pools and resource tables ... */
}

static void c4iw_dealloc(struct uld_ctx *ctx)
{
	c4iw_rdev_close(&ctx->dev->rdev);
	/* ... destroy the idrs, free the ib device, NULL out ctx->dev ... */
}

static void c4iw_remove(struct uld_ctx *ctx)
{
	PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
	/* ... c4iw_unregister_device() then c4iw_dealloc() ... */
}
static int rdma_supported(const struct cxgb4_lld_info *infop)
{
	return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&
	       infop->vr->rq.size > 0 && infop->vr->qp.size > 0 &&
	       infop->vr->cq.size > 0 && infop->vr->ocq.size > 0;
}

static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
{
	struct c4iw_dev *devp;
	int ret;

	if (!rdma_supported(infop)) {
		printk(KERN_INFO MOD "%s: RDMA not supported on this device.\n",
		       pci_name(infop->pdev));
		return ERR_PTR(-ENOSYS);
	}

	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
	if (!devp) {
		printk(KERN_ERR MOD "Cannot allocate ib device\n");
		return ERR_PTR(-ENOMEM);
	}
	devp->rdev.lldi = *infop;

	/* ... map the on-chip queue memory window: compute
	 * devp->rdev.oc_mw_pa from the end of BAR2, then ... */
	devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
					  devp->rdev.lldi.vr->ocq.size);
	PDBG(KERN_INFO MOD "ocq memory: "
	     "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
	     devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
	     devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);

	ret = c4iw_rdev_open(&devp->rdev);
	if (ret) {
		printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
		ib_dealloc_device(&devp->ibdev);
		return ERR_PTR(ret);
	}

	/* ... init the cq/qp/mm idrs, devp->lock, and the stats mutex ... */

	if (c4iw_debugfs_root) {
		devp->debugfs_root = debugfs_create_dir(
					pci_name(devp->rdev.lldi.pdev),
					c4iw_debugfs_root);
		setup_debugfs(devp);
	}
	return devp;
}
static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
{
	static int vers_printed;
	struct uld_ctx *ctx;
	int i;

	if (!vers_printed++)
		printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n",
		       DRV_VERSION);

	/* ... allocate ctx, copy *infop into ctx->lldi, add it to uld_ctx_list ... */

	PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
	     __func__, pci_name(ctx->lldi.pdev),
	     ctx->lldi.nchan, ctx->lldi.nrxq, ctx->lldi.ntxq, ctx->lldi.nports);

	for (i = 0; i < ctx->lldi.nrxq; i++)
		PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);

	return ctx;
}
static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
			       const struct pkt_gl *gl)
{
	/* ... for an inline CPL: omit the RSS header and the trailing
	 * rsp_ctrl at the end of the 64-byte response descriptor ... */
	unsigned int len = 64 - sizeof(struct rsp_ctrl) - 8;

	skb_copy_to_linear_data(skb, &rsp[1], len);
	/* ... then dispatch the skb to the CPL handler for its opcode ... */
}
static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
{
	struct uld_ctx *ctx = handle;

	PDBG("%s new_state %u\n", __func__, new_state);
	switch (new_state) {
	case CXGB4_STATE_UP:
		if (!ctx->dev) {
			int ret;

			ctx->dev = c4iw_alloc(&ctx->lldi);
			if (IS_ERR(ctx->dev)) {
				printk(KERN_ERR MOD
				       "%s: initialization failed: %ld\n",
				       pci_name(ctx->lldi.pdev),
				       PTR_ERR(ctx->dev));
				ctx->dev = NULL;
				break;
			}
			ret = c4iw_register_device(ctx->dev);
			if (ret) {
				printk(KERN_ERR MOD
				       "%s: RDMA registration failed: %d\n",
				       pci_name(ctx->lldi.pdev), ret);
				c4iw_dealloc(ctx);
			}
		}
		break;
	case CXGB4_STATE_DOWN:
		printk(KERN_INFO MOD "%s: Down\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	case CXGB4_STATE_START_RECOVERY:
		printk(KERN_INFO MOD "%s: Fatal Error\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev) {
			struct ib_event event;

			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
			memset(&event, 0, sizeof(event));
			event.event = IB_EVENT_DEVICE_FATAL;
			event.device = &ctx->dev->ibdev;
			ib_dispatch_event(&event);
			c4iw_remove(ctx);
		}
		break;
	case CXGB4_STATE_DETACH:
		printk(KERN_INFO MOD "%s: Detach\n",
		       pci_name(ctx->lldi.pdev));
		if (ctx->dev)
			c4iw_remove(ctx);
		break;
	}
	return 0;
}
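/*
 * Doorbell overflow avoidance and recovery.  The cxgb4 LLD reports doorbell
 * FIFO pressure through the .control callback: on DB_FULL all user QP
 * doorbells are disabled and the device enters FLOW_CONTROL, on DB_EMPTY
 * they are re-enabled, and on DB_DROP the device enters RECOVERY and each
 * QP's SQ/RQ producer index is resynced with the hardware before doorbells
 * are turned back on.  The helpers below implement those three paths.
 */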
static int disable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_disable_wq_db(&qp->wq);
	return 0;
}

static void stop_queues(struct uld_ctx *ctx)
{
	spin_lock_irq(&ctx->dev->lock);
	if (ctx->dev->db_state == NORMAL) {
		ctx->dev->rdev.stats.db_state_transitions++;
		ctx->dev->db_state = FLOW_CONTROL;
		idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
	}
	spin_unlock_irq(&ctx->dev->lock);
}

static int enable_qp_db(int id, void *p, void *data)
{
	struct c4iw_qp *qp = p;

	t4_enable_wq_db(&qp->wq);
	return 0;
}

static void resume_queues(struct uld_ctx *ctx)
{
	spin_lock_irq(&ctx->dev->lock);
	if (ctx->dev->db_state == FLOW_CONTROL) {
		ctx->dev->db_state = NORMAL;
		ctx->dev->rdev.stats.db_state_transitions++;
		idr_for_each(&ctx->dev->qpidr, enable_qp_db, NULL);
	}
	spin_unlock_irq(&ctx->dev->lock);
}
struct qp_list {
	unsigned idx;
	struct c4iw_qp **qps;
};

static int add_and_ref_qp(int id, void *p, void *data)
{
	struct qp_list *qp_listp = data;
	struct c4iw_qp *qp = p;

	c4iw_qp_add_ref(&qp->ibqp);
	qp_listp->qps[qp_listp->idx++] = qp;
	return 0;
}

static int count_qps(int id, void *p, void *data)
{
	unsigned *countp = data;

	(*countp)++;
	return 0;
}

static void deref_qps(struct qp_list qp_list)
{
	int idx;

	for (idx = 0; idx < qp_list.idx; idx++)
		c4iw_qp_rem_ref(&qp_list.qps[idx]->ibqp);
}
static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
{
	int idx;
	int ret;

	for (idx = 0; idx < qp_list->idx; idx++) {
		struct c4iw_qp *qp = qp_list->qps[idx];

		/* Resync the SQ producer index with the hardware. */
		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.sq.qid,
					  t4_sq_host_wq_pidx(&qp->wq),
					  t4_sq_wq_size(&qp->wq));
		if (ret) {
			printk(KERN_ERR MOD "%s: Fatal error - "
			       "DB overflow recovery failed - "
			       "error syncing SQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
			return;
		}

		/* Resync the RQ producer index with the hardware. */
		ret = cxgb4_sync_txq_pidx(qp->rhp->rdev.lldi.ports[0],
					  qp->wq.rq.qid,
					  t4_rq_host_wq_pidx(&qp->wq),
					  t4_rq_wq_size(&qp->wq));
		if (ret) {
			printk(KERN_ERR MOD "%s: Fatal error - "
			       "DB overflow recovery failed - "
			       "error syncing RQ qid %u\n",
			       pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
			return;
		}

		/* ... wait for the doorbell FIFO to drain before moving on ... */
	}
}
static void recover_queues(struct uld_ctx *ctx)
{
	struct qp_list qp_list;
	int count = 0;
	int ret;

	/* Put every queue into recovery mode with user doorbells disabled. */
	spin_lock_irq(&ctx->dev->lock);
	ctx->dev->db_state = RECOVERY;
	ctx->dev->rdev.stats.db_state_transitions++;
	idr_for_each(&ctx->dev->qpidr, disable_qp_db, NULL);
	spin_unlock_irq(&ctx->dev->lock);

	/* ... wait for the hardware doorbell FIFO to drain, then flush the
	 * SGE EQ context cache ... */
	ret = cxgb4_flush_eq_cache(ctx->dev->rdev.lldi.ports[0]);
	if (ret) {
		printk(KERN_ERR MOD
		       "%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		return;
	}

	/* Build a referenced list of all QPs so they can be recovered
	 * outside the device lock. */
	spin_lock_irq(&ctx->dev->lock);
	idr_for_each(&ctx->dev->qpidr, count_qps, &count);
	qp_list.qps = kzalloc(count * sizeof(*qp_list.qps), GFP_ATOMIC);
	if (!qp_list.qps) {
		printk(KERN_ERR MOD
		       "%s: Fatal error - DB overflow recovery failed\n",
		       pci_name(ctx->lldi.pdev));
		spin_unlock_irq(&ctx->dev->lock);
		return;
	}
	qp_list.idx = 0;
	idr_for_each(&ctx->dev->qpidr, add_and_ref_qp, &qp_list);
	spin_unlock_irq(&ctx->dev->lock);

	/* Resync the lost doorbells in a sleepable context. */
	recover_lost_dbs(ctx, &qp_list);

	deref_qps(qp_list);
	kfree(qp_list.qps);

	/* ... wait for the doorbell FIFO to drain again, then resume ... */
	spin_lock_irq(&ctx->dev->lock);
	ctx->dev->db_state = NORMAL;
	idr_for_each(&ctx->dev->qpidr, enable_qp_db, NULL);
	ctx->dev->rdev.stats.db_state_transitions++;
	spin_unlock_irq(&ctx->dev->lock);
}
static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
{
	struct uld_ctx *ctx = handle;

	switch (control) {
	case CXGB4_CONTROL_DB_FULL:
		stop_queues(ctx);
		ctx->dev->rdev.stats.db_full++;
		break;
	case CXGB4_CONTROL_DB_EMPTY:
		resume_queues(ctx);
		ctx->dev->rdev.stats.db_empty++;
		break;
	case CXGB4_CONTROL_DB_DROP:
		recover_queues(ctx);
		ctx->dev->rdev.stats.db_drop++;
		break;
	default:
		printk(KERN_WARNING MOD "%s: unknown control cmd %u\n",
		       pci_name(ctx->lldi.pdev), control);
		break;
	}
	return 0;
}

static struct cxgb4_uld_info c4iw_uld_info = {
	.name = DRV_NAME,
	.add = c4iw_uld_add,
	.rx_handler = c4iw_uld_rx_handler,
	.state_change = c4iw_uld_state_change,
	.control = c4iw_uld_control,
};
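/*
 * c4iw_uld_info is registered with the cxgb4 LLD at module init; cxgb4 then
 * calls .add once per adapter and .state_change(CXGB4_STATE_UP) to bring the
 * RDMA device up, and routes ingress CPL messages and doorbell control
 * events through .rx_handler and .control respectively.
 */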
static int __init c4iw_init_module(void)
{
	/* ... set up the CM layer, then create the debugfs root: */
	c4iw_debugfs_root = debugfs_create_dir(DRV_NAME, NULL);
	if (!c4iw_debugfs_root)
		printk(KERN_WARNING MOD
		       "could not create debugfs entry, continuing\n");
	/* ... and register with cxgb4 as the RDMA ULD ... */
	return 0;
}

static void __exit c4iw_exit_module(void)
{
	/* ... remove each device, unregister the ULD, remove debugfs ... */
}