29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
32 #include <linux/module.h>
33 #include <linux/net.h>
34 #include <linux/ipv6.h>
36 #include <linux/errno.h>
37 #include <linux/kernel.h>
45 #include <linux/slab.h>
/*
 * Transport tunables. NOTE(review): the stray leading numbers on each
 * line are extraction residue (original file line numbers), not code.
 */
53 #define P9_RDMA_SQ_DEPTH 32	/* send-queue depth — presumably the default for opts.sq_depth (see qp_attr.cap.max_send_wr below); confirm */
54 #define P9_RDMA_RQ_DEPTH 32	/* receive-queue depth — presumably the default for opts.rq_depth; confirm */
55 #define P9_RDMA_SEND_SGE 4	/* scatter/gather entries per send WR — usage not visible in this fragment */
56 #define P9_RDMA_RECV_SGE 4	/* scatter/gather entries per recv WR — usage not visible in this fragment */
59 #define P9_RDMA_TIMEOUT 30000	/* NOTE(review): presumably milliseconds (rdma_resolve_* timeouts are ms) — confirm at call site */
60 #define P9_RDMA_MAXSIZE (4*4096)	/* 16 KiB maximum message size */
/*
 * NOTE(review): disjoint fragments of an option-string parser:
 * an allocation-failure message, the switch to a duplicated option
 * buffer, and a malformed-integer message. The surrounding control
 * flow is missing from this view — do not edit in isolation.
 */
184 "failed to allocate copy of option string\n");
187 options = tmp_options;
198 "integer field, but no integer?\n");
/*
 * NOTE(review): opening of a switch on an event code (presumably an
 * RDMA CM event, given the cm_id usage elsewhere in this file — TODO
 * confirm); none of the case arms are visible in this fragment.
 */
229 switch (event->
event) {
/*
 * NOTE(review): three unrelated statement fragments from (apparently)
 * completion/event handling; the enclosing functions are not visible.
 */
/* Transition the transport into the flushing state. */
307 rdma->
state = P9_RDMA_FLUSHING;
/* Unmap a single DMA mapping on the CM device; buffer/size args are on missing lines. */
315 ib_dma_unmap_single(rdma->
cm_id->device,
/* Trailing arguments of a call (likely an error report of event->event) — opening line missing. */
323 event->
event, context);
/*
 * NOTE(review): fragments of a completion-queue drain loop. One work
 * completion is polled at a time; completions are dispatched to
 * handle_recv()/handle_send() with the WC status and byte count.
 * The dispatch condition itself (apparently keyed on c->wc_op, per the
 * error message) is on lines missing from this view.
 */
334 while ((ret = ib_poll_cq(cq, 1, &
wc)) > 0) {
340 handle_recv(client, rdma, c,
wc.status,
wc.byte_len);
344 handle_send(client, rdma, c,
wc.status,
wc.byte_len);
/* Unexpected completion kinds are logged rather than handled. */
349 pr_err(
"unexpected completion type, c->wc_op=%d, wc.opcode=%d, status=%d\n",
/*
 * Asynchronous CQ event callback (registered with the CQ, presumably —
 * the registration site shows "cq_event_handler, client" below).
 * NOTE(review): only the signature is visible; the body is on missing
 * lines.
 */
357 static void cq_event_handler(
struct ib_event *
e,
void *
v)
/*
 * NOTE(review): teardown guards — each IB resource (QP, PD, CQ) is
 * touched only if it was successfully created (non-NULL and not an
 * ERR_PTR). The destroy/dealloc calls themselves are on missing lines.
 */
370 if (rdma->
qp && !IS_ERR(rdma->
qp))
373 if (rdma->
pd && !IS_ERR(rdma->
pd))
376 if (rdma->
cq && !IS_ERR(rdma->
cq))
/*
 * NOTE(review): fragments of a receive-post routine. A buffer is DMA
 * mapped for the CM device, the mapping is checked for failure, the
 * context pointer is stashed in wr.wr_id, and the WR is posted to the
 * QP's receive queue. SGE setup and error paths are on missing lines.
 */
392 c->
busa = ib_dma_map_single(rdma->
cm_id->device,
395 if (ib_dma_mapping_error(rdma->
cm_id->device, c->
busa))
/* wr_id round-trips the per-request context through the completion. */
404 wr.wr_id = (
unsigned long) c;
407 return ib_post_recv(rdma->
qp, &
wr, &bad_wr);
/*
 * NOTE(review): fragments of reply-buffer setup in the request path.
 * The reply fcall's data area is placed immediately after its header
 * struct (offset expression is on a missing line), its capacity is the
 * client's negotiated msize, and it is attached to a reply context
 * which is then pre-posted for receive.
 */
440 req->
rc->sdata = (
char *) req->
rc +
442 req->
rc->capacity = client->
msize;
445 rpl_context->
rc = req->
rc;
446 if (!rpl_context->
rc) {
459 err = post_recv(client, rpl_context);
/*
 * NOTE(review): fragments of the send half of the request path. The
 * transmit fcall's data (tc->sdata, tc->size bytes) is DMA mapped,
 * the mapping checked, the SGE length set to the message size, the
 * context stashed in wr.wr_id, and the WR posted on the QP's send
 * queue. DMA direction, sge.addr/lkey, and wr opcode/flags are on
 * missing lines.
 */
476 c->
busa = ib_dma_map_single(rdma->
cm_id->device,
477 c->
req->tc->sdata, c->
req->tc->size,
479 if (ib_dma_mapping_error(rdma->
cm_id->device, c->
busa))
483 sge.length = c->
req->tc->size;
488 wr.wr_id = (
unsigned long) c;
497 return ib_post_send(rdma->
qp, &
wr, &bad_wr);
/*
 * NOTE(review): fragments of a shutdown path guarded by req_lock
 * (held with IRQs saved by a missing spin_lock_irqsave). If the
 * transport is not yet closing, it is marked P9_RDMA_CLOSING before
 * the lock is dropped; otherwise the lock is simply dropped. The code
 * between the two unlocks (likely the actual disconnect) is missing.
 */
511 if (rdma->
state < P9_RDMA_CLOSING) {
512 rdma->
state = P9_RDMA_CLOSING;
513 spin_unlock_irqrestore(&rdma->
req_lock, flags);
516 spin_unlock_irqrestore(&rdma->
req_lock, flags);
/*
 * Tear down the RDMA transport attached to a 9P client: fetch the
 * transport from client->trans and destroy it.
 * NOTE(review): only fragments are visible; guard checks between these
 * lines are missing from this view.
 */
520 static void rdma_close(
struct p9_client *client)
527 rdma = client->
trans;
533 rdma_destroy_trans(rdma);
/*
 * NOTE(review): fragment of transport allocation/initialization — the
 * completion used to wait for CM events is initialized here. The rest
 * of the allocator is on missing lines.
 */
552 init_completion(&rdma->
cm_done);
/*
 * Create and connect the RDMA transport for a 9P client.
 * NOTE(review): heavily fragmented — only scattered statements are
 * visible. The apparent sequence is: parse mount options, allocate the
 * transport, create a CM id (creation call missing; only the IS_ERR
 * check survives), attach the transport to the client, resolve address
 * then route (waiting on state transitions), create the CQ and PD,
 * cache the device's local DMA lkey, build QP attributes, and connect
 * (waiting for P9_RDMA_CONNECTED). Errors fall through to
 * rdma_destroy_trans(). Each step's call and error label are largely
 * on missing lines — verify against the full file before editing.
 */
572 rdma_create_trans(
struct p9_client *client,
const char *
addr,
char *args)
582 err = parse_opts(args, &opts);
587 rdma = alloc_rdma(&opts);
594 if (IS_ERR(rdma->
cm_id))
598 client->
trans = rdma;
/* Address must have resolved (and err be clear) before continuing. */
610 if (err || (rdma->
state != P9_RDMA_ADDR_RESOLVED))
618 if (err || (rdma->
state != P9_RDMA_ROUTE_RESOLVED))
/* Tail of the CQ-creation call — the function name is on a missing line. */
628 cq_event_handler, client,
630 if (IS_ERR(rdma->
cq))
636 if (IS_ERR(rdma->
pd))
/* Use the device-wide local DMA lkey for SGEs. */
642 rdma->
lkey = rdma->
cm_id->device->local_dma_lkey;
/* QP attributes: both directions share the single CQ; depths come from options. */
651 memset(&qp_attr, 0,
sizeof qp_attr);
652 qp_attr.event_handler = qp_event_handler;
653 qp_attr.qp_context =
client;
654 qp_attr.cap.max_send_wr = opts.
sq_depth;
655 qp_attr.cap.max_recv_wr = opts.
rq_depth;
660 qp_attr.send_cq = rdma->
cq;
661 qp_attr.recv_cq = rdma->
cq;
/* Connect with no private data. */
668 memset(&conn_param, 0,
sizeof(conn_param));
669 conn_param.private_data =
NULL;
670 conn_param.private_data_len = 0;
677 if (err || (rdma->
state != P9_RDMA_CONNECTED))
/* Common error exit: tear everything down. */
685 rdma_destroy_trans(rdma);
/*
 * NOTE(review): designated initializers of the transport-ops structure
 * (presumably a struct p9_trans_module — its declaration line and the
 * .close/.maxsize fields are missing from this view).
 */
694 .create = rdma_create_trans,
696 .request = rdma_request,
697 .cancel = rdma_cancel,
/*
 * Module entry/exit points — presumably registering/unregistering the
 * transport with the 9P core (bodies are on missing lines; confirm).
 */
703 static int __init p9_trans_rdma_init(
void)
709 static void __exit p9_trans_rdma_exit(
void)