#define pr_fmt(fmt) PFX fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/random.h>

#include <scsi/scsi.h>

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"0.2"
#define DRV_RELDATE	"November 1, 2005"
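
/*
 * Module parameters controlling scatter/gather sizing and vendor
 * workarounds; srp_sg_tablesize is deprecated in favor of cmd_sg_entries.
 */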
static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static int topspin_workarounds = 1;

MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
static void srp_send_completion(struct ib_cq *cq, void *target_ptr);

.remove = srp_remove_one
static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}
static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

return topspin_workarounds &&
iu = kmalloc(sizeof *iu, gfp_mask);

iu->buf = kzalloc(size, gfp_mask);

iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size, direction);

if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
srp_cm_handler, target);
if (IS_ERR(new_cm_id))
	return PTR_ERR(new_cm_id);

target->cm_id = new_cm_id;
init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);

ret = PTR_ERR(target->recv_cq);

if (IS_ERR(target->send_cq)) {
	ret = PTR_ERR(target->send_cq);

init_attr->cap.max_recv_sge = 1;
init_attr->cap.max_send_sge = 1;

init_attr->send_cq = target->send_cq;

if (IS_ERR(target->qp)) {
	ret = PTR_ERR(target->qp);

ret = srp_init_qp(target, target->qp);
static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)

PFX "Got failed path rec status %d\n", status);

target->path = *pathrec;

target->path.numb_path = 1;

init_completion(&target->done);

srp_path_rec_completion,

PFX "Path record query failed\n");
req->param.primary_path = &target->path;
req->param.alternate_path = NULL;
req->param.qp_num = target->qp->qp_num;
req->param.qp_type = target->qp->qp_type;
req->param.private_data = &req->priv;
req->param.private_data_len = sizeof req->priv;
req->param.flow_control = 1;

req->param.starting_psn &= 0xffffff;
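
/*
 * Conservative CM timeout/retry defaults; an RNR retry count of 7 means
 * "retry indefinitely".
 */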
req->param.responder_resources = 4;
req->param.remote_cm_response_timeout = 20;
req->param.local_cm_response_timeout = 20;
req->param.retry_count = 7;
req->param.rnr_retry_count = 7;
req->param.max_cm_retries = 15;
&target->path.sgid.global.interface_id, 8);

&target->path.sgid.global.interface_id, 8);
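
/*
 * Topspin/Cisco SRP targets will reject our login unless we zero out the
 * first 8 bytes of the initiator port ID and set the second 8 bytes to
 * the local node GUID.
 */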
if (srp_target_is_topspin(target)) {

PFX "Topspin/Cisco initiator port ID workaround "
"activated for target GUID %016llx\n",

memset(req->priv.initiator_port_id, 0, 8);

&target->srp_host->srp_dev->dev->node_guid, 8);
init_completion(&target->done);

PFX "Sending CM DREQ failed\n");

spin_lock_irq(&target->lock);
if (target->state == old) {

spin_unlock_irq(&target->lock);

for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)

spin_lock(&target->srp_host->target_lock);

spin_unlock(&target->srp_host->target_lock);

srp_del_scsi_host_attr(target->scsi_host);

srp_free_target_ib(target);
srp_free_req_data(target);

ret = srp_lookup_path(target);

init_completion(&target->done);
ret = srp_send_req(target);

ret = srp_lookup_path(target);

if (!retries-- || srp_new_cm_id(target)) {

"giving up on stale connection\n");

"retrying stale connection\n");
static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)

if (!scsi_sglist(scmnd) ||

ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),

} else if (req->scmnd == scmnd) {

spin_unlock_irqrestore(&target->lock, flags);

srp_unmap_data(scmnd, target, req);

spin_unlock_irqrestore(&target->lock, flags);

srp_free_req(target, req, scmnd, 0);

srp_disconnect_target(target);

ret = srp_new_cm_id(target);

ret = srp_init_qp(target, target->qp);

while (ib_poll_cq(target->recv_cq, 1, &wc) > 0)
	; /* nothing */
while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
	; /* nothing */

srp_reset_req(target, req);

INIT_LIST_HEAD(&target->free_tx);

ret = srp_connect_target(target);

PFX "reconnect failed (%d), removing target port.\n", ret);

spin_lock_irq(&target->lock);

spin_unlock_irq(&target->lock);
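
/*
 * S/G mapping helpers: coalesce contiguous DMA pages into FMR mappings
 * where possible, falling back to direct memory descriptors that use the
 * global rkey.
 */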
unsigned int dma_len, u32 rkey)

srp_map_desc(state, 0, state->fmr_len, fmr->fmr->rkey);

static void srp_map_update_start(struct srp_map_state *state,
				 struct scatterlist *sg, int sg_index,
				 dma_addr_t dma_addr)

unsigned int dma_len = ib_sg_dma_len(ibdev, sg);

srp_map_desc(state, dma_addr, dma_len, target->rkey);

ret = srp_map_finish_fmr(state, target);

srp_map_desc(state, dma_addr, dma_len, target->rkey);
srp_map_update_start(state, NULL, 0, 0);

srp_map_update_start(state, sg, sg_index, dma_addr);

ret = srp_map_finish_fmr(state, target);

srp_map_update_start(state, sg, sg_index, dma_addr);

ret = srp_map_finish_fmr(state, target);

srp_map_update_start(state, NULL, 0, 0);

int i, len, nents, count, use_fmr;

return sizeof(struct srp_cmd);

PFX "Unhandled data direction %d\n",

nents = scsi_sg_count(scmnd);
scat = scsi_sglist(scmnd);

indirect_hdr = (void *) cmd->add_data;

memset(&state, 0, sizeof(state));
if (srp_map_sg_entry(&state, target, sg, i, use_fmr)) {

unsigned int dma_len;

dma_addr = ib_sg_dma_address(ibdev, sg);
dma_len = ib_sg_dma_len(ibdev, sg);

srp_map_desc(&state, dma_addr, dma_len, target->rkey);

if (state.ndesc == 1) {

"Could not fit S/G list into SRP_CMD\n");

unsigned long flags;

spin_unlock_irqrestore(&target->lock, flags);

srp_send_completion(target->send_cq, target);

if (list_empty(&target->free_tx))

struct srp_iu *iu, int len)

return ib_post_send(target->qp, &wr, &bad_wr);

return ib_post_recv(target->qp, &wr, &bad_wr);

unsigned long flags;

spin_unlock_irqrestore(&target->lock, flags);

scmnd = srp_claim_req(target, req, NULL);

"Null scmnd for RSP w/tag %016llx\n",
(unsigned long long) rsp->tag);

spin_unlock_irqrestore(&target->lock, flags);

srp_free_req(target, req, scmnd,
	     be32_to_cpu(rsp->req_lim_delta));

unsigned long flags;

spin_unlock_irqrestore(&target->lock, flags);

"no IU available to send response\n");

err = srp_post_send(target, iu, len);

"unable to post response: %d\n", err);

if (srp_response_common(target, delta, &rsp, sizeof rsp))
	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "problems processing SRP_CRED_REQ\n");

if (srp_response_common(target, delta, &rsp, sizeof rsp))
	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "problems processing SRP_AER_REQ\n");
opcode = *(u8 *) iu->buf;

PFX "recv completion, opcode 0x%02x\n", opcode);

srp_process_rsp(target, iu->buf);

srp_process_cred_req(target, iu->buf);

srp_process_aer_req(target, iu->buf);

PFX "Got target logout request\n");

PFX "Unhandled SRP opcode 0x%02x\n", opcode);

res = srp_post_recv(target, iu);

PFX "Recv failed with error code %d\n", res);
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)

while (ib_poll_cq(cq, 1, &wc) > 0) {

PFX "failed receive status %d\n",

srp_handle_recv(target, &wc);

static void srp_send_completion(struct ib_cq *cq, void *target_ptr)

while (ib_poll_cq(cq, 1, &wc) > 0) {

PFX "failed send status %d\n",
unsigned long flags;

spin_unlock_irqrestore(&target->lock, flags);

dev = target->srp_host->srp_dev->dev;
ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
			   DMA_TO_DEVICE);

memset(cmd, 0, sizeof *cmd);

len = srp_map_data(scmnd, target, req);

PFX "Failed to map data\n");

ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
			      DMA_TO_DEVICE);

if (srp_post_send(target, iu, len)) {

srp_unmap_data(scmnd, target, req);

spin_unlock_irqrestore(&target->lock, flags);

PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);

ret = srp_alloc_iu_bufs(target);

ret = srp_post_recv(target, iu);
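
/* Decode a CM REJ: port redirects, stale connections, SRP login failures. */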
static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_target_port *target)

cpi = event->param.rej_rcvd.ari;

if (srp_target_is_topspin(target)) {

PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
(unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
(unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

" REJ reason: IB_CM_REJ_PORT_REDIRECT\n");

" REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");

PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");

PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);

" REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
" opcode 0x%02x\n", opcode);
switch (event->event) {

PFX "Sending CM REQ failed\n");

srp_cm_rep_handler(cm_id, event->private_data, target);

srp_cm_rej_handler(cm_id, event, target);

PFX "DREQ received - connection closed\n");

PFX "Sending CM DREP failed\n");

PFX "connection closed\n");

PFX "Unhandled CM event %d\n", event->event);

spin_lock_irq(&target->lock);

spin_unlock_irq(&target->lock);

ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
			   DMA_TO_DEVICE);

memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
			      DMA_TO_DEVICE);
if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
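
/*
 * SCSI error-handling callbacks: abort a command via an SRP task-management
 * IU, reset a LUN, or reset the host by reconnecting the target port.
 */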
static int srp_abort(struct scsi_cmnd *scmnd)

if (!req || target->qp_in_error || !srp_claim_req(target, req, scmnd))

srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
		  SRP_TSK_ABORT_TASK);
srp_free_req(target, req, scmnd, 0);

static int srp_reset_device(struct scsi_cmnd *scmnd)

srp_reset_req(target, req);

static int srp_reset_host(struct scsi_cmnd *scmnd)

if (!srp_reconnect_target(target))

return sprintf(buf, "0x%016llx\n",

return sprintf(buf, "0x%016llx\n",

return sprintf(buf, "0x%016llx\n",

return sprintf(buf, "%pI6\n", target->path.dgid.raw);
&dev_attr_service_id,
&dev_attr_orig_dgid,
&dev_attr_zero_req_lim,
&dev_attr_local_ib_port,
&dev_attr_local_ib_device,
&dev_attr_cmd_sg_entries,
&dev_attr_allow_ext_sg,
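
/* SCSI host template: wires the SRP entry points into the SCSI midlayer. */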
.name			 = "InfiniBand SRP initiator",

.info			 = srp_target_info,
.queuecommand		 = srp_queuecommand,
.eh_abort_handler	 = srp_abort,
.eh_device_reset_handler = srp_reset_device,
.eh_host_reset_handler	 = srp_reset_host,

.shost_attrs		 = srp_host_attrs

if (IS_ERR(rport)) {
	scsi_remove_host(target->scsi_host);
	return PTR_ERR(rport);

static void srp_release_dev(struct device *dev)

static struct class srp_class = {
	.name        = "infiniband_srp",
	.dev_release = srp_release_dev
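
/*
 * Parse the comma-separated key=value options in the target creation
 * string written to the sysfs add_target file.
 */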
static int srp_parse_options(const char *buf, struct srp_target_port *target)

while ((p = strsep(&sep_opt, ",")) != NULL) {

pr_warn("bad dest GID parameter '%s'\n", p);

for (i = 0; i < 16; ++i) {

pr_warn("bad P_Key parameter '%s'\n", p);

pr_warn("bad max sect parameter '%s'\n", p);

pr_warn("bad max cmd_per_lun parameter '%s'\n",

target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);

pr_warn("bad IO class parameter '%s'\n", p);

pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",

pr_warn("bad max cmd_sg_entries parameter '%s'\n",

pr_warn("bad allow_ext_sg parameter '%s'\n", p);

if (match_int(args, &token) || token < 1 ||

pr_warn("bad max sg_tablesize parameter '%s'\n",

pr_warn("unknown parameter or missing value '%s' in target creation request\n",

if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
    !(srp_opt_tokens[i].token & opt_mask))
	pr_warn("target creation request is missing parameter '%s'\n",
const char *buf, size_t count)

target_host->transportt = ib_srp_transport_template;

target = host_to_target(target_host);

target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;

ret = srp_parse_options(buf, target);

pr_warn("No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");

INIT_LIST_HEAD(&target->free_tx);

if (ib_dma_mapping_error(ibdev, dma_addr))

"new target: id_ext %016llx ioc_guid %016llx pkey %04x "
"service_id %016llx dgid %pI6\n",

target->path.dgid.raw);

ret = srp_create_target_ib(target);

ret = srp_new_cm_id(target);

ret = srp_connect_target(target);

PFX "Connection failed\n");

ret = srp_add_target(host, target);
if (ret)
	goto err_disconnect;

srp_disconnect_target(target);

srp_free_target_ib(target);

srp_free_req_data(target);

host->dev.class = &srp_class;
host->dev.parent = device->dev->dma_device;
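
/*
 * srp_add_one(): per-HCA initialization. Allocate a PD and DMA MR, size
 * the FMR pool, then register each port with srp_add_port().
 */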
static void srp_add_one(struct ib_device *device)

int max_pages_per_fmr, fmr_page_shift, s, e, p;

pr_warn("Query device failed for %s\n", device->name);

INIT_LIST_HEAD(&srp_dev->dev_list);

srp_dev->dev = device;

if (IS_ERR(srp_dev->pd))

if (IS_ERR(srp_dev->mr))

memset(&fmr_param, 0, sizeof fmr_param);

fmr_param.cache = 1;
fmr_param.max_pages_per_fmr = max_pages_per_fmr;
fmr_param.page_shift = fmr_page_shift;

for (p = s; p <= e; ++p) {
	host = srp_add_port(srp_dev, p);

static void srp_remove_one(struct ib_device *device)

spin_lock_irq(&target->lock);

spin_unlock_irq(&target->lock);

srp_del_scsi_host_attr(target->scsi_host);

srp_disconnect_target(target);

srp_free_target_ib(target);
srp_free_req_data(target);
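
/*
 * Module init: validate the scatter/gather module parameters before
 * registering the transport class and IB client.
 */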
static int __init srp_init_module(void)

if (srp_sg_tablesize) {
	pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
	if (!cmd_sg_entries)
		cmd_sg_entries = srp_sg_tablesize;

if (!cmd_sg_entries)

if (cmd_sg_entries > 255) {
	pr_warn("Clamping cmd_sg_entries to 255\n");
	cmd_sg_entries = 255;

if (!indirect_sg_entries)
	indirect_sg_entries = cmd_sg_entries;
else if (indirect_sg_entries < cmd_sg_entries) {
	pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
		cmd_sg_entries);
	indirect_sg_entries = cmd_sg_entries;

ib_srp_transport_template = srp_attach_transport(&ib_srp_transport_functions);
if (!ib_srp_transport_template)

pr_err("couldn't register class infiniband_srp\n");

pr_err("couldn't register IB client\n");

static void __exit srp_cleanup_module(void)