#include <linux/module.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
"Default timeout in seconds for initialization and EH commands. "
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver. "
static void ibmvfc_npiv_logout(struct ibmvfc_host *);
static const char *unknown_error = "unknown error";
#ifdef CONFIG_SCSI_IBMVFC_TRACE
entry->fmt = evt->crq.format;
switch (entry->fmt) {
entry->u.start.xfer_len = vfc_cmd->iu.xfer_len;
entry->fmt = evt->crq.format;
switch (entry->fmt) {
entry->u.end.fcp_rsp_flags = vfc_cmd->rsp.flags;
entry->u.end.rsp_code = vfc_cmd->rsp.data.info.rsp_code;
entry->u.end.scsi_status = vfc_cmd->rsp.scsi_status;
#define ibmvfc_trc_start(evt) do { } while (0)
#define ibmvfc_trc_end(evt) do { } while (0)
int rc = ibmvfc_get_err_index(status, error);
return unknown_error;
static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
err = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error);
static int ibmvfc_retry_cmd(u16 status, u16 error)
int rc = ibmvfc_get_err_index(status, error);
static const char *unknown_fc_explain = "unknown fc explain";
static const struct {
{ 0x00, "no additional explanation" },
{ 0x01, "service parameter error - options" },
{ 0x03, "service parameter error - initiator control" },
{ 0x05, "service parameter error - recipient control" },
{ 0x07, "service parameter error - received data field size" },
{ 0x09, "service parameter error - concurrent seq" },
{ 0x0B, "service parameter error - credit" },
{ 0x0D, "invalid N_Port/F_Port_Name" },
{ 0x0E, "invalid node/Fabric Name" },
{ 0x0F, "invalid common service parameters" },
{ 0x11, "invalid association header" },
{ 0x13, "association header required" },
{ 0x15, "invalid originator S_ID" },
{ 0x17, "invalid OX_ID-RX-ID combination" },
{ 0x19, "command (request) already in progress" },
{ 0x1E, "N_Port Login requested" },
{ 0x1F, "Invalid N_Port_ID" },
static const struct {
{ 0x00, "no additional explanation" },
{ 0x01, "port identifier not registered" },
{ 0x02, "port name not registered" },
{ 0x03, "node name not registered" },
{ 0x04, "class of service not registered" },
{ 0x06, "initial process associator not registered" },
{ 0x07, "FC-4 TYPEs not registered" },
{ 0x08, "symbolic port name not registered" },
{ 0x09, "symbolic node name not registered" },
{ 0x0A, "port type not registered" },
{ 0xF0, "authorization exception" },
{ 0xF1, "authentication exception" },
{ 0xF2, "data base full" },
{ 0xF3, "data base empty" },
{ 0xF4, "processing request" },
{ 0xF5, "unable to verify connection" },
{ 0xF6, "devices not in a common zone" },
static const char *ibmvfc_get_ls_explain(u16 status)
return ls_explain[i].name;
return unknown_fc_explain;
static const char *ibmvfc_get_gs_explain(u16 status)
return gs_explain[i].name;
return unknown_fc_explain;
static const struct {
static const char *unknown_fc_type = "unknown fc type";
static const char *ibmvfc_get_fc_type(u16 status)
return unknown_fc_type;
static int ibmvfc_set_host_state(struct ibmvfc_host *vhost,
switch (vhost->state) {
static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
static void ibmvfc_link_down(struct ibmvfc_host *vhost,
ibmvfc_set_host_state(vhost, state);
"Host initialization retries exceeded. Taking adapter offline\n");
vhost->async_crq.cur = 0;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
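/* ibmvfc_send_crq(): hand one CRQ message (two 64-bit words) to the
 * hypervisor for this adapter's unit address via the H_SEND_CRQ hcall. */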
struct vio_dev *vdev = to_vio_dev(vhost->dev);
return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
static int ibmvfc_send_crq_init(struct ibmvfc_host *vhost)
return ibmvfc_send_crq(vhost, 0xC001000000000000LL, 0);
static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
ibmvfc_dbg(vhost, "Sending CRQ init complete\n");
return ibmvfc_send_crq(vhost, 0xC002000000000000LL, 0);
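/* CRQ teardown and recovery helpers: ibmvfc_release_crq_queue() frees the CRQ
 * with H_FREE_CRQ, ibmvfc_reenable_crq_queue() re-enables it with H_ENABLE_CRQ,
 * and ibmvfc_reset_crq() frees and re-registers it with H_REG_CRQ; each hcall
 * is retried while the hypervisor reports busy. */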
static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
struct vio_dev *vdev = to_vio_dev(vhost->dev);
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost)
struct vio_dev *vdev = to_vio_dev(vhost->dev);
rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
} while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
dev_err(vhost->dev, "Error enabling adapter (rc=%d)\n", rc);
static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
struct vio_dev *vdev = to_vio_dev(vhost->dev);
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
dev_warn(vhost->dev, "Partner adapter not ready\n");
dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (index < 0 || index >= pool->size)
if (evt != pool->events + index)
BUG_ON(!ibmvfc_valid_event(pool, evt));
static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
ibmvfc_free_event(evt);
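/* Request failure path (ibmvfc_fail_request/ibmvfc_purge_requests): complete
 * each outstanding event with error_code in the SCSI host byte and route its
 * completion through ibmvfc_scsi_eh_done(). */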
evt->cmnd->result = (error_code << 16);
evt->done = ibmvfc_scsi_eh_done;
static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
ibmvfc_fail_request(evt, error_code);
static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
vhost->job_step = ibmvfc_npiv_logout;
ibmvfc_hard_reset_host(vhost);
static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
__ibmvfc_reset_host(vhost);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
"Host initialization retries exceeded. Taking adapter offline\n");
__ibmvfc_reset_host(vhost);
kref_get(&tgt->kref);
tgt = __ibmvfc_get_target(starget);
spin_unlock_irqrestore(shost->host_lock, flags);
ibmvfc_log(vhost, 3, "Unknown port speed: %lld Gbit\n",
spin_unlock_irqrestore(shost->host_lock, flags);
static void ibmvfc_get_host_port_state(struct Scsi_Host *shost)
switch (vhost->state) {
spin_unlock_irqrestore(shost->host_lock, flags);
static void ibmvfc_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout)
static void ibmvfc_release_tgt(struct kref *kref)
static void ibmvfc_get_starget_node_name(struct scsi_target *starget)
kref_put(&tgt->kref, ibmvfc_release_tgt);
static void ibmvfc_get_starget_port_name(struct scsi_target *starget)
kref_put(&tgt->kref, ibmvfc_release_tgt);
static void ibmvfc_get_starget_port_id(struct scsi_target *starget)
kref_put(&tgt->kref, ibmvfc_release_tgt);
static int ibmvfc_wait_while_resetting(struct ibmvfc_host *vhost)
(init_timeout * HZ));
return timeout ? 0 : -EIO;
static int ibmvfc_issue_fc_host_lip(struct Scsi_Host *shost)
dev_err(vhost->dev, "Initiating host LIP. Resetting connection\n");
ibmvfc_reset_host(vhost);
return ibmvfc_wait_while_resetting(vhost);
static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
const unsigned int *num;
of_node_put(rootdn);
static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
memset(login_info, 0, sizeof(*login_info));
location = location ? location : dev_name(vhost->dev);
static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost)
for (i = 0; i < pool->size; ++i) {
evt->crq.valid = 0x80;
static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost)
for (i = 0; i < pool->size; ++i) {
if (pool->events[i].ext_list)
pool->events[i].ext_list,
pool->events[i].ext_list_token);
static void ibmvfc_init_event(struct ibmvfc_event *evt,
static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,
static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd,
} else if (unlikely(sg_mapped < 0)) {
if (sg_mapped == 1) {
ibmvfc_map_sg_list(scmd, sg_mapped, data);
ibmvfc_map_sg_list(scmd, sg_mapped, evt->ext_list);
dev_err(vhost->dev, "Command timed out (%p). Resetting connection\n", evt);
ibmvfc_reset_host(vhost);
evt->timer.function = (void (*)(unsigned long))ibmvfc_timeout;
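/* Submission path (ibmvfc_send_event): the built CRQ message is handed to the
 * adapter with ibmvfc_send_crq(); H_CLOSED indicates the partner's receive
 * queue is not open yet, so the event is completed and left to be retried. */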
if ((rc = ibmvfc_send_crq(vhost, crq_as_u64[0], crq_as_u64[1]))) {
if (rc == H_CLOSED) {
if (printk_ratelimit())
dev_warn(vhost->dev, "Send warning. Receive queue closed, will retry.\n");
ibmvfc_free_event(evt);
dev_err(vhost->dev, "Send error (rc=%d)\n", rc);
evt->done = ibmvfc_scsi_eh_done;
const char *err = unknown_error;
int index = ibmvfc_get_err_index(vfc_cmd->status, vfc_cmd->error);
rsp_code = rsp->data.info.rsp_code;
"flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
static void ibmvfc_relogin(struct scsi_device *sdev)
if (rport == tgt->rport) {
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
ibmvfc_reinit_host(vhost);
scsi_set_resid(cmnd, 0);
cmnd->result = ibmvfc_get_err_result(vfc_cmd);
ibmvfc_relogin(cmnd->device);
ibmvfc_log_error(evt);
(scsi_bufflen(cmnd) - scsi_get_resid(cmnd) < cmnd->underflow))
ibmvfc_free_event(evt);
static inline int ibmvfc_host_chkready(struct ibmvfc_host *vhost)
switch (vhost->state) {
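/* ibmvfc_queuecommand_lck(): build an ibmvfc_cmd for the scsi_cmnd, map its
 * scatter/gather list for DMA, and submit it with ibmvfc_send_event(); if the
 * mapping fails, the event is freed and the DMA error is reported. */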
static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd,
if (unlikely((rc = fc_remote_port_chkready(rport))) ||
unlikely((rc = ibmvfc_host_chkready(vhost)))) {
evt = ibmvfc_get_event(vhost);
vfc_cmd = &evt->iu.cmd;
memset(vfc_cmd, 0, sizeof(*vfc_cmd));
vfc_cmd->resp.len = sizeof(vfc_cmd->rsp);
vfc_cmd->iu.xfer_len = scsi_bufflen(cmnd);
if (scsi_populate_tag_msg(cmnd, tag)) {
if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev))))
return ibmvfc_send_event(evt, vhost, 0);
ibmvfc_free_event(evt);
"Failed to map DMA buffer for command. rc=%d\n", rc);
*evt->sync_iu = *evt->xfer_iu;
static void ibmvfc_bsg_timeout_done(struct ibmvfc_event *evt)
ibmvfc_free_event(evt);
dev_info(vhost->dev, "Passthru command cancelled\n");
static int ibmvfc_bsg_timeout(struct fc_bsg_job *job)
unsigned long flags;
__ibmvfc_reset_host(vhost);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
evt = ibmvfc_get_event(vhost);
tmf->common.version = 1;
tmf->common.length = sizeof(*tmf);
rc = ibmvfc_send_event(evt, vhost, default_timeout);
dev_err(vhost->dev, "Failed to send cancel event. rc=%d\n", rc);
dev_info(vhost->dev, "Cancelling passthru command to port id 0x%lx\n",
spin_unlock_irqrestore(vhost->host->host_lock, flags);
static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
unsigned long flags;
int rc = 0, issue_login = 1;
if (tgt->scsi_id == port_id) {
if (unlikely((rc = ibmvfc_host_chkready(vhost))))
evt = ibmvfc_get_event(vhost);
memset(plogi, 0, sizeof(*plogi));
plogi->common.version = 1;
plogi->common.length = sizeof(*plogi);
init_completion(&evt->comp);
rc = ibmvfc_send_event(evt, vhost, default_timeout);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (rsp_iu.plogi.common.status)
ibmvfc_free_event(evt);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
static int ibmvfc_bsg_request(struct fc_bsg_job *job)
unsigned long flags, port_id = -1;
int rc = 0, req_seg, rsp_seg, issue_login = 0;
job->reply->reply_payload_rcv_len = 0;
port_id = (job->request->rqst_data.h_els.port_id[0] << 16) |
(job->request->rqst_data.h_els.port_id[1] << 8) |
job->request->rqst_data.h_els.port_id[2];
port_id = (job->request->rqst_data.h_ct.port_id[0] << 16) |
(job->request->rqst_data.h_ct.port_id[1] << 8) |
job->request->rqst_data.h_ct.port_id[2];
job->dd_data = (void *)port_id;
if (req_seg > 1 || rsp_seg > 1) {
rc = ibmvfc_bsg_plogi(vhost, port_id);
if (unlikely(rc || (rport && (rc = fc_remote_port_chkready(rport)))) ||
unlikely((rc = ibmvfc_host_chkready(vhost)))) {
spin_unlock_irqrestore(vhost->host->host_lock, flags);
evt = ibmvfc_get_event(vhost);
memset(mad, 0, sizeof(*mad));
mad->common.length = sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu);
mad->iu.flags = fc_flags;
mad->iu.tag = (u64)evt;
rsp_len = mad->iu.rsp.len;
init_completion(&evt->comp);
rc = ibmvfc_send_event(evt, vhost, 0);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (rsp_iu.passthru.common.status)
ibmvfc_free_event(evt);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
int rsp_rc = -EBUSY;
unsigned long flags;
evt = ibmvfc_get_event(vhost);
memset(tmf, 0, sizeof(*tmf));
tmf->resp.len = sizeof(tmf->rsp);
tmf->iu.tmf_flags = type;
init_completion(&evt->comp);
rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (rsp_iu.cmd.status)
rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);
rsp_code = fc_rsp->data.info.rsp_code;
"flags: %x fcp_rsp: %x, scsi_status: %x\n",
desc, ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error),
rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
ibmvfc_free_event(evt);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
static int ibmvfc_match_rport(struct ibmvfc_event *evt, void *rport)
if (cmd_rport == rport)
static int ibmvfc_match_lun(struct ibmvfc_event *evt, void *device)
if (evt->cmnd && evt->cmnd->device == device)
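/* ibmvfc_wait_for_ops(): poll the list of sent events with a match function
 * until every matching event has completed or the wait times out. It backs
 * the cancel/abort paths below, where ibmvfc_cancel_all() sends a cancel MAD
 * for all outstanding commands on a scsi_device and waits for the response. */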
static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
unsigned long flags;
if (match(evt, device)) {
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (match(evt, device)) {
spin_unlock_irqrestore(vhost->host->host_lock, flags);
dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
int rsp_rc = -EBUSY;
unsigned long flags;
if (evt->cmnd && evt->cmnd->device == sdev) {
spin_unlock_irqrestore(vhost->host->host_lock, flags);
evt = ibmvfc_get_event(vhost);
memset(tmf, 0, sizeof(*tmf));
tmf->common.length = sizeof(*tmf);
init_completion(&evt->comp);
rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
status = rsp.mad_common.status;
ibmvfc_free_event(evt);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
evt->iu.cmd.cancel_key == cancel_key)
static int ibmvfc_abort_task_set(struct scsi_device *sdev)
if (evt->cmnd && evt->cmnd->device == sdev) {
spin_unlock_irqrestore(vhost->host->host_lock, flags);
evt = ibmvfc_get_event(vhost);
memset(tmf, 0, sizeof(*tmf));
tmf->resp.len = sizeof(tmf->rsp);
init_completion(&evt->comp);
rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
ibmvfc_reset_host(vhost);
rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key);
rc = ibmvfc_wait_for_ops(vhost, evt, ibmvfc_match_evt);
ibmvfc_hard_reset_host(vhost);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if (rsp_iu.cmd.status)
rsp_code = ibmvfc_get_err_result(&rsp_iu.cmd);
rsp_code = fc_rsp->data.info.rsp_code;
"flags: %x fcp_rsp: %x, scsi_status: %x\n",
ibmvfc_get_cmd_error(rsp_iu.cmd.status, rsp_iu.cmd.error),
rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
ibmvfc_free_event(evt);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
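/* SCSI error handler entry points: each cancels and/or aborts the affected
 * commands, then uses ibmvfc_wait_for_ops() with a match function to wait for
 * the outstanding events to complete. */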
static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd)
int cancel_rc, abort_rc;
ibmvfc_wait_while_resetting(vhost);
abort_rc = ibmvfc_abort_task_set(sdev);
if (!cancel_rc && !abort_rc)
rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd)
int cancel_rc, reset_rc;
ibmvfc_wait_while_resetting(vhost);
if (!cancel_rc && !reset_rc)
rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun);
static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data)
unsigned long *rc = data;
static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd)
unsigned long cancel_rc = 0;
ibmvfc_wait_while_resetting(vhost);
if (!cancel_rc && !reset_rc)
rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target);
static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd)
dev_err(vhost->dev, "Resetting connection due to error recovery\n");
rc = ibmvfc_issue_fc_host_lip(vhost->host);
static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
if (dev_rport != rport)
ibmvfc_abort_task_set(sdev);
rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport);
ibmvfc_issue_fc_host_lip(shost);
if (ae_desc[i].ae == ae)
static const struct {
if (link_desc[i].state == state)
return link_desc[i].desc;
switch (crq->event) {
__ibmvfc_reset_host(vhost);
__ibmvfc_reset_host(vhost);
__ibmvfc_reset_host(vhost);
ibmvfc_reinit_host(vhost);
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
ibmvfc_reinit_host(vhost);
dev_err(vhost->dev, "Unknown async event received: %lld\n", crq->event);
switch (crq->valid) {
rc = ibmvfc_send_crq_init_complete(vhost);
ibmvfc_init_host(vhost);
dev_err(vhost->dev, "Unable to send init rsp. rc=%ld\n", rc);
dev_info(vhost->dev, "Partner initialization complete\n");
ibmvfc_init_host(vhost);
ibmvfc_purge_requests(vhost, DID_ERROR);
dev_err(vhost->dev, "Got an invalid message type 0x%02x\n", crq->valid);
if (unlikely(!ibmvfc_valid_event(&vhost->pool, evt))) {
dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
unsigned long flags;
if (time >= (init_timeout * HZ)) {
dev_info(vhost->dev, "Scan taking longer than %d seconds, "
"continuing initialization\n", init_timeout);
spin_unlock_irqrestore(shost->host_lock, flags);
static int ibmvfc_slave_alloc(struct scsi_device *sdev)
unsigned long flags = 0;
if (!rport || fc_remote_port_chkready(rport))
spin_unlock_irqrestore(shost->host_lock, flags);
static int ibmvfc_target_alloc(struct scsi_target *starget)
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
unsigned long flags = 0;
spin_unlock_irqrestore(shost->host_lock, flags);
static int ibmvfc_slave_configure(struct scsi_device *sdev)
unsigned long flags = 0;
spin_unlock_irqrestore(shost->host_lock, flags);
static int ibmvfc_change_queue_depth(struct scsi_device *sdev, int qdepth,
scsi_set_tag_type(sdev, tag_type);
static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
static ssize_t ibmvfc_show_host_device_name(struct device *dev,
static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
static ssize_t ibmvfc_show_log_level(struct device *dev,
unsigned long flags = 0;
spin_unlock_irqrestore(shost->host_lock, flags);
static ssize_t ibmvfc_store_log_level(struct device *dev,
unsigned long flags = 0;
spin_unlock_irqrestore(shost->host_lock, flags);
ibmvfc_show_log_level, ibmvfc_store_log_level);
#ifdef CONFIG_SCSI_IBMVFC_TRACE
char *buf, loff_t off, size_t count)
struct device *dev = container_of(kobj, struct device, kobj);
unsigned long flags = 0;
if (off + count > size) {
memcpy(buf, &src[off], count);
spin_unlock_irqrestore(shost->host_lock, flags);
.read = ibmvfc_read_trace,
&dev_attr_partition_name,
&dev_attr_device_name,
&dev_attr_port_loc_code,
&dev_attr_npiv_version,
&dev_attr_capabilities,
&dev_attr_log_level,
.name = "IBM POWER Virtual FC Adapter",
.queuecommand = ibmvfc_queuecommand,
.eh_abort_handler = ibmvfc_eh_abort_handler,
.eh_device_reset_handler = ibmvfc_eh_device_reset_handler,
.eh_target_reset_handler = ibmvfc_eh_target_reset_handler,
.eh_host_reset_handler = ibmvfc_eh_host_reset_handler,
.slave_alloc = ibmvfc_slave_alloc,
.slave_configure = ibmvfc_slave_configure,
.target_alloc = ibmvfc_target_alloc,
.scan_finished = ibmvfc_scan_finished,
.change_queue_depth = ibmvfc_change_queue_depth,
.change_queue_type = ibmvfc_change_queue_type,
.shost_attrs = ibmvfc_attrs,
crq = &async_crq->msgs[async_crq->cur];
if (crq->valid & 0x80) {
if (++async_crq->cur == async_crq->size)
crq = &queue->msgs[queue->cur];
if (crq->valid & 0x80) {
if (++queue->cur == queue->size)
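/* Interrupt handling: the hard IRQ handler just disables VIO interrupts and
 * schedules the tasklet; the tasklet drains the async and command CRQs, then
 * re-enables interrupts and re-checks both queues so a message that raced
 * with the enable is not lost. */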
static irqreturn_t ibmvfc_interrupt(int irq, void *dev_instance)
unsigned long flags;
vio_disable_interrupts(to_vio_dev(vhost->dev));
tasklet_schedule(&vhost->tasklet);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
static void ibmvfc_tasklet(void *data)
struct vio_dev *vdev = to_vio_dev(vhost->dev);
unsigned long flags;
while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
ibmvfc_handle_async(async, vhost);
while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
ibmvfc_handle_crq(crq, vhost);
vio_enable_interrupts(vdev);
if ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
vio_disable_interrupts(vdev);
ibmvfc_handle_async(async, vhost);
} else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
vio_disable_interrupts(vdev);
ibmvfc_handle_crq(crq, vhost);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
ibmvfc_init_tgt(tgt, job_step);
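/* Target login state machine: each target goes through Implicit Logout, then
 * PLOGI, then PRLI; the *_done handlers below either advance to the next step,
 * retry via ibmvfc_retry_tgt_init(), or mark the rport for deletion. */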
static const struct {
static int ibmvfc_get_prli_rsp(u16 flags)
int code = (flags & 0x0f00) >> 8;
if (prli_rsp[i].code == code)
static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
tgt_dbg(tgt, "Process Login succeeded: %X %02X %04X\n",
index = ibmvfc_get_prli_rsp(parms->flags);
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
} else if (prli_rsp[index].retry)
ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
kref_get(&tgt->kref);
evt = ibmvfc_get_event(vhost);
memset(prli, 0, sizeof(*prli));
prli->common.version = 1;
prli->common.length = sizeof(*prli);
if (ibmvfc_send_event(evt, vhost, default_timeout)) {
kref_put(&tgt->kref, ibmvfc_release_tgt);
tgt_dbg(tgt, "Sent process login\n");
static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
tgt_dbg(tgt, "Port Login succeeded\n");
if (tgt->ids.port_name &&
tgt_dbg(tgt, "Port re-init required\n");
ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
kref_get(&tgt->kref);
evt = ibmvfc_get_event(vhost);
memset(plogi, 0, sizeof(*plogi));
plogi->common.version = 1;
plogi->common.length = sizeof(*plogi);
if (ibmvfc_send_event(evt, vhost, default_timeout)) {
kref_put(&tgt->kref, ibmvfc_release_tgt);
tgt_dbg(tgt, "Sent port login\n");
static void ibmvfc_tgt_implicit_logout_done(struct ibmvfc_event *evt)
ibmvfc_free_event(evt);
tgt_dbg(tgt, "Implicit Logout succeeded\n");
kref_put(&tgt->kref, ibmvfc_release_tgt);
tgt_err(tgt, "Implicit Logout failed: rc=0x%02X\n", status);
ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_plogi);
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
kref_put(&tgt->kref, ibmvfc_release_tgt);
static void ibmvfc_tgt_implicit_logout(struct ibmvfc_target *tgt)
kref_get(&tgt->kref);
evt = ibmvfc_get_event(vhost);
memset(mad, 0, sizeof(*mad));
mad->common.length = sizeof(*mad);
if (ibmvfc_send_event(evt, vhost, default_timeout)) {
kref_put(&tgt->kref, ibmvfc_release_tgt);
tgt_dbg(tgt, "Sent Implicit Logout\n");
sizeof(tgt->ids.port_name)))
sizeof(tgt->ids.node_name)))
static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
tgt_dbg(tgt, "ADISC succeeded\n");
if (ibmvfc_adisc_needs_plogi(mad, tgt))
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16;
fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8;
tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
ibmvfc_get_cmd_error(mad->iu.status, mad->iu.error),
mad->iu.status, mad->iu.error,
ibmvfc_get_fc_type(fc_reason), fc_reason,
ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
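/* ibmvfc_init_passthru(): set up a passthru MAD whose command and response
 * descriptors point back into the event's own mapped I/O buffer (offsets from
 * evt->crq.ioba). */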
static void ibmvfc_init_passthru(struct ibmvfc_event *evt)
memset(mad, 0, sizeof(*mad));
mad->common.length = sizeof(*mad) - sizeof(mad->fc_iu) - sizeof(mad->iu);
mad->iu.cmd_len = sizeof(mad->fc_iu.payload);
mad->iu.rsp_len = sizeof(mad->fc_iu.response);
mad->iu.cmd.va = (u64)evt->crq.ioba +
mad->iu.cmd.len = sizeof(mad->fc_iu.payload);
mad->iu.rsp.va = (u64)evt->crq.ioba +
mad->iu.rsp.len = sizeof(mad->fc_iu.response);
static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt)
tgt_dbg(tgt, "ADISC cancel complete\n");
ibmvfc_free_event(evt);
kref_put(&tgt->kref, ibmvfc_release_tgt);
unsigned long flags;
tgt_dbg(tgt, "ADISC timeout\n");
spin_unlock_irqrestore(vhost->host->host_lock, flags);
kref_get(&tgt->kref);
evt = ibmvfc_get_event(vhost);
memset(tmf, 0, sizeof(*tmf));
tmf->common.length = sizeof(*tmf);
rc = ibmvfc_send_event(evt, vhost, default_timeout);
tgt_err(tgt, "Failed to send cancel event for ADISC. rc=%d\n", rc);
kref_put(&tgt->kref, ibmvfc_release_tgt);
__ibmvfc_reset_host(vhost);
tgt_dbg(tgt, "Attempting to cancel ADISC\n");
spin_unlock_irqrestore(vhost->host->host_lock, flags);
kref_get(&tgt->kref);
evt = ibmvfc_get_event(vhost);
ibmvfc_init_passthru(evt);
if (timer_pending(&tgt->timer))
tgt->timer.function = (void (*)(unsigned long))ibmvfc_adisc_timeout;
kref_put(&tgt->kref, ibmvfc_release_tgt);
static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
tgt_dbg(tgt, "Query Target succeeded\n");
ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
ibmvfc_init_tgt(tgt, ibmvfc_tgt_adisc);
ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
kref_put(&tgt->kref, ibmvfc_release_tgt);
ibmvfc_free_event(evt);
static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
kref_get(&tgt->kref);
evt = ibmvfc_get_event(vhost);
memset(query_tgt, 0, sizeof(*query_tgt));
query_tgt->common.version = 1;
query_tgt->common.length = sizeof(*query_tgt);
query_tgt->wwpn = tgt->ids.port_name;
if (ibmvfc_send_event(evt, vhost, default_timeout)) {
kref_put(&tgt->kref, ibmvfc_release_tgt);
tgt_dbg(tgt, "Sent Query Target\n");
unsigned long flags;
if (tgt->scsi_id == scsi_id) {
ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n",
memset(tgt, 0, sizeof(*tgt));
kref_init(&tgt->kref);
ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
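/* Target allocation: if a target with this SCSI ID already exists it is simply
 * re-initialized with an implicit logout; otherwise a new ibmvfc_target is
 * allocated, zeroed, reference-counted, and started the same way. */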
static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
rc = ibmvfc_alloc_target(vhost,
static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
switch (mad_status) {
ibmvfc_dbg(vhost, "Discover Targets succeeded\n");
level += ibmvfc_retry_host_init(vhost);
ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
dev_err(vhost->dev, "Invalid Discover Targets response: 0x%x\n", mad_status);
ibmvfc_free_event(evt);
memset(mad, 0, sizeof(*mad));
mad->common.length = sizeof(*mad);
if (!ibmvfc_send_event(evt, vhost, default_timeout))
ibmvfc_dbg(vhost, "Sent discover targets\n");
static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
unsigned int npiv_max_sectors;
switch (mad_status) {
ibmvfc_free_event(evt);
level += ibmvfc_retry_host_init(vhost);
ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
ibmvfc_free_event(evt);
ibmvfc_retry_host_init(vhost);
ibmvfc_free_event(evt);
dev_err(vhost->dev, "Invalid NPIV Login response: 0x%x\n", mad_status);
ibmvfc_free_event(evt);
dev_err(vhost->dev, "Virtual adapter does not support FC. %x\n",
if (rsp->max_cmds <= IBMVFC_NUM_INTERNAL_REQ) {
dev_err(vhost->dev, "Virtual adapter supported queue depth too small: %d\n",
dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
vhost->host->max_sectors = npiv_max_sectors;
ibmvfc_gather_partition_info(vhost);
ibmvfc_set_login_info(vhost);
if (!ibmvfc_send_event(evt, vhost, default_timeout))
static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
ibmvfc_free_event(evt);
switch (mad_status) {
if (list_empty(&vhost->sent) &&
ibmvfc_init_host(vhost);
ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status);
ibmvfc_hard_reset_host(vhost);
static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
evt = ibmvfc_get_event(vhost);
memset(mad, 0, sizeof(*mad));
if (!ibmvfc_send_event(evt, vhost, default_timeout))
static int ibmvfc_dev_init_to_do(struct ibmvfc_host *vhost)
static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
case IBMVFC_HOST_ACTION_TGT_DEL:
unsigned long flags;
rc = __ibmvfc_work_to_do(vhost);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
if ((events & IBMVFC_AE_LINKDOWN) &&
unsigned long flags;
tgt_dbg(tgt, "Adding rport\n");
if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
tgt_dbg(tgt, "Deleting rport\n");
spin_unlock_irqrestore(vhost->host->host_lock, flags);
kref_put(&tgt->kref, ibmvfc_release_tgt);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
tgt_dbg(tgt, "rport add succeeded\n");
tgt_dbg(tgt, "rport add failed\n");
spin_unlock_irqrestore(vhost->host->host_lock, flags);
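/* ibmvfc_do_work(): the work kthread's state machine. Under the host lock it
 * handles CRQ reset and re-enable, target query and deletion, target
 * allocation, and re-initialization when vhost->reinit is set, dropping the
 * lock around the blocking steps. */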
static void ibmvfc_do_work(struct ibmvfc_host *vhost)
unsigned long flags;
spin_unlock_irqrestore(vhost->host->host_lock, flags);
rc = ibmvfc_reset_crq(vhost);
vio_enable_interrupts(to_vio_dev(vhost->dev));
if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
(rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
rc = ibmvfc_reenable_crq_queue(vhost);
if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
dev_err(vhost->dev, "Error after enable (rc=%d)\n", rc);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
ibmvfc_init_tgt(tgt, ibmvfc_tgt_query_target);
case IBMVFC_HOST_ACTION_QUERY_TGTS:
if (!ibmvfc_dev_init_to_do(vhost))
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL);
case IBMVFC_HOST_ACTION_TGT_DEL:
if (tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
tgt_dbg(tgt, "Deleting rport\n");
spin_unlock_irqrestore(vhost->host->host_lock, flags);
kref_put(&tgt->kref, ibmvfc_release_tgt);
if (vhost->reinit) {
spin_unlock_irqrestore(vhost->host->host_lock, flags);
vhost->init_retries = 0;
spin_unlock_irqrestore(vhost->host->host_lock, flags);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
ibmvfc_alloc_targets(vhost);
if (!ibmvfc_dev_init_to_do(vhost))
spin_unlock_irqrestore(vhost->host->host_lock, flags);
static int ibmvfc_work(void *data)
ibmvfc_work_to_do(vhost));
ibmvfc_do_work(vhost);
ibmvfc_dbg(vhost, "ibmvfc kthread exiting...\n");
static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
struct device *dev = vhost->dev;
struct vio_dev *vdev = to_vio_dev(dev);
retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
if (rc == H_RESOURCE)
retrc = rc = ibmvfc_reset_crq(vhost);
dev_warn(dev, "Partner adapter not ready\n");
dev_warn(dev, "Error %d opening adapter\n", rc);
goto reg_crq_failed;
dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n", vdev->irq, rc);
goto req_irq_failed;
if ((rc = vio_enable_interrupts(vdev))) {
dev_err(dev, "Error %d enabling interrupts\n", rc);
goto req_irq_failed;
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
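/* Memory setup: ibmvfc_alloc_mem() allocates and DMA-maps the async CRQ and
 * allocates the sg pool, NPIV login buffer, Discover Targets buffer, and
 * target memory pool, unwinding via the goto labels on failure;
 * ibmvfc_free_mem() releases them. */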
static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
struct device *dev = vhost->dev;
if (!async_q->msgs) {
dev_err(dev, "Couldn't allocate async queue.\n");
async_q->size * sizeof(*async_q->msgs),
dev_err(dev, "Failed to map async queue\n");
goto free_async_crq;
dev_err(dev, "Failed to allocate sg pool\n");
goto unmap_async_crq;
dev_err(dev, "Couldn't allocate NPIV login buffer\n");
dev_err(dev, "Couldn't allocate Discover Targets buffer\n");
goto free_login_buffer;
goto free_disc_buffer;
dev_err(dev, "Couldn't allocate target memory pool\n");
unsigned long flags;
kref_get(&tgt->kref);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
ibmvfc_tgt_add_rport(tgt);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
tgt_dbg(tgt, "Setting rport roles\n");
kref_put(&tgt->kref, ibmvfc_release_tgt);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
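/* ibmvfc_probe(): allocate the Scsi_Host and ibmvfc_host, set up driver
 * memory, the work kthread, the CRQ, and the event pool, register with the
 * SCSI midlayer, then kick off initialization by sending a CRQ init request. */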
struct device *dev = &vdev->dev;
dev_err(dev, "Couldn't allocate host data\n");
shost->transportt = ibmvfc_transport_template;
shost->max_id = max_targets;
vhost = shost_priv(shost);
INIT_LIST_HEAD(&vhost->sent);
INIT_LIST_HEAD(&vhost->free);
INIT_LIST_HEAD(&vhost->targets);
if ((rc = ibmvfc_alloc_mem(vhost)))
goto free_scsi_host;
dev_err(dev, "Couldn't create kernel thread: %ld\n",
if ((rc = ibmvfc_init_crq(vhost))) {
dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
if ((rc = ibmvfc_init_event_pool(vhost))) {
dev_err(dev, "Couldn't initialize event pool. rc=%d\n", rc);
if ((rc = scsi_add_host(shost, dev)))
goto release_event_pool;
&ibmvfc_trace_attr))) {
dev_err(dev, "Failed to create trace file. rc=%d\n", rc);
spin_lock(&ibmvfc_driver_lock);
spin_unlock(&ibmvfc_driver_lock);
ibmvfc_send_crq_init(vhost);
ibmvfc_free_event_pool(vhost);
ibmvfc_release_crq_queue(vhost);
ibmvfc_free_mem(vhost);
static int ibmvfc_remove(struct vio_dev *vdev)
unsigned long flags;
spin_unlock_irqrestore(vhost->host->host_lock, flags);
ibmvfc_wait_while_resetting(vhost);
ibmvfc_release_crq_queue(vhost);
ibmvfc_purge_requests(vhost, DID_ERROR);
ibmvfc_free_event_pool(vhost);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
ibmvfc_free_mem(vhost);
spin_lock(&ibmvfc_driver_lock);
spin_unlock(&ibmvfc_driver_lock);
static int ibmvfc_resume(struct device *dev)
unsigned long flags;
struct vio_dev *vdev = to_vio_dev(dev);
vio_disable_interrupts(vdev);
tasklet_schedule(&vhost->tasklet);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
unsigned long pool_dma = max_requests * sizeof(union ibmvfc_iu);
return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
{"fcp", "IBM,vfc-client"},
.resume = ibmvfc_resume
.id_table = ibmvfc_device_table,
.probe = ibmvfc_probe,
.remove = ibmvfc_remove,
.get_desired_dma = ibmvfc_get_desired_dma,
.pm = &ibmvfc_pm_ops,
.show_host_fabric_name = 1,
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
.show_host_port_type = 1,
.show_host_port_id = 1,
.show_host_maxframe_size = 1,
.get_host_port_state = ibmvfc_get_host_port_state,
.show_host_port_state = 1,
.get_host_speed = ibmvfc_get_host_speed,
.show_host_speed = 1,
.issue_fc_host_lip = ibmvfc_issue_fc_host_lip,
.terminate_rport_io = ibmvfc_terminate_rport_io,
.show_rport_maxframe_size = 1,
.show_rport_supported_classes = 1,
.set_rport_dev_loss_tmo = ibmvfc_set_rport_dev_loss_tmo,
.show_rport_dev_loss_tmo = 1,
.get_starget_node_name = ibmvfc_get_starget_node_name,
.show_starget_node_name = 1,
.get_starget_port_name = ibmvfc_get_starget_port_name,
.show_starget_port_name = 1,
.get_starget_port_id = ibmvfc_get_starget_port_id,
.show_starget_port_id = 1,
.bsg_request = ibmvfc_bsg_request,
.bsg_timeout = ibmvfc_bsg_timeout,
static int __init ibmvfc_module_init(void)
if (!firmware_has_feature(FW_FEATURE_VIO))
if (!ibmvfc_transport_template)
static void __exit ibmvfc_module_exit(void)