#include <linux/module.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
static int max_id = 64;
static int max_channel = 3;
static int init_timeout = 300;
static int login_timeout = 60;
static int info_timeout = 30;
static int abort_timeout = 60;
static int reset_timeout = 60;
static int fast_fail = 1;
static int client_reserve = 1;
static unsigned int partition_number = -1;

#define IBMVSCSI_VERSION "1.5.9"
static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
                                struct ibmvscsi_host_data *hostdata);
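/*
 * Interrupt handler.  The VIO interrupt only signals that the CRQ has
 * entries to process: mask the interrupt and defer the queue walk to
 * the srp_task tasklet.
 */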
static irqreturn_t ibmvscsi_handle_event(int irq, void *dev_instance)
{
        struct ibmvscsi_host_data *hostdata =
                (struct ibmvscsi_host_data *)dev_instance;

        vio_disable_interrupts(to_vio_dev(hostdata->dev));
        tasklet_schedule(&hostdata->srp_task);
        return IRQ_HANDLED;
}
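/*
 * CRQ teardown (from ibmvscsi_release_crq_queue): release the IRQ,
 * then free the queue with H_FREE_CRQ.  The hypervisor may answer
 * H_BUSY or a long-busy code, so the hcall is retried until it
 * completes; the same retry loop appears on every CRQ hcall below.
 */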
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);

        free_irq(vdev->irq, (void *)hostdata);
        do {
                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
        } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
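/*
 * crq_queue_next_crq: return the next valid element of the CRQ ring,
 * advancing and wrapping the cursor, or NULL when the queue is empty.
 * The partner sets bit 0x80 of the valid byte when it writes an entry.
 */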
        crq = &queue->msgs[queue->cur];
        if (crq->valid & 0x80) {
                if (++queue->cur == queue->size)
                        queue->cur = 0;
        } else
                crq = NULL;
        spin_unlock_irqrestore(&queue->lock, flags);
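/*
 * ibmvscsi_send_crq: hand one 16-byte CRQ element to firmware.
 * H_SEND_CRQ takes the element as two 8-byte words.
 */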
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);

        return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
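/*
 * Tasklet body: drain every valid CRQ entry, re-enable the interrupt,
 * then poll once more.  The extra poll closes the race where an entry
 * arrives between the last dequeue and the interrupt re-enable.
 */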
static void ibmvscsi_task(void *data)
{
        struct ibmvscsi_host_data *hostdata =
                (struct ibmvscsi_host_data *)data;
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);
        struct viosrp_crq *crq;

        while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL)
                ibmvscsi_handle_crq(crq, hostdata);

        vio_enable_interrupts(vdev);
        crq = crq_queue_next_crq(&hostdata->queue);
        if (crq != NULL) {
                vio_disable_interrupts(vdev);
                ibmvscsi_handle_crq(crq, hostdata);
        }
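/*
 * gather_partition_info: read this partition's name and number from
 * the firmware device tree so they can be reported to the server.
 */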
static void gather_partition_info(void)
{
        const char *ppartition_name;
        const unsigned int *p_number_ptr;

        /* ... */
        partition_number = *p_number_ptr;
static int ibmvscsi_reset_crq_queue(struct crq_queue *queue,
                                    struct ibmvscsi_host_data *hostdata)
{
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);

        /* close the CRQ */
        do {
                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
        } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));

        /* ... */
        set_adapter_info(hostdata);

        /* and re-open it again */
        rc = plpar_hcall_norets(H_REG_CRQ,
                                vdev->unit_address,
                                queue->msg_token, PAGE_SIZE);
        if (rc == H_CLOSED) {
                /* adapter is good, but the partner end is not ready */
                dev_warn(hostdata->dev, "Partner adapter not ready\n");
        } else if (rc != 0) {
                dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc);
        }
static int ibmvscsi_init_crq_queue(struct crq_queue *queue,
                                   struct ibmvscsi_host_data *hostdata,
                                   int max_requests)
{
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);

        /* ... allocate and DMA-map the CRQ page ... */

        gather_partition_info();
        set_adapter_info(hostdata);

        retrc = rc = plpar_hcall_norets(H_REG_CRQ,
                                        vdev->unit_address,
                                        queue->msg_token, PAGE_SIZE);
        if (rc == H_RESOURCE)
                /* maybe kexecing and resource is busy; try a reset */
                rc = ibmvscsi_reset_crq_queue(queue, hostdata);

        if (rc == H_CLOSED) {
                /* adapter is good, but the partner end is not ready */
                dev_warn(hostdata->dev, "Partner adapter not ready\n");
        } else if (rc != 0) {
                dev_warn(hostdata->dev, "Error %d opening adapter\n", rc);
                goto reg_crq_failed;
        }

        tasklet_init(&hostdata->srp_task, (void *)ibmvscsi_task,
                     (unsigned long)hostdata);

        if (request_irq(vdev->irq,
                        ibmvscsi_handle_event,
                        0, "ibmvscsi", (void *)hostdata) != 0) {
                dev_err(hostdata->dev, "couldn't register irq 0x%x\n",
                        vdev->irq);
                goto req_irq_failed;
        }

        rc = vio_enable_interrupts(vdev);
        if (rc != 0)
                dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc);

        /* ... */

req_irq_failed:
        do {
                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
        } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
static int ibmvscsi_reenable_crq_queue(struct crq_queue *queue,
                                       struct ibmvscsi_host_data *hostdata)
{
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);

        /* re-enable the CRQ */
        do {
                rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
        } while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));

        if (rc)
                dev_err(hostdata->dev, "Error %d enabling adapter\n", rc);
        for (i = 0; i < pool->size; ++i) {
                struct srp_event_struct *evt = &pool->events[i];

                /* ... */
                evt->crq.valid = 0x80;
static void release_event_pool(struct event_pool *pool,
                               struct ibmvscsi_host_data *hostdata)
{
        int i, in_use = 0;

        for (i = 0; i < pool->size; ++i) {
                /* ... */
                if (pool->events[i].ext_list) {
                        dma_free_coherent(hostdata->dev,
                                          SG_ALL * sizeof(struct srp_direct_buf),
                                          pool->events[i].ext_list,
                                          pool->events[i].ext_list_token);
                }
        }
        if (in_use)
                dev_warn(hostdata->dev, "releasing event pool with %d "
                         "events still in use?\n", in_use);
static int valid_event_struct(struct event_pool *pool,
                              struct srp_event_struct *evt)
{
        int index = evt - pool->events;

        if (index < 0 || index >= pool->size)
                return 0;
        if (evt != pool->events + index)
                return 0;
        return 1;
}
static void free_event_struct(struct event_pool *pool,
                              struct srp_event_struct *evt)
{
        if (!valid_event_struct(pool, evt)) {
                dev_err(evt->hostdata->dev, "Freeing invalid event_struct %p "
                        "(not in pool %p)\n", evt, pool->events);
                return;
        }
        /* ... flag the entry free again; complain if it already was: */
                dev_err(evt->hostdata->dev, "Freeing event_struct %p "
                        "which is not in use!\n", evt);
        int poolsize = pool->size;

        for (i = 0; i < poolsize; i++) {
                offset = (offset + 1) % poolsize;
                /* ... */
        }

        /* in init_event_struct(), the caller's timeout is stamped into
           the outgoing CRQ element: */
        evt_struct->crq.timeout = timeout;
static void unmap_cmd_data(struct srp_cmd *cmd,
                           struct srp_event_struct *evt_struct,
                           struct device *dev)
{
        u8 out_fmt, in_fmt;

        out_fmt = cmd->buf_fmt >> 4;
        in_fmt = cmd->buf_fmt & ((1U << 4) - 1);

        /* ... */
        if (evt_struct->cmnd)
                scsi_dma_unmap(evt_struct->cmnd);
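/*
 * Scatterlist translation: one mapped segment fits a single direct
 * descriptor; several segments need an indirect descriptor table, and
 * lists too long for the inline table spill into the externally
 * allocated evt_struct->ext_list.
 */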
static int map_sg_list(struct scsi_cmnd *cmd, int nseg,
                       struct srp_direct_buf *md)
{
        u64 total_length = 0;
static int map_sg_data(struct scsi_cmnd *cmd,
                       struct srp_event_struct *evt_struct,
                       struct srp_cmd *srp_cmd, struct device *dev)
{
        int sg_mapped;
        u64 total_length = 0;
        struct srp_direct_buf *data =
                (struct srp_direct_buf *)srp_cmd->add_data;
        struct srp_indirect_buf *indirect =
                (struct srp_indirect_buf *)data;

        sg_mapped = scsi_dma_map(cmd);
        if (!sg_mapped)
                return 1;
        else if (sg_mapped < 0)
                return 0;

        set_srp_direction(cmd, srp_cmd, sg_mapped);

        /* special case: a single segment uses one direct descriptor */
        if (sg_mapped == 1) {
                map_sg_list(cmd, sg_mapped, data);
                return 1;
        }

        indirect->table_desc.va = 0;
        indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf);
        indirect->table_desc.key = 0;

        /* descriptors that fit inline in the IU */
        total_length = map_sg_list(cmd, sg_mapped,
                                   &indirect->desc_list[0]);
        indirect->len = total_length;

        /* ... otherwise allocate an external table; on failure: */
        if (!firmware_has_feature(FW_FEATURE_CMO))
                sdev_printk(KERN_ERR, cmd->device,
                            "Can't allocate memory "
                            "for indirect table\n");
        /* ... */

        total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list);

        indirect->len = total_length;
        indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]);
static int map_data_for_srp_cmd(struct scsi_cmnd *cmd,
                                struct srp_event_struct *evt_struct,
                                struct srp_cmd *srp_cmd, struct device *dev)
{
        switch (cmd->sc_data_direction) {
        /* ... */
        case DMA_BIDIRECTIONAL:
                sdev_printk(KERN_ERR, cmd->device,
                            "Can't map DMA_BIDIRECTIONAL to read/write\n");
                return 0;
        default:
                sdev_printk(KERN_ERR, cmd->device,
                            "Unknown data direction 0x%02x; can't map!\n",
                            cmd->sc_data_direction);
                return 0;
        }

        return map_sg_data(cmd, evt_struct, srp_cmd, dev);
static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
{
        while (!list_empty(&hostdata->sent)) {
                /* ... */
                spin_unlock_irqrestore(hostdata->host->host_lock, flags);
                if (evt->cmnd) {
                        evt->cmnd->result = (error_code << 16);
                        unmap_cmd_data(&evt->iu.srp.cmd, evt,
                                       evt->hostdata->dev);
                        /* ... */
                } else if (evt->done)
                        evt->done(evt);
                free_event_struct(&evt->hostdata->pool, evt);
                /* ... */
        }
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
static void ibmvscsi_timeout(struct srp_event_struct *evt_struct)
{
        struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;

        dev_err(hostdata->dev,
                "Command timed out (%x). Resetting connection\n",
                evt_struct->iu.srp.cmd.opcode);

        ibmvscsi_reset_host(hostdata);
}
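/*
 * ibmvscsi_send_srp_event: the single send path for SRP commands and
 * MADs.  It enforces the server's request limit (reserving the last
 * two slots for task management, i.e. abort/reset), arms an optional
 * per-event timer, and fires the CRQ.  H_CLOSED from the send means
 * the partner queue is not open, which is reported as "busy" so the
 * midlayer retries.
 */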
static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct,
                                   struct ibmvscsi_host_data *hostdata,
                                   unsigned long timeout)
{
        u64 *crq_as_u64 = (u64 *) &evt_struct->crq;
        int request_status = 0;

        /* ... for SRP-format requests, request_status is obtained by
           decrementing the server's request limit ... */

        /* the limit was already exhausted */
        if (request_status < -1)
                goto send_error;
        /* while a login is outstanding, only the login itself may pass */
        else if (request_status == -1 &&
                 evt_struct->iu.srp.login_req.opcode != SRP_LOGIN_REQ)
                goto send_busy;
        /* keep the last two slots free for abort and reset */
        else if (request_status < 2 &&
                 evt_struct->iu.srp.cmd.opcode != SRP_TSK_MGMT) {
                int server_limit = request_status;

                /* ... add the number of requests already in flight ... */
                if (server_limit > 2)
                        goto send_busy;
        }

        /* ... copy the IU into the transfer area, queue the event on
           the sent list, then arm the (optional) timeout timer ... */
        evt_struct->timer.data = (unsigned long) evt_struct;
        evt_struct->timer.function =
                (void (*)(unsigned long))ibmvscsi_timeout;

        if ((rc =
             ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) {
                /* ... */
                if (rc == H_CLOSED) {
                        dev_warn(hostdata->dev, "send warning. "
                                 "Receive queue closed, will retry.\n");
                        goto send_busy;
                }
                dev_err(hostdata->dev, "send error %d\n", rc);
                goto send_error;
        }

        return 0;

send_busy:
        unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
        free_event_struct(&hostdata->pool, evt_struct);
        if (srp_req && request_status != -1)
                atomic_inc(&hostdata->request_limit);
        return SCSI_MLQUEUE_HOST_BUSY;

send_error:
        unmap_cmd_data(&evt_struct->iu.srp.cmd, evt_struct, hostdata->dev);
        if (evt_struct->cmnd != NULL) {
                /* ... complete the scsi_cmnd with DID_ERROR ... */
        } else if (evt_struct->done)
                evt_struct->done(evt_struct);
        free_event_struct(&hostdata->pool, evt_struct);
        /* SRP command response handling: */
        if (printk_ratelimit())
                dev_warn(evt_struct->hostdata->dev,
                         "bad SRP RSP type %d\n", rsp->opcode);

        /* ... */
        unmap_cmd_data(&evt_struct->iu.srp.cmd,
                       evt_struct,
                       evt_struct->hostdata->dev);
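/*
 * lun_from_dev: pack channel/id/lun into a SCSI-2 style flat LUN, with
 * 0x2 (flat addressing) in the top bits.  Callers place the result in
 * the upper 16 bits of the 64-bit SRP LUN field:
 *
 *	srp_cmd->lun = ((u64) lun) << 48;
 */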
1012 return (0x2 << 14) | (dev->
id << 8) | (dev->
channel << 5) | dev->
lun;
static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
                                     void (*done) (struct scsi_cmnd *))
{
        struct srp_cmd *srp_cmd;
        struct srp_event_struct *evt_struct;
        struct ibmvscsi_host_data *hostdata = shost_priv(cmnd->device->host);
        u16 lun = lun_from_dev(cmnd->device);
        u8 out_fmt, in_fmt;

        evt_struct = get_event_struct(&hostdata->pool);
        /* ... */

        /* set up the SRP IU; the flat LUN lands in the top 16 bits */
        srp_cmd->lun = ((u64) lun) << 48;

        if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) {
                if (!firmware_has_feature(FW_FEATURE_CMO))
                        sdev_printk(KERN_ERR, cmnd->device,
                                    "couldn't convert cmd to srp_cmd\n");
                free_event_struct(&hostdata->pool, evt_struct);
                return SCSI_MLQUEUE_HOST_BUSY;
        }

        init_event_struct(evt_struct,
                          /* ... completion handler, format, timeout ... */);

        evt_struct->cmnd = cmnd;
        /* ... */

        /* fix up the DMA address of the indirect table, if one is used */
        out_fmt = srp_cmd->buf_fmt >> 4;
        in_fmt = srp_cmd->buf_fmt & ((1U << 4) - 1);
        /* ... */

        return ibmvscsi_send_srp_event(evt_struct, hostdata, 0);
}

static DEF_SCSI_QCMD(ibmvscsi_queuecommand)
1089 hostdata->caps_addr =
dma_map_single(hostdata->dev, &hostdata->caps,
1093 dev_err(hostdata->dev,
"Unable to map capabilities buffer!\n");
1098 &hostdata->madapter_info,
1099 sizeof(hostdata->madapter_info),
1102 dev_err(hostdata->dev,
"Unable to map adapter info buffer!\n");
        /* from login_rsp(): */
        dev_info(hostdata->dev, "SRP_LOGIN_REJ reason %u\n",
                 evt_struct->xfer_iu->srp.login_rej.reason);
        /* ... */
        dev_err(hostdata->dev, "Invalid login response typecode 0x%02x!\n",
                evt_struct->xfer_iu->srp.login_rsp.opcode);

static int send_srp_login(struct ibmvscsi_host_data *hostdata)
{
        unsigned long flags;

        /* ... */
        init_event_struct(evt_struct, login_rsp,
                          /* ... */);

        memset(login, 0, sizeof(*login));
        /* ... */

        rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2);
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
static void capabilities_rsp(struct srp_event_struct *evt_struct)
{
        /* ... */
                dev_err(hostdata->dev, "error 0x%X getting capabilities info\n",
                        evt_struct->xfer_iu->mad.capabilities.common.status);
        /* ... */
                dev_info(hostdata->dev, "Partition migration not supported\n");

        if (client_reserve) {
                if (hostdata->caps.reserve.common.server_support ==
                    SERVER_SUPPORTS_CAP)
                        dev_info(hostdata->dev, "Client reserve enabled\n");
                else
                        dev_info(hostdata->dev, "Client reserve not supported\n");
        }

        send_srp_login(hostdata);
static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
{
        unsigned long flags;

        evt_struct = get_event_struct(&hostdata->pool);
        /* ... */
        init_event_struct(evt_struct, capabilities_rsp,
                          /* ... */);

        memset(req, 0, sizeof(*req));
        /* ... */

        strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
                sizeof(hostdata->caps.name));
        hostdata->caps.name[sizeof(hostdata->caps.name) - 1] = '\0';

        location = location ? location : dev_name(hostdata->dev);
        /* ... */
        hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0';

        hostdata->caps.migration.common.length =
                sizeof(hostdata->caps.migration);
        /* ... */
        hostdata->caps.migration.ecl = 1;

        if (client_reserve) {
                /* ... */
                hostdata->caps.reserve.common.length =
                        sizeof(hostdata->caps.reserve);
        } else
                req->common.length = sizeof(hostdata->caps) -
                        sizeof(hostdata->caps.reserve);

        if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
                dev_err(hostdata->dev, "couldn't send CAPABILITIES_REQ!\n");
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
        /* MAD fast-fail response handler: */
        if (status == VIOSRP_MAD_NOT_SUPPORTED)
                dev_err(hostdata->dev, "fast_fail not supported in server\n");
        else if (status == VIOSRP_MAD_FAILED)
                dev_err(hostdata->dev, "fast_fail request failed\n");
        else if (status != VIOSRP_MAD_SUCCESS)
                dev_err(hostdata->dev, "error 0x%X enabling fast_fail\n", status);

        send_mad_capabilities(hostdata);
static void enable_fast_fail(struct ibmvscsi_host_data *hostdata)
{
        unsigned long flags;

        if (!fast_fail) {
                send_mad_capabilities(hostdata);
                return;
        }

        evt_struct = get_event_struct(&hostdata->pool);
        /* ... */

        memset(fast_fail_mad, 0, sizeof(*fast_fail_mad));
        /* ... */
        fast_fail_mad->common.length = sizeof(*fast_fail_mad);

        rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
        /* adapter-info MAD response: */
                dev_err(hostdata->dev, "error %d getting adapter info\n",
                        evt_struct->xfer_iu->mad.adapter_info.common.status);
        /* ... */
        dev_info(hostdata->dev, "host srp version: %s, "
                 "host partition %s (%d), OS %d, max io %u\n",
                 hostdata->madapter_info.srp_version,
                 hostdata->madapter_info.partition_name,
                 hostdata->madapter_info.partition_number,
                 hostdata->madapter_info.os_type,
                 hostdata->madapter_info.port_max_txu[0]);
        /* ... */
                hostdata->host->max_sectors =
                        hostdata->madapter_info.port_max_txu[0] >> 9;
        /* ... */
                dev_err(hostdata->dev,
                        "host (Ver. %s) doesn't support large transfers\n",
                        hostdata->madapter_info.srp_version);
                dev_err(hostdata->dev, "limiting scatterlists to %d\n",
                        MAX_INDIRECT_BUFS);
        /* ... */
                enable_fast_fail(hostdata);
        /* ... */
        send_srp_login(hostdata);
static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
{
        unsigned long flags;

        evt_struct = get_event_struct(&hostdata->pool);
        /* ... */
        init_event_struct(evt_struct,
                          /* ... */);

        memset(req, 0x00, sizeof(*req));
        /* ... */

        if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
                dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
}

static void init_adapter(struct ibmvscsi_host_data *hostdata)
{
        send_mad_adapter_info(hostdata);
}
static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
{
        unsigned long flags;
        unsigned long wait_switch = 0;

        /* first, find the command on the sent list to get its tag */
        wait_switch = jiffies + (init_timeout * HZ);
        /* ... */
        list_for_each_entry(tmp_evt, &hostdata->sent, list) {
                if (tmp_evt->cmnd == cmd) {
                        found_evt = tmp_evt;
                        break;
                }
        }

        if (!found_evt) {
                spin_unlock_irqrestore(hostdata->host->host_lock, flags);
                return SUCCESS;
        }

        evt = get_event_struct(&hostdata->pool);
        if (evt == NULL) {
                spin_unlock_irqrestore(hostdata->host->host_lock, flags);
                sdev_printk(KERN_ERR, cmd->device,
                            "failed to allocate abort event\n");
                return FAILED;
        }

        init_event_struct(evt,
                          /* ... */);

        /* set up an SRP TSK_MGMT abort */
        memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
        tsk_mgmt->lun = ((u64) lun) << 48;
        /* ... */

        init_completion(&evt->comp);
        rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, abort_timeout * 2);
        /* ... retry while the send path reports busy ... */
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
        /* ... */
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);

        if (rsp_rc != 0) {
                sdev_printk(KERN_ERR, cmd->device,
                            "failed to send abort() event. rc=%d\n", rsp_rc);
                return FAILED;
        }

        sdev_printk(KERN_INFO, cmd->device,
                    "aborting command. lun 0x%llx, tag 0x%llx\n",
                    (((u64) lun) << 48), (u64) found_evt);

        /* ... validate the SRP RSP ... */
        if (printk_ratelimit())
                sdev_printk(KERN_WARNING, cmd->device,
                            /* ... */);
        /* ... report a failed abort ... */
        if (printk_ratelimit())
                sdev_printk(KERN_WARNING, cmd->device,
                            "abort code %d for task tag 0x%llx\n",
                            /* ... */);

        /* the command may have completed while the abort was in
         * flight; look for it again under the host lock
         */
        list_for_each_entry(tmp_evt, &hostdata->sent, list) {
                if (tmp_evt->cmnd == cmd) {
                        found_evt = tmp_evt;
                        break;
                }
        }

        if (found_evt == NULL) {
                spin_unlock_irqrestore(hostdata->host->host_lock, flags);
                return SUCCESS;
        }

        /* ... */
        unmap_cmd_data(&found_evt->iu.srp.cmd, found_evt,
                       found_evt->hostdata->dev);
        free_event_struct(&found_evt->hostdata->pool, found_evt);
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
        return SUCCESS;
}
static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
{
        unsigned long flags;
        unsigned long wait_switch = 0;

        wait_switch = jiffies + (init_timeout * HZ);
        /* ... */
        evt = get_event_struct(&hostdata->pool);
        if (evt == NULL) {
                spin_unlock_irqrestore(hostdata->host->host_lock, flags);
                sdev_printk(KERN_ERR, cmd->device,
                            "failed to allocate reset event\n");
                return FAILED;
        }

        init_event_struct(evt,
                          /* ... */);

        /* set up an SRP TSK_MGMT LUN reset */
        memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt));
        tsk_mgmt->lun = ((u64) lun) << 48;
        /* ... */

        init_completion(&evt->comp);
        rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, reset_timeout * 2);
        /* ... retry while the send path reports busy ... */
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
        /* ... */
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);

        if (rsp_rc != 0) {
                sdev_printk(KERN_ERR, cmd->device,
                            "failed to send reset event. rc=%d\n", rsp_rc);
                return FAILED;
        }

        sdev_printk(KERN_INFO, cmd->device, "resetting device. lun 0x%llx\n",
                    (((u64) lun) << 48));

        /* ... validate the SRP RSP ... */
        if (printk_ratelimit())
                sdev_printk(KERN_WARNING, cmd->device,
                            /* ... */);
        /* ... report a failed reset ... */
        if (printk_ratelimit())
                sdev_printk(KERN_WARNING, cmd->device,
                            "reset code %d for task tag 0x%llx\n",
                            /* ... */);

        /* the reset succeeded: complete every command outstanding
         * against this device
         */
        list_for_each_entry_safe(tmp_evt, pos, &hostdata->sent, list) {
                if (tmp_evt->device == cmd->device) {
                        /* ... */
                        unmap_cmd_data(&tmp_evt->iu.srp.cmd, tmp_evt,
                                       tmp_evt->hostdata->dev);
                        free_event_struct(&tmp_evt->hostdata->pool,
                                          tmp_evt);
                        /* ... */
                        if (tmp_evt->cmnd_done)
                                tmp_evt->cmnd_done(tmp_evt->cmnd);
                        else if (tmp_evt->done)
                                tmp_evt->done(tmp_evt);
                }
        }
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
        return SUCCESS;
}
static int ibmvscsi_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
        unsigned long wait_switch = 0;

        dev_err(hostdata->dev, "Resetting connection due to error recovery\n");

        ibmvscsi_reset_host(hostdata);

        /* wait (up to init_timeout) for the adapter to come back */
        for (wait_switch = jiffies + (init_timeout * HZ);
             /* ... */;) {
                /* ... */
        }
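/*
 * ibmvscsi_handle_crq: dispatch one CRQ element by its valid byte:
 * initialization messages, transport events (partner failure or
 * partition migration), or real payload, whose correlation token
 * points back at the originating srp_event_struct.
 */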
static void ibmvscsi_handle_crq(struct viosrp_crq *crq,
                                struct ibmvscsi_host_data *hostdata)
{
        unsigned long flags;

        switch (crq->valid) {
        case 0xC0:              /* initialization */
                switch (crq->format) {
                case 0x01:      /* initialization message from the partner */
                        /* ... send back an init response: */
                        rc = ibmvscsi_send_crq(hostdata, 0xC002000000000000LL, 0);
                        if (rc == 0) {
                                /* now login */
                                init_adapter(hostdata);
                        } else {
                                dev_err(hostdata->dev,
                                        "Unable to send init rsp. rc=%ld\n", rc);
                        }
                        break;
                case 0x02:      /* initialization response */
                        dev_info(hostdata->dev,
                                 "partner initialization complete\n");
                        /* now login */
                        init_adapter(hostdata);
                        break;
                /* ... */
                }
                return;
        case 0xFF:              /* transport event */
                /* ... */
                if (crq->format == 0x06) {
                        /* the partition was migrated; re-enable the CRQ */
                        dev_info(hostdata->dev, "Re-enabling adapter!\n");
                        /* ... */
                } else {
                        dev_err(hostdata->dev, "Virtual adapter failed rc %d!\n",
                                crq->format);
                        ibmvscsi_reset_host(hostdata);
                }
                return;
        case 0x80:              /* real SRP payload */
                break;
        default:
                dev_err(hostdata->dev, "got an invalid message type 0x%02x\n",
                        crq->valid);
                return;
        }

        /* the payload's correlation token is the event we sent */
        if (!valid_event_struct(&hostdata->pool, evt_struct)) {
                dev_err(hostdata->dev,
                        "returned correlation_token 0x%p is invalid!\n",
                        /* ... */);
                return;
        }

        /* ... */
                dev_err(hostdata->dev,
                        "received duplicate correlation_token 0x%p!\n",
                        /* ... */);

        /* ... */
        if (evt_struct->done)
                evt_struct->done(evt_struct);
        else
                dev_err(hostdata->dev,
                        "returned done() is NULL; not running it!\n");

        /* ... */
        free_event_struct(&evt_struct->hostdata->pool, evt_struct);
        spin_unlock_irqrestore(evt_struct->hostdata->host->host_lock, flags);
static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
                                   unsigned char *buffer, int length)
{
        unsigned long flags;

        evt_struct = get_event_struct(&hostdata->pool);
        if (!evt_struct) {
                dev_err(hostdata->dev,
                        "couldn't allocate event for HOST_CONFIG!\n");
                return -1;
        }

        init_event_struct(evt_struct,
                          /* ... */);

        /* host-config data is capped at 64K */
        length = min(0xffff, length);

        memset(host_config, 0x00, sizeof(*host_config));
        /* ... on a DMA mapping failure: */
        if (!firmware_has_feature(FW_FEATURE_CMO))
                dev_err(hostdata->dev,
                        "dma_mapping error getting host config\n");
        free_event_struct(&hostdata->pool, evt_struct);
        /* ... */

        init_completion(&evt_struct->comp);
        rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
static int ibmvscsi_slave_configure(struct scsi_device *sdev)
{
        /* ... */
        spin_unlock_irqrestore(shost->host_lock, lock_flags);
        return 0;
}
static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth,
                                       int reason)
static ssize_t show_host_vhost_loc(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        /* ... */
        len = snprintf(buf, sizeof(hostdata->caps.loc), "%s\n",
                       hostdata->caps.loc);
        return len;
}

static struct device_attribute ibmvscsi_host_vhost_loc = {
        .attr = { .name = "vhost_loc", .mode = S_IRUGO },
        .show = show_host_vhost_loc,
};

static ssize_t show_host_vhost_name(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        /* ... */
        len = snprintf(buf, sizeof(hostdata->caps.name), "%s\n",
                       hostdata->caps.name);
        return len;
}

static struct device_attribute ibmvscsi_host_vhost_name = {
        .attr = { .name = "vhost_name", .mode = S_IRUGO },
        .show = show_host_vhost_name,
};

static struct device_attribute ibmvscsi_host_srp_version = {
        .attr = { .name = "srp_version", .mode = S_IRUGO },
        .show = show_host_srp_version,
};

static struct device_attribute ibmvscsi_host_partition_name = {
        .attr = { .name = "partition_name", .mode = S_IRUGO },
        .show = show_host_partition_name,
};

static ssize_t show_host_partition_number(struct device *dev,
                                          struct device_attribute *attr,
                                          char *buf)

static struct device_attribute ibmvscsi_host_partition_number = {
        .attr = { .name = "partition_number", .mode = S_IRUGO },
        .show = show_host_partition_number,
};

static struct device_attribute ibmvscsi_host_mad_version = {
        .attr = { .name = "mad_version", .mode = S_IRUGO },
        .show = show_host_mad_version,
};

static struct device_attribute ibmvscsi_host_os_type = {
        /* ... */
        .show = show_host_os_type,
};

        /* show_host_config: */
        if (ibmvscsi_do_host_config(hostdata, buf, PAGE_SIZE) == 0)
                return strlen(buf);
        else
                return 0;

static struct device_attribute ibmvscsi_host_config = {
        /* ... */
        .show = show_host_config,
};

static struct device_attribute *ibmvscsi_attrs[] = {
        &ibmvscsi_host_vhost_loc,
        &ibmvscsi_host_vhost_name,
        &ibmvscsi_host_srp_version,
        &ibmvscsi_host_partition_name,
        &ibmvscsi_host_partition_number,
        &ibmvscsi_host_mad_version,
        &ibmvscsi_host_os_type,
        &ibmvscsi_host_config,
        NULL
};
static struct scsi_host_template driver_template = {
        /* ... */
        .proc_name = "ibmvscsi",
        .queuecommand = ibmvscsi_queuecommand,
        .eh_abort_handler = ibmvscsi_eh_abort_handler,
        .eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
        .eh_host_reset_handler = ibmvscsi_eh_host_reset_handler,
        .slave_configure = ibmvscsi_slave_configure,
        .change_queue_depth = ibmvscsi_change_queue_depth,
        /* ... */
        .shost_attrs = ibmvscsi_attrs,
};
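/*
 * ibmvscsi_get_desired_dma: tell the VIO layer how much IO memory
 * entitlement this adapter wants when the platform runs in Cooperative
 * Memory Overcommit (CMO) mode; the estimate scales with max_events.
 */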
static unsigned long ibmvscsi_get_desired_dma(struct vio_dev *vdev)
{
        unsigned long desired_io = max_events * sizeof(union viosrp_iu);
static void ibmvscsi_do_work(struct ibmvscsi_host_data *hostdata)
{
        /* reset path: */
        rc = ibmvscsi_reset_crq_queue(&hostdata->queue, hostdata);
        if (!rc)
                rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);
        vio_enable_interrupts(to_vio_dev(hostdata->dev));

        /* re-enable path: */
        rc = ibmvscsi_reenable_crq_queue(&hostdata->queue, hostdata);
        if (!rc)
                rc = ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0);

        /* ... */
        if (rc)
                dev_err(hostdata->dev, "error after %s\n", action);
static int ibmvscsi_work(void *data)
{
        /* ... */
        wait_event_interruptible(hostdata->work_wait_queue,
                                 ibmvscsi_work_to_do(hostdata));
        /* ... */
        ibmvscsi_do_work(hostdata);
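/*
 * Probe: allocate the Scsi_Host, map the persistent buffers, start the
 * work thread, bring up the CRQ and the event pool, register with the
 * SCSI midlayer and the SRP transport, then send the initialization
 * CRQ message and wait for the login handshake before returning.
 */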
        unsigned long wait_switch = 0;

        /* ... */
        if (!host) {
                dev_err(&vdev->dev, "couldn't allocate host data\n");
                goto scsi_host_alloc_failed;
        }

        host->transportt = ibmvscsi_transport_template;
        hostdata = shost_priv(host);
        memset(hostdata, 0x00, sizeof(*hostdata));
        INIT_LIST_HEAD(&hostdata->sent);
        /* ... */
        hostdata->host = host;
        /* ... */

        if (map_persist_bufs(hostdata)) {
                dev_err(&vdev->dev, "couldn't map persistent buffers\n");
                goto persist_bufs_failed;
        }

        /* ... spawn the work thread; on failure: */
                dev_err(&vdev->dev, "couldn't initialize kthread. rc=%ld\n",
                        /* ... */);
                goto init_crq_failed;

        rc = ibmvscsi_init_crq_queue(&hostdata->queue, hostdata, max_events);
        if (rc != 0 && rc != H_RESOURCE) {
                dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
                /* ... */
        }
        if (initialize_event_pool(&hostdata->pool, max_events, hostdata) != 0) {
                dev_err(&vdev->dev, "couldn't initialize event pool\n");
                goto init_pool_failed;
        }

        /* ... */
        if (scsi_add_host(hostdata->host, hostdata->dev))
                goto add_host_failed;

        /* register with the SRP transport */
        memcpy(ids.port_id, hostdata->madapter_info.partition_name,
               sizeof(ids.port_id));
        /* ... on failure: */
        goto add_srp_port_failed;

        /* kick the CRQ handshake and wait for the login to complete */
        if (ibmvscsi_send_crq(hostdata, 0xC001000000000000LL, 0) == 0
            || rc == H_RESOURCE) {
                for (wait_switch = jiffies + (init_timeout * HZ);
                     /* ... */;) {
                        /* ... */
                }
        }
        /* ... */

add_srp_port_failed:
        /* ... */
add_host_failed:
        release_event_pool(&hostdata->pool, hostdata);
init_pool_failed:
        ibmvscsi_release_crq_queue(&hostdata->queue, hostdata, max_events);
init_crq_failed:
        unmap_persist_bufs(hostdata);
persist_bufs_failed:
        /* ... */
scsi_host_alloc_failed:
static int ibmvscsi_remove(struct vio_dev *vdev)
{
        /* ... */
        unmap_persist_bufs(hostdata);
        release_event_pool(&hostdata->pool, hostdata);
        ibmvscsi_release_crq_queue(&hostdata->queue, hostdata,
                                   max_events);
static int ibmvscsi_resume(struct device *dev)
{
        struct ibmvscsi_host_data *hostdata = dev_get_drvdata(dev);

        vio_disable_interrupts(to_vio_dev(hostdata->dev));
        tasklet_schedule(&hostdata->srp_task);
        return 0;
}
static struct vio_device_id ibmvscsi_device_table[] = {
        {"vscsi", "IBM,v-scsi"},
        { "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);

static struct dev_pm_ops ibmvscsi_pm_ops = {
        .resume = ibmvscsi_resume
};

static struct vio_driver ibmvscsi_driver = {
        .id_table = ibmvscsi_device_table,
        .probe = ibmvscsi_probe,
        .remove = ibmvscsi_remove,
        .get_desired_dma = ibmvscsi_get_desired_dma,
        /* ... */
        .pm = &ibmvscsi_pm_ops,
};
static int __init ibmvscsi_module_init(void)
{
        /* ensure two spare events are available for error recovery */
        driver_template.can_queue = max_requests;
        max_events = max_requests + 2;

        if (!firmware_has_feature(FW_FEATURE_VIO))
                return -ENODEV;

        ibmvscsi_transport_template =
                srp_attach_transport(&ibmvscsi_transport_functions);
        if (!ibmvscsi_transport_template)
                return -ENOMEM;