#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
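/*
 * Module-level tunables. These variables back the module parameters
 * described by the MODULE_PARM_DESC() entries below, so they can be
 * overridden when the driver is loaded.
 */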
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_dual_ioa_raid = 1;
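/*
 * Per-chip MMIO register offset tables. Each adapter family exposes the
 * interrupt mask/sense/clear registers, the microprocessor (uproc)
 * interrupt registers, and the IOARRIN doorbell at different offsets in
 * BAR space; probe-time chip matching selects the appropriate table.
 */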
	.cache_line_size = 0x20,
	.set_interrupt_mask_reg = 0x0022C,
	.clr_interrupt_mask_reg = 0x00230,
	.clr_interrupt_mask_reg32 = 0x00230,
	.sense_interrupt_mask_reg = 0x0022C,
	.sense_interrupt_mask_reg32 = 0x0022C,
	.clr_interrupt_reg = 0x00228,
	.clr_interrupt_reg32 = 0x00228,
	.sense_interrupt_reg = 0x00224,
	.sense_interrupt_reg32 = 0x00224,
	.ioarrin_reg = 0x00404,
	.sense_uproc_interrupt_reg = 0x00214,
	.sense_uproc_interrupt_reg32 = 0x00214,
	.set_uproc_interrupt_reg = 0x00214,
	.set_uproc_interrupt_reg32 = 0x00214,
	.clr_uproc_interrupt_reg = 0x00218,
	.clr_uproc_interrupt_reg32 = 0x00218
	.cache_line_size = 0x20,
	.set_interrupt_mask_reg = 0x00288,
	.clr_interrupt_mask_reg = 0x0028C,
	.clr_interrupt_mask_reg32 = 0x0028C,
	.sense_interrupt_mask_reg = 0x00288,
	.sense_interrupt_mask_reg32 = 0x00288,
	.clr_interrupt_reg = 0x00284,
	.clr_interrupt_reg32 = 0x00284,
	.sense_interrupt_reg = 0x00280,
	.sense_interrupt_reg32 = 0x00280,
	.ioarrin_reg = 0x00504,
	.sense_uproc_interrupt_reg = 0x00290,
	.sense_uproc_interrupt_reg32 = 0x00290,
	.set_uproc_interrupt_reg = 0x00290,
	.set_uproc_interrupt_reg32 = 0x00290,
	.clr_uproc_interrupt_reg = 0x00294,
	.clr_uproc_interrupt_reg32 = 0x00294
	.cache_line_size = 0x20,
	.set_interrupt_mask_reg = 0x00010,
	.clr_interrupt_mask_reg = 0x00018,
	.clr_interrupt_mask_reg32 = 0x0001C,
	.sense_interrupt_mask_reg = 0x00010,
	.sense_interrupt_mask_reg32 = 0x00014,
	.clr_interrupt_reg = 0x00008,
	.clr_interrupt_reg32 = 0x0000C,
	.sense_interrupt_reg = 0x00000,
	.sense_interrupt_reg32 = 0x00004,
	.ioarrin_reg = 0x00070,
	.sense_uproc_interrupt_reg = 0x00020,
	.sense_uproc_interrupt_reg32 = 0x00024,
	.set_uproc_interrupt_reg = 0x00020,
	.set_uproc_interrupt_reg32 = 0x00024,
	.clr_uproc_interrupt_reg = 0x00028,
	.clr_uproc_interrupt_reg32 = 0x0002C,
	.init_feedback_reg = 0x0005C,
	.dump_addr_reg = 0x00064,
	.dump_data_reg = 0x00068,
	.endian_swap_reg = 0x00084
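/*
 * Note the 64-bit SIS layout above also carries registers the older
 * chips lack: the IPL init-feedback register polled during reset, the
 * dump address/data registers used by the dump code below, and an
 * endianness-swap control.
 */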
static int ipr_max_bus_speeds[] = {
MODULE_PARM_DESC(max_speed,
		 "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
MODULE_PARM_DESC(log_level,
		 "Set to 0 - 4 for increasing verbosity of device driver");
MODULE_PARM_DESC(testmode,
		 "DANGEROUS!!! Allows unsupported configurations");
MODULE_PARM_DESC(transop_timeout,
		 "Time in seconds to wait for adapter to come operational (default: 300)");
MODULE_PARM_DESC(dual_ioa_raid,
		 "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
MODULE_PARM_DESC(max_devs,
		 "Specify the maximum number of physical devices. "
	"8155: An unknown error was received"},
	"Soft underlength error"},
	"Command to be cancelled not found"},
	"Qualified success"},
	"FFFE: Soft device bus error recovered by the IOA"},
	"4101: Soft device bus fabric error"},
	"FFFC: Logical block guard error recovered by the device"},
	"FFFC: Logical block reference tag error recovered by the device"},
	"4171: Recovered scatter list tag / sequence number error"},
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	"FFFD: Logical block guard error recovered by the IOA"},
	"FFF9: Device sector reassign successful"},
	"FFF7: Media error recovered by device rewrite procedures"},
	"7001: IOA sector reassignment successful"},
	"FFF9: Soft media error. Sector reassignment recommended"},
	"FFF7: Media error recovered by IOA rewrite procedures"},
	"FF3D: Soft PCI bus error recovered by the IOA"},
	"FFF6: Device hardware error recovered by the IOA"},
	"FFF6: Device hardware error recovered by the device"},
	"FF3D: Soft IOA error recovered by the IOA"},
	"FFFA: Undefined device response recovered by the IOA"},
	"FFF6: Device bus error, message or command phase"},
	"FFFE: Task Management Function failed"},
	"FFF6: Failure prediction threshold exceeded"},
	"8009: Impending cache battery pack failure"},
	"34FF: Disk device format in progress"},
	"9070: IOA requested reset"},
	"Synchronization required"},
	"Not ready, IOA shutdown"},
	"Not ready, IOA has been shutdown"},
	"3020: Storage subsystem configuration error"},
	"FFF5: Medium error, data unreadable, recommend reassign"},
	"7000: Medium error, data unreadable, do not reassign"},
	"FFF3: Disk media format bad"},
	"3002: Addressed device failed to respond to selection"},
	"3100: Device bus error"},
	"3109: IOA timed out a device command"},
	"3120: SCSI bus is not operational"},
	"4100: Hard device bus fabric error"},
	"310C: Logical block guard error detected by the device"},
	"310C: Logical block reference tag error detected by the device"},
	"4170: Scatter list tag / sequence number error"},
	"8150: Logical block CRC error on IOA to Host transfer"},
	"4170: Logical block sequence number error on IOA to Host transfer"},
	"310D: Logical block reference tag error detected by the IOA"},
	"310D: Logical block guard error detected by the IOA"},
	"9000: IOA reserved area data check"},
	"9001: IOA reserved area invalid data pattern"},
	"9002: IOA reserved area LRC error"},
	"Hardware Error, IOA metadata access error"},
	"102E: Out of alternate sectors for disk storage"},
	"FFF4: Data transfer underlength error"},
	"FFF4: Data transfer overlength error"},
	"3400: Logical unit failure"},
	"FFF4: Device microcode is corrupt"},
	"8150: PCI bus error"},
	"Unsupported device bus message received"},
	"FFF4: Disk device problem"},
	"8150: Permanent IOA failure"},
	"3010: Disk device returned wrong response to IOA"},
	"8151: IOA microcode error"},
	"Device bus status error"},
	"8157: IOA error requiring IOA reset to recover"},
	"ATA device status error"},
	"Message reject received from the device"},
	"8008: A permanent cache battery pack failure occurred"},
	"9090: Disk unit has been modified after the last known status"},
	"9081: IOA detected device error"},
	"9082: IOA detected device error"},
	"3110: Device bus error, message or command phase"},
	"3110: SAS Command / Task Management Function failed"},
	"9091: Incorrect hardware configuration change has been detected"},
	"9073: Invalid multi-adapter configuration"},
	"4010: Incorrect connection between cascaded expanders"},
	"4020: Connections exceed IOA design limits"},
	"4030: Incorrect multipath connection"},
	"4110: Unsupported enclosure function"},
	"FFF4: Command to logical unit failed"},
	"Illegal request, invalid request type or request packet"},
	"Illegal request, invalid resource handle"},
	"Illegal request, commands not allowed to this device"},
	"Illegal request, command not allowed to a secondary adapter"},
	"Illegal request, command not allowed to a non-optimized resource"},
	"Illegal request, invalid field in parameter list"},
	"Illegal request, parameter not supported"},
	"Illegal request, parameter value invalid"},
	"Illegal request, command sequence error"},
	"Illegal request, dual adapter support not enabled"},
	"9031: Array protection temporarily suspended, protection resuming"},
	"9040: Array protection temporarily suspended, protection resuming"},
	"3140: Device bus not ready to ready transition"},
	"FFFB: SCSI bus was reset"},
	"FFFE: SCSI bus transition to single ended"},
	"FFFE: SCSI bus transition to LVD"},
	"FFFB: SCSI bus was reset by another initiator"},
	"3029: A device replacement has occurred"},
	"9051: IOA cache data exists for a missing or failed device"},
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	"9025: Disk unit is not supported at its physical location"},
	"3020: IOA detected a SCSI bus configuration error"},
	"3150: SCSI bus configuration error"},
	"9074: Asymmetric advanced function disk configuration"},
	"4040: Incomplete multipath connection between IOA and enclosure"},
	"4041: Incomplete multipath connection between enclosure and device"},
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	"9076: Configuration error, missing remote IOA"},
	"4050: Enclosure does not support a required multipath function"},
	"4070: Logically bad block written on device"},
	"9041: Array protection temporarily suspended"},
	"9042: Corrupt array parity detected on specified device"},
	"9030: Array no longer protected due to missing or failed disk unit"},
	"9071: Link operational transition"},
	"9072: Link not operational transition"},
	"9032: Array exposed but still protected"},
	"70DD: Device forced failed by disrupt device command"},
	"4061: Multipath redundancy level got better"},
	"4060: Multipath redundancy level got worse"},
	"Failure due to other device"},
	"9008: IOA does not support functions expected by devices"},
	"9010: Cache data associated with attached devices cannot be found"},
	"9011: Cache data belongs to devices other than those attached"},
	"9020: Array missing 2 or more devices with only 1 device present"},
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	"9022: Exposed array is missing a required device"},
	"9023: Array member(s) not at required physical locations"},
	"9024: Array not functional due to present hardware configuration"},
	"9026: Array not functional due to present hardware configuration"},
	"9027: Array is missing a device and parity is out of sync"},
	"9028: Maximum number of arrays already exist"},
	"9050: Required cache data cannot be located for a disk unit"},
	"9052: Cache data exists for a device that has been modified"},
	"9054: IOA resources not available due to previous problems"},
	"9092: Disk unit requires initialization before use"},
	"9029: Incorrect hardware configuration change has been detected"},
	"9060: One or more disk pairs are missing from an array"},
	"9061: One or more disks are missing from an array"},
	"9062: One or more disks are missing from an array"},
	"9063: Maximum number of functional arrays has been exceeded"},
	"Aborted command, invalid descriptor"},
	"Command terminated by host"}
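/*
 * SES backplane table: each entry pairs a product ID with a compare
 * mask and the maximum bus speed (MB/s) the backplane supports. In the
 * mask, 'X' positions are compared byte-for-byte against the inquiry
 * product ID while other characters (such as '*') match anything.
 */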
	{ "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);
#ifdef CONFIG_SCSI_IPR_TRACE
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
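/*
 * The stubbed-out trace hook: "do { } while (0)" gives the empty macro
 * statement semantics, so a trailing semicolon still parses correctly
 * even in an unbraced if/else.
 */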
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
		ioasa64->u.gata.status = 0;
		ioasa->u.gata.status = 0;
	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
	ipr_reinit_ipr_cmnd(ipr_cmd);

	struct ipr_cmnd *ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);

static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
	volatile u32 int_reg;
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
	if (pcix_cmd_reg == 0)
	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to save PCI-X command register\n");

static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
	if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to setup PCI-X command register\n");
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)

static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)

static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
			ipr_cmd->done = ipr_scsi_eh_done;
		else if (ipr_cmd->qc)
			ipr_cmd->done = ipr_sata_eh_done;
		ipr_cmd->done(ipr_cmd);
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
	if (ioa_cfg->sis64) {
		/* On SIS-64 the low-order bits of the IOARRIN doorbell
		 * value encode the IOARCB size: 0x1 selects the default
		 * 256 byte IOARCB, 0x4 the larger 512 byte format.
		 */
		send_dma_addr |= 0x1;
		send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
	ipr_send_command(ipr_cmd);

static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
		ipr_cmd->ioarcb.ioadl_len =
		ipr_cmd->ioarcb.read_ioadl_len =
		ipr_cmd->ioarcb.ioadl_len =

static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
	spin_unlock_irq(ioa_cfg->host->host_lock);
	spin_lock_irq(ioa_cfg->host->host_lock);
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ioarcb = &ipr_cmd->ioarcb;
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
			ipr_cmd->done = ipr_process_ccn;
			ipr_cmd->done = ipr_process_error;
		ipr_send_command(ipr_cmd);
	if (ioa_cfg->sis64) {
		       sizeof(res->dev_lun.scsi_lun));
		proto = cfgtew->u.cfgte->proto;
		res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
	ipr_update_ata_class(res, proto);

			    sizeof(cfgtew->u.cfgte64->dev_id)) &&
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)

	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
		       sizeof(res->dev_lun.scsi_lun));
		if (res->sdev && new_path)
			ipr_format_res_path(res->res_path, buffer,
		res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
		proto = cfgtew->u.cfgte->proto;
	ipr_update_ata_class(res, proto);

	if (!ioa_cfg->sis64)
	clear_bit(res->target, ioa_cfg->target_ids);
	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
		ipr_init_res_entry(res, &cfgtew);
	ipr_update_res_entry(res, &cfgtew);
		if (ioa_cfg->allow_ml_add_del)
			ipr_clear_res_target(res);
		list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	if (ioa_cfg->allow_ml_add_del)
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
		ipr_handle_config_change(ioa_cfg, hostrcb);

static int strip_and_pad_whitespace(int i, char *buf)
	while (i && buf[i] == ' ')

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
	ipr_err("Vendor/Product ID: %s\n", buffer);
	ipr_err("    Serial Number: %s\n", buffer);
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);

static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
	ipr_log_vpd(&vpd->vpd);
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
		error = &hostrcb->hcam.u.error64.u.type_12_error;
		error = &hostrcb->hcam.u.error.u.type_12_error;
	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);
	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_err("Adapter Card Information:\n");
	ipr_err("Additional IOA Data: %08X %08X %08X\n",

static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
		&hostrcb->hcam.u.error.u.type_02_error;
	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_err("Adapter Card Information:\n");
	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_err("Adapter Card Information:\n");
	ipr_err("Additional IOA Data: %08X %08X %08X\n",
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
	error = &hostrcb->hcam.u.error.u.type_13_error;
	ipr_err("Device Errors Detected/Logged: %d/%d\n",
	dev_entry = error->dev;
		ipr_log_ext_vpd(&dev_entry->vpd);
		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);
		ipr_err("Cache Directory Card Information:\n");
		ipr_err("Adapter Card Information:\n");

static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
	error = &hostrcb->hcam.u.error64.u.type_23_error;
	ipr_err("Device Errors Detected/Logged: %d/%d\n",
	dev_entry = error->dev;
		ipr_err("Device %d : %s", i + 1,
			ipr_format_res_path(dev_entry->res_path, buffer,
		ipr_log_ext_vpd(&dev_entry->vpd);
		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);
		ipr_err("Cache Directory Card Information:\n");
		ipr_err("Adapter Card Information:\n");

static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
	error = &hostrcb->hcam.u.error.u.type_03_error;
	ipr_err("Device Errors Detected/Logged: %d/%d\n",
	dev_entry = error->dev;
		ipr_log_vpd(&dev_entry->vpd);
		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);
		ipr_err("Cache Directory Card Information:\n");
		ipr_err("Adapter Card Information:\n");
	ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
	error = &hostrcb->hcam.u.error.u.type_14_error;
	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		ioa_cfg->host->host_no,
	for (i = 0; i < num_entries; i++, array_entry++) {
			ipr_err("Exposed Array Member %d:\n", i);
			ipr_err("Array Member %d:\n", i);
		ipr_log_ext_vpd(&array_entry->vpd);
			 "Expected Location");

static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
	error = &hostrcb->hcam.u.error.u.type_04_error;
	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		ioa_cfg->host->host_no,
	for (i = 0; i < 18; i++) {
			ipr_err("Exposed Array Member %d:\n", i);
			ipr_err("Array Member %d:\n", i);
		ipr_log_vpd(&array_entry->vpd);
			 "Expected Location");

	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
		error = &hostrcb->hcam.u.error64.u.type_17_error;
		error = &hostrcb->hcam.u.error.u.type_17_error;
	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,

static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
	error = &hostrcb->hcam.u.error.u.type_07_error;
	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
	ipr_log_hex_data(ioa_cfg, error->data,

static const struct {
} path_active_desc[] = {

static const struct {
} path_state_desc[] = {
static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				     path_active_desc[i].desc, path_state_desc[j].desc,
				     path_active_desc[i].desc, path_state_desc[j].desc,
			} else if (fabric->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
					     path_active_desc[i].desc, path_state_desc[j].desc,
	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,

static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
		if (path_active_desc[i].active != active)
		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
			if (path_state_desc[j].state != state)
				     path_active_desc[i].desc, path_state_desc[j].desc,
				     ipr_format_res_path(fabric->res_path, buffer,
	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
		ipr_format_res_path(fabric->res_path, buffer, sizeof(buffer)));

static const struct {
} path_type_desc[] = {

static const struct {
} path_status_desc[] = {
	"phy reset problem",
static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
					     path_status_desc[j].desc, path_type_desc[i].desc,
				ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
					     path_status_desc[j].desc, path_type_desc[i].desc,
					     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
					     "WWN=%08X%08X\n", path_status_desc[j].desc,
					     path_type_desc[i].desc, cfg->phy,
					     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
			} else if (cfg->phy == 0xff) {
				ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
					     "WWN=%08X%08X\n", path_status_desc[j].desc,
					     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
				ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
					     "WWN=%08X%08X\n", path_status_desc[j].desc,
					     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],

static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
		if (path_type_desc[i].type != type)
		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
			if (path_status_desc[j].status != status)
				ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
					     path_status_desc[j].desc, path_type_desc[i].desc,
					     ipr_format_res_path(cfg->res_path, buffer,
					     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
	ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
		     ipr_format_res_path(cfg->res_path, buffer, sizeof(buffer)),
		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
	error = &hostrcb->hcam.u.error.u.type_20_error;
	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log_fabric_path(hostrcb, fabric);
			ipr_log_path_elem(hostrcb, cfg);
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);

	error = &hostrcb->hcam.u.error64.u.type_24_error;
	ipr_err("RAID %s Array Configuration: %s\n",
		ipr_format_res_path(error->last_res_path, buffer, sizeof(buffer)));
	for (i = 0; i < num_entries; i++, array_entry++) {
			ipr_err("Exposed Array Member %d:\n", i);
			ipr_err("Array Member %d:\n", i);
			ipr_err("Array Member %d:\n", i);
		ipr_log_ext_vpd(&array_entry->vpd);
		ipr_err("Current Location: %s\n",
			ipr_format_res_path(array_entry->res_path, buffer,
		ipr_err("Expected Location: %s\n",
			buffer, sizeof(buffer)));

static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
	error = &hostrcb->hcam.u.error64.u.type_30_error;
	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
		ipr_log64_fabric_path(hostrcb, fabric);
			ipr_log64_path_elem(hostrcb, cfg);
			((unsigned long)fabric + be16_to_cpu(fabric->length));
	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);

	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
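/*
 * IOASC decode: ipr_get_error() maps a raw IOASC to an index into
 * ipr_error_table (the string table near the top of this file), and
 * ipr_handle_log_data() logs the matching description before
 * dispatching on hcam.overlay_id to the format-specific loggers above.
 */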
static u32 ipr_get_error(u32 ioasc)
	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)

static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
				hostrcb->hcam.u.error.fd_res_addr.bus);
	error_index = ipr_get_error(ioasc);
	if (!ipr_error_table[error_index].log_hcam)
	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
	switch (hostrcb->hcam.overlay_id) {
		ipr_log_cache_error(ioa_cfg, hostrcb);
		ipr_log_config_error(ioa_cfg, hostrcb);
		ipr_log_array_error(ioa_cfg, hostrcb);
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
		ipr_log_fabric_error(ioa_cfg, hostrcb);
		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
		ipr_log_generic_error(ioa_cfg, hostrcb);
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
		ipr_handle_log_data(ioa_cfg, hostrcb);
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);

static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
		"Adapter being reset due to command timeout.\n");
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
	unsigned long lock_flags = 0;
		"Adapter timed out transitioning to operational.\n");
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	spin_unlock_irq(ioa_cfg->host->host_lock);
	spin_lock_irq(ioa_cfg->host->host_lock);

	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
		if (matches == IPR_PROD_ID_LEN)
		if (bus != res->bus)
		if (!(ste = ipr_find_ses_entry(res)))
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
	volatile u32 pcii_reg;
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
	for (i = 0; i < length_in_words; i++) {
		writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);

static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
	volatile u32 temp_pcii_reg;
		return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
						       dest, length_in_words);
	       ioa_cfg->regs.set_uproc_interrupt_reg32);
	if (ipr_wait_iodbg_ack(ioa_cfg,
			"IOA dump long data transfer timeout\n");
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);
	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
	for (i = 0; i < length_in_words; i++) {
		if (ipr_wait_iodbg_ack(ioa_cfg,
				"IOA dump short data transfer timeout\n");
		if (i < (length_in_words - 1)) {
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);
	       ioa_cfg->regs.set_uproc_interrupt_reg32);
	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);
	readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
#ifdef CONFIG_SCSI_IPR_DUMP
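/*
 * Adapter dump support. When the adapter fails, the driver fetches the
 * smart dump table (SDT) from the IOA, copies each valid region into
 * kernel pages using the debug-register handshake above, and exposes
 * the result to userspace through the dump binary attribute below.
 */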
static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
			unsigned long pci_address, u32 length)
	int bytes_copied = 0;
	int cur_len, rc, rem_len, rem_page_len, max_dump_size;
	unsigned long lock_flags = 0;
	while (bytes_copied < length &&
	       (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
			return bytes_copied;
		rem_len = length - bytes_copied;
		cur_len = min(rem_len, rem_page_len);
		rc = ipr_get_ldump_data_section(ioa_cfg,
						pci_address + bytes_copied,
						(cur_len / sizeof(u32)));
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		bytes_copied += cur_len;
	return bytes_copied;

	hdr->offset = sizeof(*hdr);

static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
	driver_dump->hdr.num_entries++;

static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
	driver_dump->hdr.num_entries++;

static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
	driver_dump->hdr.num_entries++;

static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
	driver_dump->hdr.num_entries++;
	unsigned long lock_flags = 0;
	u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	if (ioa_cfg->sis64) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
			"Invalid dump table format: %lx\n", start_addr);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
	driver_dump->hdr.num_entries = 1;
	ipr_dump_version_data(ioa_cfg, driver_dump);
	ipr_dump_location_data(ioa_cfg, driver_dump);
	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
	ipr_dump_trace_data(ioa_cfg, driver_dump);
	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
	ioa_dump->hdr.len = 0;
	sdt = &ioa_dump->sdt;
	if (ioa_cfg->sis64) {
	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
					bytes_to_copy / sizeof(__be32));
			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	if (num_entries > max_num_entries)
		num_entries = max_num_entries;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_dump->hdr.len > max_dump_size) {
			if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
				bytes_to_copy = end_off - start_off;
			if (bytes_to_copy > max_dump_size) {
				sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
			bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
			ioa_dump->hdr.len += bytes_copied;
			if (bytes_copied != bytes_to_copy) {
	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
	driver_dump->hdr.len += ioa_dump->hdr.len;
#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
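/*
 * Without CONFIG_SCSI_IPR_DUMP, dump collection collapses to a no-op
 * statement macro and the dump machinery above is compiled out.
 */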
static void ipr_release_dump(struct kref *kref)
	unsigned long lock_flags = 0;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	for (i = 0; i < dump->ioa_dump.next_page_index; i++)

	dump = ioa_cfg->dump;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	kref_get(&dump->kref);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	kref_put(&dump->kref, ipr_release_dump);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
#ifdef CONFIG_SCSI_IPR_TRACE

			  char *buf, loff_t off, size_t count)
	unsigned long lock_flags = 0;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	.read = ipr_read_trace,

	unsigned long lock_flags = 0;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	.name = "fw_version",
	.show = ipr_show_fw_version,

	unsigned long lock_flags = 0;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		       const char *buf, size_t count)
	unsigned long lock_flags = 0;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	.name = "log_level",
	.show = ipr_show_log_level,
	.store = ipr_store_log_level
		       const char *buf, size_t count)
	unsigned long lock_flags = 0;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	.name = "run_diagnostics",
	.store = ipr_store_diagnostics

	unsigned long lock_flags = 0;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		       const char *buf, size_t count)
	unsigned long lock_flags;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	.name = "online_state",
	.show = ipr_show_adapter_state,
	.store = ipr_store_adapter_state

		       const char *buf, size_t count)
	unsigned long lock_flags;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	.name = "reset_host",
	.store = ipr_store_reset_adapter
static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
	int sg_size, order, bsize_elem, num_elem, i, j;
	if (buf_len % bsize_elem)
		num_elem = (buf_len / bsize_elem) + 1;
		num_elem = buf_len / bsize_elem;
			 (sizeof(struct scatterlist) * (num_elem - 1)),
	if (sglist == NULL) {
	sglist->num_sg = num_elem;
	for (i = 0; i < num_elem; i++) {
			for (j = i - 1; j >= 0; j--)
		sg_set_page(&scatterlist[i], page, 0, 0);

static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
	for (i = 0; i < sglist->num_sg; i++)

static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
				 u8 *buffer, u32 len)
	int bsize_elem, i, result = 0;
	struct scatterlist *scatterlist;
	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);
		memcpy(kaddr, buffer, bsize_elem);
		scatterlist[i].length = bsize_elem;
	if (len % bsize_elem) {
		struct page *page = sg_page(&scatterlist[i]);
		memcpy(kaddr, buffer, len % bsize_elem);
		scatterlist[i].length = len % bsize_elem;
static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
	struct scatterlist *scatterlist = sglist->scatterlist;

static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
	struct scatterlist *scatterlist = sglist->scatterlist;

static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			"Microcode download already in progress\n");
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			"Failed to map microcode download buffer!\n");
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

			 const char *buf, size_t count)
	int len, result, dnld_size;
	fname[len-1] = '\0';
	sglist = ipr_alloc_ucode_buffer(dnld_size);
		dev_err(&ioa_cfg->pdev->dev,
			"Microcode buffer allocation failed\n");
	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
			"Microcode buffer copy to DMA buffer failed\n");
	ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
	ipr_free_ucode_buffer(sglist);
	.name = "update_fw",
	.store = ipr_store_update_fw
	unsigned long lock_flags = 0;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	.show = ipr_show_fw_type

	&ipr_fw_version_attr,
	&ipr_log_level_attr,
	&ipr_diagnostics_attr,
	&ipr_ioa_state_attr,
	&ipr_ioa_reset_attr,
	&ipr_update_fw_attr,
	&ipr_ioa_fw_type_attr,

#ifdef CONFIG_SCSI_IPR_DUMP
			 char *buf, loff_t off, size_t count)
	unsigned long lock_flags = 0;
	dump = ioa_cfg->dump;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	kref_get(&dump->kref);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		kref_put(&dump->kref, ipr_release_dump);
	if (count && off < sdt_end) {
		if (off + count > sdt_end)
			len = sdt_end - off;
		src += off & ~PAGE_MASK;
	kref_put(&dump->kref, ipr_release_dump);

static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
	unsigned long lock_flags = 0;
		ipr_err("Dump memory allocation failed\n");
		ipr_err("Dump memory allocation failed\n");
	kref_init(&dump->kref);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
	unsigned long lock_flags = 0;
	dump = ioa_cfg->dump;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	kref_put(&dump->kref, ipr_release_dump);

			  char *buf, loff_t off, size_t count)
		rc = ipr_alloc_dump(ioa_cfg);
	else if (buf[0] == '0')
		rc = ipr_free_dump(ioa_cfg);
	.read = ipr_read_dump,
	.write = ipr_write_dump

static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
	unsigned long lock_flags = 0;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	unsigned long lock_flags = 0;
		scsi_set_tag_type(sdev, tag_type);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

	unsigned long lock_flags = 0;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	.name = "adapter_handle",
	.show = ipr_show_adapter_handle

	unsigned long lock_flags = 0;
		ipr_format_res_path(res->res_path, buffer,
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	.name = "resource_path",
	.show = ipr_show_resource_path

	unsigned long lock_flags = 0;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	.name = "device_id",
	.show = ipr_show_device_id

	unsigned long lock_flags = 0;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	.name = "resource_type",
	.show = ipr_show_resource_type

	&ipr_adapter_handle_attr,
	&ipr_resource_path_attr,
	&ipr_device_id_attr,
	&ipr_resource_type_attr,
static int ipr_biosparam(struct scsi_device *sdev,

static int ipr_target_alloc(struct scsi_target *starget)
	unsigned long lock_flags;
	res = ipr_find_starget(starget);
	if (res && ipr_is_gata(res)) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
			sata_port->ioa_cfg = ioa_cfg;
			sata_port->res = res;
			ap->private_data = sata_port;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

static void ipr_target_destroy(struct scsi_target *starget)
		if (!ipr_find_starget(starget)) {
		else if (starget->channel == 0)

static void ipr_slave_destroy(struct scsi_device *sdev)
	unsigned long lock_flags = 0;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

static int ipr_slave_configure(struct scsi_device *sdev)
	unsigned long lock_flags = 0;
		if (ipr_is_af_dasd_device(res))
		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
		if (ipr_is_vset_device(res)) {
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			ipr_format_res_path(res->res_path, buffer,
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

static int ipr_ata_slave_alloc(struct scsi_device *sdev)
		ipr_slave_destroy(sdev);

static int ipr_slave_alloc(struct scsi_device *sdev)
	unsigned long lock_flags;
	res = ipr_find_sdev(sdev);
			if (!ipr_is_naca_model(res))
			if (ipr_is_gata(res)) {
				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
				return ipr_ata_slave_alloc(sdev);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
		"Adapter being reset as a result of error recovery.\n");
	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_host_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	if (ipr_cmd->ioa_cfg->sis64) {
	if (ipr_is_gata(res)) {

static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
			  unsigned long deadline)
	unsigned long lock_flags = 0;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	res = sata_port->res;
		rc = ipr_device_reset(ioa_cfg, res);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
	res = scsi_cmd->device->hostdata;
			ipr_cmd->done = ipr_scsi_eh_done;
			ipr_cmd->done = ipr_sata_eh_done;
	if (ipr_is_gata(res) && res->sata_port) {
		spin_unlock_irq(scsi_cmd->device->host->host_lock);
		spin_lock_irq(scsi_cmd->device->host->host_lock);
		rc = ipr_device_reset(ioa_cfg, res);
	spin_lock_irq(cmd->device->host->host_lock);
	rc = __ipr_eh_dev_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);
static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
	if (!ioa_cfg->sis64)
	if (ipr_cmd->sibling->sibling)

static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
	unsigned long lock_flags = 0;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
	res = scsi_cmd->device->hostdata;
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	if (!ipr_is_gscsi(res))
		if (ipr_cmd->scsi_cmd == scsi_cmd) {
			ipr_cmd->done = ipr_scsi_eh_done;
	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	if (!ipr_is_naca_model(res))

static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
	unsigned long flags;
	rc = ipr_cancel_op(scsi_cmd);
	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	int_reg &= ~int_mask_reg;
	if (ioa_cfg->sis64) {
		int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
		if (ipr_debug && printk_ratelimit())
				"Spurious interrupt detected. 0x%08X\n", int_reg);
		writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
			"Permanent IOA failure. 0x%08X\n", int_reg);
		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
	unsigned long lock_flags = 0;
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
			ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
			list_move_tail(&ipr_cmd->queue, &doneq);
		if (ipr_cmd != NULL) {
			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
		} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
	} else if (rc == IRQ_NONE && irq_none == 0) {
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
		   int_reg & IPR_PCII_HRRQ_UPDATED) {
		ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
	struct scatterlist *sg;
	u32 ioadl_flags = 0;
	length = scsi_bufflen(scsi_cmd);
		if (printk_ratelimit())
			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");

static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
	struct scatterlist *sg;
	u32 ioadl_flags = 0;
	length = scsi_bufflen(scsi_cmd);
		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");

static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
			"Request Sense failed with IOASC: 0x%08X\n", ioasc);
	if (!ipr_is_naca_model(res))

static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;

static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
		ipr_erp_done(ipr_cmd);
	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,

static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
	if (!scsi_get_tag_type(scsi_cmd->device)) {
		ipr_erp_request_sense(ipr_cmd);
	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,

static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
		error_index = ipr_get_error(fd_ioasc);
		error_index = ipr_get_error(ioasc);
		if (ioasa->hdr.ilid != 0)
		if (!ipr_is_gscsi(res))
	if (ipr_error_table[error_index].log_ioasa == 0)
	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
	for (i = 0; i < data_len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
	if (ipr_is_vset_device(res) &&
	    ioasa->u.vset.failing_lba_hi != 0) {
		sense_buf[0] = 0x72;
		sense_buf[9] = 0x0A;
		sense_buf[10] = 0x80;
		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[15] = failing_lba & 0x000000ff;
		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
		sense_buf[19] = failing_lba & 0x000000ff;
		sense_buf[0] = 0x70;
			sense_buf[15] = 0xC0;
			sense_buf[15] = 0x80;
		if (ipr_is_vset_device(res))
			sense_buf[0] |= 0x80;
			sense_buf[3] = (failing_lba & 0xff000000) >> 24;
			sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
			sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
			sense_buf[6] = failing_lba & 0x000000ff;
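/*
 * The shifts above store the 32-bit failing LBA MSB-first. For
 * descriptor-format sense data (response code 0x72) it lands in the
 * information descriptor; for fixed format (0x70) it goes in the
 * information field at bytes 3-6, with bit 7 of byte 0 flagging the
 * field as valid.
 */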
static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)

static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
		ipr_scsi_eh_done(ipr_cmd);
		ipr_gen_sense(ipr_cmd);
	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
	switch (masked_ioasc) {
		if (ipr_is_naca_model(res))
		if (!ipr_is_naca_model(res))
		if (!ipr_is_naca_model(res))
		if (!ipr_get_autosense(ipr_cmd)) {
			if (!ipr_is_naca_model(res)) {
				ipr_erp_cancel_all(ipr_cmd);
		if (!ipr_is_naca_model(res))
		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))

static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
		ipr_erp_start(ioa_cfg, ipr_cmd);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

static int ipr_queuecommand(struct Scsi_Host *shost,
	res = scsi_cmd->device->hostdata;
		spin_unlock_irqrestore(shost->host_lock, lock_flags);
		spin_unlock_irqrestore(shost->host_lock, lock_flags);
	if (ipr_is_gata(res) && res->sata_port) {
		spin_unlock_irqrestore(shost->host_lock, lock_flags);
	ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
	spin_unlock_irqrestore(shost->host_lock, lock_flags);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
	ioarcb = &ipr_cmd->ioarcb;
		ipr_cmd->done = ipr_scsi_eh_done;
	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
		if (ipr_is_gscsi(res))
		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
	if (scsi_cmd->cmnd[0] >= 0xC0 &&
		rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
		spin_unlock_irqrestore(shost->host_lock, lock_flags);
		spin_unlock_irqrestore(shost->host_lock, lock_flags);
	ipr_send_command(ipr_cmd);
	spin_unlock_irqrestore(shost->host_lock, lock_flags);
	spin_unlock_irqrestore(shost->host_lock, lock_flags);
	if (res && ipr_is_gata(res)) {

	static char buffer[512];
	unsigned long lock_flags = 0;
	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
	spin_unlock_irqrestore(host->host_lock, lock_flags);

	.info = ipr_ioa_info,
	.queuecommand = ipr_queuecommand,
	.eh_abort_handler = ipr_eh_abort,
	.eh_device_reset_handler = ipr_eh_dev_reset,
	.eh_host_reset_handler = ipr_eh_host_reset,
	.slave_alloc = ipr_slave_alloc,
	.slave_configure = ipr_slave_configure,
	.slave_destroy = ipr_slave_destroy,
	.target_alloc = ipr_target_alloc,
	.target_destroy = ipr_target_destroy,
	.change_queue_depth = ipr_change_queue_depth,
	.change_queue_type = ipr_change_queue_type,
	.bios_param = ipr_biosparam,
	.shost_attrs = ipr_ioa_attrs,
	.sdev_attrs = ipr_dev_attrs,
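/*
 * This scsi_host_template is the driver's contract with the SCSI
 * midlayer: queuecommand, the eh_* error handlers, and the slave_ and
 * target_ lifecycle hooks all dispatch into the functions defined
 * earlier in this file.
 */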
static void ipr_ata_phy_reset(struct ata_port *ap)
	unsigned long flags;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	rc = ipr_device_reset(ioa_cfg, res);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);

	unsigned long flags;
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
		if (ipr_cmd->qc == qc) {
			ipr_device_reset(ioa_cfg, sata_port->res);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);

static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);

static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
	u32 ioadl_flags = 0;
	struct scatterlist *sg;
	if (likely(last_ioadl64))

static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
	u32 ioadl_flags = 0;
	struct scatterlist *sg;

	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	ioarcb = &ipr_cmd->ioarcb;
	if (ioa_cfg->sis64) {
	memset(regs, 0, sizeof(*regs));
	ipr_cmd->done = ipr_sata_done;
		ipr_build_ata_ioadl64(ipr_cmd, qc);
		ipr_build_ata_ioadl(ipr_cmd, qc);
	ipr_copy_sata_tf(regs, &qc->tf);
	switch (qc->tf.protocol) {
	ipr_send_command(ipr_cmd);

	.phy_reset = ipr_ata_phy_reset,
	.hardreset = ipr_sata_reset,
	.post_internal_cmd = ipr_ata_post_internal,
	.qc_issue = ipr_qc_issue,
	.qc_fill_rtf = ipr_qc_fill_rtf,
	.port_ops = &ipr_sata_ops
#ifdef CONFIG_PPC_PSERIES
static const u16 ipr_blocked_processors[] = {

	if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
			if (pvr_version_is(ipr_blocked_processors[i]))
#define ipr_invalid_adapter(ioa_cfg) 0
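/*
 * Early 5702 adapters (revision < 4) are not supported on the POWER
 * processor versions listed in ipr_blocked_processors, so pSeries
 * builds check the PVR at probe time; every other build defines the
 * check away to 0 (adapter always valid).
 */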
static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
	spin_unlock_irq(ioa_cfg->host->host_lock);
	spin_lock_irq(ioa_cfg->host->host_lock);

static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
	spin_unlock(ioa_cfg->host->host_lock);
	spin_lock(ioa_cfg->host->host_lock);

static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
	ipr_cmd->job_step = ipr_ioa_reset_done;
		if (!ipr_is_scsi_disk(res))
		ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
		ipr_init_ioadl(ipr_cmd,
		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
		if (!ioa_cfg->sis64)
			ipr_cmd->job_step = ipr_set_supported_devs;

static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
	if (!mode_pages || (mode_pages->hdr.length == 0))
	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
		 (mode_pages->data + mode_pages->hdr.block_desc_len);
			mode_hdr->page_length);

static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
	mode_page = ipr_get_mode_page(mode_pages, 0x28,
	bus = mode_page->bus;
				"Term power is absent on scsi bus %d\n",

static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)

static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
	mode_page = ipr_get_mode_page(mode_pages, 0x28,
	for (i = 0, bus = mode_page->bus;
	     i < mode_page->num_entries;
		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
				"Invalid resource address reported: 0x%08X\n",

static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,

static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
	ipr_scsi_bus_speed_limit(ioa_cfg);
	ipr_check_term_power(ioa_cfg, mode_pages);
	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
	length = mode_pages->hdr.length + 1;
	mode_pages->hdr.length = 0;
	ipr_cmd->job_step = ipr_set_supported_devs;
6789 static void ipr_build_mode_sense(
struct ipr_cmnd *ipr_cmd,
6813 static int ipr_reset_cmd_failed(
struct ipr_cmnd *ipr_cmd)
6819 "0x%02X failed with IOASC: 0x%08X\n",
6820 ipr_cmd->
ioarcb.cmd_pkt.cdb[0], ioasc);
6837 static int ipr_reset_mode_sense_failed(
struct ipr_cmnd *ipr_cmd)
6843 ipr_cmd->
job_step = ipr_set_supported_devs;
6849 return ipr_reset_cmd_failed(ipr_cmd);
6862 static int ipr_ioafp_mode_sense_page28(
struct ipr_cmnd *ipr_cmd)
6872 ipr_cmd->
job_step = ipr_ioafp_mode_select_page28;
6890 static int ipr_ioafp_mode_select_page24(
struct ipr_cmnd *ipr_cmd)
6898 mode_page = ipr_get_mode_page(mode_pages, 0x24,
6904 length = mode_pages->
hdr.length + 1;
6905 mode_pages->
hdr.length = 0;
6911 ipr_cmd->
job_step = ipr_ioafp_mode_sense_page28;
6928 static int ipr_reset_mode_sense_page24_failed(
struct ipr_cmnd *ipr_cmd)
6933 ipr_cmd->
job_step = ipr_ioafp_mode_sense_page28;
6937 return ipr_reset_cmd_failed(ipr_cmd);
6950 static int ipr_ioafp_mode_sense_page24(
struct ipr_cmnd *ipr_cmd)
6960 ipr_cmd->
job_step = ipr_ioafp_mode_select_page24;
static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
{
	/* ... */
		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
	/* ... */
		list_move_tail(&res->queue, &old_res);
	/* ... */
		entries = ioa_cfg->u.cfg_table->hdr.num_entries;

	for (i = 0; i < entries; i++) {
		/* ... */
			if (ipr_is_same_device(res, &cfgtew)) {
		/* ... */
			dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
		/* ... */
			ipr_init_res_entry(res, &cfgtew);
		/* ... */
		} else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
			res->sdev->allow_restart = 1;
		/* ... */
		ipr_update_res_entry(res, &cfgtew);
	}
	/* ... */
		ipr_clear_res_target(res);
	/* ... */
	if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
		ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
	else
		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
	/* ... */
}
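/*
 * Illustrative sketch (not from the driver): the reconciliation pattern
 * used above. Known resources move to a scratch list; entries found in
 * the new config table move back, and leftovers are released. "res",
 * "tmp", "used_q", "matches", "add_new_entry" and "release_entry" are
 * hypothetical; the real code matches with ipr_is_same_device().
 */
	LIST_HEAD(old_res);

	list_for_each_entry_safe(res, tmp, &used_q, queue)
		list_move_tail(&res->queue, &old_res);

	for (i = 0; i < entries; i++) {
		found = 0;
		list_for_each_entry_safe(res, tmp, &old_res, queue) {
			if (matches(res, &table[i])) {
				list_move_tail(&res->queue, &used_q);
				found = 1;
				break;
			}
		}
		if (!found)
			add_new_entry(&used_q, &table[i]);
	}

	list_for_each_entry_safe(res, tmp, &old_res, queue)
		release_entry(res);	/* device no longer present */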
static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
{
	/* ... */
	dev_info(&ioa_cfg->pdev->dev,
		 "Adapter firmware version: %02X%02X%02X%02X\n",
		 /* ... */);
	/* ... */
	ipr_cmd->job_step = ipr_init_res_table;
	/* ... */
}
static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
			      /* ... */)
{
	/* ... */
}

/* ipr_inquiry_page_supported, the supported-pages scan (fragment): */
		if (page0->page[i] == page)
			/* ... */
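/*
 * Illustrative sketch (not from the driver): INQUIRY VPD page 0 lists
 * the page codes a device supports; the scan above walks that list.
 * Simplified layout assumed here:
 */
struct ex_inq_page0 {
	u8 peri;	/* peripheral qualifier/device type */
	u8 page_code;	/* 0x00 */
	u8 reserved;
	u8 len;		/* number of supported page codes that follow */
	u8 page[0xff];
};

static int ex_page_supported(struct ex_inq_page0 *page0, u8 page)
{
	int i;

	for (i = 0; i < page0->len; i++)
		if (page0->page[i] == page)
			return 1;
	return 0;
}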
static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
{
	/* ... */
	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
	memset(cap, 0, sizeof(*cap));

	if (ipr_inquiry_page_supported(page0, 0xD0)) {
		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0, /* ... */);
	/* ... */
}
static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
{
	/* ... */
	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 1, 3, /* ... */);
	/* ... */
}
static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
{
	/* ... */
	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
	/* ... */
	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 1, 0, /* ... */);
	/* ... */
}
static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
{
	/* ... */
	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;

	ipr_ioafp_inquiry(ipr_cmd, 0, 0, /* ... */);
	/* ... */
}
static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
{
	/* ... */
	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
	/* ... */
	if (ioa_cfg->sis64) {
		/* ... */
	}
	/* ... */
	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
	/* ... */
}
static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
{
	/* ... */
	unsigned long lock_flags = 0;
	/* ... */
		ipr_cmd->done(ipr_cmd);
	/* ... */
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
				  unsigned long timeout)
{
	/* ... */
	ipr_cmd->done = ipr_reset_ioa_job;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	/* ... */
	ipr_cmd->timer.function = (void (*)(unsigned long)) ipr_reset_timer_done;
	/* ... */
}
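/*
 * Illustrative sketch: the pre-4.15 kernel timer idiom used above, where
 * the callback takes an unsigned long cookie. "ex_timeout" is a
 * hypothetical name; the driver's cast exists only because its handlers
 * take struct ipr_cmnd * directly. Modern kernels would use
 * timer_setup() and from_timer() instead.
 */
static void ex_timeout(unsigned long data)
{
	struct ipr_cmnd *ipr_cmd = (struct ipr_cmnd *)data;

	ipr_cmd->done(ipr_cmd);
}

	/* arming the timer: */
	ipr_cmd->timer.data = (unsigned long)ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = ex_timeout;
	add_timer(&ipr_cmd->timer);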
static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
{
	unsigned long stage, stage_time;
	volatile u32 int_reg;
	/* ... */
	feedback = readl(ioa_cfg->regs.init_feedback_reg);
	/* ... */
	ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
	/* ... */
	if (stage_time == 0)
		/* ... */
	/* ... */
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		/* ... */
		ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
	/* ... */
		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
		if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
			/* ... */
			maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
			writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
			int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		}
	/* ... */
	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	/* ... */
	ipr_cmd->timer.function = (void (*)(unsigned long)) ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	/* ... */
}
static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
{
	/* ... */
	volatile u32 int_reg;
	/* ... */
	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
	ipr_init_ioa_mem(ioa_cfg);
	/* ... */
	if (ioa_cfg->sis64) {
		/* ... */
		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
	}

	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);

	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
		/* ... */ ioa_cfg->regs.clr_interrupt_mask_reg32);
		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
		/* ... */
	}

	if (ioa_cfg->sis64) {
		/* ... */
		maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
		writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
	} else
		writel(IPR_PCII_OPER_INTERRUPTS,
		       ioa_cfg->regs.clr_interrupt_mask_reg32);

	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	/* ... */
	if (ioa_cfg->sis64) {
		ipr_cmd->job_step = ipr_reset_next_stage;
		/* ... */
	}

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	/* ... */
	ipr_cmd->timer.function = (void (*)(unsigned long)) ipr_oper_timeout;
	ipr_cmd->done = ipr_reset_ioa_job;
	/* ... */
}
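/*
 * Illustrative sketch: with the 64-bit (sis64) register layout the driver
 * builds both halves of the mask and issues a single writeq(); the 32-bit
 * layout touches only the low word via the *_reg32 alias. EX_HIGH_BITS is
 * a hypothetical placeholder for the high-word flags.
 */
	u64 maskval;

	if (ioa_cfg->sis64) {
		maskval = EX_HIGH_BITS;
		maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
		writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
	} else
		writel(IPR_PCII_OPER_INTERRUPTS,
		       ioa_cfg->regs.clr_interrupt_mask_reg32);

	/* read back to flush the posted write */
	readl(ioa_cfg->regs.sense_interrupt_mask_reg);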
static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
{
	/* ... */
	ipr_cmd->job_step = ipr_reset_alert;
	/* ... */
}
static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
{
	/* ... */
	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
	/* ... */
}
static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
{
	/* ... */
	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
		ipr_unit_check_no_data(ioa_cfg);
		/* ... */
	}

	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
					/* ... */);

	if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
	    /* ... */
		ipr_unit_check_no_data(ioa_cfg);
	/* ... */
		/* ... */ IPR_FMT2_MBX_ADDR_MASK;
	/* ... */
	rc = ipr_get_ldump_data_section(ioa_cfg, /* ... */);
	/* ... */
		ipr_handle_log_data(ioa_cfg, hostrcb);
	/* ... */
		ipr_unit_check_no_data(ioa_cfg);
	/* ... */
}
static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
{
	/* ... */
	ipr_get_unit_check_buffer(ioa_cfg);
	ipr_cmd->job_step = ipr_reset_alert;
	ipr_reset_start_timer(ipr_cmd, 0);
	/* ... */
}
static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
{
	/* ... */
	ioa_cfg->pdev->state_saved = true;
	/* ... */
	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
		/* ... */
	}

	ipr_fail_all_ops(ioa_cfg);

	if (ioa_cfg->sis64) {
		/* ... */
		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
	}
	/* ... */
		if (ioa_cfg->sis64) {
			ipr_cmd->job_step = ipr_reset_get_unit_check_job;
		/* ... */
			ipr_get_unit_check_buffer(ioa_cfg);
			ipr_cmd->job_step = ipr_reset_alert;
			ipr_reset_start_timer(ipr_cmd, 0);
		/* ... */
		}
	/* ... */
		ipr_cmd->job_step = ipr_ioa_bringdown_done;
	/* ... */
		ipr_cmd->job_step = ipr_reset_enable_ioa;
	/* ... */
		ipr_cmd->job_step = ipr_reset_wait_for_dump;
	/* ... */
}
static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
{
	/* ... */
	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
	/* ... */
}
static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
{
	/* ... */
		/* ... */ ioa_cfg->regs.set_uproc_interrupt_reg32);
	/* ... */
	ipr_cmd->job_step = ipr_reset_bist_done;
	/* ... */
}
static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
{
	/* ... */
	ipr_cmd->job_step = ipr_reset_bist_done;
	/* ... */
}
static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
{
	/* ... */
	ipr_cmd->job_step = ipr_reset_slot_reset_done;
	/* ... */
}
static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
{
	/* ... */
		ipr_reset_start_timer(ipr_cmd, /* ... */);
	/* ... */
		"Timed out waiting to lock config access. Resetting anyway.\n");
	/* ... */
}
static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
{
	ipr_cmd->ioa_cfg->cfg_locked = 0;
	ipr_cmd->job_step = ipr_reset_block_config_access_wait;
	/* ... */
}
static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
{
	/* ... */
	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	/* ... */
}
static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
{
	/* ... */
	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
		/* ... */
	} else {
		ipr_cmd->job_step = ipr_reset_block_config_access;
		/* ... */
	}
	/* ... */
}
static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
{
	/* ... */
		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
		/* ... */
		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
	/* ... */
		ipr_cmd->job_step = ipr_reset_block_config_access;
	/* ... */
}
static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
{
	/* ... */
	ipr_cmd->job_step = ipr_reset_alert;
	/* ... */
}
static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
{
	/* ... */
	ipr_cmd->job_step = ipr_reset_alert;
	/* ... */
		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
	/* ... */
		ipr_build_ucode_ioadl(ipr_cmd, sglist);
	ipr_cmd->job_step = ipr_reset_ucode_download_done;

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, /* ... */);
	/* ... */
}
static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
{
	/* ... */
	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
	/* ... */
	else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
		/* ... */

	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
	/* ... */
	ipr_cmd->job_step = ipr_reset_ucode_download;
	/* ... */
	ipr_cmd->job_step = ipr_reset_alert;
	/* ... */
}
static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
{
	/* ... */
		ipr_reinit_ipr_cmnd(ipr_cmd);
	/* ... */
}
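/*
 * Illustrative sketch (hypothetical names, types as in the earlier
 * job-step sketch): the engine behind ipr_reset_ioa_job(). It invokes
 * the current job_step until a step starts asynchronous work; that
 * step's completion later re-enters the engine via ipr_cmd->done.
 */
static void ex_reset_job(struct ex_cmd *cmd)
{
	int rc;

	do {
		rc = cmd->job_step(cmd);	/* may advance cmd->job_step */
	} while (rc == EX_RC_JOB_CONTINUE);
}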
static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				    int (*job_step)(struct ipr_cmnd *),
				    /* ... */)
{
	/* ... */
	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	/* ... */
	ipr_reset_ioa_job(ipr_cmd);
}
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
				   /* ... */)
{
	/* ... */
			"IOA taken offline - error recovery failed\n");
	/* ... */
		ipr_fail_all_ops(ioa_cfg);
	/* ... */
		spin_unlock_irq(ioa_cfg->host->host_lock);
		/* ... */
		spin_lock_irq(ioa_cfg->host->host_lock);
	/* ... */
	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa, /* ... */);
	/* ... */
}
static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
{
	/* ... */
	ipr_cmd->ioa_cfg->allow_interrupts = 0;
	/* ... */
	ipr_cmd->done = ipr_reset_ioa_job;
	/* ... */
}
static void ipr_pci_frozen(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	/* ... */
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
/* ipr_pci_slot_reset (fragment; named per the err_handler table below): */
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	/* ... */
	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
				/* ... */);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
static void ipr_pci_perm_failure(struct pci_dev *pdev)
{
	unsigned long flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	/* ... */
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
/* ipr_pci_error_detected (fragments; it dispatches on the channel state): */
		ipr_pci_frozen(pdev);
	/* ... */
		ipr_pci_perm_failure(pdev);
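/*
 * Illustrative sketch: how these callbacks plug into the PCI error
 * recovery framework. The dispatch below mirrors the fragments above;
 * the body is a reconstruction, not the driver's exact code.
 */
static pci_ers_result_t ex_error_detected(struct pci_dev *pdev,
					  pci_channel_state_t state)
{
	switch (state) {
	case pci_channel_io_frozen:
		ipr_pci_frozen(pdev);		/* freeze, queue a reset */
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		ipr_pci_perm_failure(pdev);	/* adapter is gone for good */
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		return PCI_ERS_RESULT_NEED_RESET;
	}
}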
/* ipr_probe_ioa_part2 (fragments; called from the probe path below): */
	unsigned long host_lock_flags = 0;
	/* ... */
	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
	/* ... */
	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
				/* ... */);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	/* ... */
		"Adapter not supported in this hardware configuration.\n");
	/* ... */
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
{
	/* ... */
	ipr_free_cmd_blks(ioa_cfg);
	/* ... */
	ipr_free_dump(ioa_cfg);
	/* ... */
}
static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
{
	/* ... */
	ipr_free_mem(ioa_cfg);
	/* ... */
}
/* ipr_alloc_cmd_blks (fragments): */
	/* ... */
		ipr_free_cmd_blks(ioa_cfg);
	/* ... */
			ipr_free_cmd_blks(ioa_cfg);
	/* ... */
		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
	/* ... */
		ioarcb = &ipr_cmd->ioarcb;
	/* ... */
		if (ioa_cfg->sis64) {
	/* ... */

/* ipr_alloc_mem (fragments): */
	if (ioa_cfg->sis64) {
		ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
					      /* ... */);
		ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
					     /* ... */);
		ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
					    /* ... */);
	}
	/* ... */
		goto out_free_res_entries;

	if (ipr_alloc_cmd_blks(ioa_cfg))
		goto out_free_vpd_cbs;
	/* ... */
				sizeof(u32) * IPR_NUM_CMD_BLKS,
	/* ... */
		goto out_ipr_free_cmd_blocks;
	/* ... */
		goto out_free_host_rrq;
	/* ... */
		goto out_free_hostrcb_dma;
	/* ... */
		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
	/* ... */
	if (!ioa_cfg->trace)
		goto out_free_hostrcb_dma;
	/* ... */
out_free_hostrcb_dma:
	/* ... */
out_ipr_free_cmd_blocks:
	ipr_free_cmd_blks(ioa_cfg);
	/* ... */
out_free_res_entries:
	/* ... */
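/*
 * Illustrative sketch of the allocation/unwind idiom above: each failed
 * allocation jumps to a label that frees everything allocated so far,
 * in reverse order. All names here are hypothetical.
 */
static int ex_alloc(struct ex_ctx *ctx)
{
	if (ex_alloc_a(ctx))
		goto out;
	if (ex_alloc_b(ctx))
		goto out_free_a;
	if (ex_alloc_c(ctx))
		goto out_free_b;
	return 0;

out_free_b:
	ex_free_b(ctx);
out_free_a:
	ex_free_a(ctx);
out:
	return -ENOMEM;
}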
/* ipr_initialize_bus_attr (fragment): */
		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
			ioa_cfg->bus_attr[i].max_xfer_rate =
				ipr_max_bus_speeds[ipr_max_speed];
/* ipr_init_ioa_cfg (fragments): */
	ioa_cfg->host = host;
	ioa_cfg->pdev = pdev;
	/* ... */
	INIT_LIST_HEAD(&ioa_cfg->free_q);
	/* ... */
	ipr_initialize_bus_attr(ioa_cfg);
	/* ... */
	if (ioa_cfg->sis64) {
	/* ... */
	pci_set_drvdata(pdev, ioa_cfg);
	/* ... */
	if (ioa_cfg->sis64) {
	/* ... */

/* ipr_get_chip_info (fragment): */
	/* ... */
			return &ipr_chip[i];
	/* ... */
	unsigned long lock_flags = 0;
	/* ... */
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* ipr_test_msi (fragments; called from the probe path below): */
	volatile u32 int_reg;
	unsigned long lock_flags = 0;
	/* ... */
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_mask_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	/* ... */
	} else if (ipr_debug)
		/* ... */

	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.sense_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
	/* ... */
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	/* ... */
		dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
	/* ... */
	} else if (ipr_debug)
		/* ... */

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
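/*
 * Illustrative sketch (a hypothetical condensation, not the literal
 * function): the MSI test installs a temporary handler, pokes the IO
 * debug acknowledge bit so the adapter raises one interrupt, then waits.
 * "ex_test_intr", "msi_wait_q" and "msi_received" are assumed names for
 * the handler and the fields it signals.
 */
	rc = request_irq(pdev->irq, ex_test_intr, 0, "ipr", ioa_cfg);
	if (rc)
		return rc;

	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.sense_interrupt_reg32);
	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);

	if (!ioa_cfg->msi_received) {
		/* no interrupt arrived; fall back to legacy interrupts */
		dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
		rc = -EOPNOTSUPP;
	}

	free_irq(pdev->irq, ioa_cfg);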
/* ipr_probe_ioa (fragments): */
	unsigned long ipr_regs_pci;
	/* ... */
	volatile u32 mask, uproc, interrupts;
	/* ... */
		dev_err(&pdev->dev, "Cannot enable adapter\n");
	/* ... */
		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
	/* ... */
	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
	/* ... */
		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
			/* ... */);
		goto out_scsi_host_put;
	/* ... */
	if (ipr_transop_timeout)
		/* ... */
	/* ... */
			"Couldn't register memory range of registers\n");
		goto out_scsi_host_put;
	/* ... */
			"Couldn't map memory range of registers\n");
		/* ... */
		goto out_release_regions;
	/* ... */
	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
	/* ... */
	if (ioa_cfg->sis64) {
		/* ... */
			dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
		/* ... */
	}
	/* ... */
		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
	/* ... */
		/* ... */ ioa_cfg->chip_cfg->cache_line_size);
	/* ... */
		dev_err(&pdev->dev, "Write of cache line size failed\n");
	/* ... */
	rc = ipr_test_msi(ioa_cfg, pdev);
	/* ... */
		goto out_msi_disable;
	/* ... */
	} else if (ipr_debug)
		/* ... */
	/* ... */
		dev_err(&pdev->dev, "Failed to save PCI config space\n");
		/* ... */
		goto out_msi_disable;
	/* ... */
	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;

	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
		goto out_msi_disable;
	/* ... */
	rc = ipr_alloc_mem(ioa_cfg);
	/* ... */
			"Couldn't allocate enough memory for device driver!\n");
		goto out_msi_disable;
	/* ... */
	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
	/* ... */
	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
	/* ... */
		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
			/* ... */);
	/* ... */
		ioa_cfg->reset = ipr_reset_slot_reset;
	/* ... */
		ioa_cfg->reset = ipr_reset_start_bist;

	spin_lock(&ipr_driver_lock);
	/* ... */
	spin_unlock(&ipr_driver_lock);
	/* ... */
	ipr_free_mem(ioa_cfg);
	/* ... */
out_release_regions:
	/* ... */
static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
				       /* ... */)
{
	/* ... */
	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
	/* ... */
}
static void __ipr_remove(struct pci_dev *pdev)
{
	unsigned long host_lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);

	/* ... */
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	/* ... */
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
	/* ... */
	spin_lock(&ipr_driver_lock);
	/* ... */
	spin_unlock(&ipr_driver_lock);
	/* ... */
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);

	ipr_free_all_resources(ioa_cfg);
	/* ... */
}
	/* ... */
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
/* driver probe entry (fragments): */
	rc = ipr_probe_ioa(pdev, dev_id);
	/* ... */
	ioa_cfg = pci_get_drvdata(pdev);
	rc = ipr_probe_ioa_part2(ioa_cfg);
	/* ... */
	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
	/* ... */
	ipr_scan_vsets(ioa_cfg);
	/* ... */
static void ipr_shutdown(struct pci_dev *pdev)
{
	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
	unsigned long lock_flags = 0;

	/* ... */
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	/* ... */
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
static struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.slot_reset = ipr_pci_slot_reset,
};

static struct pci_driver ipr_driver = {
	/* ... */
	.id_table = ipr_pci_table,
	/* ... */
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	/* ... */
}

/* halt/reboot notification path (fragments): */
	unsigned long flags = 0;
	/* ... */
	spin_lock(&ipr_driver_lock);
	/* ... */
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	/* ... */
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
	/* ... */
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	/* ... */
	spin_unlock(&ipr_driver_lock);
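/*
 * Illustrative sketch: the halt path above is typically driven by a
 * reboot notifier, so each adapter gets a controlled shutdown before the
 * system goes down. Registration would look like this (names
 * hypothetical):
 */
static int ex_halt(struct notifier_block *nb, unsigned long event, void *buf)
{
	/* walk the adapter list under ipr_driver_lock, issue shutdowns */
	return NOTIFY_OK;
}

static struct notifier_block ex_notifier = {
	.notifier_call = ex_halt,
};

/* register_reboot_notifier(&ex_notifier) would be called at init time */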
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 /* ... */);

	return pci_register_driver(&ipr_driver);
}
static void __exit ipr_exit(void)
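/*
 * Illustrative note: exit for a PCI driver module is normally the mirror
 * image of init; the expected body (elided from this excerpt) would be:
 *
 *	pci_unregister_driver(&ipr_driver);
 *
 * paired with module_init(ipr_init)/module_exit(ipr_exit).
 */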