22 #include <linux/module.h>
24 #include <linux/types.h>
25 #include <linux/pci.h>
27 #include <linux/kernel.h>
28 #include <linux/slab.h>
36 #include <linux/blktrace_api.h>
42 #include <scsi/scsi.h>
47 #include <linux/cciss_ioctl.h>
48 #include <linux/string.h>
49 #include <linux/bitmap.h>
57 #define HPSA_DRIVER_VERSION "2.0.2-1"
58 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
62 #define MAX_CONFIG_WAIT 30000
63 #define MAX_IOCTL_CONFIG_WAIT 1000
66 #define MAX_CMD_RETRIES 3
76 static int hpsa_allow_any;
79 "Allow hpsa driver to access unknown HP Smart Array hardware");
80 static int hpsa_simple_mode;
83 "Use 'simple mode' rather than 'performant mode'");
123 {0x3241103C,
"Smart Array P212", &SA5_access},
124 {0x3243103C,
"Smart Array P410", &SA5_access},
125 {0x3245103C,
"Smart Array P410i", &SA5_access},
126 {0x3247103C,
"Smart Array P411", &SA5_access},
127 {0x3249103C,
"Smart Array P812", &SA5_access},
128 {0x324a103C,
"Smart Array P712m", &SA5_access},
129 {0x324b103C,
"Smart Array P711m", &SA5_access},
130 {0x3350103C,
"Smart Array P222", &SA5_access},
131 {0x3351103C,
"Smart Array P420", &SA5_access},
132 {0x3352103C,
"Smart Array P421", &SA5_access},
133 {0x3353103C,
"Smart Array P822", &SA5_access},
134 {0x3354103C,
"Smart Array P420i", &SA5_access},
135 {0x3355103C,
"Smart Array P220i", &SA5_access},
136 {0x3356103C,
"Smart Array P721m", &SA5_access},
137 {0x1920103C,
"Smart Array", &SA5_access},
138 {0x1921103C,
"Smart Array", &SA5_access},
139 {0x1922103C,
"Smart Array", &SA5_access},
140 {0x1923103C,
"Smart Array", &SA5_access},
141 {0x1924103C,
"Smart Array", &SA5_access},
142 {0x1925103C,
"Smart Array", &SA5_access},
143 {0x1926103C,
"Smart Array", &SA5_access},
144 {0x1928103C,
"Smart Array", &SA5_access},
145 {0x334d103C,
"Smart Array P822se", &SA5_access},
146 {0xFFFF103C,
"Unknown Smart Array", &SA5_access},
149 static int number_of_controllers;
173 static void hpsa_scan_start(
struct Scsi_Host *);
174 static int hpsa_scan_finished(
struct Scsi_Host *sh,
175 unsigned long elapsed_time);
176 static int hpsa_change_queue_depth(
struct scsi_device *sdev,
179 static int hpsa_eh_device_reset_handler(
struct scsi_cmnd *scsicmd);
180 static int hpsa_eh_abort_handler(
struct scsi_cmnd *scsicmd);
181 static int hpsa_slave_alloc(
struct scsi_device *sdev);
182 static void hpsa_slave_destroy(
struct scsi_device *sdev);
184 static void hpsa_update_scsi_devices(
struct ctlr_info *
h,
int hostno);
185 static int check_for_unit_attention(
struct ctlr_info *
h,
187 static void check_ioctl_unit_attention(
struct ctlr_info *
h,
190 static void calc_bucket_map(
int *bucket,
int num_buckets,
191 int nsgs,
int *bucket_map);
198 unsigned long *memory_bar);
203 #define BOARD_NOT_READY 0
204 #define BOARD_READY 1
208 unsigned long *
priv = shost_priv(sdev->
host);
214 unsigned long *
priv = shost_priv(sh);
218 static int check_for_unit_attention(
struct ctlr_info *
h,
224 switch (c->
err_info->SenseInfo[12]) {
227 "detected, command retried\n", h->
ctlr);
231 "detected, action required\n", h->
ctlr);
235 "changed, action required\n", h->
ctlr);
243 "or device reset detected\n", h->
ctlr);
247 "cleared by another initiator\n", h->
ctlr);
251 "unit attention detected\n", h->
ctlr);
273 h = shost_to_hba(shost);
283 unsigned char *fwrev;
285 h = shost_to_hba(shost);
289 return snprintf(buf, 20,
"%c%c%c%c\n",
290 fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
293 static ssize_t host_show_commands_outstanding(
struct device *dev,
297 struct ctlr_info *h = shost_to_hba(shost);
308 h = shost_to_hba(shost);
311 "performant" :
"simple");
315 static u32 unresettable_controller[] = {
340 static u32 soft_unresettable_controller[] = {
363 for (i = 0; i <
ARRAY_SIZE(unresettable_controller); i++)
364 if (unresettable_controller[i] == board_id)
369 static int ctlr_is_soft_resettable(
u32 board_id)
373 for (i = 0; i <
ARRAY_SIZE(soft_unresettable_controller); i++)
374 if (soft_unresettable_controller[i] == board_id)
379 static int ctlr_is_resettable(
u32 board_id)
381 return ctlr_is_hard_resettable(board_id) ||
382 ctlr_is_soft_resettable(board_id);
391 h = shost_to_hba(shost);
/*
 * Decode the SCSI-3 address-method field (top two bits of byte 3):
 * 0x40 ("logical unit addressing") marks a logical volume, as opposed
 * to a bare physical device.  Returns nonzero for logical devices.
 */
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}
/*
 * Human-readable RAID level names, indexed by the raid-level byte the
 * driver reads from the device.  The final entry is the catch-all
 * selected via RAID_UNKNOWN (defined as ARRAY_SIZE(raid_label) - 1).
 */
static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
	"UNKNOWN"
};
403 #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
409 unsigned char rlevel;
416 h = sdev_to_hba(sdev);
420 spin_unlock_irqrestore(&h->
lock, flags);
425 if (!is_logical_dev_addr_mode(hdev->
scsi3addr)) {
426 spin_unlock_irqrestore(&h->
lock, flags);
432 spin_unlock_irqrestore(&h->
lock, flags);
446 unsigned char lunid[8];
449 h = sdev_to_hba(sdev);
453 spin_unlock_irqrestore(&h->
lock, flags);
457 spin_unlock_irqrestore(&h->
lock, flags);
458 return snprintf(buf, 20,
"0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
459 lunid[0], lunid[1], lunid[2], lunid[3],
460 lunid[4], lunid[5], lunid[6], lunid[7]);
470 unsigned char sn[16];
473 h = sdev_to_hba(sdev);
477 spin_unlock_irqrestore(&h->
lock, flags);
481 spin_unlock_irqrestore(&h->
lock, flags);
483 "%02X%02X%02X%02X%02X%02X%02X%02X"
484 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
485 sn[0], sn[1], sn[2], sn[3],
486 sn[4], sn[5], sn[6], sn[7],
487 sn[8], sn[9], sn[10], sn[11],
488 sn[12], sn[13], sn[14], sn[15]);
496 host_show_firmware_revision,
NULL);
498 host_show_commands_outstanding,
NULL);
500 host_show_transport_mode,
NULL);
502 host_show_resettable,
NULL);
505 &dev_attr_raid_level,
513 &dev_attr_firmware_revision,
514 &dev_attr_commands_outstanding,
515 &dev_attr_transport_mode,
516 &dev_attr_resettable,
524 .queuecommand = hpsa_scsi_queue_command,
525 .scan_start = hpsa_scan_start,
526 .scan_finished = hpsa_scan_finished,
527 .change_queue_depth = hpsa_change_queue_depth,
530 .eh_abort_handler = hpsa_eh_abort_handler,
531 .eh_device_reset_handler = hpsa_eh_device_reset_handler,
533 .slave_alloc = hpsa_slave_alloc,
534 .slave_destroy = hpsa_slave_destroy,
536 .compat_ioctl = hpsa_compat_ioctl,
538 .sdev_attrs = hpsa_sdev_attrs,
539 .shost_attrs = hpsa_shost_attrs,
557 return h->
access.command_completed(h, q);
564 spin_unlock_irqrestore(&h->
lock, flags);
590 static int is_firmware_flash_cmd(
u8 *
cdb)
600 #define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
601 #define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
602 static void dial_down_lockup_detection_during_fw_flash(
struct ctlr_info *h,
605 if (!is_firmware_flash_cmd(c->
Request.CDB))
611 static void dial_up_lockup_detection_on_fw_flash_complete(
struct ctlr_info *h,
614 if (is_firmware_flash_cmd(c->
Request.CDB) &&
619 static void enqueue_cmd_and_start_io(
struct ctlr_info *h,
624 set_performant_mode(h, c);
625 dial_down_lockup_detection_during_fw_flash(h, c);
629 spin_unlock_irqrestore(&h->
lock, flags);
637 list_del_init(&c->
list);
640 static inline int is_hba_lunid(
unsigned char scsi3addr[])
645 static inline int is_scsi_rev_5(
struct ctlr_info *h)
654 static int hpsa_find_target_lun(
struct ctlr_info *h,
655 unsigned char scsi3addr[],
int bus,
int *
target,
int *
lun)
666 if (h->
dev[i]->bus == bus && h->
dev[i]->target != -1)
681 static int hpsa_scsi_add_entry(
struct ctlr_info *h,
int hostno,
692 dev_err(&h->
pdev->dev,
"too many devices, some will be "
698 if (device->
lun != -1)
708 if (hpsa_find_target_lun(h, device->
scsi3addr,
722 for (i = 0; i <
n; i++) {
727 if (
memcmp(addr1, addr2, 8) == 0) {
734 if (device->
lun == -1) {
735 dev_warn(&h->
pdev->dev,
"physical device with no LUN=0,"
736 " suspect firmware bug or unsupported hardware "
745 added[*nadded] = device;
753 dev_info(&h->
pdev->dev,
"%s device c%db%dt%dl%d added.\n",
760 static void hpsa_scsi_update_entry(
struct ctlr_info *h,
int hostno,
768 dev_info(&h->
pdev->dev,
"%s device c%db%dt%dl%d updated.\n",
774 static void hpsa_scsi_replace_entry(
struct ctlr_info *h,
int hostno,
788 if (new_entry->
target == -1) {
794 added[*nadded] = new_entry;
796 dev_info(&h->
pdev->dev,
"%s device c%db%dt%dl%d changed.\n",
802 static void hpsa_scsi_remove_entry(
struct ctlr_info *h,
int hostno,
int entry,
815 for (i = entry; i < h->
ndevices-1; i++)
818 dev_info(&h->
pdev->dev,
"%s device c%db%dt%dl%d removed.\n",
/* True iff two 8-byte SCSI-3 addresses are byte-for-byte identical. */
#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])
833 static void fixup_botched_add(
struct ctlr_info *h,
844 if (h->
dev[i] == added) {
851 spin_unlock_irqrestore(&h->
lock, flags);
874 if (dev1->
bus != dev2->
bus)
904 #define DEVICE_NOT_FOUND 0
905 #define DEVICE_CHANGED 1
906 #define DEVICE_SAME 2
907 #define DEVICE_UPDATED 3
908 for (i = 0; i < haystack_size; i++) {
909 if (haystack[i] ==
NULL)
913 if (device_is_the_same(needle, haystack[i])) {
914 if (device_updated(needle, haystack[i]))
926 static void adjust_hpsa_scsi_table(
struct ctlr_info *h,
int hostno,
933 int i,
entry, device_change, changes = 0;
937 int nadded, nremoved;
943 if (!added || !removed) {
945 "adjust_hpsa_scsi_table\n");
961 while (i < h->ndevices) {
963 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
964 if (device_change == DEVICE_NOT_FOUND) {
966 hpsa_scsi_remove_entry(h, hostno, i,
969 }
else if (device_change == DEVICE_CHANGED) {
971 hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
972 added, &nadded, removed, &nremoved);
977 }
else if (device_change == DEVICE_UPDATED) {
978 hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
987 for (i = 0; i < nsds; i++) {
990 device_change = hpsa_scsi_find_entry(sd[i], h->
dev,
992 if (device_change == DEVICE_NOT_FOUND) {
994 if (hpsa_scsi_add_entry(h, hostno, sd[i],
995 added, &nadded) != 0)
998 }
else if (device_change == DEVICE_CHANGED) {
1002 "device unexpectedly changed.\n");
1006 spin_unlock_irqrestore(&h->
devlock, flags);
1012 if (hostno == -1 || !changes)
1017 for (i = 0; i < nremoved; i++) {
1020 removed[i]->target, removed[i]->lun);
1030 " for removal.", hostno, removed[i]->
bus,
1038 for (i = 0; i < nadded; i++) {
1040 added[i]->target, added[i]->lun) == 0)
1042 dev_warn(&h->
pdev->dev,
"scsi_add_device c%db%dt%dl%d failed, "
1043 "device not added.\n", hostno, added[i]->
bus,
1048 fixup_botched_add(h, added[i]);
1061 int bus,
int target,
int lun)
1066 for (i = 0; i < h->
ndevices; i++) {
1068 if (sd->
bus == bus && sd->
target == target && sd->
lun == lun)
1075 static int hpsa_slave_alloc(
struct scsi_device *sdev)
1078 unsigned long flags;
1081 h = sdev_to_hba(sdev);
1083 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
1084 sdev_id(sdev), sdev->
lun);
1087 spin_unlock_irqrestore(&h->
devlock, flags);
1091 static void hpsa_slave_destroy(
struct scsi_device *sdev)
1096 static void hpsa_free_sg_chain_blocks(
struct ctlr_info *h)
1102 for (i = 0; i < h->
nr_cmds; i++) {
1110 static int hpsa_allocate_sg_chain_blocks(
struct ctlr_info *h)
1121 for (i = 0; i < h->
nr_cmds; i++) {
1130 hpsa_free_sg_chain_blocks(h);
1134 static void hpsa_map_sg_chain_block(
struct ctlr_info *h,
1143 chain_sg->
Len =
sizeof(*chain_sg) *
1145 temp64 = pci_map_single(h->
pdev, chain_block, chain_sg->
Len,
1147 chain_sg->
Addr.lower = (
u32) (temp64 & 0x0FFFFFFFFULL);
1148 chain_sg->
Addr.upper = (
u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
1151 static void hpsa_unmap_sg_chain_block(
struct ctlr_info *h,
1161 temp64.val32.lower = chain_sg->
Addr.lower;
1162 temp64.val32.upper = chain_sg->
Addr.upper;
1175 unsigned long sense_data_size;
1183 hpsa_unmap_sg_chain_block(h, cp);
1193 sense_data_size =
sizeof(ei->
SenseInfo);
1194 if (ei->
SenseLen < sense_data_size)
1220 if (check_for_unit_attention(h, cp)) {
1235 if ((asc == 0x25) && (ascq == 0x0)) {
1237 "has check condition\n", cp);
1247 if ((asc == 0x04) && (ascq == 0x03)) {
1249 "has check condition: unit "
1250 "not ready, manual "
1251 "intervention required\n", cp);
1258 "has check condition: aborted command: "
1259 "ASC: 0x%x, ASCQ: 0x%x\n",
1265 dev_dbg(&h->
pdev->dev,
"cp %p has check condition: "
1267 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1268 "Returning result: 0x%x, "
1269 "cmd=[%02x %02x %02x %02x %02x "
1270 "%02x %02x %02x %02x %02x %02x "
1271 "%02x %02x %02x %02x %02x]\n",
1272 cp, sense_key, asc, ascq,
1291 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1292 "Returning result: 0x%x\n",
1294 sense_key, asc, ascq,
1298 "Returning no connection.\n", cp),
1320 " completed with data overrun "
1338 "protocol error\n", cp);
1342 dev_warn(&h->
pdev->dev,
"cp %p had hardware error\n", cp);
1346 dev_warn(&h->
pdev->dev,
"cp %p had connection lost\n", cp);
1350 dev_warn(&h->
pdev->dev,
"cp %p was aborted with status 0x%x\n",
1355 dev_warn(&h->
pdev->dev,
"cp %p reports abort failed\n", cp);
1359 dev_warn(&h->
pdev->dev,
"cp %p aborted due to an unsolicited "
1372 dev_warn(&h->
pdev->dev,
"cp %p returned unknown status %x\n",
1379 static void hpsa_pci_unmap(
struct pci_dev *pdev,
1385 for (i = 0; i < sg_used; i++) {
1386 addr64.val32.lower = c->
SG[
i].Addr.lower;
1387 addr64.val32.upper = c->
SG[
i].Addr.upper;
1393 static void hpsa_map_one(
struct pci_dev *pdev,
1407 addr64 = (
u64) pci_map_single(pdev, buf, buflen, data_direction);
1408 cp->
SG[0].Addr.lower =
1409 (
u32) (addr64 & (
u64) 0x00000000FFFFFFFF);
1410 cp->
SG[0].Addr.upper =
1411 (
u32) ((addr64 >> 32) & (
u64) 0x00000000FFFFFFFF);
1417 static inline void hpsa_scsi_do_simple_cmd_core(
struct ctlr_info *h,
1423 enqueue_cmd_and_start_io(h, c);
1427 static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(
struct ctlr_info *h,
1430 unsigned long flags;
1435 spin_unlock_irqrestore(&h->
lock, flags);
1438 spin_unlock_irqrestore(&h->
lock, flags);
1439 hpsa_scsi_do_simple_cmd_core(h, c);
1443 #define MAX_DRIVER_CMD_RETRIES 25
1444 static void hpsa_scsi_do_simple_cmd_with_retry(
struct ctlr_info *h,
1451 hpsa_scsi_do_simple_cmd_core(h, c);
1455 if (backoff_time < 1000)
1458 }
while ((check_for_unit_attention(h, c) ||
1459 check_for_busy(h, c)) &&
1461 hpsa_pci_unmap(h->
pdev, c, 1, data_direction);
1464 static void hpsa_scsi_interpret_error(
struct CommandList *cp)
1467 struct device *
d = &cp->
h->pdev->dev;
1472 dev_warn(d,
"cmd %p has completed with errors\n", cp);
1473 dev_warn(d,
"cmd %p has SCSI Status = %x\n", cp,
1476 dev_warn(d,
"SCSI status is abnormally zero. "
1477 "(probably indicates selection timeout "
1478 "reported incorrectly due to a known "
1479 "firmware bug, circa July, 2001.)\n");
1485 dev_warn(d,
"cp %p has completed with data overrun\n", cp);
1491 dev_warn(d,
"cp %p is reported invalid (probably means "
1492 "target device no longer present)\n", cp);
1498 dev_warn(d,
"cp %p has protocol error \n", cp);
1502 dev_warn(d,
"cp %p had hardware error\n", cp);
1505 dev_warn(d,
"cp %p had connection lost\n", cp);
1508 dev_warn(d,
"cp %p was aborted\n", cp);
1511 dev_warn(d,
"cp %p reports abort failed\n", cp);
1514 dev_warn(d,
"cp %p aborted due to an unsolicited abort\n", cp);
1517 dev_warn(d,
"cp %p timed out\n", cp);
1520 dev_warn(d,
"Command unabortable\n");
1523 dev_warn(d,
"cp %p returned unknown status %x\n", cp,
1528 static int hpsa_scsi_do_inquiry(
struct ctlr_info *h,
unsigned char *scsi3addr,
1529 unsigned char page,
unsigned char *buf,
1536 c = cmd_special_alloc(h);
1539 dev_warn(&h->
pdev->dev,
"cmd_special_alloc returned NULL!\n");
1547 hpsa_scsi_interpret_error(c);
1550 cmd_special_free(h, c);
1554 static int hpsa_send_reset(
struct ctlr_info *h,
unsigned char *scsi3addr)
1560 c = cmd_special_alloc(h);
1563 dev_warn(&h->
pdev->dev,
"cmd_special_alloc returned NULL!\n");
1568 hpsa_scsi_do_simple_cmd_core(h, c);
1573 hpsa_scsi_interpret_error(c);
1576 cmd_special_free(h, c);
1580 static void hpsa_get_raid_level(
struct ctlr_info *h,
1581 unsigned char *scsi3addr,
unsigned char *
raid_level)
1590 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
1592 *raid_level = buf[8];
1600 static int hpsa_get_device_id(
struct ctlr_info *h,
unsigned char *scsi3addr,
1611 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
1613 memcpy(device_id, &buf[8], buflen);
1618 static int hpsa_scsi_do_report_luns(
struct ctlr_info *h,
int logical,
1620 int extended_response)
1624 unsigned char scsi3addr[8];
1627 c = cmd_special_alloc(h);
1629 dev_err(&h->
pdev->dev,
"cmd_special_alloc returned NULL!\n");
1633 memset(scsi3addr, 0,
sizeof(scsi3addr));
1635 buf, bufsize, 0, scsi3addr,
TYPE_CMD);
1636 if (extended_response)
1637 c->
Request.CDB[1] = extended_response;
1642 hpsa_scsi_interpret_error(c);
1645 cmd_special_free(h, c);
1649 static inline int hpsa_scsi_do_report_phys_luns(
struct ctlr_info *h,
1651 int bufsize,
int extended_response)
1653 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
1656 static inline int hpsa_scsi_do_report_log_luns(
struct ctlr_info *h,
1659 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
1662 static inline void hpsa_set_bus_target_lun(
struct hpsa_scsi_dev_t *device,
1663 int bus,
int target,
int lun)
1670 static int hpsa_update_device_info(
struct ctlr_info *h,
1672 unsigned char *is_OBDR_device)
1675 #define OBDR_SIG_OFFSET 43
1676 #define OBDR_TAPE_SIG "$DR-10"
1677 #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
1678 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
1680 unsigned char *inq_buff;
1681 unsigned char *obdr_sig;
1683 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE,
GFP_KERNEL);
1688 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
1689 (
unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
1692 "hpsa_update_device_info: inquiry failed\n");
1696 this_device->
devtype = (inq_buff[0] & 0x1f);
1699 sizeof(this_device->
vendor));
1701 sizeof(this_device->
model));
1704 hpsa_get_device_id(h, scsi3addr, this_device->
device_id,
1708 is_logical_dev_addr_mode(scsi3addr))
1709 hpsa_get_raid_level(h, scsi3addr, &this_device->
raid_level);
1713 if (is_OBDR_device) {
1719 strncmp(obdr_sig, OBDR_TAPE_SIG,
1720 OBDR_SIG_LEN) == 0);
1731 static unsigned char *ext_target_model[] = {
1744 for (i = 0; ext_target_model[
i]; i++)
1746 strlen(ext_target_model[i])) == 0)
1758 static void figure_bus_target_lun(
struct ctlr_info *h,
1763 if (!is_logical_dev_addr_mode(lunaddrbytes)) {
1765 if (is_hba_lunid(lunaddrbytes))
1766 hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
1769 hpsa_set_bus_target_lun(device, 2, -1, -1);
1773 if (is_ext_target(h, device)) {
1778 hpsa_set_bus_target_lun(device,
1779 1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
1782 hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
1796 static int add_ext_target_dev(
struct ctlr_info *h,
1799 unsigned long lunzerobits[],
int *n_ext_target_devs)
1801 unsigned char scsi3addr[8];
1806 if (!is_logical_dev_addr_mode(lunaddrbytes))
1809 if (!is_ext_target(h, tmpdevice))
1812 if (tmpdevice->
lun == 0)
1816 scsi3addr[3] = tmpdevice->
target;
1817 if (is_hba_lunid(scsi3addr))
1820 if (is_scsi_rev_5(h))
1825 "target devices exceeded. Check your hardware "
1830 if (hpsa_update_device_info(h, scsi3addr, this_device,
NULL))
1832 (*n_ext_target_devs)++;
1833 hpsa_set_bus_target_lun(this_device,
1845 static int hpsa_gather_lun_info(
struct ctlr_info *h,
1850 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) {
1851 dev_err(&h->
pdev->dev,
"report physical LUNs failed.\n");
1856 dev_warn(&h->
pdev->dev,
"maximum physical LUNs (%d) exceeded."
1861 if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
1862 dev_err(&h->
pdev->dev,
"report logical LUNs failed.\n");
1869 "maximum logical LUNs (%d) exceeded. "
1876 "maximum logical + physical LUNs (%d) exceeded. "
1885 int nphysicals,
int nlogicals,
struct ReportLUNdata *physdev_list,
1893 int logicals_start = nphysicals + (raid_ctlr_position == 0);
1894 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
1896 if (i == raid_ctlr_position)
1899 if (i < logicals_start)
1900 return &physdev_list->
LUN[i - (raid_ctlr_position == 0)][0];
1902 if (i < last_device)
1903 return &logdev_list->
LUN[i - nphysicals -
1904 (raid_ctlr_position == 0)][0];
1909 static void hpsa_update_scsi_devices(
struct ctlr_info *h,
int hostno)
1925 u32 ndev_allocated = 0;
1929 int i, n_ext_target_devs, ndevs_to_allocate;
1930 int raid_ctlr_position;
1934 physdev_list = kzalloc(reportlunsize,
GFP_KERNEL);
1935 logdev_list = kzalloc(reportlunsize,
GFP_KERNEL);
1936 tmpdevice = kzalloc(
sizeof(*tmpdevice),
GFP_KERNEL);
1938 if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
1942 memset(lunzerobits, 0,
sizeof(lunzerobits));
1944 if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals,
1945 logdev_list, &nlogicals))
1955 for (i = 0; i < ndevs_to_allocate; i++) {
1957 dev_warn(&h->
pdev->dev,
"maximum devices (%d) exceeded."
1963 currentsd[
i] = kzalloc(
sizeof(*currentsd[i]),
GFP_KERNEL);
1964 if (!currentsd[i]) {
1966 __FILE__, __LINE__);
1973 raid_ctlr_position = 0;
1975 raid_ctlr_position = nphysicals + nlogicals;
1978 n_ext_target_devs = 0;
1979 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
1980 u8 *lunaddrbytes, is_OBDR = 0;
1984 i, nphysicals, nlogicals, physdev_list, logdev_list);
1986 if (lunaddrbytes[3] & 0xC0 &&
1987 i < nphysicals + (raid_ctlr_position == 0))
1991 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
1994 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
1995 this_device = currentsd[ncurrent];
2004 if (add_ext_target_dev(h, tmpdevice, this_device,
2005 lunaddrbytes, lunzerobits,
2006 &n_ext_target_devs)) {
2008 this_device = currentsd[ncurrent];
2011 *this_device = *tmpdevice;
2013 switch (this_device->
devtype) {
2040 if (!is_hba_lunid(lunaddrbytes))
2050 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
2053 for (i = 0; i < ndev_allocated; i++)
2054 kfree(currentsd[i]);
2056 kfree(physdev_list);
2064 static int hpsa_scatter_gather(
struct ctlr_info *h,
2071 int use_sg,
i, sg_index, chained;
2081 goto sglist_finished;
2095 curr_sg->
Addr.lower = (
u32) (addr64 & 0x0FFFFFFFFULL);
2096 curr_sg->
Addr.upper = (
u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
2102 if (use_sg + chained > h->
maxSG)
2103 h->
maxSG = use_sg + chained;
2107 cp->
Header.SGTotal = (
u16) (use_sg + 1);
2108 hpsa_map_sg_chain_block(h, cp);
2120 static int hpsa_scsi_queue_command_lck(
struct scsi_cmnd *cmd,
2125 unsigned char scsi3addr[8];
2127 unsigned long flags;
2130 h = sdev_to_hba(cmd->
device);
2131 dev = cmd->
device->hostdata;
2141 spin_unlock_irqrestore(&h->
lock, flags);
2146 spin_unlock_irqrestore(&h->
lock, flags);
2149 dev_err(&h->
pdev->dev,
"cmd_alloc returned NULL!\n");
2162 c->
Header.ReplyQueue = 0;
2163 memcpy(&c->
Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
2204 dev_err(&h->
pdev->dev,
"unknown data direction: %d\n",
2210 if (hpsa_scatter_gather(h, c, cmd) < 0) {
2214 enqueue_cmd_and_start_io(h, c);
2224 unsigned long flags;
2231 spin_unlock_irqrestore(&h->
scan_lock, flags);
2240 spin_unlock_irqrestore(&h->
scan_lock, flags);
2242 hpsa_update_scsi_devices(h, h->
scsi_host->host_no);
2247 spin_unlock_irqrestore(&h->
scan_lock, flags);
2250 static int hpsa_scan_finished(
struct Scsi_Host *sh,
2251 unsigned long elapsed_time)
2254 unsigned long flags;
2259 spin_unlock_irqrestore(&h->
scan_lock, flags);
2263 static int hpsa_change_queue_depth(
struct scsi_device *sdev,
2266 struct ctlr_info *h = sdev_to_hba(sdev);
2280 static void hpsa_unregister_scsi(
struct ctlr_info *h)
2288 static int hpsa_register_scsi(
struct ctlr_info *h)
2308 sh->hostdata[0] = (
unsigned long) h;
2311 error = scsi_add_host(sh, &h->
pdev->dev);
2319 " failed for controller %d\n", __func__, h->
ctlr);
2324 " failed for controller %d\n", __func__, h->
ctlr);
2328 static int wait_for_device_to_become_ready(
struct ctlr_info *h,
2329 unsigned char lunaddr[])
2336 c = cmd_special_alloc(h);
2339 "wait_for_device_to_become_ready.\n");
2354 waittime = waittime * 2;
2358 hpsa_scsi_do_simple_cmd_core(h, c);
2371 "for device to become ready.\n", waittime);
2380 cmd_special_free(h, c);
2387 static int hpsa_eh_device_reset_handler(
struct scsi_cmnd *scsicmd)
2394 h = sdev_to_hba(scsicmd->
device);
2397 dev = scsicmd->
device->hostdata;
2399 dev_err(&h->
pdev->dev,
"hpsa_eh_device_reset_handler: "
2400 "device lookup failed.\n");
2403 dev_warn(&h->
pdev->dev,
"resetting device %d:%d:%d:%d\n",
2406 rc = hpsa_send_reset(h, dev->
scsi3addr);
2407 if (rc == 0 && wait_for_device_to_become_ready(h, dev->
scsi3addr) == 0)
2414 static void swizzle_abort_tag(
u8 *
tag)
2418 memcpy(original_tag, tag, 8);
2419 tag[0] = original_tag[3];
2420 tag[1] = original_tag[2];
2421 tag[2] = original_tag[1];
2422 tag[3] = original_tag[0];
2423 tag[4] = original_tag[7];
2424 tag[5] = original_tag[6];
2425 tag[6] = original_tag[5];
2426 tag[7] = original_tag[4];
2429 static int hpsa_send_abort(
struct ctlr_info *h,
unsigned char *scsi3addr,
2436 c = cmd_special_alloc(h);
2438 dev_warn(&h->
pdev->dev,
"cmd_special_alloc returned NULL!\n");
2444 swizzle_abort_tag(&c->
Request.CDB[4]);
2445 hpsa_scsi_do_simple_cmd_core(h, c);
2446 dev_dbg(&h->
pdev->dev,
"%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n",
2447 __func__, abort->
Header.Tag.upper, abort->
Header.Tag.lower);
2458 dev_dbg(&h->
pdev->dev,
"%s: Tag:0x%08x:%08x: interpreting error.\n",
2459 __func__, abort->
Header.Tag.upper,
2460 abort->
Header.Tag.lower);
2461 hpsa_scsi_interpret_error(c);
2465 cmd_special_free(h, c);
2466 dev_dbg(&h->
pdev->dev,
"%s: Tag:0x%08x:%08x: Finished.\n", __func__,
2486 unsigned long flags;
2496 spin_unlock_irqrestore(&h->
lock, flags);
2500 spin_unlock_irqrestore(&h->
lock, flags);
2507 unsigned long flags;
2514 spin_unlock_irqrestore(&h->
lock, flags);
2517 spin_unlock_irqrestore(&h->
lock, flags);
2527 static int hpsa_send_abort_both_ways(
struct ctlr_info *h,
2528 unsigned char *scsi3addr,
struct CommandList *abort)
2532 int rc = 0, rc2 = 0;
2539 swizzle_abort_tag(swizzled_tag);
2540 c = hpsa_find_cmd_in_queue_by_tag(h, swizzled_tag, &h->
cmpQ);
2542 dev_warn(&h->
pdev->dev,
"Unexpectedly found byte-swapped tag in completion queue.\n");
2543 return hpsa_send_abort(h, scsi3addr, abort, 0);
2545 rc = hpsa_send_abort(h, scsi3addr, abort, 0);
2551 c = hpsa_find_cmd_in_queue(h, abort->
scsi_cmd, &h->
cmpQ);
2553 rc2 = hpsa_send_abort(h, scsi3addr, abort, 1);
2561 static int hpsa_eh_abort_handler(
struct scsi_cmnd *
sc)
2574 h = sdev_to_hba(sc->
device);
2576 "ABORT REQUEST FAILED, Controller lookup failed.\n"))
2584 memset(msg, 0,
sizeof(msg));
2585 ml +=
sprintf(msg+ml,
"ABORT REQUEST on C%d:B%d:T%d:L%d ",
2590 dev = sc->
device->hostdata;
2592 dev_err(&h->
pdev->dev,
"%s FAILED, Device lookup failed.\n",
2600 dev_err(&h->
pdev->dev,
"%s FAILED, Command to abort is NULL.\n",
2605 ml +=
sprintf(msg+ml,
"Tag:0x%08x:%08x ",
2609 ml +=
sprintf(msg+ml,
"Command:0x%x SN:0x%lx ",
2612 dev_warn(&h->
pdev->dev,
"Abort request on C%d:B%d:T%d:L%d\n",
2619 found = hpsa_find_cmd_in_queue(h, sc, &h->
reqQ);
2623 dev_info(&h->
pdev->dev,
"%s Request SUCCEEDED (driver queue).\n",
2629 found = hpsa_find_cmd_in_queue(h, sc, &h->
cmpQ);
2631 dev_dbg(&h->
pdev->dev,
"%s Request SUCCEEDED (not known to driver).\n",
2641 rc = hpsa_send_abort_both_ways(h, dev->
scsi3addr, abort);
2643 dev_dbg(&h->
pdev->dev,
"%s Request FAILED.\n", msg);
2644 dev_warn(&h->
pdev->dev,
"FAILED abort on device C%d:B%d:T%d:L%d\n",
2649 dev_info(&h->
pdev->dev,
"%s REQUEST SUCCEEDED.\n", msg);
2656 #define ABORT_COMPLETE_WAIT_SECS 30
2658 found = hpsa_find_cmd_in_queue(h, sc, &h->
cmpQ);
2663 dev_warn(&h->
pdev->dev,
"%s FAILED. Aborted command has not completed after %d seconds.\n",
2664 msg, ABORT_COMPLETE_WAIT_SECS);
2681 unsigned long flags;
2687 spin_unlock_irqrestore(&h->
lock, flags);
2694 spin_unlock_irqrestore(&h->
lock, flags);
2697 memset(c, 0,
sizeof(*c));
2701 memset(c->err_info, 0,
sizeof(*c->err_info));
2703 + i *
sizeof(*c->err_info);
2707 INIT_LIST_HEAD(&c->list);
2708 c->busaddr = (
u32) cmd_dma_handle;
2709 temp64.val = (
u64) err_dma_handle;
2710 c->ErrDesc.Addr.lower = temp64.val32.lower;
2711 c->ErrDesc.Addr.upper = temp64.val32.upper;
2712 c->ErrDesc.Len =
sizeof(*c->err_info);
2731 memset(c, 0,
sizeof(*c));
2740 sizeof(*c), c, cmd_dma_handle);
2745 INIT_LIST_HEAD(&c->
list);
2747 temp64.val = (
u64) err_dma_handle;
2748 c->
ErrDesc.Addr.lower = temp64.val32.lower;
2749 c->
ErrDesc.Addr.upper = temp64.val32.upper;
2759 unsigned long flags;
2766 spin_unlock_irqrestore(&h->
lock, flags);
2774 temp64.val32.upper = c->
ErrDesc.Addr.upper;
2781 #ifdef CONFIG_COMPAT
2783 static int hpsa_ioctl32_passthru(
struct scsi_device *dev,
int cmd,
void *
arg)
2785 IOCTL32_Command_struct
__user *arg32 =
2786 (IOCTL32_Command_struct
__user *) arg;
2792 memset(&arg64, 0,
sizeof(arg64));
2802 arg64.
buf = compat_ptr(cp);
2811 err |=
copy_in_user(&arg32->error_info, &p->error_info,
2812 sizeof(arg32->error_info));
2818 static int hpsa_ioctl32_big_passthru(
struct scsi_device *dev,
2821 BIG_IOCTL32_Command_struct
__user *arg32 =
2822 (BIG_IOCTL32_Command_struct
__user *) arg;
2829 memset(&arg64, 0,
sizeof(arg64));
2840 arg64.
buf = compat_ptr(cp);
2849 err |=
copy_in_user(&arg32->error_info, &p->error_info,
2850 sizeof(arg32->error_info));
2856 static int hpsa_compat_ioctl(
struct scsi_device *dev,
int cmd,
void *arg)
2874 return hpsa_ioctl(dev, cmd, arg);
2876 case CCISS_PASSTHRU32:
2877 return hpsa_ioctl32_passthru(dev, cmd, arg);
2878 case CCISS_BIG_PASSTHRU32:
2879 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
2887 static int hpsa_getpciinfo_ioctl(
struct ctlr_info *h,
void __user *
argp)
2894 pciinfo.bus = h->
pdev->bus->number;
2895 pciinfo.dev_fn = h->
pdev->devfn;
2902 static int hpsa_getdrivver_ioctl(
struct ctlr_info *h,
void __user *argp)
2905 unsigned char vmaj, vmin, vsubmin;
2909 &vmaj, &vmin, &vsubmin);
2917 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
2925 static int hpsa_passthru_ioctl(
struct ctlr_info *h,
void __user *argp)
2957 c = cmd_special_alloc(h);
2965 c->
Header.ReplyQueue = 0;
2983 temp64.val = pci_map_single(h->
pdev, buff,
2985 c->
SG[0].Addr.lower = temp64.val32.lower;
2986 c->
SG[0].Addr.upper = temp64.val32.upper;
2990 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
2993 check_ioctl_unit_attention(h, c);
2998 if (
copy_to_user(argp, &iocommand,
sizeof(iocommand))) {
3000 cmd_special_free(h, c);
3008 cmd_special_free(h, c);
3013 cmd_special_free(h, c);
3017 static int hpsa_big_passthru_ioctl(
struct ctlr_info *h,
void __user *argp)
3021 unsigned char **buff =
NULL;
3070 data_ptr = ioc->
buf;
3073 buff_size[sg_used] = sz;
3075 if (buff[sg_used] ==
NULL) {
3085 memset(buff[sg_used], 0, sz);
3090 c = cmd_special_alloc(h);
3096 c->
Header.ReplyQueue = 0;
3103 for (i = 0; i < sg_used; i++) {
3104 temp64.val = pci_map_single(h->
pdev, buff[i],
3106 c->
SG[
i].Addr.lower = temp64.val32.lower;
3107 c->
SG[
i].Addr.upper = temp64.val32.upper;
3108 c->
SG[
i].Len = buff_size[
i];
3113 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
3116 check_ioctl_unit_attention(h, c);
3120 cmd_special_free(h, c);
3127 for (i = 0; i < sg_used; i++) {
3129 cmd_special_free(h, c);
3133 ptr += buff_size[
i];
3136 cmd_special_free(h, c);
3140 for (i = 0; i < sg_used; i++)
3149 static void check_ioctl_unit_attention(
struct ctlr_info *h,
3154 (
void) check_for_unit_attention(h, c);
3159 static int hpsa_ioctl(
struct scsi_device *dev,
int cmd,
void *arg)
3164 h = sdev_to_hba(dev);
3173 return hpsa_getpciinfo_ioctl(h, argp);
3175 return hpsa_getdrivver_ioctl(h, argp);
3177 return hpsa_passthru_ioctl(h, argp);
3179 return hpsa_big_passthru_ioctl(h, argp);
3197 enqueue_cmd_and_start_io(h, c);
3213 c->
Header.ReplyQueue = 0;
3214 if (buff !=
NULL && size > 0) {
3229 if (page_code != 0) {
3238 c->
Request.CDB[4] = size & 0xFF;
3250 c->
Request.CDB[6] = (size >> 24) & 0xFF;
3251 c->
Request.CDB[7] = (size >> 16) & 0xFF;
3252 c->
Request.CDB[8] = (size >> 8) & 0xFF;
3253 c->
Request.CDB[9] = size & 0xFF;
3262 c->
Request.CDB[7] = (size >> 8) & 0xFF;
3263 c->
Request.CDB[8] = size & 0xFF;
3272 dev_warn(&h->
pdev->dev,
"unknown command 0x%c\n", cmd);
3297 dev_dbg(&h->
pdev->dev,
"Abort Tag:0x%08x:%08x using request Tag:0x%08x:%08x\n",
3329 dev_warn(&h->
pdev->dev,
"unknown command type %d\n", cmd_type);
3333 switch (c->
Request.Type.Direction) {
3347 hpsa_map_one(h->
pdev, c, buff, size, pci_dir);
3358 ulong page_offs = ((
ulong) base) - page_base;
3362 return page_remapped ? (page_remapped + page_offs) :
NULL;
3368 static void start_io(
struct ctlr_info *h)
3371 unsigned long flags;
3374 while (!list_empty(&h->
reqQ)) {
3377 if ((h->
access.fifo_full(h))) {
3398 spin_unlock_irqrestore(&h->
lock, flags);
3399 h->
access.submit_command(h, c);
3402 spin_unlock_irqrestore(&h->
lock, flags);
3405 static inline unsigned long get_next_completion(
struct ctlr_info *h,
u8 q)
3407 return h->
access.command_completed(h, q);
3412 return h->
access.intr_pending(h);
3415 static inline long interrupt_not_for_us(
struct ctlr_info *h)
3417 return (h->
access.intr_pending(h) == 0) ||
3421 static inline int bad_tag(
struct ctlr_info *h,
u32 tag_index,
3425 dev_warn(&h->
pdev->dev,
"bad tag 0x%08x ignored.\n", raw_tag);
3431 static inline void finish_cmd(
struct CommandList *c)
3433 unsigned long flags;
3437 spin_unlock_irqrestore(&c->
h->lock, flags);
3438 dial_up_lockup_detection_on_fw_flash_complete(c->
h, c);
3440 complete_scsi_command(c);
3445 static inline u32 hpsa_tag_contains_index(
u32 tag)
3450 static inline u32 hpsa_tag_to_index(
u32 tag)
3456 static inline u32 hpsa_tag_discard_error_bits(
struct ctlr_info *h,
u32 tag)
3458 #define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
3459 #define HPSA_SIMPLE_ERROR_BITS 0x03
3466 static inline void process_indexed_cmd(
struct ctlr_info *h,
3472 tag_index = hpsa_tag_to_index(raw_tag);
3473 if (!bad_tag(h, tag_index, raw_tag)) {
3480 static inline void process_nonindexed_cmd(
struct ctlr_info *h,
3485 unsigned long flags;
3487 tag = hpsa_tag_discard_error_bits(h, raw_tag);
3490 if ((c->
busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
3491 spin_unlock_irqrestore(&h->
lock, flags);
3496 spin_unlock_irqrestore(&h->
lock, flags);
3497 bad_tag(h, h->
nr_cmds + 1, raw_tag);
3505 static int ignore_bogus_interrupt(
struct ctlr_info *h)
3513 dev_info(&h->
pdev->dev,
"Received interrupt while interrupts disabled "
3514 "(known firmware bug.) Ignoring.\n");
3529 static irqreturn_t hpsa_intx_discard_completions(
int irq,
void *queue)
3531 struct ctlr_info *h = queue_to_hba(queue);
3532 u8 q = *(
u8 *) queue;
3535 if (ignore_bogus_interrupt(h))
3538 if (interrupt_not_for_us(h))
3542 raw_tag = get_next_completion(h, q);
3544 raw_tag = next_command(h, q);
3549 static irqreturn_t hpsa_msix_discard_completions(
int irq,
void *queue)
3551 struct ctlr_info *h = queue_to_hba(queue);
3553 u8 q = *(
u8 *) queue;
3555 if (ignore_bogus_interrupt(h))
3559 raw_tag = get_next_completion(h, q);
3561 raw_tag = next_command(h, q);
3565 static irqreturn_t do_hpsa_intr_intx(
int irq,
void *queue)
3569 u8 q = *(
u8 *) queue;
3571 if (interrupt_not_for_us(h))
3575 raw_tag = get_next_completion(h, q);
3577 if (
likely(hpsa_tag_contains_index(raw_tag)))
3578 process_indexed_cmd(h, raw_tag);
3580 process_nonindexed_cmd(h, raw_tag);
3581 raw_tag = next_command(h, q);
3587 static irqreturn_t do_hpsa_intr_msi(
int irq,
void *queue)
3589 struct ctlr_info *h = queue_to_hba(queue);
3591 u8 q = *(
u8 *) queue;
3594 raw_tag = get_next_completion(h, q);
3596 if (
likely(hpsa_tag_contains_index(raw_tag)))
3597 process_indexed_cmd(h, raw_tag);
3599 process_nonindexed_cmd(h, raw_tag);
3600 raw_tag = next_command(h, q);
3618 static const size_t cmd_sz =
sizeof(*cmd) +
3619 sizeof(cmd->ErrorDescriptor);
3633 err = pci_set_consistent_dma_mask(pdev,
DMA_BIT_MASK(32));
3651 cmd->CommandHeader.ReplyQueue = 0;
3652 cmd->CommandHeader.SGList = 0;
3653 cmd->CommandHeader.SGTotal = 0;
3654 cmd->CommandHeader.Tag.lower = paddr32;
3655 cmd->CommandHeader.Tag.upper = 0;
3656 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
3658 cmd->Request.CDBLen = 16;
3661 cmd->Request.Type.Direction =
XFER_NONE;
3662 cmd->Request.Timeout = 0;
3663 cmd->Request.CDB[0] =
opcode;
3664 cmd->Request.CDB[1] =
type;
3665 memset(&cmd->Request.CDB[2], 0, 14);
3666 cmd->ErrorDescriptor.Addr.lower = paddr32 +
sizeof(*cmd);
3667 cmd->ErrorDescriptor.Addr.upper = 0;
3668 cmd->ErrorDescriptor.Len =
sizeof(
struct ErrorInfo);
3674 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
3684 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
3685 dev_err(&pdev->
dev,
"controller message %02x:%02x timed out\n",
3693 dev_err(&pdev->
dev,
"controller message %02x:%02x failed\n",
3698 dev_info(&pdev->
dev,
"controller message %02x:%02x succeeded\n",
3703 #define hpsa_noop(p) hpsa_message(p, 3, 0)
3705 static int hpsa_controller_hard_reset(
struct pci_dev *pdev,
3716 dev_info(&pdev->
dev,
"using doorbell to reset controller\n");
3731 "hpsa_reset_controller: "
3732 "PCI PM not supported\n");
3735 dev_info(&pdev->
dev,
"using PCI PM to reset controller\n");
3737 pci_read_config_word(pdev, pos +
PCI_PM_CTRL, &pmcsr);
3740 pci_write_config_word(pdev, pos +
PCI_PM_CTRL, pmcsr);
3747 pci_write_config_word(pdev, pos +
PCI_PM_CTRL, pmcsr);
3759 static __devinit void init_driver_version(
char *driver_version,
int len)
3761 memset(driver_version, 0, len);
3765 static __devinit int write_driver_ver_to_cfgtable(
3769 int i, size =
sizeof(cfgtable->driver_version);
3772 if (!driver_version)
3775 init_driver_version(driver_version, size);
3776 for (i = 0; i <
size; i++)
3777 writeb(driver_version[i], &cfgtable->driver_version[i]);
3778 kfree(driver_version);
3782 static __devinit void read_driver_ver_from_cfgtable(
3787 for (i = 0; i <
sizeof(cfgtable->driver_version); i++)
3788 driver_ver[i] =
readb(&cfgtable->driver_version[i]);
3791 static __devinit int controller_reset_failed(
3796 int rc, size =
sizeof(cfgtable->driver_version);
3799 if (!old_driver_ver)
3801 driver_ver = old_driver_ver +
size;
3806 init_driver_version(old_driver_ver, size);
3807 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
3808 rc = !
memcmp(driver_ver, old_driver_ver, size);
3809 kfree(old_driver_ver);
3815 static __devinit int hpsa_kdump_hard_reset_controller(
struct pci_dev *pdev)
3819 u64 cfg_base_addr_index;
3821 unsigned long paddr;
3822 u32 misc_fw_support;
3827 u16 command_register;
3842 rc = hpsa_lookup_board_id(pdev, &board_id);
3843 if (rc < 0 || !ctlr_is_resettable(board_id)) {
3849 if (!ctlr_is_hard_resettable(board_id))
3853 pci_read_config_word(pdev, 4, &command_register);
3861 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
3864 vaddr = remap_pci_mem(paddr, 0x250);
3869 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
3870 &cfg_base_addr_index, &cfg_offset);
3874 cfg_base_addr_index) + cfg_offset,
sizeof(*cfgtable));
3879 rc = write_driver_ver_to_cfgtable(cfgtable);
3894 "Firmware update is required.\n");
3896 goto unmap_cfgtable;
3900 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
3902 goto unmap_cfgtable;
3907 dev_warn(&pdev->
dev,
"failed to enable device.\n");
3908 goto unmap_cfgtable;
3910 pci_write_config_word(pdev, 4, command_register);
3917 dev_info(&pdev->
dev,
"Waiting for board to reset.\n");
3921 "failed waiting for board to reset."
3922 " Will try soft reset.\n");
3924 goto unmap_cfgtable;
3926 rc = hpsa_wait_for_board_state(pdev, vaddr,
BOARD_READY);
3929 "failed waiting for board to become ready "
3930 "after hard reset\n");
3931 goto unmap_cfgtable;
3934 rc = controller_reset_failed(vaddr);
3936 goto unmap_cfgtable;
3938 dev_warn(&pdev->
dev,
"Unable to successfully reset "
3939 "controller. Will try soft reset.\n");
3942 dev_info(&pdev->
dev,
"board ready after hard reset.\n");
3958 static void print_cfg_table(
struct device *dev,
struct CfgTable *
tb)
3964 dev_info(dev,
"Controller Configuration information\n");
3965 dev_info(dev,
"------------------------------------\n");
3966 for (i = 0; i < 4; i++)
3968 temp_name[4] =
'\0';
3969 dev_info(dev,
" Signature = %s\n", temp_name);
3971 dev_info(dev,
" Transport methods supported = 0x%x\n",
3973 dev_info(dev,
" Transport methods active = 0x%x\n",
3975 dev_info(dev,
" Requested transport Method = 0x%x\n",
3977 dev_info(dev,
" Coalesce Interrupt Delay = 0x%x\n",
3979 dev_info(dev,
" Coalesce Interrupt Count = 0x%x\n",
3981 dev_info(dev,
" Max outstanding commands = 0x%d\n",
3984 for (i = 0; i < 16; i++)
3986 temp_name[16] =
'\0';
3987 dev_info(dev,
" Server Name = %s\n", temp_name);
3988 dev_info(dev,
" Heartbeat Counter = 0x%x\n\n\n",
3993 static int find_PCI_BAR_index(
struct pci_dev *pdev,
unsigned long pci_bar_addr)
4017 "base address is invalid\n");
4034 #ifdef CONFIG_PCI_MSI
4039 hpsa_msix_entries[
i].vector = 0;
4040 hpsa_msix_entries[
i].entry =
i;
4046 goto default_int_mode;
4053 h->
intr[i] = hpsa_msix_entries[i].vector;
4059 "available\n", err);
4060 goto default_int_mode;
4064 goto default_int_mode;
4069 if (!pci_enable_msi(h->
pdev))
4083 u32 subsystem_vendor_id, subsystem_device_id;
4087 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
4088 subsystem_vendor_id;
4091 if (*board_id == products[i].board_id)
4098 "0x%08x, ignoring.\n", *board_id);
4105 unsigned long *memory_bar)
4122 void __iomem *vaddr,
int wait_for_ready)
4131 for (i = 0; i < iterations; i++) {
4133 if (wait_for_ready) {
4142 dev_warn(&pdev->
dev,
"board not ready, timed out.\n");
4147 void __iomem *vaddr,
u32 *cfg_base_addr,
u64 *cfg_base_addr_index,
4152 *cfg_base_addr &= (
u32) 0x0000ffff;
4153 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
4154 if (*cfg_base_addr_index == -1) {
4155 dev_warn(&pdev->
dev,
"cannot find cfg_base_addr_index\n");
4165 u64 cfg_base_addr_index;
4169 rc = hpsa_find_cfg_addrs(h->
pdev, h->
vaddr, &cfg_base_addr,
4170 &cfg_base_addr_index, &cfg_offset);
4174 cfg_base_addr_index) + cfg_offset,
sizeof(*h->
cfgtable));
4177 rc = write_driver_ver_to_cfgtable(h->
cfgtable);
4183 cfg_base_addr_index)+cfg_offset+trans_offset,
4200 "max supported commands of %d, an obvious lie. "
4201 "Using 16. Ensure that firmware is up to date.\n",
4213 hpsa_get_max_perf_mode_cmds(h);
4234 static inline bool hpsa_CISS_signature_present(
struct ctlr_info *h)
4237 dev_warn(&h->
pdev->dev,
"not a valid CISS config table\n");
4244 static inline void hpsa_enable_scsi_prefetch(
struct ctlr_info *h)
4258 static inline void hpsa_p600_dma_prefetch_quirk(
struct ctlr_info *h)
4265 dma_prefetch |= 0x8000;
4273 unsigned long flags;
4282 spin_unlock_irqrestore(&h->
lock, flags);
4302 hpsa_wait_for_mode_change_ack(h);
4306 "unable to get board into simple mode\n");
4315 int prod_index,
err;
4317 prod_index = hpsa_lookup_board_id(h->
pdev, &h->
board_id);
4328 dev_warn(&h->
pdev->dev,
"unable to enable PCI device\n");
4338 "cannot obtain PCI resources, aborting\n");
4341 hpsa_interrupt_mode(h);
4342 err = hpsa_pci_find_memory_BAR(h->
pdev, &h->
paddr);
4344 goto err_out_free_res;
4348 goto err_out_free_res;
4352 goto err_out_free_res;
4353 err = hpsa_find_cfgtables(h);
4355 goto err_out_free_res;
4356 hpsa_find_board_params(h);
4358 if (!hpsa_CISS_signature_present(h)) {
4360 goto err_out_free_res;
4362 hpsa_enable_scsi_prefetch(h);
4363 hpsa_p600_dma_prefetch_quirk(h);
4364 err = hpsa_enter_simple_mode(h);
4366 goto err_out_free_res;
4385 #define HBA_INQUIRY_BYTE_COUNT 64
4405 rc = hpsa_kdump_hard_reset_controller(pdev);
4418 dev_warn(&pdev->
dev,
"Waiting for controller to respond to no-op\n");
4424 (i < 11 ?
"; re-trying" :
""));
4443 dev_err(&h->
pdev->dev,
"out of memory in %s", __func__);
4449 static void hpsa_free_cmd_pool(
struct ctlr_info *h)
4463 static int hpsa_request_irq(
struct ctlr_info *h,
4495 dev_err(&h->
pdev->dev,
"unable to get irq %d for %s\n",
4506 dev_warn(&h->
pdev->dev,
"Resetting array controller failed.\n");
4510 dev_info(&h->
pdev->dev,
"Waiting for board to soft reset.\n");
4512 dev_warn(&h->
pdev->dev,
"Soft reset had no effect.\n");
4516 dev_info(&h->
pdev->dev,
"Board reset, awaiting READY status.\n");
4518 dev_warn(&h->
pdev->dev,
"Board failed to become ready "
4519 "after soft reset.\n");
4541 static void hpsa_free_irqs_and_disable_msix(
struct ctlr_info *h)
4544 #ifdef CONFIG_PCI_MSI
4546 if (h->
pdev->msix_enabled)
4549 if (h->
pdev->msi_enabled)
4555 static void hpsa_undo_allocations_after_kdump_soft_reset(
struct ctlr_info *h)
4557 hpsa_free_irqs_and_disable_msix(h);
4558 hpsa_free_sg_chain_blocks(h);
4559 hpsa_free_cmd_pool(h);
4573 static void remove_ctlr_from_lockup_detector_list(
struct ctlr_info *h)
4576 if (!hpsa_lockup_detector)
4590 while (!list_empty(list)) {
4597 static void controller_lockup_detected(
struct ctlr_info *h)
4599 unsigned long flags;
4602 remove_ctlr_from_lockup_detector_list(h);
4606 spin_unlock_irqrestore(&h->
lock, flags);
4607 dev_warn(&h->
pdev->dev,
"Controller lockup detected: 0x%08x\n",
4611 fail_all_cmds_on_list(h, &h->
cmpQ);
4612 fail_all_cmds_on_list(h, &h->
reqQ);
4613 spin_unlock_irqrestore(&h->
lock, flags);
4616 static void detect_controller_lockup(
struct ctlr_info *h)
4620 unsigned long flags;
4641 spin_unlock_irqrestore(&h->
lock, flags);
4643 controller_lockup_detected(h);
4652 static int detect_controller_lockup_thread(
void *notused)
4655 unsigned long flags;
4666 detect_controller_lockup(h);
4668 spin_unlock_irqrestore(&lockup_detector_lock, flags);
4673 static void add_ctlr_to_lockup_detector_list(
struct ctlr_info *h)
4675 unsigned long flags;
4680 spin_unlock_irqrestore(&lockup_detector_lock, flags);
4683 static void start_controller_lockup_detector(
struct ctlr_info *h)
4686 if (!hpsa_lockup_detector) {
4688 hpsa_lockup_detector =
4692 if (!hpsa_lockup_detector) {
4694 "Could not start lockup detector thread\n");
4697 add_ctlr_to_lockup_detector_list(h);
4700 static void stop_controller_lockup_detector(
struct ctlr_info *h)
4702 unsigned long flags;
4705 remove_ctlr_from_lockup_detector_list(h);
4707 if (list_empty(&hpsa_ctlr_list)) {
4708 spin_unlock_irqrestore(&lockup_detector_lock, flags);
4711 hpsa_lockup_detector =
NULL;
4713 spin_unlock_irqrestore(&lockup_detector_lock, flags);
4721 int try_soft_reset = 0;
4722 unsigned long flags;
4724 if (number_of_controllers == 0)
4727 rc = hpsa_init_reset_devices(pdev);
4740 reinit_after_soft_reset:
4746 #define COMMANDLIST_ALIGNMENT 32
4754 INIT_LIST_HEAD(&h->
cmpQ);
4755 INIT_LIST_HEAD(&h->
reqQ);
4758 rc = hpsa_pci_init(h);
4763 h->
ctlr = number_of_controllers;
4764 number_of_controllers++;
4775 dev_err(&pdev->
dev,
"no suitable DMA available\n");
4783 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
4785 dev_info(&pdev->
dev,
"%s: <0x%x> at IRQ %d%s using DAC\n",
4788 if (hpsa_allocate_cmd_pool(h))
4790 if (hpsa_allocate_sg_chain_blocks(h))
4795 pci_set_drvdata(pdev, h);
4799 hpsa_put_ctlr_into_performant_mode(h);
4805 if (try_soft_reset) {
4816 spin_unlock_irqrestore(&h->
lock, flags);
4818 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
4819 hpsa_intx_discard_completions);
4826 rc = hpsa_kdump_soft_reset(h);
4833 "Waiting for stale completions to drain.\n");
4838 rc = controller_reset_failed(h->
cfgtable);
4841 "Soft reset appears to have failed.\n");
4847 hpsa_undo_allocations_after_kdump_soft_reset(h);
4853 goto reinit_after_soft_reset;
4859 hpsa_hba_inquiry(h);
4860 hpsa_register_scsi(h);
4861 start_controller_lockup_detector(h);
4865 hpsa_free_sg_chain_blocks(h);
4866 hpsa_free_cmd_pool(h);
4874 static void hpsa_flush_cache(
struct ctlr_info *h)
4883 c = cmd_special_alloc(h);
4885 dev_warn(&h->
pdev->dev,
"cmd_special_alloc returned NULL!\n");
4891 if (c->
err_info->CommandStatus != 0)
4893 "error flushing cache on controller\n");
4894 cmd_special_free(h, c);
4899 static void hpsa_shutdown(
struct pci_dev *pdev)
4903 h = pci_get_drvdata(pdev);
4908 hpsa_flush_cache(h);
4910 hpsa_free_irqs_and_disable_msix(h);
4925 if (pci_get_drvdata(pdev) ==
NULL) {
4926 dev_err(&pdev->
dev,
"unable to remove device\n");
4929 h = pci_get_drvdata(pdev);
4930 stop_controller_lockup_detector(h);
4931 hpsa_unregister_scsi(h);
4932 hpsa_shutdown(pdev);
4936 hpsa_free_device_info(h);
4937 hpsa_free_sg_chain_blocks(h);
4951 pci_set_drvdata(pdev,
NULL);
4968 .probe = hpsa_init_one,
4970 .id_table = hpsa_pci_device_id,
4971 .shutdown = hpsa_shutdown,
4972 .suspend = hpsa_suspend,
4973 .resume = hpsa_resume,
4988 static void calc_bucket_map(
int bucket[],
int num_buckets,
4989 int nsgs,
int *bucket_map)
4994 #define MINIMUM_TRANSFER_BLOCKS 4
4995 #define NUM_BUCKETS 8
4997 for (i = 0; i <= nsgs; i++) {
5002 for (j = 0; j < 8; j++) {
5003 if (bucket[j] >= size) {
5017 unsigned long register_value;
5050 for (i = 0; i < 8; i++)
5070 hpsa_wait_for_mode_change_ack(h);
5074 " performant mode\n");
5078 h->
access = SA5_performant_access;
5087 if (hpsa_simple_mode)
5095 hpsa_get_max_perf_mode_cmds(h);
5116 hpsa_enter_performant_mode(h,
5132 static int __init hpsa_init(
void)
5134 return pci_register_driver(&hpsa_pci_driver);
5137 static void __exit hpsa_cleanup(
void)