#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <asm/uaccess.h>
#include <scsi/scsi.h>
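/*
 * ARCMSR_SLEEPTIME is the per-retry sleep (in seconds) and ARCMSR_RETRYCOUNT
 * the retry budget used by the bus-reset handler while it waits for the
 * controller firmware to come back after a hardware reset.
 */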
#define ARCMSR_SLEEPTIME	10
#define ARCMSR_RETRYCOUNT	12
static int arcmsr_abort(struct scsi_cmnd *);
static int arcmsr_bus_reset(struct scsi_cmnd *);
static int arcmsr_bios_param(struct scsi_device *sdev,
static int arcmsr_probe(struct pci_dev *pdev,
static void arcmsr_remove(struct pci_dev *pdev);
static void arcmsr_shutdown(struct pci_dev *pdev);
static void arcmsr_request_device_map(unsigned long pacb);
static const char *arcmsr_info(struct Scsi_Host *);
static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
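/*
 * scsi_host_template hooks: command queueing, error handling (abort and bus
 * reset), BIOS geometry reporting and queue-depth changes are all routed to
 * the arcmsr_* handlers declared above.
 */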
	.name			= "ARCMSR ARECA SATA/SAS RAID Controller"
	.queuecommand		= arcmsr_queue_command,
	.eh_abort_handler	= arcmsr_abort,
	.eh_bus_reset_handler	= arcmsr_bus_reset,
	.bios_param		= arcmsr_bios_param,
	.change_queue_depth	= arcmsr_adjust_disk_queue_depth,
static struct pci_driver arcmsr_pci_driver = {
	.id_table	= arcmsr_device_id_table,
	.probe		= arcmsr_probe,
	.remove		= arcmsr_remove,
	.shutdown	= arcmsr_shutdown,
	void __iomem *mem_base0, *mem_base1;
	handle_state = arcmsr_interrupt(acb);
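/*
 * Disk geometry reporting: when scsi_partsize() cannot derive a geometry
 * from an on-disk partition table, a default head/sector layout is assumed
 * and the cylinder count recomputed; a larger layout is substituted once
 * the cylinder count would exceed 1024.
 */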
static int arcmsr_bios_param(struct scsi_device *sdev,
	ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]);
	cylinders = total_capacity / (heads * sectors);
	if (cylinders > 1024) {
		cylinders = total_capacity / (heads * sectors);
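/*
 * The arcmsr_hb{a,b,c}_wait_msgint_ready() helpers poll their adapter's
 * outbound interrupt/doorbell status in loops bounded at 2000 iterations
 * before reporting a message timeout.
 */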
	for (i = 0; i < 2000; i++) {
	for (i = 0; i < 2000; i++) {
	for (i = 0; i < 2000; i++) {
		if (arcmsr_hba_wait_msgint_ready(acb))
			timeout, retry count down = %d \n", acb->host->host_no, retry_count);
	} while (retry_count != 0);
	int retry_count = 30;
		if (arcmsr_hbb_wait_msgint_ready(acb))
			timeout, retry count down = %d \n", acb->host->host_no, retry_count);
	} while (retry_count != 0);
	int retry_count = 30;
		if (arcmsr_hbc_wait_msgint_ready(pACB)) {
			timeout, retry count down = %d \n", pACB->host->host_no, retry_count);
	} while (retry_count != 0);
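/*
 * arcmsr_flush_adapter_cache() dispatches on the adapter type (type A, B or
 * C message unit) and sends the matching flush-cache message to the firmware.
 */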
		arcmsr_flush_hba_cache(acb);
		arcmsr_flush_hbb_cache(acb);
		arcmsr_flush_hbc_cache(acb);
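/*
 * CCB pool setup: the maximum transfer length is derived from the firmware
 * configuration version, the scatter-gather table size follows from it (one
 * entry per 4 KB page, max_sectors = max_xfer_len / 512), and the CCBs are
 * carved out of a single coherent DMA region.  vir2phy_offset records the
 * virtual-to-bus offset used later to translate completed CCB addresses.
 */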
	unsigned long roundup_ccbsize;
	unsigned long max_xfer_len;
	unsigned long max_sg_entrys;
	if ((firm_config_version & 0xFF) >= 3) {
		max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH << ((firm_config_version >> 8) & 0xFF)) * 1024;
		max_sg_entrys = (max_xfer_len / 4096);
	acb->host->max_sectors = max_xfer_len / 512;
	acb->host->sg_tablesize = max_sg_entrys;
	ccb_tmp = dma_coherent;
	acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
		INIT_LIST_HEAD(&ccb_tmp->list);
		dma_coherent_handle = dma_coherent_handle + roundup_ccbsize;
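/*
 * Device hot-plug scanning, one copy per adapter type: the firmware's device
 * map is read byte by byte and XORed with the copy cached in the acb.  A set
 * bit in 'diff' means that target's presence changed; targets that appeared
 * are attached to the host, while targets that vanished are looked up
 * (psdev) and removed.
 */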
	for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
		diff = (*acb_dev_map) ^ readb(devicemap);
			*acb_dev_map = readb(devicemap);
				if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
				} else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) {
					if (psdev != NULL) {
	for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
		diff = (*acb_dev_map) ^ readb(devicemap);
			*acb_dev_map = readb(devicemap);
				if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
				} else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) {
					if (psdev != NULL) {
	for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
		diff = (*acb_dev_map) ^ readb(devicemap);
			*acb_dev_map = readb(devicemap);
				if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
				} else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) {
		goto pci_disable_dev;
			"scsi%d: No suitable DMA mask available\n",
		goto scsi_host_release;
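/*
 * PCI probe sequence: enable the device, pick a DMA mask, record the PCI
 * bus/function, stash the Scsi_Host in the PCI drvdata, determine the
 * adapter type, map the BARs, read the firmware spec, allocate the CCB
 * pool, initialise the IOP and finally call scsi_add_host().  The labels
 * below unwind those steps in reverse order when any of them fails.
 */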
	bus = pdev->bus->number;
	dev_fun = pdev->devfn;
	pci_set_drvdata(pdev, host);
		goto scsi_host_release;
	arcmsr_define_adapter_type(acb);
	error = arcmsr_remap_pciregion(acb);
		goto pci_release_regs;
	error = arcmsr_get_firmware_spec(acb);
		goto unmap_pci_region;
	error = arcmsr_alloc_ccb_pool(acb);
	arcmsr_iop_init(acb);
	error = scsi_add_host(host, &pdev->dev);
		goto RAID_controller_stop;
		goto scsi_host_remove;
RAID_controller_stop:
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	arcmsr_free_ccb_pool(acb);
	arcmsr_free_hbb_mu(acb);
	arcmsr_unmap_pciregion(acb);
	if (!arcmsr_hba_wait_msgint_ready(acb)) {
			"arcmsr%d: wait 'abort all outstanding command' timeout \n",
			acb->host->host_no);
	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
			"arcmsr%d: wait 'abort all outstanding command' timeout \n",
			acb->host->host_no);
	if (!arcmsr_hbc_wait_msgint_ready(pACB)) {
			"arcmsr%d: wait 'abort all outstanding command' timeout \n",
			pACB->host->host_no);
		rtnval = arcmsr_abort_hba_allcmd(acb);
		rtnval = arcmsr_abort_hbb_allcmd(acb);
		rtnval = arcmsr_abort_hbc_allcmd(acb);
	if (!arcmsr_hbb_wait_msgint_ready(pacb)) {
	arcmsr_pci_unmap_dma(ccb);
	int sense_data_length =
	memset(sensebuffer, 0, SCSI_SENSE_BUFFERSIZE);
	memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
	sensebuffer->Valid = 1;
static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
	id = ccb->pcmd->device->id;
	lun = ccb->pcmd->device->lun;
		arcmsr_ccb_complete(ccb);
		arcmsr_ccb_complete(ccb);
			arcmsr_ccb_complete(ccb);
			arcmsr_report_sense_info(ccb);
			arcmsr_ccb_complete(ccb);
				"arcmsr%d: scsi id = %d lun = %d isr get command error done, \
				but got unknown DeviceStatus = 0x%x \n"
			arcmsr_ccb_complete(ccb);
	id = abortcmd->device->id;
	lun = abortcmd->device->lun;
		arcmsr_ccb_complete(pCCB);
			acb->host->host_no, pCCB);
			"ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x"
			" ccboutstandingcount = %d \n"
		arcmsr_report_ccb_state(acb, pCCB, error);
		arcmsr_drain_donequeue(acb, pCCB, error);
		arcmsr_drain_donequeue(acb, pCCB, error);
	ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
	arcmsr_drain_donequeue(acb, pCCB, error);
static void arcmsr_remove(struct pci_dev *pdev)
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	arcmsr_disable_outbound_ints(acb);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
		arcmsr_interrupt(acb);
		arcmsr_abort_allcmd(acb);
		arcmsr_done4abort_postqueue(acb);
				arcmsr_ccb_complete(ccb);
	arcmsr_free_ccb_pool(acb);
	arcmsr_free_hbb_mu(acb);
	arcmsr_unmap_pciregion(acb);
	pci_set_drvdata(pdev, NULL);
static void arcmsr_shutdown(struct pci_dev *pdev)
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	arcmsr_disable_outbound_ints(acb);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
static int arcmsr_module_init(void)
	error = pci_register_driver(&arcmsr_pci_driver);
static void arcmsr_module_exit(void)
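/*
 * Scatter-gather encoding in the ARCMSR CDB: segments whose upper 32 DMA
 * address bits are zero are stored as compact SG32ENTRY records, all others
 * as SG64ENTRY, with arccdbsize accumulating the resulting CDB size.
 * msgPages is that size expressed in 256-byte pages, rounded up.
 */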
	__le32 address_lo, address_hi;
	int arccdbsize = 0x30;
	if (unlikely(nseg > acb->host->sg_tablesize || nseg < 0))
		if (address_hi == 0) {
			pdma_sg->address = address_lo;
			arccdbsize += sizeof(struct SG32ENTRY);
			pdma_sg->address = address_lo;
			arccdbsize += sizeof(struct SG64ENTRY);
	arcmsr_cdb->msgPages = arccdbsize / 0x100 + (arccdbsize % 0x100 ? 1 : 0);
	if (arccdbsize > 256)
	ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
	uint32_t ccb_post_stamp, arc_cdb_size;
	ccb_post_stamp = (cdb_phyaddr_pattern | ((arc_cdb_size - 1) >> 6) | 1);
	if (!arcmsr_hba_wait_msgint_ready(acb)) {
			"arcmsr%d: wait 'stop adapter background rebuild' timeout \n",
			acb->host->host_no);
	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
			"arcmsr%d: wait 'stop adapter background rebuild' timeout \n",
			acb->host->host_no);
	if (!arcmsr_hbc_wait_msgint_ready(pACB)) {
			"arcmsr%d: wait 'stop adapter background rebuild' timeout \n",
			pACB->host->host_no);
		arcmsr_stop_hba_bgrb(acb);
		arcmsr_stop_hbb_bgrb(acb);
		arcmsr_stop_hbc_bgrb(acb);
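/*
 * Doorbell message buffers: data posted by the IOP is copied out of its
 * rqbuffer into the driver's circular queue one byte at a time while space
 * remains, and in the opposite direction up to 124 bytes are staged into
 * the IOP's wqbuffer before arcmsr_iop_message_wrote() rings the doorbell
 * to signal that new data is available.
 */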
	int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;
	if (my_empty_len >= iop_len)
		while (iop_len > 0) {
			memcpy(pQbuffer, iop_data, 1);
	pwbuffer = arcmsr_get_iop_wqbuffer(acb);
		(allxfer_len < 124)) {
			memcpy(iop_data, pQbuffer, 1);
		arcmsr_iop_message_wrote(acb);
		arcmsr_iop2drv_data_wrote_handle(acb);
		arcmsr_iop2drv_data_read_handle(acb);
		arcmsr_iop2drv_data_wrote_handle(pACB);
		arcmsr_iop2drv_data_read_handle(pACB);
		arcmsr_hbc_message_isr(pACB);
		arcmsr_drain_donequeue(acb, pCCB, error);
		arcmsr_drain_donequeue(acb, pCCB, error);
	uint32_t flag_ccb, ccb_cdb_phy, throttling = 0;
		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
		arcmsr_drain_donequeue(acb, ccb, error);
		arcmsr_hba_doorbell_isr(acb);
		arcmsr_hba_postqueue_isr(acb);
		arcmsr_hba_message_isr(acb);
	if (!outbound_doorbell)
		arcmsr_iop2drv_data_wrote_handle(acb);
		arcmsr_iop2drv_data_read_handle(acb);
		arcmsr_hbb_postqueue_isr(acb);
		arcmsr_hbb_message_isr(acb);
	if (!host_interrupt_status) {
		arcmsr_hbc_doorbell_isr(pACB);
		arcmsr_hbc_postqueue_isr(pACB);
	if (arcmsr_handle_hba_isr(acb)) {
	if (arcmsr_handle_hbb_isr(acb)) {
	if (arcmsr_handle_hbc_isr(acb)) {
	intmask_org = arcmsr_disable_outbound_ints(acb);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	arcmsr_enable_outbound_ints(acb, intmask_org);
	int32_t wqbuf_firstindex, wqbuf_lastindex;
	struct QBUFFER __iomem *pwbuffer;
	pwbuffer = arcmsr_get_iop_wqbuffer(acb);
	while ((wqbuf_firstindex != wqbuf_lastindex) && (allxfer_len < 124)) {
		pQbuffer = &acb->wqbuffer[wqbuf_firstindex];
		memcpy(iop_data, pQbuffer, 1);
		arcmsr_iop_message_wrote(acb);
	int retvalue = 0, transfer_len = 0;
	sg = scsi_sglist(cmd);
	if (scsi_sg_count(cmd) > 1) {
	transfer_len += sg->length;
	switch (controlcode) {
		unsigned char *ver_addr;
		uint8_t *pQbuffer, *ptmpQbuffer;
		ptmpQbuffer = ver_addr;
			&& (allxfer_len < 1031)) {
			memcpy(ptmpQbuffer, pQbuffer, 1);
			iop_data = prbuffer->data;
			while (iop_len > 0) {
		pcmdmessagefld->cmdmessage.Length = allxfer_len;
		unsigned char *ver_addr;
		int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
		uint8_t *pQbuffer, *ptmpuserbuffer;
		ptmpuserbuffer = ver_addr;
		user_len = pcmdmessagefld->cmdmessage.Length;
		if (wqbuf_lastindex != wqbuf_firstindex) {
			sensebuffer->Valid = 1;
		my_empty_len = (wqbuf_firstindex - wqbuf_lastindex - 1)
		if (my_empty_len >= user_len) {
			while (user_len > 0) {
				memcpy(pQbuffer, ptmpuserbuffer, 1);
			sensebuffer->Valid = 1;
		int8_t *hello_string = "Hello! I am ARCMSR";
		arcmsr_iop_parking(acb);
		arcmsr_flush_adapter_cache(acb);
	sg = scsi_sglist(cmd);
	unsigned long flags;
	if (!list_empty(head)) {
		list_del_init(&ccb->list);
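/*
 * Virtual pass-through device: an INQUIRY issued to the controller's own
 * target is answered locally with a synthesized "Areca RAID controller"
 * inquiry page, while the driver's private message commands are forwarded
 * to arcmsr_iop_message_xfer().
 */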
	switch (cmd->cmnd[0]) {
		unsigned char inqdata[36];
		strncpy(&inqdata[8], "Areca   ", 8);
		strncpy(&inqdata[16], "RAID controller ", 16);
		strncpy(&inqdata[32], "R001", 4);
		sg = scsi_sglist(cmd);
		memcpy(buffer, inqdata, sizeof(inqdata));
		sg = scsi_sglist(cmd);
		if (arcmsr_iop_message_xfer(acb, cmd))
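/*
 * queuecommand path: commands addressed to the virtual device are handled
 * by the emulation above; real commands are deferred once the outstanding
 * count reaches ARCMSR_MAX_OUTSTANDING_CMD, otherwise a free CCB is taken
 * from the pool, filled in by arcmsr_build_ccb() and handed to the firmware
 * with arcmsr_post_ccb().
 */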
static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
	int target = cmd->device->id;
	int lun = cmd->device->lun;
		arcmsr_handle_virtual_command(acb, cmd);
		ARCMSR_MAX_OUTSTANDING_CMD)
	ccb = arcmsr_get_freeccb(acb);
	if (arcmsr_build_ccb(acb, ccb, cmd) == FAILED) {
	arcmsr_post_ccb(acb, ccb);
	char *acb_firm_model = acb->firm_model;
	char *acb_firm_version = acb->firm_version;
	char *acb_device_map = acb->device_map;
	if (!arcmsr_hba_wait_msgint_ready(acb)) {
		miscellaneous data' timeout \n", acb->host->host_no);
		*acb_firm_model = readb(iop_firm_model);
		*acb_firm_version = readb(iop_firm_version);
		*acb_device_map = readb(iop_device_map);
	char __iomem *iop_firm_version;
	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
		miscellaneous data' timeout \n", acb->host->host_no);
		*acb_firm_model = readb(iop_firm_model);
		*acb_firm_version = readb(iop_firm_version);
		*acb_device_map = readb(iop_device_map);
	uint32_t intmask_org, Index, firmware_state = 0;
	for (Index = 0; Index < 2000; Index++) {
	if (Index >= 2000) {
		miscellaneous data' timeout \n", pACB->host->host_no);
		*acb_firm_model = readb(iop_firm_model);
		*acb_firm_version = readb(iop_firm_version);
		pACB->host->host_no,
		return arcmsr_get_hba_config(acb);
		return arcmsr_get_hbb_config(acb);
		return arcmsr_get_hbc_config(acb);
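/*
 * Synchronous completion polling, one variant per adapter type: the
 * outbound post queue is read directly, each returned flag_ccb is mapped
 * back to its CCB, CCBs found to have been aborted are completed with the
 * "poll command abort successfully" notice, and normal completions go
 * through arcmsr_report_ccb_state().  Polling gives up after roughly 100
 * empty passes.
 */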
	uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
polling_hba_ccb_retry:
		if (poll_count > 100) {
		goto polling_hba_ccb_retry;
		poll_ccb_done = (ccb == poll_ccb) ? 1 : 0;
				" poll command abort successfully \n"
				, acb->host->host_no
				, ccb->pcmd->device->id
				, ccb->pcmd->device->lun
			arcmsr_ccb_complete(ccb);
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
		arcmsr_report_ccb_state(acb, ccb, error);
	uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
polling_hbb_ccb_retry:
		if (poll_count > 100) {
		goto polling_hbb_ccb_retry;
		poll_ccb_done = (ccb == poll_ccb) ? 1 : 0;
				" poll command abort successfully \n"
				, ccb->pcmd->device->id
				, ccb->pcmd->device->lun
			arcmsr_ccb_complete(ccb);
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
		arcmsr_report_ccb_state(acb, ccb, error);
	uint32_t poll_ccb_done = 0, poll_count = 0;
polling_hbc_ccb_retry:
		if (poll_ccb_done) {
		if (poll_count > 100) {
		goto polling_hbc_ccb_retry;
		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
		poll_ccb_done = (pCCB == poll_ccb) ? 1 : 0;
				" poll command abort successfully \n"
				, acb->host->host_no
				, pCCB->pcmd->device->id
				, pCCB->pcmd->device->lun
			arcmsr_ccb_complete(pCCB);
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
		arcmsr_report_ccb_state(acb, pCCB, error);
		rtn = arcmsr_polling_hba_ccbdone(acb, poll_ccb);
		rtn = arcmsr_polling_hbb_ccbdone(acb, poll_ccb);
		rtn = arcmsr_polling_hbc_ccbdone(acb, poll_ccb);
	uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
	cdb_phyaddr = (uint32_t)(dma_coherent_handle);
	cdb_phyaddr_hi32 = (uint32_t)((dma_coherent_handle >> 16) >> 16);
	if (cdb_phyaddr_hi32 != 0) {
	intmask_org = arcmsr_disable_outbound_ints(acb);
	if (!arcmsr_hba_wait_msgint_ready(acb)) {
		part physical address timeout\n",
		acb->host->host_no);
	arcmsr_enable_outbound_ints(acb, intmask_org);
	unsigned long post_queue_phyaddr;
	intmask_org = arcmsr_disable_outbound_ints(acb);
	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
		acb->host->host_no);
	writel(cdb_phyaddr_hi32, rwbuffer++);
	writel(post_queue_phyaddr, rwbuffer++);
	writel(post_queue_phyaddr + 1056, rwbuffer++);
	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
		timeout \n", acb->host->host_no);
	arcmsr_hbb_enable_driver_mode(acb);
	arcmsr_enable_outbound_ints(acb, intmask_org);
	if (cdb_phyaddr_hi32 != 0) {
	if (!arcmsr_hbc_wait_msgint_ready(acb)) {
		timeout \n", acb->host->host_no);
	} while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
static void arcmsr_request_device_map(unsigned long pacb)
		arcmsr_request_hba_device_map(acb);
		arcmsr_request_hbb_device_map(acb);
		arcmsr_request_hbc_device_map(acb);
	if (!arcmsr_hba_wait_msgint_ready(acb)) {
		rebuild' timeout \n", acb->host->host_no);
	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
		rebuild' timeout \n", acb->host->host_no);
	if (!arcmsr_hbc_wait_msgint_ready(pACB)) {
		rebuild' timeout \n", pACB->host->host_no);
		arcmsr_start_hba_bgrb(acb);
		arcmsr_start_hbb_bgrb(acb);
		arcmsr_start_hbc_bgrb(acb);
	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
	for (i = 0; i < 64; i++) {
		pci_read_config_byte(acb->pdev, i, &value[i]);
	if (acb->dev_id == 0x1680) {
	} else if (acb->dev_id == 0x1880) {
		pci_write_config_byte(acb->pdev, 0x84, 0x20);
	for (i = 0; i < 64; i++) {
		pci_write_config_byte(acb->pdev, i, value[i]);
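/*
 * Adapter bring-up order: mask outbound interrupts, wait for the firmware
 * ready flag, push the CCB queue addresses to the IOP with
 * arcmsr_iop_confirm(), start the background rebuild, drain the doorbell
 * queue buffer, enable EOI mode and finally restore the saved interrupt
 * mask.
 */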
	intmask_org = arcmsr_disable_outbound_ints(acb);
	arcmsr_wait_firmware_ready(acb);
	arcmsr_iop_confirm(acb);
	arcmsr_start_adapter_bgrb(acb);
	arcmsr_clear_doorbell_queue_buffer(acb);
	arcmsr_enable_eoi_mode(acb);
	arcmsr_enable_outbound_ints(acb, intmask_org);
	unsigned long flags;
	intmask_org = arcmsr_disable_outbound_ints(acb);
	rtnval = arcmsr_abort_allcmd(acb);
	arcmsr_done4abort_postqueue(acb);
	arcmsr_enable_outbound_ints(acb, intmask_org);
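/*
 * Bus-reset error handling: a soft IOP reset is attempted first; if the
 * firmware does not recover, a hardware reset is issued and the handler
 * sleeps ARCMSR_SLEEPTIME seconds per retry, giving up after
 * ARCMSR_RETRYCOUNT attempts, before re-reading the firmware spec,
 * restarting the background rebuild and re-enabling interrupts.
 */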
static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
	int retry_count = 0;
	printk(KERN_ERR "arcmsr: there is a bus reset eh proceeding.......\n");
	if (!arcmsr_iop_reset(acb)) {
		arcmsr_hardware_reset(acb);
			printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n",
				acb->host->host_no, retry_count);
			printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n",
				acb->host->host_no);
		intmask_org = arcmsr_disable_outbound_ints(acb);
		arcmsr_get_firmware_spec(acb);
		arcmsr_start_adapter_bgrb(acb);
		arcmsr_enable_outbound_ints(acb, intmask_org);
	printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
	if (!arcmsr_iop_reset(acb)) {
	printk(KERN_ERR "arcmsr: there is a bus reset eh proceeding.......\n");
	if (!arcmsr_iop_reset(acb)) {
		arcmsr_hardware_reset(acb);
			printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n",
				acb->host->host_no, retry_count);
			printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n",
				acb->host->host_no);
		intmask_org = arcmsr_disable_outbound_ints(acb);
		arcmsr_get_firmware_spec(acb);
		arcmsr_start_adapter_bgrb(acb);
		arcmsr_enable_outbound_ints(acb, intmask_org);
	printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
	rtn = arcmsr_polling_ccbdone(acb, ccb);
static int arcmsr_abort(struct scsi_cmnd *cmd)
		"arcmsr%d: abort device command of scsi id = %d lun = %d \n",
	rtn = arcmsr_abort_one_cmd(acb, ccb);
static const char *arcmsr_info(struct Scsi_Host *host)
	static char buf[256];
	switch (acb->pdev->device) {
	sprintf(buf,
		"Areca %s Host Adapter RAID Controller%s\n %s",
		type, raid6 ? "(RAID6 capable)" : "",