32 #include <linux/module.h>
34 MODULE_AUTHOR(
"Deanna Bonds, with _lots_ of help from Mark Salyzyn");
40 #include <asm/uaccess.h>
42 #include <linux/stat.h>
43 #include <linux/slab.h>
44 #include <linux/pci.h>
49 #include <linux/kernel.h>
50 #include <linux/sched.h>
51 #include <linux/reboot.h>
56 #include <linux/string.h>
60 #include <asm/processor.h>
61 #include <asm/pgtable.h>
64 #include <scsi/scsi.h>
83 #elif defined(__ia64__)
85 #elif defined(__sparc__)
87 #elif defined(__alpha__)
109 static int sys_tbl_ind;
110 static int sys_tbl_len;
113 static int hba_count = 0;
115 static struct class *adpt_sysfs_class;
117 static long adpt_unlocked_ioctl(
struct file *,
unsigned int,
unsigned long);
119 static long compat_adpt_ioctl(
struct file *,
unsigned int,
unsigned long);
123 .unlocked_ioctl = adpt_unlocked_ioctl,
125 .release = adpt_close,
127 .compat_ioctl = compat_adpt_ioctl,
139 adpt_wait_queue_head_t *
wq;
144 static u32 adpt_post_wait_id = 0;
153 static inline int dpt_dma64(
adpt_hba *pHba)
155 return (
sizeof(
dma_addr_t) > 4 && (pHba)->dma64);
196 PINFO(
"Detecting Adaptec I2O RAID controllers...\n");
202 if(adpt_install_hba(sht, pDev) ){
203 PERROR(
"Could not Init an I2O RAID device\n");
204 PERROR(
"Will not try to detect others.\n");
212 for (pHba = hba_chain; pHba; pHba =
next) {
215 if (adpt_i2o_activate_hba(pHba) < 0) {
216 adpt_i2o_delete_hba(pHba);
224 if (hba_chain ==
NULL)
231 if (adpt_i2o_build_sys_table() < 0) {
232 adpt_i2o_sys_shutdown();
236 PDEBUG(
"HBA's in HOLD state\n");
239 for (pHba = hba_chain; pHba; pHba = pHba->
next) {
240 if (adpt_i2o_online_hba(pHba) < 0) {
241 adpt_i2o_delete_hba(pHba);
242 goto rebuild_sys_tab;
247 PDEBUG(
"HBA's in OPERATIONAL state\n");
249 printk(
"dpti: If you have a lot of devices this could take a few minutes.\n");
250 for (pHba = hba_chain; pHba; pHba =
next) {
253 if (adpt_i2o_lct_get(pHba) < 0){
254 adpt_i2o_delete_hba(pHba);
258 if (adpt_i2o_parse_lct(pHba) < 0){
259 adpt_i2o_delete_hba(pHba);
266 if (IS_ERR(adpt_sysfs_class)) {
268 adpt_sysfs_class =
NULL;
271 for (pHba = hba_chain; pHba; pHba =
next) {
273 if (adpt_scsi_host_alloc(pHba, sht) < 0){
274 adpt_i2o_delete_hba(pHba);
279 if (adpt_sysfs_class) {
282 "dpti%d", pHba->
unit);
285 "create device in dpt_i2o class\n",
295 adpt_i2o_sys_shutdown();
305 static int adpt_release(
struct Scsi_Host *host)
309 adpt_i2o_delete_hba(pHba);
315 static void adpt_inquiry(
adpt_hba* pHba)
329 memset(msg, 0,
sizeof(msg));
335 memset((
void*)buf, 0, 36);
338 direction = 0x00000000;
357 msg[6] = scsidir|0x20a00000| 6 ;
361 memset(scb, 0,
sizeof(scb));
371 memcpy(mptr, scb,
sizeof(scb));
377 if (dpt_dma64(pHba)) {
378 *mptr++ = (0x7C<<24)+(2<<16)+0x02;
380 *mptr++ = 0xD0000000|direction|len;
381 *mptr++ = dma_low(addr);
382 *mptr++ = dma_high(addr);
384 *mptr++ = 0xD0000000|direction|len;
389 rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
405 adpt_i2o_status_get(pHba);
415 pHba = (
adpt_hba *) host->hostdata[0];
461 pHba->
host->resetting = 1;
480 cmd->
device->hostdata = pDev;
491 return adpt_scsi_to_i2o(pHba, cmd, pDev);
506 if (capacity < 0x2000 ) {
511 else if (capacity < 0x20000) {
516 else if (capacity < 0x40000) {
521 else if (capacity < 0x80000) {
530 cylinders =
sector_div(capacity, heads * sectors);
533 if(sdev->type == 5) {
543 PDEBUG(
"adpt_bios_param: exit\n");
548 static const char *adpt_info(
struct Scsi_Host *host)
552 pHba = (
adpt_hba *) host->hostdata[0];
588 for (pHba = hba_chain; pHba; pHba = pHba->
next) {
589 if (pHba->
host == host) {
601 len +=
sprintf(buffer+len,
"SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n",
603 len +=
sprintf(buffer+len,
"\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n",
609 if(pos > offset + length) {
621 len +=
sprintf(buffer+len,
"Devices:\n");
623 for(
id = 0;
id <
MAX_ID;
id++) {
632 if(pos > offset + length) {
641 len +=
sprintf(buffer+len,
"\tTID=%d, (Channel=%d, Target=%d, Lun=%d) (%s)\n\n",
643 scsi_device_online(d->
pScsi_dev)?
"online":
"offline");
647 if(pos > offset + length) {
668 *(buffer + len) =
'\0';
670 *start = buffer + (offset - begin);
671 len -= (offset - begin);
702 spin_unlock(pHba->
host->host_lock);
708 spin_unlock_irqrestore(&d->
list_lock, flags);
710 spin_lock(pHba->
host->host_lock);
714 spin_unlock_irqrestore(&d->
list_lock, flags);
716 spin_lock(pHba->
host->host_lock);
724 static u32 adpt_ioctl_to_context(
adpt_hba * pHba,
void *reply)
726 #if BITS_PER_LONG == 32
727 return (
u32)(
unsigned long)reply;
734 for (i = 0; i <
nr; i++) {
740 spin_unlock_irqrestore(pHba->
host->host_lock, flags);
744 "ioctl commands\n", pHba->
name);
755 static void *adpt_ioctl_from_context(
adpt_hba *pHba,
u32 context)
757 #if BITS_PER_LONG == 32
758 return (
void *)(
unsigned long)context;
772 static int adpt_abort(
struct scsi_cmnd * cmd)
784 if ((dptdevice = (
void*) (cmd->
device->hostdata)) ==
NULL) {
789 memset(msg, 0,
sizeof(msg));
794 msg[4] = adpt_cmd_to_context(cmd);
796 spin_lock_irq(pHba->
host->host_lock);
797 rcode = adpt_i2o_post_wait(pHba, msg,
sizeof(msg),
FOREVER);
799 spin_unlock_irq(pHba->
host->host_lock);
813 #define I2O_DEVICE_RESET 0x27
817 static int adpt_device_reset(
struct scsi_cmnd* cmd)
825 pHba = (
void*) cmd->
device->host->hostdata[0];
831 memset(msg, 0,
sizeof(msg));
838 spin_lock_irq(pHba->
host->host_lock);
839 old_state = d->
state;
841 rcode = adpt_i2o_post_wait(pHba, msg,
sizeof(msg),
FOREVER);
842 d->
state = old_state;
844 spin_unlock_irq(pHba->
host->host_lock);
859 #define I2O_HBA_BUS_RESET 0x87
861 static int adpt_bus_reset(
struct scsi_cmnd* cmd)
868 memset(msg, 0,
sizeof(msg));
875 spin_lock_irq(pHba->
host->host_lock);
876 rcode = adpt_i2o_post_wait(pHba, msg,
sizeof(msg),
FOREVER);
878 spin_unlock_irq(pHba->
host->host_lock);
889 static int __adpt_reset(
struct scsi_cmnd* cmd)
895 rcode = adpt_hba_reset(pHba);
905 static int adpt_reset(
struct scsi_cmnd* cmd)
909 spin_lock_irq(cmd->
device->host->host_lock);
910 rc = __adpt_reset(cmd);
911 spin_unlock_irq(cmd->
device->host->host_lock);
917 static int adpt_hba_reset(
adpt_hba* pHba)
924 if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
926 adpt_i2o_delete_hba(pHba);
930 if ((rcode=adpt_i2o_build_sys_table()) < 0) {
931 adpt_i2o_delete_hba(pHba);
936 if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
937 adpt_i2o_delete_hba(pHba);
940 PDEBUG(
"%s: in OPERATIONAL state\n",pHba->
name);
942 if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
943 adpt_i2o_delete_hba(pHba);
947 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
948 adpt_i2o_delete_hba(pHba);
953 adpt_fail_posted_scbs(pHba);
963 static void adpt_i2o_sys_shutdown(
void)
969 printk(
KERN_INFO" This could take a few minutes if there are many devices attached\n");
974 for (pHba = hba_chain; pHba; pHba = pNext) {
976 adpt_i2o_delete_hba(pHba);
984 for(p1 = adpt_post_wait_queue;
p1;) {
990 adpt_post_wait_queue =
NULL;
1000 ulong base_addr0_phys = 0;
1001 ulong base_addr1_phys = 0;
1002 u32 hba_map0_area_size = 0;
1003 u32 hba_map1_area_size = 0;
1008 int raptorFlag =
FALSE;
1015 PERROR(
"dpti: adpt_config_hba: pci request region failed\n");
1029 if (!dma64 && pci_set_dma_mask(pDev,
DMA_BIT_MASK(32)) != 0)
1042 hba_map0_area_size = 0x400000;
1044 if(hba_map0_area_size > 0x100000 ){
1045 hba_map0_area_size = 0x100000;
1055 #if BITS_PER_LONG == 64
1063 if (raptorFlag ==
TRUE) {
1064 if (hba_map0_area_size > 128)
1065 hba_map0_area_size = 128;
1066 if (hba_map1_area_size > 524288)
1067 hba_map1_area_size = 524288;
1069 if (hba_map0_area_size > 524288)
1070 hba_map0_area_size = 524288;
1074 base_addr_virt =
ioremap(base_addr0_phys,hba_map0_area_size);
1075 if (!base_addr_virt) {
1077 PERROR(
"dpti: adpt_config_hba: io remap failed\n");
1081 if(raptorFlag ==
TRUE) {
1082 msg_addr_virt =
ioremap(base_addr1_phys, hba_map1_area_size );
1083 if (!msg_addr_virt) {
1084 PERROR(
"dpti: adpt_config_hba: io remap failed on BAR1\n");
1090 msg_addr_virt = base_addr_virt;
1096 if (msg_addr_virt != base_addr_virt)
1105 if(hba_chain !=
NULL){
1106 for(p = hba_chain; p->
next; p = p->
next);
1112 pHba->
unit = hba_count;
1124 pHba->
irq_mask = base_addr_virt+0x30;
1136 pHba->
dma64 = dma64;
1142 if(raptorFlag == 0){
1144 " %d at %p size=%x irq=%d%s\n",
1145 hba_count-1, base_addr_virt,
1146 hba_map0_area_size, pDev->
irq,
1147 dma64 ?
" (64-bit DMA)" :
"");
1150 hba_count-1, pDev->
irq,
1151 dma64 ?
" (64-bit DMA)" :
"");
1152 printk(
KERN_INFO" BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
1153 printk(
KERN_INFO" BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
1158 adpt_i2o_delete_hba(pHba);
1166 static void adpt_i2o_delete_hba(
adpt_hba* pHba)
1185 for( p1 = hba_chain;
p1; p2 =
p1,p1=p1->
next){
1188 p2->
next = p1->next;
1190 hba_chain = p1->
next;
1225 for(d = pHba->
devices; d ; d = next){
1230 for(j = 0; j <
MAX_ID; j++){
1232 for(pDev = pHba->
channel[i].device[j]; pDev; pDev = pNext){
1240 if (adpt_sysfs_class)
1247 if (adpt_sysfs_class) {
1249 adpt_sysfs_class =
NULL;
1262 printk(
KERN_DEBUG"Adaptec I2O RAID: Trying to find device before they are allocated\n");
1267 if(!d || d->
tid == 0) {
1286 static int adpt_i2o_post_wait(
adpt_hba* pHba,
u32* msg,
int len,
int timeout)
1291 ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
1309 wait_data->
next = adpt_post_wait_queue;
1310 adpt_post_wait_queue = wait_data;
1311 adpt_post_wait_id++;
1312 adpt_post_wait_id &= 0x7fff;
1313 wait_data->
id = adpt_post_wait_id;
1314 spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1316 wait_data->
wq = &adpt_wq_i2o_post;
1321 msg[2] |= 0x80000000 | ((
u32)wait_data->
id);
1323 if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
1326 spin_unlock_irq(pHba->
host->host_lock);
1339 spin_lock_irq(pHba->
host->host_lock);
1352 for(p1 = adpt_post_wait_queue;
p1; p2 =
p1, p1 = p1->
next) {
1353 if(p1 == wait_data) {
1358 p2->
next = p1->next;
1360 adpt_post_wait_queue = p1->
next;
1365 spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
1404 static void adpt_i2o_post_wait_complete(
u32 context,
int status)
1422 spin_lock(&adpt_post_wait_lock);
1423 for(p1 = adpt_post_wait_queue;
p1; p1 = p1->
next) {
1424 if(p1->
id == context) {
1426 spin_unlock(&adpt_post_wait_lock);
1431 spin_unlock(&adpt_post_wait_lock);
1435 for(p1 = adpt_post_wait_queue;
p1; p1 = p1->
next) {
1452 adpt_i2o_quiesce_hba(pHba);
1469 if(status ==
NULL) {
1470 adpt_send_nop(pHba, m);
1482 msg[6]=dma_low(addr);
1483 msg[7]=dma_high(addr);
1490 while(*status == 0){
1503 if(*status == 0x01 ) {
1504 PDEBUG(
"%s: Reset in progress...\n", pHba->
name);
1524 adpt_send_nop(pHba, m);
1526 adpt_i2o_status_get(pHba);
1527 if(*status == 0x02 ||
1532 PDEBUG(
"%s: Reset completed.\n", pHba->
name);
1545 static int adpt_i2o_parse_lct(
adpt_hba* pHba)
1567 for(i=0;i<
max;i++) {
1583 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
1586 bus_no = buf[0]>>16;
1588 scsi_lun = (buf[2]>>8 )&0xff;
1589 if(bus_no >= MAX_CHANNEL) {
1593 if (scsi_id >= MAX_ID){
1615 d->controller = pHba;
1622 adpt_i2o_report_hba_unit(pHba, d);
1623 adpt_i2o_install_device(pHba, d);
1626 for(d = pHba->
devices; d ; d = d->next) {
1637 if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
1639 pHba->
channel[bus_no].scsi_id = buf[1];
1640 PDEBUG(
"Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
1644 if(bus_no >= MAX_CHANNEL) {
1652 for(d = pHba->
devices; d ; d = d->next) {
1660 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
1661 bus_no = buf[0]>>16;
1663 scsi_lun = (buf[2]>>8 )&0xff;
1664 if(bus_no >= MAX_CHANNEL) {
1667 if (scsi_id >= MAX_ID) {
1670 if( pHba->
channel[bus_no].device[scsi_id] ==
NULL){
1677 for( pDev = pHba->
channel[bus_no].device[scsi_id];
1692 pDev->
type = (buf[0])&0xff;
1693 pDev->
flags = (buf[0]>>8)&0xff;
1741 minor = iminor(inode);
1742 if (minor >= hba_count) {
1747 for (pHba = hba_chain; pHba; pHba = pHba->
next) {
1748 if (pHba->
unit == minor) {
1770 static int adpt_close(
struct inode *inode,
struct file *file)
1775 minor = iminor(inode);
1776 if (minor >= hba_count) {
1780 for (pHba = hba_chain; pHba; pHba = pHba->
next) {
1781 if (pHba->
unit == minor) {
1821 user_reply = &user_msg[
size];
1831 get_user(reply_size, &user_reply[0]);
1832 reply_size = reply_size>>16;
1842 sg_offset = (msg[0]>>4)&0xf;
1843 msg[2] = 0x40000000;
1844 msg[3] = adpt_ioctl_to_context(pHba, reply);
1845 if (msg[3] == (
u32)-1)
1871 printk(
KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
1872 pHba->
name,sg_size,i,sg_count);
1876 sg_list[sg_index++] =
p;
1899 rcode = adpt_i2o_post_wait(pHba, msg, size,
FOREVER);
1901 printk(
"adpt_i2o_passthru: post wait failed %d %p\n",
1905 spin_unlock_irqrestore(pHba->
host->host_lock, flags);
1975 if(sg_list[--sg_index]) {
1986 #if defined __ia64__
1987 static void adpt_ia64_info(
sysInfo_S* si)
1996 #if defined __sparc__
1997 static void adpt_sparc_info(
sysInfo_S* si)
2005 #if defined __alpha__
2006 static void adpt_alpha_info(
sysInfo_S* si)
2015 #if defined __i386__
2016 static void adpt_i386_info(
sysInfo_S* si)
2044 static int adpt_system_info(
void __user *buffer)
2048 memset(&si, 0,
sizeof(si));
2057 #if defined __i386__
2058 adpt_i386_info(&si);
2059 #elif defined (__ia64__)
2060 adpt_ia64_info(&si);
2061 #elif defined(__sparc__)
2062 adpt_sparc_info(&si);
2063 #elif defined (__alpha__)
2064 adpt_alpha_info(&si);
2076 static int adpt_ioctl(
struct inode *inode,
struct file *file,
uint cmd,
ulong arg)
2084 minor = iminor(inode);
2089 for (pHba = hba_chain; pHba; pHba = pHba->
next) {
2090 if (pHba->
unit == minor) {
2110 return adpt_i2o_passthru(pHba, argp);
2115 #define FLG_OSD_PCI_VALID 0x0001
2116 #define FLG_OSD_DMA 0x0002
2117 #define FLG_OSD_I2O 0x0004
2118 memset(&HbaInfo, 0,
sizeof(HbaInfo));
2121 HbaInfo.
blinkState = adpt_read_blink_led(pHba);
2133 return adpt_system_info(argp);
2136 value = (
u32)adpt_read_blink_led(pHba);
2145 adpt_hba_reset(pHba);
2147 spin_unlock_irqrestore(pHba->
host->host_lock, flags);
2159 static long adpt_unlocked_ioctl(
struct file *file,
uint cmd,
ulong arg)
2161 struct inode *
inode;
2164 inode = file->f_dentry->d_inode;
2167 ret = adpt_ioctl(inode, file, cmd, arg);
2173 #ifdef CONFIG_COMPAT
2174 static long compat_adpt_ioctl(
struct file *file,
2175 unsigned int cmd,
unsigned long arg)
2177 struct inode *
inode;
2180 inode = file->f_dentry->d_inode;
2194 ret = adpt_ioctl(inode, file, cmd, arg);
2237 m < pHba->reply_pool_pa +
2252 if(old_m >= 0x100000){
2259 old_context =
readl(msg+12);
2260 writel(old_context, reply+12);
2261 adpt_send_nop(pHba, old_m);
2263 context =
readl(reply+8);
2264 if(context & 0x40000000){
2265 void *p = adpt_ioctl_from_context(pHba,
readl(reply+12));
2271 if(context & 0x80000000){
2272 status =
readl(reply+16);
2278 if(!(context & 0x40000000)) {
2279 cmd = adpt_cmd_from_context(pHba,
2282 printk(
KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->
name, cmd, context);
2285 adpt_i2o_post_wait_complete(context, status);
2287 cmd = adpt_cmd_from_context (pHba,
readl(reply+12));
2291 adpt_i2o_to_scsi(reply, cmd);
2301 spin_unlock_irqrestore(pHba->
host->host_lock, flags);
2320 memset(msg, 0 ,
sizeof(msg));
2321 len = scsi_bufflen(cmd);
2322 direction = 0x00000000;
2324 scsidir = 0x00000000;
2334 scsidir =0x40000000;
2337 direction=0x04000000;
2338 scsidir =0x80000000;
2343 scsidir =0x40000000;
2358 msg[3] = adpt_cmd_to_context(cmd);
2367 msg[6] = scsidir|0x20a00000|cmd->
cmd_len;
2376 if (dpt_dma64(pHba)) {
2378 *mptr++ = (0x7C<<24)+(2<<16)+0x02;
2393 *mptr++ = direction|0x10000000|
sg_dma_len(sg);
2396 *mptr++ = dma_low(addr);
2397 if (dpt_dma64(pHba))
2398 *mptr++ = dma_high(addr);
2403 reqlen = mptr -
msg;
2419 rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2433 printk(
"%s: scsi_host_alloc returned NULL\n", pHba->
name);
2436 host->hostdata[0] = (
unsigned long)pHba;
2463 u32 reply_flags =
readl(reply) & 0xff00;
2467 u16 detailed_status =
readl(reply+16) &0xffff;
2468 dev_status = (detailed_status & 0xff);
2469 hba_status = detailed_status >> 8;
2472 scsi_set_resid(cmd, scsi_bufflen(cmd) -
readl(reply+20));
2497 printk(
KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2537 printk(
KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2539 hba_status, dev_status, cmd->
cmnd[0]);
2554 printk(
KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2556 hba_status, dev_status, cmd->
cmnd[0]);
2587 if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2589 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2593 spin_unlock_irqrestore(pHba->
host->host_lock, flags);
2622 for (d = pHba->
devices; d; d = d->next) {
2632 for(i=0;i<
max;i++) {
2641 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2645 bus_no = buf[0]>>16;
2646 if (bus_no >= MAX_CHANNEL) {
2648 "%s: Channel number %d out of range\n",
2649 pHba->
name, bus_no);
2654 scsi_lun = (buf[2]>>8 )&0xff;
2672 d->controller = pHba;
2678 adpt_i2o_report_hba_unit(pHba, d);
2679 adpt_i2o_install_device(pHba, d);
2707 pDev->
type = (buf[0])&0xff;
2708 pDev->
flags = (buf[0]>>8)&0xff;
2722 if(!scsi_device_online(pDev->
pScsi_dev)) {
2724 pHba->
name,bus_no,scsi_id,scsi_lun);
2746 for (pI2o_dev = pHba->
devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2764 static void adpt_fail_posted_scbs(
adpt_hba* pHba)
2770 unsigned long flags;
2779 spin_unlock_irqrestore(&d->
list_lock, flags);
2794 static int adpt_i2o_activate_hba(
adpt_hba* pHba)
2799 if (adpt_i2o_status_get(pHba) < 0) {
2800 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2804 if (adpt_i2o_status_get(pHba) < 0) {
2819 adpt_i2o_reset_hba(pHba);
2826 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2833 if (adpt_i2o_init_outbound_q(pHba) < 0) {
2839 if (adpt_i2o_hrt_get(pHba) < 0) {
2850 static int adpt_i2o_online_hba(
adpt_hba* pHba)
2852 if (adpt_i2o_systab_send(pHba) < 0) {
2853 adpt_i2o_delete_hba(pHba);
2858 if (adpt_i2o_enable_hba(pHba) < 0) {
2859 adpt_i2o_delete_hba(pHba);
2895 static s32 adpt_i2o_init_outbound_q(
adpt_hba* pHba)
2922 adpt_send_nop(pHba, m);
2935 writel(0xD0000004, &msg[6]);
2944 if (*status != 0x01 ) {
2962 if(*status != 0x04 ) {
2988 adpt_i2o_status_get(pHba);
3017 "dpti%d: Get Status Block failed; Out of memory. \n",
3056 while(status_block[87]!=0xff){
3078 if (dpt_dma64(pHba)) {
3128 static int adpt_i2o_lct_get(
adpt_hba* pHba)
3154 msg[4] = 0xFFFFFFFF;
3155 msg[5] = 0x00000000;
3156 msg[6] = 0xD0000000|pHba->
lct_size;
3159 if ((ret=adpt_i2o_post_wait(pHba, msg,
sizeof(msg), 360))) {
3174 PDEBUG(
"%s: Hardware resource table read.\n", pHba->
name);
3178 if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf,
sizeof(buf))>=0) {
3198 static int adpt_i2o_build_sys_table(
void)
3205 sys_tbl, sys_tbl_pa);
3216 memset(sys_tbl, 0, sys_tbl_len);
3222 for(pHba = hba_chain; pHba; pHba = pHba->
next) {
3225 if (adpt_i2o_status_get(pHba)) {
3237 sys_tbl->
iops[
count].last_changed = sys_tbl_ind - 1;
3240 sys_tbl->
iops[
count].inbound_low = dma_low(addr);
3241 sys_tbl->
iops[
count].inbound_high = dma_high(addr);
3250 for(count = 0; count < (sys_tbl_len >>2); count++) {
3252 count, table[count]);
3272 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
3275 printk(
" Vendor: %-12.12s", buf);
3277 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
3280 printk(
" Device: %-12.12s", buf);
3282 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
3285 printk(
" Rev: %-12.12s\n", buf);
3308 static const char *adpt_i2o_get_class_name(
int class)
3311 static char *i2o_class_name[] = {
3313 "Device Driver Module",
3318 "Fibre Channel Port",
3319 "Fibre Channel Device",
3323 "Floppy Controller",
3325 "Secondary Bus Port",
3326 "Peer Transport Agent",
3331 switch(
class&0xFFF) {
3358 case I2O_CLASS_BUS_ADAPTER_PORT:
3365 return i2o_class_name[
idx];
3389 msg[4]= (0xD0000000 |
size);
3392 if ((ret = adpt_i2o_post_wait(pHba, msg,
sizeof(msg),20))) {
3411 static int adpt_i2o_query_scalar(
adpt_hba* pHba,
int tid,
3425 if (resblk_va ==
NULL) {
3432 if (opblk_va ==
NULL) {
3434 resblk_va, resblk_pa);
3442 memcpy(opblk_va, opblk,
sizeof(opblk));
3444 opblk_va, opblk_pa,
sizeof(opblk),
3445 resblk_va, resblk_pa,
sizeof(
u8)*(8+buflen));
3447 if (size == -
ETIME) {
3449 resblk_va, resblk_pa);
3452 }
else if (size == -
EINTR) {
3454 resblk_va, resblk_pa);
3459 memcpy(buf, resblk_va+8, buflen);
3462 resblk_va, resblk_pa);
3478 static int adpt_i2o_issue_params(
int cmd,
adpt_hba* pHba,
int tid,
3479 void *opblk_va,
dma_addr_t opblk_pa,
int oplen,
3480 void *resblk_va,
dma_addr_t resblk_pa,
int reslen)
3487 msg[1] = cmd << 24 |
HOST_TID << 12 | tid;
3491 msg[5] = 0x54000000 | oplen;
3492 msg[6] = (
u32)opblk_pa;
3493 msg[7] = 0xD0000000 | reslen;
3494 msg[8] = (
u32)resblk_pa;
3496 if ((wait_status = adpt_i2o_post_wait(pHba, msg,
sizeof(msg), 20))) {
3497 printk(
"adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
3501 if (res[1]&0x00FF0000) {
3503 "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3507 res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3508 return -((res[1] >> 16) & 0xFF);
3511 return 4 + ((res[1] & 0x0000FFFF) << 2);
3520 adpt_i2o_status_get(pHba);
3534 if((ret = adpt_i2o_post_wait(pHba, msg,
sizeof(msg), 240))) {
3541 adpt_i2o_status_get(pHba);
3549 static int adpt_i2o_enable_hba(
adpt_hba* pHba)
3554 adpt_i2o_status_get(pHba);
3570 if ((ret = adpt_i2o_post_wait(pHba, msg,
sizeof(msg), 240))) {
3577 adpt_i2o_status_get(pHba);
3582 static int adpt_i2o_systab_send(
adpt_hba* pHba)
3591 msg[4] = (0<<16) | ((pHba->
unit+2) << 12);
3599 msg[6] = 0x54000000 | sys_tbl_len;
3600 msg[7] = (
u32)sys_tbl_pa;
3601 msg[8] = 0x54000000 | 0;
3603 msg[10] = 0xD4000000 | 0;
3606 if ((ret=adpt_i2o_post_wait(pHba, msg,
sizeof(msg), 120))) {
/*
 * adpt_delay - busy-wait for roughly @millisec milliseconds.
 *
 * Spins in udelay(1000) steps rather than sleeping, so it is safe to call
 * from contexts that may not schedule (e.g. during controller reset).
 *
 * Fix: declaration read "static static void" - a duplicated storage-class
 * specifier, which is a constraint violation in C; reduced to one "static".
 * NOTE(review): the loop body was missing from the extracted fragment;
 * udelay(1000) per iteration is the conventional form of this helper -
 * confirm against the full source.
 */
static void adpt_delay(int millisec)
{
	int i;

	for (i = 0; i < millisec; i++) {
		udelay(1000);	/* delay for one millisecond */
	}
}
3641 .proc_name =
"dpt_i2o",
3642 .proc_info = adpt_proc_info,
3644 .queuecommand = adpt_queue,
3645 .eh_abort_handler = adpt_abort,
3646 .eh_device_reset_handler = adpt_device_reset,
3647 .eh_bus_reset_handler = adpt_bus_reset,
3648 .eh_host_reset_handler = adpt_reset,
3649 .bios_param = adpt_bios_param,
3650 .slave_configure = adpt_slave_configure,
3657 static int __init adpt_init(
void)
3664 error = adpt_detect(&driver_template);
3667 if (hba_chain ==
NULL)
3670 for (pHba = hba_chain; pHba; pHba = pHba->
next) {
3671 error = scsi_add_host(pHba->
host, &pHba->
pDev->dev);
3678 for (pHba = hba_chain; pHba; pHba =
next) {
3685 static void __exit adpt_exit(
void)
3689 for (pHba = hba_chain; pHba; pHba = pHba->
next)
3691 for (pHba = hba_chain; pHba; pHba =
next) {
3693 adpt_release(pHba->
host);